From c909950c384e8234a7b3c5a76b7f79e3f7012ceb Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 20 Apr 2012 06:31:50 +0000 Subject: Convert some uses of XXXRegisterClass to &XXXRegClass. No functional change since they are equivalent. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155186 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 214 ++++++++++++++++++------------------- 1 file changed, 107 insertions(+), 107 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 04299f3..48ec8a2 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -215,11 +215,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } // Set up the register classes. - addRegisterClass(MVT::i8, X86::GR8RegisterClass); - addRegisterClass(MVT::i16, X86::GR16RegisterClass); - addRegisterClass(MVT::i32, X86::GR32RegisterClass); + addRegisterClass(MVT::i8, &X86::GR8RegClass); + addRegisterClass(MVT::i16, &X86::GR16RegClass); + addRegisterClass(MVT::i32, &X86::GR32RegClass); if (Subtarget->is64Bit()) - addRegisterClass(MVT::i64, X86::GR64RegisterClass); + addRegisterClass(MVT::i64, &X86::GR64RegClass); setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); @@ -567,8 +567,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { // f32 and f64 use SSE. // Set up the FP register classes. - addRegisterClass(MVT::f32, X86::FR32RegisterClass); - addRegisterClass(MVT::f64, X86::FR64RegisterClass); + addRegisterClass(MVT::f32, &X86::FR32RegClass); + addRegisterClass(MVT::f64, &X86::FR64RegClass); // Use ANDPD to simulate FABS. setOperationAction(ISD::FABS , MVT::f64, Custom); @@ -599,8 +599,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { // Use SSE for f32, x87 for f64. // Set up the FP register classes. - addRegisterClass(MVT::f32, X86::FR32RegisterClass); - addRegisterClass(MVT::f64, X86::RFP64RegisterClass); + addRegisterClass(MVT::f32, &X86::FR32RegClass); + addRegisterClass(MVT::f64, &X86::RFP64RegClass); // Use ANDPS to simulate FABS. setOperationAction(ISD::FABS , MVT::f32, Custom); @@ -632,8 +632,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } else if (!TM.Options.UseSoftFloat) { // f32 and f64 in x87. // Set up the FP register classes. - addRegisterClass(MVT::f64, X86::RFP64RegisterClass); - addRegisterClass(MVT::f32, X86::RFP32RegisterClass); + addRegisterClass(MVT::f64, &X86::RFP64RegClass); + addRegisterClass(MVT::f32, &X86::RFP32RegClass); setOperationAction(ISD::UNDEF, MVT::f64, Expand); setOperationAction(ISD::UNDEF, MVT::f32, Expand); @@ -660,7 +660,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // Long double always uses X87. if (!TM.Options.UseSoftFloat) { - addRegisterClass(MVT::f80, X86::RFP80RegisterClass); + addRegisterClass(MVT::f80, &X86::RFP80RegClass); setOperationAction(ISD::UNDEF, MVT::f80, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); { @@ -776,7 +776,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // FIXME: In order to prevent SSE instructions being expanded to MMX ones // with -msoft-float, disable use of MMX as well. 
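// [Illustrative aside, not part of the original patch] The entire commit is
// one mechanical substitution. Assuming the TableGen-emitted register info of
// this era, X86::GR32RegisterClass was a pointer referring to the same
// singleton object that X86::GR32RegClass names directly, so the two
// spellings yield the same pointer value:
//
//   addRegisterClass(MVT::i32, X86::GR32RegisterClass);  // old spelling
//   addRegisterClass(MVT::i32, &X86::GR32RegClass);      // equivalent new spelling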
if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { - addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass); + addRegisterClass(MVT::x86mmx, &X86::VR64RegClass); // No operations on x86mmx supported, everything uses intrinsics. } @@ -813,7 +813,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) { - addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); + addRegisterClass(MVT::v4f32, &X86::VR128RegClass); setOperationAction(ISD::FADD, MVT::v4f32, Legal); setOperationAction(ISD::FSUB, MVT::v4f32, Legal); @@ -830,14 +830,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) { - addRegisterClass(MVT::v2f64, X86::VR128RegisterClass); + addRegisterClass(MVT::v2f64, &X86::VR128RegClass); // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM // registers cannot be used even for integer operations. - addRegisterClass(MVT::v16i8, X86::VR128RegisterClass); - addRegisterClass(MVT::v8i16, X86::VR128RegisterClass); - addRegisterClass(MVT::v4i32, X86::VR128RegisterClass); - addRegisterClass(MVT::v2i64, X86::VR128RegisterClass); + addRegisterClass(MVT::v16i8, &X86::VR128RegClass); + addRegisterClass(MVT::v8i16, &X86::VR128RegClass); + addRegisterClass(MVT::v4i32, &X86::VR128RegClass); + addRegisterClass(MVT::v2i64, &X86::VR128RegClass); setOperationAction(ISD::ADD, MVT::v16i8, Legal); setOperationAction(ISD::ADD, MVT::v8i16, Legal); @@ -1011,12 +1011,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::SETCC, MVT::v2i64, Custom); if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) { - addRegisterClass(MVT::v32i8, X86::VR256RegisterClass); - addRegisterClass(MVT::v16i16, X86::VR256RegisterClass); - addRegisterClass(MVT::v8i32, X86::VR256RegisterClass); - addRegisterClass(MVT::v8f32, X86::VR256RegisterClass); - addRegisterClass(MVT::v4i64, X86::VR256RegisterClass); - addRegisterClass(MVT::v4f64, X86::VR256RegisterClass); + addRegisterClass(MVT::v32i8, &X86::VR256RegClass); + addRegisterClass(MVT::v16i16, &X86::VR256RegClass); + addRegisterClass(MVT::v8i32, &X86::VR256RegClass); + addRegisterClass(MVT::v8f32, &X86::VR256RegClass); + addRegisterClass(MVT::v4i64, &X86::VR256RegClass); + addRegisterClass(MVT::v4f64, &X86::VR256RegClass); setOperationAction(ISD::LOAD, MVT::v8f32, Legal); setOperationAction(ISD::LOAD, MVT::v4f64, Legal); @@ -1411,18 +1411,19 @@ X86TargetLowering::findRepresentativeClass(EVT VT) const{ default: return TargetLowering::findRepresentativeClass(VT); case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: - RRC = (Subtarget->is64Bit() - ? X86::GR64RegisterClass : X86::GR32RegisterClass); + RRC = Subtarget->is64Bit() ? 
+ (const TargetRegisterClass*)&X86::GR64RegClass : + (const TargetRegisterClass*)&X86::GR32RegClass; break; case MVT::x86mmx: - RRC = X86::VR64RegisterClass; + RRC = &X86::VR64RegClass; break; case MVT::f32: case MVT::f64: case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: case MVT::v4f32: case MVT::v2f64: case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: case MVT::v4f64: - RRC = X86::VR128RegisterClass; + RRC = &X86::VR128RegClass; break; } return std::make_pair(RRC, Cost); @@ -1850,19 +1851,19 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, EVT RegVT = VA.getLocVT(); const TargetRegisterClass *RC; if (RegVT == MVT::i32) - RC = X86::GR32RegisterClass; + RC = &X86::GR32RegClass; else if (Is64Bit && RegVT == MVT::i64) - RC = X86::GR64RegisterClass; + RC = &X86::GR64RegClass; else if (RegVT == MVT::f32) - RC = X86::FR32RegisterClass; + RC = &X86::FR32RegClass; else if (RegVT == MVT::f64) - RC = X86::FR64RegisterClass; + RC = &X86::FR64RegClass; else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) - RC = X86::VR256RegisterClass; + RC = &X86::VR256RegClass; else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) - RC = X86::VR128RegisterClass; + RC = &X86::VR128RegClass; else if (RegVT == MVT::x86mmx) - RC = X86::VR64RegisterClass; + RC = &X86::VR64RegClass; else llvm_unreachable("Unknown argument type!"); @@ -2004,7 +2005,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, DAG.getIntPtrConstant(Offset)); unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], - X86::GR64RegisterClass); + &X86::GR64RegClass); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, @@ -2020,7 +2021,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, SmallVector SaveXMMOps; SaveXMMOps.push_back(Chain); - unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass); + unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); SaveXMMOps.push_back(ALVal); @@ -2031,7 +2032,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], - X86::VR128RegisterClass); + &X86::VR128RegClass); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); SaveXMMOps.push_back(Val); } @@ -11460,7 +11461,7 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, // result in out1, out2 // fallthrough -->nextMBB - const TargetRegisterClass *RC = X86::GR32RegisterClass; + const TargetRegisterClass *RC = &X86::GR32RegClass; const unsigned LoadOpc = X86::MOV32rm; const unsigned NotOpc = X86::NOT32r; const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); @@ -11662,7 +11663,7 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] int valArgIndx = lastAddrIndx + 1; - unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); + unsigned t1 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass); MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1); for (int i=0; i <= lastAddrIndx; ++i) (*MIB).addOperand(*argOpers[i]); @@ -11672,7 +11673,7 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, argOpers[valArgIndx]->isImm()) && "invalid operand"); - unsigned t2 = 
F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); + unsigned t2 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass); if (argOpers[valArgIndx]->isReg()) MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2); else @@ -11687,7 +11688,7 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, MIB.addReg(t2); // Generate movc - unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); + unsigned t3 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass); MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3); MIB.addReg(t2); MIB.addReg(t1); @@ -12517,7 +12518,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Load the old value of the high byte of the control word... unsigned OldCW = - F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); + F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); @@ -12605,25 +12606,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, X86::AND32ri, X86::MOV32rm, X86::LCMPXCHG32, X86::NOT32r, X86::EAX, - X86::GR32RegisterClass); + &X86::GR32RegClass); case X86::ATOMOR32: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, X86::OR32ri, X86::MOV32rm, X86::LCMPXCHG32, X86::NOT32r, X86::EAX, - X86::GR32RegisterClass); + &X86::GR32RegClass); case X86::ATOMXOR32: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, X86::XOR32ri, X86::MOV32rm, X86::LCMPXCHG32, X86::NOT32r, X86::EAX, - X86::GR32RegisterClass); + &X86::GR32RegClass); case X86::ATOMNAND32: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, X86::AND32ri, X86::MOV32rm, X86::LCMPXCHG32, X86::NOT32r, X86::EAX, - X86::GR32RegisterClass, true); + &X86::GR32RegClass, true); case X86::ATOMMIN32: return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); case X86::ATOMMAX32: @@ -12638,25 +12639,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, X86::AND16ri, X86::MOV16rm, X86::LCMPXCHG16, X86::NOT16r, X86::AX, - X86::GR16RegisterClass); + &X86::GR16RegClass); case X86::ATOMOR16: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, X86::OR16ri, X86::MOV16rm, X86::LCMPXCHG16, X86::NOT16r, X86::AX, - X86::GR16RegisterClass); + &X86::GR16RegClass); case X86::ATOMXOR16: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr, X86::XOR16ri, X86::MOV16rm, X86::LCMPXCHG16, X86::NOT16r, X86::AX, - X86::GR16RegisterClass); + &X86::GR16RegClass); case X86::ATOMNAND16: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, X86::AND16ri, X86::MOV16rm, X86::LCMPXCHG16, X86::NOT16r, X86::AX, - X86::GR16RegisterClass, true); + &X86::GR16RegClass, true); case X86::ATOMMIN16: return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr); case X86::ATOMMAX16: @@ -12671,25 +12672,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, X86::AND8ri, X86::MOV8rm, X86::LCMPXCHG8, X86::NOT8r, X86::AL, - X86::GR8RegisterClass); + &X86::GR8RegClass); case X86::ATOMOR8: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, X86::OR8ri, X86::MOV8rm, X86::LCMPXCHG8, X86::NOT8r, X86::AL, - X86::GR8RegisterClass); + &X86::GR8RegClass); case X86::ATOMXOR8: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr, X86::XOR8ri, X86::MOV8rm, X86::LCMPXCHG8, X86::NOT8r, X86::AL, - X86::GR8RegisterClass); + &X86::GR8RegClass); case X86::ATOMNAND8: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, X86::AND8ri, 
X86::MOV8rm, X86::LCMPXCHG8, X86::NOT8r, X86::AL, - X86::GR8RegisterClass, true); + &X86::GR8RegClass, true); // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way. // This group is for 64-bit host. case X86::ATOMAND64: @@ -12697,25 +12698,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, X86::AND64ri32, X86::MOV64rm, X86::LCMPXCHG64, X86::NOT64r, X86::RAX, - X86::GR64RegisterClass); + &X86::GR64RegClass); case X86::ATOMOR64: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, X86::OR64ri32, X86::MOV64rm, X86::LCMPXCHG64, X86::NOT64r, X86::RAX, - X86::GR64RegisterClass); + &X86::GR64RegClass); case X86::ATOMXOR64: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr, X86::XOR64ri32, X86::MOV64rm, X86::LCMPXCHG64, X86::NOT64r, X86::RAX, - X86::GR64RegisterClass); + &X86::GR64RegClass); case X86::ATOMNAND64: return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, X86::AND64ri32, X86::MOV64rm, X86::LCMPXCHG64, X86::NOT64r, X86::RAX, - X86::GR64RegisterClass, true); + &X86::GR64RegClass, true); case X86::ATOMMIN64: return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr); case X86::ATOMMAX64: @@ -15652,55 +15653,55 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, // in the normal allocation? case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode. if (Subtarget->is64Bit()) { - if (VT == MVT::i32 || VT == MVT::f32) - return std::make_pair(0U, X86::GR32RegisterClass); - else if (VT == MVT::i16) - return std::make_pair(0U, X86::GR16RegisterClass); - else if (VT == MVT::i8 || VT == MVT::i1) - return std::make_pair(0U, X86::GR8RegisterClass); - else if (VT == MVT::i64 || VT == MVT::f64) - return std::make_pair(0U, X86::GR64RegisterClass); - break; + if (VT == MVT::i32 || VT == MVT::f32) + return std::make_pair(0U, &X86::GR32RegClass); + if (VT == MVT::i16) + return std::make_pair(0U, &X86::GR16RegClass); + if (VT == MVT::i8 || VT == MVT::i1) + return std::make_pair(0U, &X86::GR8RegClass); + if (VT == MVT::i64 || VT == MVT::f64) + return std::make_pair(0U, &X86::GR64RegClass); + break; } // 32-bit fallthrough case 'Q': // Q_REGS if (VT == MVT::i32 || VT == MVT::f32) - return std::make_pair(0U, X86::GR32_ABCDRegisterClass); - else if (VT == MVT::i16) - return std::make_pair(0U, X86::GR16_ABCDRegisterClass); - else if (VT == MVT::i8 || VT == MVT::i1) - return std::make_pair(0U, X86::GR8_ABCD_LRegisterClass); - else if (VT == MVT::i64) - return std::make_pair(0U, X86::GR64_ABCDRegisterClass); + return std::make_pair(0U, &X86::GR32_ABCDRegClass); + if (VT == MVT::i16) + return std::make_pair(0U, &X86::GR16_ABCDRegClass); + if (VT == MVT::i8 || VT == MVT::i1) + return std::make_pair(0U, &X86::GR8_ABCD_LRegClass); + if (VT == MVT::i64) + return std::make_pair(0U, &X86::GR64_ABCDRegClass); break; case 'r': // GENERAL_REGS case 'l': // INDEX_REGS if (VT == MVT::i8 || VT == MVT::i1) - return std::make_pair(0U, X86::GR8RegisterClass); + return std::make_pair(0U, &X86::GR8RegClass); if (VT == MVT::i16) - return std::make_pair(0U, X86::GR16RegisterClass); + return std::make_pair(0U, &X86::GR16RegClass); if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit()) - return std::make_pair(0U, X86::GR32RegisterClass); - return std::make_pair(0U, X86::GR64RegisterClass); + return std::make_pair(0U, &X86::GR32RegClass); + return std::make_pair(0U, &X86::GR64RegClass); case 'R': // LEGACY_REGS if (VT == MVT::i8 || VT == MVT::i1) - return std::make_pair(0U, X86::GR8_NOREXRegisterClass); + return 
std::make_pair(0U, &X86::GR8_NOREXRegClass); if (VT == MVT::i16) - return std::make_pair(0U, X86::GR16_NOREXRegisterClass); + return std::make_pair(0U, &X86::GR16_NOREXRegClass); if (VT == MVT::i32 || !Subtarget->is64Bit()) - return std::make_pair(0U, X86::GR32_NOREXRegisterClass); - return std::make_pair(0U, X86::GR64_NOREXRegisterClass); + return std::make_pair(0U, &X86::GR32_NOREXRegClass); + return std::make_pair(0U, &X86::GR64_NOREXRegClass); case 'f': // FP Stack registers. // If SSE is enabled for this VT, use f80 to ensure the isel moves the // value to the correct fpstack register class. if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) - return std::make_pair(0U, X86::RFP32RegisterClass); + return std::make_pair(0U, &X86::RFP32RegClass); if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) - return std::make_pair(0U, X86::RFP64RegisterClass); - return std::make_pair(0U, X86::RFP80RegisterClass); + return std::make_pair(0U, &X86::RFP64RegClass); + return std::make_pair(0U, &X86::RFP80RegClass); case 'y': // MMX_REGS if MMX allowed. if (!Subtarget->hasMMX()) break; - return std::make_pair(0U, X86::VR64RegisterClass); + return std::make_pair(0U, &X86::VR64RegClass); case 'Y': // SSE_REGS if SSE2 allowed if (!Subtarget->hasSSE2()) break; // FALL THROUGH. @@ -15712,10 +15713,10 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, // Scalar SSE types. case MVT::f32: case MVT::i32: - return std::make_pair(0U, X86::FR32RegisterClass); + return std::make_pair(0U, &X86::FR32RegClass); case MVT::f64: case MVT::i64: - return std::make_pair(0U, X86::FR64RegisterClass); + return std::make_pair(0U, &X86::FR64RegClass); // Vector types. case MVT::v16i8: case MVT::v8i16: @@ -15723,7 +15724,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, case MVT::v2i64: case MVT::v4f32: case MVT::v2f64: - return std::make_pair(0U, X86::VR128RegisterClass); + return std::make_pair(0U, &X86::VR128RegClass); // AVX types. case MVT::v32i8: case MVT::v16i16: @@ -15731,8 +15732,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, case MVT::v4i64: case MVT::v8f32: case MVT::v4f64: - return std::make_pair(0U, X86::VR256RegisterClass); - + return std::make_pair(0U, &X86::VR256RegClass); } break; } @@ -15755,28 +15755,28 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, Constraint[6] == '}') { Res.first = X86::ST0+Constraint[4]-'0'; - Res.second = X86::RFP80RegisterClass; + Res.second = &X86::RFP80RegClass; return Res; } // GCC allows "st(0)" to be called just plain "st". if (StringRef("{st}").equals_lower(Constraint)) { Res.first = X86::ST0; - Res.second = X86::RFP80RegisterClass; + Res.second = &X86::RFP80RegClass; return Res; } // flags -> EFLAGS if (StringRef("{flags}").equals_lower(Constraint)) { Res.first = X86::EFLAGS; - Res.second = X86::CCRRegisterClass; + Res.second = &X86::CCRRegClass; return Res; } // 'A' means EAX + EDX. if (Constraint == "A") { Res.first = X86::EAX; - Res.second = X86::GR32_ADRegisterClass; + Res.second = &X86::GR32_ADRegClass; return Res; } return Res; @@ -15792,7 +15792,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we // really want an 8-bit or 32-bit register, map to the appropriate register // class and return the appropriate register. 
- if (Res.second == X86::GR16RegisterClass) { + if (Res.second == &X86::GR16RegClass) { if (VT == MVT::i8) { unsigned DestReg = 0; switch (Res.first) { @@ -15804,7 +15804,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, } if (DestReg) { Res.first = DestReg; - Res.second = X86::GR8RegisterClass; + Res.second = &X86::GR8RegClass; } } else if (VT == MVT::i32) { unsigned DestReg = 0; @@ -15821,7 +15821,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, } if (DestReg) { Res.first = DestReg; - Res.second = X86::GR32RegisterClass; + Res.second = &X86::GR32RegClass; } } else if (VT == MVT::i64) { unsigned DestReg = 0; @@ -15838,22 +15838,22 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, } if (DestReg) { Res.first = DestReg; - Res.second = X86::GR64RegisterClass; + Res.second = &X86::GR64RegClass; } } - } else if (Res.second == X86::FR32RegisterClass || - Res.second == X86::FR64RegisterClass || - Res.second == X86::VR128RegisterClass) { + } else if (Res.second == &X86::FR32RegClass || + Res.second == &X86::FR64RegClass || + Res.second == &X86::VR128RegClass) { // Handle references to XMM physical registers that got mapped into the // wrong class. This can happen with constraints like {xmm0} where the // target independent register mapper will just pick the first match it can // find, ignoring the required type. if (VT == MVT::f32) - Res.second = X86::FR32RegisterClass; + Res.second = &X86::FR32RegClass; else if (VT == MVT::f64) - Res.second = X86::FR64RegisterClass; - else if (X86::VR128RegisterClass->hasType(VT)) - Res.second = X86::VR128RegisterClass; + Res.second = &X86::FR64RegClass; + else if (X86::VR128RegClass.hasType(VT)) + Res.second = &X86::VR128RegClass; } return Res; -- cgit v1.1 From d0cf565e799063555f3a3e2858d2ccf53056a0c4 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sat, 21 Apr 2012 18:13:35 +0000 Subject: Tidy up. 80 columns and some other spacing issues. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155291 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 48ec8a2..ca26f7e 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14888,6 +14888,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, N00.getOperand(0), N00.getOperand(1)), DAG.getConstant(1, VT)); } + // Optimize vectors in AVX mode: // // v8i16 -> v8i32 @@ -14902,15 +14903,17 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, // if (Subtarget->hasAVX()) { - if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || - ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { + if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || + ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl); - SDValue OpLo = getTargetShuffleNode(X86ISD::UNPCKL, dl, OpVT, N0, ZeroVec, DAG); - SDValue OpHi = getTargetShuffleNode(X86ISD::UNPCKH, dl, OpVT, N0, ZeroVec, DAG); + SDValue OpLo = getTargetShuffleNode(X86ISD::UNPCKL, dl, OpVT, N0, ZeroVec, + DAG); + SDValue OpHi = getTargetShuffleNode(X86ISD::UNPCKH, dl, OpVT, N0, ZeroVec, + DAG); - EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), - VT.getVectorNumElements()/2); + EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), + VT.getVectorNumElements()/2); OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); @@ -14919,7 +14922,6 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, } } - return SDValue(); } -- cgit v1.1 From 9e401f22ec4d1fc42c22802fef1479180ca31600 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sat, 21 Apr 2012 18:58:38 +0000 Subject: Make some fixed arrays const. Use array_lengthof in a couple places instead of a hardcoded number. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155294 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index ca26f7e..a03b97f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -162,7 +162,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) TD = getTargetData(); // Set up the TargetLowering object. - static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; + static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; // X86 is weird, it always uses i8 for shift amounts and setcc results. setBooleanContents(ZeroOrOneBooleanContent); @@ -345,7 +345,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // (low) operations are left as Legal, as there are single-result // instructions for this in x86. Using the two-result multiply instructions // when both high and low results are needed must be arranged by dagcombine. 
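// [Illustrative aside, not part of the original patch] The next hunk replaces
// the hardcoded loop bound 4 with array_lengthof(IntVTs), so the loop keeps
// covering the whole array if entries are ever added or removed. A minimal
// sketch of the idiom, assuming array_lengthof from llvm/ADT/STLExtras.h:
//
//   static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
//   for (unsigned i = 0; i != array_lengthof(IntVTs); ++i)
//     setOperationAction(ISD::MULHS, IntVTs[i], Expand);  // one action per VT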
- for (unsigned i = 0, e = 4; i != e; ++i) { + for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { MVT VT = IntVTs[i]; setOperationAction(ISD::MULHS, VT, Expand); setOperationAction(ISD::MULHU, VT, Expand); @@ -492,7 +492,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setShouldFoldAtomicFences(true); // Expand certain atomics - for (unsigned i = 0, e = 4; i != e; ++i) { + for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { MVT VT = IntVTs[i]; setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom); setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); @@ -13043,7 +13043,7 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); // PSHUFD - int ShufMask1[] = {0, 2, 0, 0}; + static const int ShufMask1[] = {0, 2, 0, 0}; OpLo = DAG.getVectorShuffle(VT, dl, OpLo, DAG.getUNDEF(VT), ShufMask1); @@ -13051,7 +13051,7 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, ShufMask1); // MOVLHPS - int ShufMask2[] = {0, 1, 4, 5}; + static const int ShufMask2[] = {0, 1, 4, 5}; return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2); } @@ -13067,8 +13067,8 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi); // PSHUFB - int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13, - -1, -1, -1, -1, -1, -1, -1, -1}; + static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13, + -1, -1, -1, -1, -1, -1, -1, -1}; OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, DAG.getUNDEF(MVT::v16i8), @@ -13081,7 +13081,7 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); // MOVLHPS - int ShufMask2[] = {0, 1, 4, 5}; + static const int ShufMask2[] = {0, 1, 4, 5}; SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2); return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res); -- cgit v1.1 From 1da5867236f4132ec56493f3535c7b5830878b55 Mon Sep 17 00:00:00 2001 From: Elena Demikhovsky Date: Sun, 22 Apr 2012 09:39:03 +0000 Subject: ZERO_EXTEND/SIGN_EXTEND/TRUNCATE optimization for AVX2 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155309 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 75 +++++++++++++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 9 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index a03b97f..5e52b84 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1222,6 +1222,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setTargetDAGCombine(ISD::LOAD); setTargetDAGCombine(ISD::STORE); setTargetDAGCombine(ISD::ZERO_EXTEND); + setTargetDAGCombine(ISD::ANY_EXTEND); setTargetDAGCombine(ISD::SIGN_EXTEND); setTargetDAGCombine(ISD::TRUNCATE); setTargetDAGCombine(ISD::SINT_TO_FP); @@ -13033,6 +13034,20 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) { + if (Subtarget->hasAVX2()) { + // AVX2: v4i64 -> v4i32 + + // VPERMD + static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1}; + + Op = DAG.getNode(ISD::BITCAST, dl, MVT::v8i32, Op); + Op = DAG.getVectorShuffle(MVT::v8i32, dl, Op, DAG.getUNDEF(MVT::v8i32), + ShufMask); + + return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op, DAG.getIntPtrConstant(0)); + } + + // AVX: v4i64 -> v4i32 
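// [Illustrative aside, not part of the original patch] Why the VPERMD mask
// {0, 2, 4, 6, -1, -1, -1, -1} above performs the truncation: on little-endian
// x86, bitcasting v4i64 to v8i32 places the low 32 bits of 64-bit lane L at
// even element 2*L, so gathering elements 0, 2, 4 and 6 collects exactly the
// four truncated values in the low 128 bits; the -1 entries are don't-cares
// and the EXTRACT_SUBVECTOR at index 0 drops the upper half. Scalar model of
// the same selection (assuming little-endian layout):
//
//   uint32_t V8[8];                      // v4i64 viewed as v8i32
//   memcpy(V8, In, sizeof(V8));          // In: const uint64_t In[4]
//   for (int i = 0; i != 4; ++i)
//     Out[i] = V8[2 * i];                // picks elements 0, 2, 4, 6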
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, DAG.getIntPtrConstant(0)); @@ -13057,6 +13072,40 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, } if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) { + if (Subtarget->hasAVX2()) { + // AVX2: v8i32 -> v8i16 + + Op = DAG.getNode(ISD::BITCAST, dl, MVT::v32i8, Op); + // PSHUFB + SmallVector pshufbMask; + for (unsigned i = 0; i < 2; ++i) { + pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8)); + for (unsigned j = 0; j < 8; ++j) + pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); + } + SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8, &pshufbMask[0], + 32); + Op = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, Op, BV); + + Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i64, Op); + + static const int ShufMask[] = {0, 2, -1, -1}; + Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64), + &ShufMask[0]); + + Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, + DAG.getIntPtrConstant(0)); + + return DAG.getNode(ISD::BITCAST, dl, VT, Op); + } + SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, DAG.getIntPtrConstant(0)); @@ -14822,15 +14871,6 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, if (!Subtarget->hasAVX()) return SDValue(); - // Optimize vectors in AVX mode - // Sign extend v8i16 to v8i32 and - // v4i32 to v4i64 - // - // Divide input vector into two parts - // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1} - // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32 - // concat the vectors to original VT - EVT VT = N->getValueType(0); SDValue Op = N->getOperand(0); EVT OpVT = Op.getValueType(); @@ -14839,6 +14879,19 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) || (VT == MVT::v8i32 && OpVT == MVT::v8i16)) { + if (Subtarget->hasAVX2()) { + return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, Op); + } + + // Optimize vectors in AVX mode + // Sign extend v8i16 to v8i32 and + // v4i32 to v4i64 + // + // Divide input vector into two parts + // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1} + // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32 + // concat the vectors to original VT + unsigned NumElems = OpVT.getVectorNumElements(); SmallVector ShufMask1(NumElems, -1); for (unsigned i = 0; i < NumElems/2; i++) ShufMask1[i] = i; @@ -14906,6 +14959,9 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { + if (Subtarget->hasAVX2()) + return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0); + SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl); SDValue OpLo = getTargetShuffleNode(X86ISD::UNPCKL, dl, OpVT, N0, ZeroVec, DAG); @@ -15108,6 +15164,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case X86ISD::FAND: return PerformFANDCombine(N, DAG); case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); + case ISD::ANY_EXTEND: case ISD::ZERO_EXTEND: 
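// [Illustrative aside, not part of the original patch] Routing ISD::ANY_EXTEND
// into the same combine as ISD::ZERO_EXTEND is sound because an any-extend
// leaves the new high bits unspecified; producing them as zeros is always a
// legal refinement, so the VZEXT_MOVL-based lowering serves both opcodes.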
return PerformZExtCombine(N, DAG, Subtarget); case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget); case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI); -- cgit v1.1 From 4c7972d6385ff17574d76d0fdda5a1a5f53d49e8 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 22 Apr 2012 18:15:59 +0000 Subject: Simplify code by converting multiple places that were manually concatenating 128-bit vectors to use either CONCAT_VECTORS or a helper function. CONCAT_VECTORS will itself be lowered to the same pattern as before. The helper function is needed for concats of BUILD_VECTORs since getNode(CONCAT_VECTORS) will just return a large BUILD_VECTOR and we may be trying to lower large BUILD_VECTORS when this occurs. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155318 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 5e52b84..6a6a5f6 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -134,6 +134,19 @@ static SDValue Insert128BitVector(SDValue Result, return SDValue(); } +/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128 +/// instructions. This is used because creating CONCAT_VECTOR nodes of +/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower +/// large BUILD_VECTORS. +static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, + unsigned NumElems, SelectionDAG &DAG, + DebugLoc dl) { + SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, + DAG.getConstant(0, MVT::i32), DAG, dl); + return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32), + DAG, dl); +} + static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { const X86Subtarget *Subtarget = &TM.getSubtarget(); bool is64Bit = Subtarget->is64Bit(); @@ -4207,10 +4220,7 @@ static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); } else { // AVX Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); - SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32), - Vec, DAG.getConstant(0, MVT::i32), DAG, dl); - Vec = Insert128BitVector(InsV, Vec, - DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl); + Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); } } else { Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); @@ -4348,10 +4358,7 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { // into the low and high part. This is necessary because we want // to use VPERM* to shuffle the vectors if (Size == 256) { - SDValue InsV = Insert128BitVector(DAG.getUNDEF(SrcVT), V1, - DAG.getConstant(0, MVT::i32), DAG, dl); - V1 = Insert128BitVector(InsV, V1, - DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); + V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1); } return getLegalSplat(DAG, V1, EltNo); @@ -5214,10 +5221,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { NumElems/2); // Recreate the wider vector with the lower and upper part. 
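// [Illustrative aside, not part of the original patch] The hunks below swap
// the repeated insert-into-UNDEF sequences for the new Concat128BitVectors
// helper. A plain getNode(ISD::CONCAT_VECTORS) would not help here: a concat
// of two BUILD_VECTORs just folds into one wider BUILD_VECTOR, the very node
// this path is trying to lower. The helper instead emits the VINSERTF128
// pattern directly, per its definition earlier in this patch:
//
//   SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1,
//                                  DAG.getConstant(0, MVT::i32), DAG, dl);
//   return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32),
//                             DAG, dl);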
- SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Lower, - DAG.getConstant(0, MVT::i32), DAG, dl); - return Insert128BitVector(Vec, Upper, DAG.getConstant(NumElems/2, MVT::i32), - DAG, dl); + return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); } // Let legalizer expand 2-wide build_vectors. @@ -5384,10 +5388,7 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { SDValue V2 = Op.getOperand(1); unsigned NumElems = ResVT.getVectorNumElements(); - SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, ResVT), V1, - DAG.getConstant(0, MVT::i32), DAG, dl); - return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32), - DAG, dl); + return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); } SDValue @@ -6050,10 +6051,7 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { } // Concatenate the result back - SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Shufs[0], - DAG.getConstant(0, MVT::i32), DAG, dl); - return Insert128BitVector(V, Shufs[1],DAG.getConstant(NumLaneElems, MVT::i32), - DAG, dl); + return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Shufs[0], Shufs[1]); } /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with -- cgit v1.1 From d63fa657e4434f9e4b1bb6e54b66fee6093a86f3 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 22 Apr 2012 18:51:37 +0000 Subject: Tidy up. 80 columns and argument alignment. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155319 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 6a6a5f6..887fec8 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -13042,15 +13042,16 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, Op = DAG.getVectorShuffle(MVT::v8i32, dl, Op, DAG.getUNDEF(MVT::v8i32), ShufMask); - return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op, DAG.getIntPtrConstant(0)); + return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op, + DAG.getIntPtrConstant(0)); } // AVX: v4i64 -> v4i32 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, - DAG.getIntPtrConstant(0)); + DAG.getIntPtrConstant(0)); SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, - DAG.getIntPtrConstant(2)); + DAG.getIntPtrConstant(2)); OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); @@ -13058,22 +13059,22 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, // PSHUFD static const int ShufMask1[] = {0, 2, 0, 0}; - OpLo = DAG.getVectorShuffle(VT, dl, OpLo, DAG.getUNDEF(VT), - ShufMask1); - OpHi = DAG.getVectorShuffle(VT, dl, OpHi, DAG.getUNDEF(VT), - ShufMask1); + OpLo = DAG.getVectorShuffle(VT, dl, OpLo, DAG.getUNDEF(VT), ShufMask1); + OpHi = DAG.getVectorShuffle(VT, dl, OpHi, DAG.getUNDEF(VT), ShufMask1); // MOVLHPS static const int ShufMask2[] = {0, 1, 4, 5}; return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2); } + if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) { if (Subtarget->hasAVX2()) { // AVX2: v8i32 -> v8i16 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v32i8, Op); + // PSHUFB SmallVector pshufbMask; for (unsigned i = 0; i < 2; ++i) { @@ -13088,27 +13089,27 @@ SDValue 
X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, for (unsigned j = 0; j < 8; ++j) pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); } - SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8, &pshufbMask[0], - 32); + SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8, + &pshufbMask[0], 32); Op = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, Op, BV); Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i64, Op); static const int ShufMask[] = {0, 2, -1, -1}; - Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64), + Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64), &ShufMask[0]); - Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, - DAG.getIntPtrConstant(0)); + Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, + DAG.getIntPtrConstant(0)); return DAG.getNode(ISD::BITCAST, dl, VT, Op); } SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, - DAG.getIntPtrConstant(0)); + DAG.getIntPtrConstant(0)); SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, - DAG.getIntPtrConstant(4)); + DAG.getIntPtrConstant(4)); OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo); OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi); @@ -13117,11 +13118,9 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13, -1, -1, -1, -1, -1, -1, -1, -1}; - OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, - DAG.getUNDEF(MVT::v16i8), + OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, DAG.getUNDEF(MVT::v16i8), ShufMask1); - OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi, - DAG.getUNDEF(MVT::v16i8), + OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi, DAG.getUNDEF(MVT::v16i8), ShufMask1); OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); -- cgit v1.1 From df966f6beec0bcfa5f2c9bbcccd6e445f118668a Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 22 Apr 2012 19:17:57 +0000 Subject: Make calls to getVectorShuffle more consistent. Use shuffle VT for calls to getUNDEF instead of requerying. Use &Mask[0] instead of Mask.data(). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155320 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 887fec8..a1c8303 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5111,8 +5111,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { Mask.push_back(Idx); for (unsigned i = 1; i != VecElts; ++i) Mask.push_back(i); - Item = DAG.getVectorShuffle(VecVT, dl, Item, - DAG.getUNDEF(Item.getValueType()), + Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), &Mask[0]); } return DAG.getNode(ISD::BITCAST, dl, VT, Item); @@ -14409,8 +14408,8 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, for (unsigned i = 0; i < NumElems; i++) ShuffleVec[i*SizeRatio] = i; SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, - DAG.getUNDEF(SlicedVec.getValueType()), - ShuffleVec.data()); + DAG.getUNDEF(WideVecVT), + &ShuffleVec[0]); // Bitcast to the requested type. 
Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); @@ -14491,8 +14490,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, - DAG.getUNDEF(WideVec.getValueType()), - ShuffleVec.data()); + DAG.getUNDEF(WideVecVT), + &ShuffleVec[0]); // At this point all of the data is stored at the bottom of the // register. We now need to save it to mem. @@ -14894,13 +14893,13 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, for (unsigned i = 0; i < NumElems/2; i++) ShufMask1[i] = i; SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT), - ShufMask1.data()); + &ShufMask1[0]); SmallVector ShufMask2(NumElems, -1); for (unsigned i = 0; i < NumElems/2; i++) ShufMask2[i] = i + NumElems/2; SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT), - ShufMask2.data()); + &ShufMask2[0]); EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), VT.getVectorNumElements()/2); -- cgit v1.1 From 767b4f64a09b6f3880e7cdc55ccdd02a090a97d0 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 22 Apr 2012 19:29:34 +0000 Subject: Convert getNode(UNDEF) to getUNDEF. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155321 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index a1c8303..13ec544 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -75,7 +75,7 @@ static SDValue Extract128BitVector(SDValue Vec, // Extract from UNDEF is UNDEF. if (Vec.getOpcode() == ISD::UNDEF) - return DAG.getNode(ISD::UNDEF, dl, ResultVT); + return DAG.getUNDEF(ResultVT); if (isa(Idx)) { unsigned IdxVal = cast(Idx)->getZExtValue(); @@ -6962,7 +6962,7 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); // Insert the 128-bit vector. - return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op, + return Insert128BitVector(DAG.getUNDEF(OpVT), Op, DAG.getConstant(0, MVT::i32), DAG, dl); } @@ -12965,16 +12965,17 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, if (isShuffleHigh128VectorInsertLow(SVOp)) { SDValue V = Extract128BitVector(V1, DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); - SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), - V, DAG.getConstant(0, MVT::i32), DAG, dl); + SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, + DAG.getConstant(0, MVT::i32), DAG, dl); return DCI.CombineTo(N, InsV); } // vector_shuffle or if (isShuffleLow128VectorInsertHigh(SVOp)) { SDValue V = Extract128BitVector(V1, DAG.getConstant(0, MVT::i32), DAG, dl); - SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), - V, DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); + SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, + DAG.getConstant(NumElems/2, MVT::i32), + DAG, dl); return DCI.CombineTo(N, InsV); } -- cgit v1.1 From b14940a047d1bbf9927881d506f0b8b669658e52 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 22 Apr 2012 20:55:18 +0000 Subject: Make Extract128BitVector and Insert128BitVector take an unsigned instead of an ConstantNode SDValue. 
getConstant was almost always called just before only to have the functions take it apart and build a new ConstantSDNode. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155325 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 167 +++++++++++++++---------------------- 1 file changed, 68 insertions(+), 99 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 13ec544..0ebf7d5 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -62,10 +62,8 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, /// simple subregister reference. Idx is an index in the 128 bits we /// want. It need not be aligned to a 128-bit bounday. That makes /// lowering EXTRACT_VECTOR_ELT operations easier. -static SDValue Extract128BitVector(SDValue Vec, - SDValue Idx, - SelectionDAG &DAG, - DebugLoc dl) { +static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, + SelectionDAG &DAG, DebugLoc dl) { EVT VT = Vec.getValueType(); assert(VT.getSizeInBits() == 256 && "Unexpected vector size!"); EVT ElVT = VT.getVectorElementType(); @@ -77,26 +75,20 @@ static SDValue Extract128BitVector(SDValue Vec, if (Vec.getOpcode() == ISD::UNDEF) return DAG.getUNDEF(ResultVT); - if (isa(Idx)) { - unsigned IdxVal = cast(Idx)->getZExtValue(); - - // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR - // we can match to VEXTRACTF128. - unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits(); + // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR + // we can match to VEXTRACTF128. + unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits(); - // This is the index of the first element of the 128-bit chunk - // we want. - unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128) - * ElemsPerChunk); + // This is the index of the first element of the 128-bit chunk + // we want. + unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128) + * ElemsPerChunk); - SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32); - SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, - VecIdx); + SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32); + SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, + VecIdx); - return Result; - } - - return SDValue(); + return Result; } /// Generate a DAG to put 128-bits into a vector > 128 bits. This @@ -104,34 +96,27 @@ static SDValue Extract128BitVector(SDValue Vec, /// simple superregister reference. Idx is an index in the 128 bits /// we want. It need not be aligned to a 128-bit bounday. That makes /// lowering INSERT_VECTOR_ELT operations easier. -static SDValue Insert128BitVector(SDValue Result, - SDValue Vec, - SDValue Idx, - SelectionDAG &DAG, +static SDValue Insert128BitVector(SDValue Result, SDValue Vec, + unsigned IdxVal, SelectionDAG &DAG, DebugLoc dl) { - if (isa(Idx)) { - EVT VT = Vec.getValueType(); - assert(VT.getSizeInBits() == 128 && "Unexpected vector size!"); + EVT VT = Vec.getValueType(); + assert(VT.getSizeInBits() == 128 && "Unexpected vector size!"); - EVT ElVT = VT.getVectorElementType(); - unsigned IdxVal = cast(Idx)->getZExtValue(); - EVT ResultVT = Result.getValueType(); + EVT ElVT = VT.getVectorElementType(); + EVT ResultVT = Result.getValueType(); - // Insert the relevant 128 bits. - unsigned ElemsPerChunk = 128/ElVT.getSizeInBits(); + // Insert the relevant 128 bits. 
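// [Illustrative aside, not part of the original patch] A worked example of the
// index normalization these helpers share: for a 256-bit vector of i32
// elements, ElemsPerChunk = 128 / 32 = 4, so element IdxVal = 6 gives
//   (6 * 32) / 128 = 1    -> the element lies in the second 128-bit chunk
//   1 * 4 = 4             -> subvector index of that chunk's first element
// i.e. an arbitrary element index is rounded down to the 128-bit boundary
// that contains it, which is what allows a VEXTRACTF128/VINSERTF128 match.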
+ unsigned ElemsPerChunk = 128/ElVT.getSizeInBits(); - // This is the index of the first element of the 128-bit chunk - // we want. - unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128) - * ElemsPerChunk); + // This is the index of the first element of the 128-bit chunk + // we want. + unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128) + * ElemsPerChunk); - SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32); - Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, - VecIdx); - return Result; - } - - return SDValue(); + SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32); + Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, + VecIdx); + return Result; } /// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128 @@ -141,10 +126,8 @@ static SDValue Insert128BitVector(SDValue Result, static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, unsigned NumElems, SelectionDAG &DAG, DebugLoc dl) { - SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, - DAG.getConstant(0, MVT::i32), DAG, dl); - return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32), - DAG, dl); + SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); + return Insert128BitVector(V, V2, NumElems/2, DAG, dl); } static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { @@ -4341,7 +4324,7 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { // the splat element index when it refers to the higher register. if (Size == 256) { unsigned Idx = (EltNo >= NumElems/2) ? NumElems/2 : 0; - V1 = Extract128BitVector(V1, DAG.getConstant(Idx, MVT::i32), DAG, dl); + V1 = Extract128BitVector(V1, Idx, DAG, dl); if (Idx > 0) EltNo -= NumElems/2; } @@ -5144,8 +5127,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); if (VT.getSizeInBits() == 256) { SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl); - Item = Insert128BitVector(ZeroVec, Item, DAG.getConstant(0, MVT::i32), - DAG, dl); + Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl); } else { assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); @@ -6035,13 +6017,12 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { Shufs[l] = DAG.getUNDEF(NVT); } else { SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2), - DAG.getConstant((InputUsed[0] % 2) * NumLaneElems, MVT::i32), - DAG, dl); + (InputUsed[0] % 2) * NumLaneElems, + DAG, dl); // If only one input was used, use an undefined vector for the other. SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) : Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2), - DAG.getConstant((InputUsed[1] % 2) * NumLaneElems, MVT::i32), - DAG, dl); + (InputUsed[1] % 2) * NumLaneElems, DAG, dl); // At least one input vector was used. Create a new shuffle vector. Shufs[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]); } @@ -6776,8 +6757,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, // Get the 128-bit vector. bool Upper = IdxVal >= NumElems/2; - Vec = Extract128BitVector(Vec, - DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32), DAG, dl); + Vec = Extract128BitVector(Vec, Upper ? NumElems/2 : 0, DAG, dl); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, Upper ? 
DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : Idx); @@ -6916,7 +6896,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { unsigned NumElems = VT.getVectorNumElements(); unsigned IdxVal = cast(N2)->getZExtValue(); bool Upper = IdxVal >= NumElems/2; - SDValue Ins128Idx = DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32); + unsigned Ins128Idx = Upper ? NumElems/2 : 0; SDValue V = Extract128BitVector(N0, Ins128Idx, DAG, dl); // Insert the element into the desired half. @@ -6962,9 +6942,7 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); // Insert the 128-bit vector. - return Insert128BitVector(DAG.getUNDEF(OpVT), Op, - DAG.getConstant(0, MVT::i32), - DAG, dl); + return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl); } if (Op.getValueType() == MVT::v1i64 && @@ -6988,9 +6966,11 @@ X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { SDValue Vec = Op.getNode()->getOperand(0); SDValue Idx = Op.getNode()->getOperand(1); - if (Op.getNode()->getValueType(0).getSizeInBits() == 128 - && Vec.getNode()->getValueType(0).getSizeInBits() == 256) { - return Extract128BitVector(Vec, Idx, DAG, dl); + if (Op.getNode()->getValueType(0).getSizeInBits() == 128 && + Vec.getNode()->getValueType(0).getSizeInBits() == 256 && + isa(Idx)) { + unsigned IdxVal = cast(Idx)->getZExtValue(); + return Extract128BitVector(Vec, IdxVal, DAG, dl); } } return SDValue(); @@ -7007,9 +6987,11 @@ X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { SDValue SubVec = Op.getNode()->getOperand(1); SDValue Idx = Op.getNode()->getOperand(2); - if (Op.getNode()->getValueType(0).getSizeInBits() == 256 - && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) { - return Insert128BitVector(Vec, SubVec, Idx, DAG, dl); + if (Op.getNode()->getValueType(0).getSizeInBits() == 256 && + SubVec.getNode()->getValueType(0).getSizeInBits() == 128 && + isa(Idx)) { + unsigned IdxVal = cast(Idx)->getZExtValue(); + return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl); } } return SDValue(); @@ -8355,18 +8337,16 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { int NumElems = VT.getVectorNumElements(); DebugLoc dl = Op.getDebugLoc(); SDValue CC = Op.getOperand(2); - SDValue Idx0 = DAG.getConstant(0, MVT::i32); - SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); // Extract the LHS vectors SDValue LHS = Op.getOperand(0); - SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); - SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); + SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); + SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); // Extract the RHS vectors SDValue RHS = Op.getOperand(1); - SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl); - SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl); + SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); + SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); // Issue the operation on the smaller types and concatenate the result back MVT EltVT = VT.getVectorElementType().getSimpleVT(); @@ -10153,18 +10133,16 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { int NumElems = VT.getVectorNumElements(); DebugLoc dl = Op.getDebugLoc(); - SDValue Idx0 = DAG.getConstant(0, MVT::i32); - SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); // Extract the LHS vectors SDValue LHS = Op.getOperand(0); - SDValue LHS1 = Extract128BitVector(LHS, Idx0, 
DAG, dl); - SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); + SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); + SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); // Extract the RHS vectors SDValue RHS = Op.getOperand(1); - SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl); - SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl); + SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); + SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); MVT EltVT = VT.getVectorElementType().getSimpleVT(); EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); @@ -10426,9 +10404,8 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); // Extract the two vectors - SDValue V1 = Extract128BitVector(R, DAG.getConstant(0, MVT::i32), DAG, dl); - SDValue V2 = Extract128BitVector(R, DAG.getConstant(NumElems/2, MVT::i32), - DAG, dl); + SDValue V1 = Extract128BitVector(R, 0, DAG, dl); + SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl); // Recreate the shift amount vectors SDValue Amt1, Amt2; @@ -10447,9 +10424,8 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { &Amt2Csts[0], NumElems/2); } else { // Variable shift amount - Amt1 = Extract128BitVector(Amt, DAG.getConstant(0, MVT::i32), DAG, dl); - Amt2 = Extract128BitVector(Amt, DAG.getConstant(NumElems/2, MVT::i32), - DAG, dl); + Amt1 = Extract128BitVector(Amt, 0, DAG, dl); + Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl); } // Issue new vector shifts for the smaller types @@ -10560,13 +10536,11 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, if (!Subtarget->hasAVX2()) { // needs to be split int NumElems = VT.getVectorNumElements(); - SDValue Idx0 = DAG.getConstant(0, MVT::i32); - SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); // Extract the LHS vectors SDValue LHS = Op.getOperand(0); - SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); - SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); + SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); + SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); MVT EltVT = VT.getVectorElementType().getSimpleVT(); EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); @@ -12952,8 +12926,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, // Emit a zeroed vector and insert the desired subvector on its // first half. 
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); - SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), - DAG.getConstant(0, MVT::i32), DAG, dl); + SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl); return DCI.CombineTo(N, InsV); } @@ -12963,19 +12936,15 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> if (isShuffleHigh128VectorInsertLow(SVOp)) { - SDValue V = Extract128BitVector(V1, DAG.getConstant(NumElems/2, MVT::i32), - DAG, dl); - SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, - DAG.getConstant(0, MVT::i32), DAG, dl); + SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl); + SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl); return DCI.CombineTo(N, InsV); } // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 2, 3> if (isShuffleLow128VectorInsertHigh(SVOp)) { - SDValue V = Extract128BitVector(V1, DAG.getConstant(0, MVT::i32), DAG, dl); - SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, - DAG.getConstant(NumElems/2, MVT::i32), - DAG, dl); + SDValue V = Extract128BitVector(V1, 0, DAG, dl); + SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl); return DCI.CombineTo(N, InsV); } -- cgit v1.1 From e8eb116ff33b101708b3dbf84f660539268c1776 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 23 Apr 2012 03:26:18 +0000 Subject: Remove some 'else' after 'return'. No functional change. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155330 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 0ebf7d5..bc46693 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1273,7 +1273,6 @@ static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) { break; } } - return; } /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate @@ -7321,7 +7320,10 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, Subtarget->is64Bit()); } - } else if (Subtarget->isTargetDarwin()) { + llvm_unreachable("Unknown TLS model."); + } + + if (Subtarget->isTargetDarwin()) { // Darwin only has one model of TLS. Lower to that. unsigned char OpFlag = 0; unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? @@ -7364,7 +7366,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(), Chain.getValue(1)); - } else if (Subtarget->isTargetWindows()) { + } + + if (Subtarget->isTargetWindows()) { // Just use the implicit TLS architecture // Need to generate someting similar to: // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage -- cgit v1.1 From 0fbf364fd791a5179a777ad7dbe628f7439b629f Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 23 Apr 2012 03:28:34 +0000 Subject: Remove some tab characters. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155331 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index bc46693..783c752 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1454,7 +1454,7 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, bool X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, - MachineFunction &MF, bool isVarArg, + MachineFunction &MF, bool isVarArg, const SmallVectorImpl &Outs, LLVMContext &Context) const { SmallVector RVLocs; @@ -1635,7 +1635,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, SmallVector RVLocs; bool Is64Bit = Subtarget->is64Bit(); CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); + getTargetMachine(), RVLocs, *DAG.getContext()); CCInfo.AnalyzeCallResult(Ins, RetCC_X86); // Copy all of the result registers out of their specified physreg. @@ -2740,7 +2740,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, SmallVector ArgLocs; CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); + getTargetMachine(), ArgLocs, *DAG.getContext()); CCInfo.AnalyzeCallOperands(Outs, CC_X86); for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) @@ -2761,7 +2761,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, if (Unused) { SmallVector RVLocs; CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); + getTargetMachine(), RVLocs, *DAG.getContext()); CCInfo.AnalyzeCallResult(Ins, RetCC_X86); for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { CCValAssign &VA = RVLocs[i]; @@ -2775,12 +2775,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, if (!CCMatch) { SmallVector RVLocs1; CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), - getTargetMachine(), RVLocs1, *DAG.getContext()); + getTargetMachine(), RVLocs1, *DAG.getContext()); CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); SmallVector RVLocs2; CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), - getTargetMachine(), RVLocs2, *DAG.getContext()); + getTargetMachine(), RVLocs2, *DAG.getContext()); CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); if (RVLocs1.size() != RVLocs2.size()) @@ -2807,7 +2807,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // argument is passed on the stack. SmallVector ArgLocs; CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); + getTargetMachine(), ArgLocs, *DAG.getContext()); // Allocate shadow area for Win64 if (Subtarget->isTargetWin64()) { @@ -7414,7 +7414,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { false, false, false, 0); SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()), - getPointerTy()); + getPointerTy()); IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale); SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX); -- cgit v1.1 From 731dfd0da928e10980eacb6edd104ebb1c71e0f0 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 23 Apr 2012 03:42:40 +0000 Subject: Add a couple llvm_unreachables. 
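Both additions follow the same idiom: every case the preceding code supports already returns, so the fall-through point is dead by construction, and llvm_unreachable documents that invariant while silencing "control reaches end of non-void function" warnings. A minimal sketch of the shape, where shiftName is an invented stand-in helper rather than code from this patch:

    #include "llvm/CodeGen/ISDOpcodes.h"
    #include "llvm/Support/ErrorHandling.h"

    // Every opcode this helper understands returns explicitly; reaching
    // the end means the caller passed an opcode it was never meant to see.
    static const char *shiftName(unsigned Opc) {
      if (Opc == llvm::ISD::SHL) return "shl";
      if (Opc == llvm::ISD::SRL) return "srl";
      if (Opc == llvm::ISD::SRA) return "sra";
      llvm_unreachable("Unknown shift opcode.");
    }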
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155332 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 ++ 1 file changed, 2 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 783c752..119e49a 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -10291,6 +10291,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); return Res; } + llvm_unreachable("Unknown shift opcode."); } if (Subtarget->hasAVX2() && VT == MVT::v32i8) { @@ -10334,6 +10335,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); return Res; } + llvm_unreachable("Unknown shift opcode."); } } } -- cgit v1.1 From 1842ba0dfc441b586187ce8174ee8b3d986f1e3a Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 23 Apr 2012 06:38:28 +0000 Subject: Tidy up spacing in LowerVECTOR_SHUFFLEtoBlend. Remove code that checks if shuffle operand has a different type than the shuffle result since it can never happen. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155333 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 73 ++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 39 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 119e49a..0f62468 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5390,75 +5390,70 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { } // Try to lower a shuffle node into a simple blend instruction. 
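// Illustration, not part of this patch: for a v4f32 shuffle with mask
// <0, 5, 2, 7>, lanes 0 and 2 come from V1 and lanes 1 and 3 from V2, so
// the whole shuffle collapses to one BLENDPS whose immediate encodes that
// per-lane choice.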
-static SDValue LowerVECTOR_SHUFFLEtoBlend(SDValue Op, +static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, const X86Subtarget *Subtarget, SelectionDAG &DAG) { - ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); DebugLoc dl = SVOp->getDebugLoc(); - EVT VT = Op.getValueType(); - EVT InVT = V1.getValueType(); - int MaskSize = VT.getVectorNumElements(); - int InSize = InVT.getVectorNumElements(); + EVT VT = SVOp->getValueType(0); + unsigned NumElems = VT.getVectorNumElements(); if (!Subtarget->hasSSE41()) return SDValue(); - if (MaskSize != InSize) - return SDValue(); - - int ISDNo = 0; + unsigned ISDNo = 0; MVT OpTy; switch (VT.getSimpleVT().SimpleTy) { default: return SDValue(); case MVT::v8i16: - ISDNo = X86ISD::BLENDPW; - OpTy = MVT::v8i16; - break; + ISDNo = X86ISD::BLENDPW; + OpTy = MVT::v8i16; + break; case MVT::v4i32: case MVT::v4f32: - ISDNo = X86ISD::BLENDPS; - OpTy = MVT::v4f32; - break; + ISDNo = X86ISD::BLENDPS; + OpTy = MVT::v4f32; + break; case MVT::v2i64: case MVT::v2f64: - ISDNo = X86ISD::BLENDPD; - OpTy = MVT::v2f64; - break; + ISDNo = X86ISD::BLENDPD; + OpTy = MVT::v2f64; + break; case MVT::v8i32: case MVT::v8f32: - if (!Subtarget->hasAVX()) - return SDValue(); - ISDNo = X86ISD::BLENDPS; - OpTy = MVT::v8f32; - break; + if (!Subtarget->hasAVX()) + return SDValue(); + ISDNo = X86ISD::BLENDPS; + OpTy = MVT::v8f32; + break; case MVT::v4i64: case MVT::v4f64: - if (!Subtarget->hasAVX()) - return SDValue(); - ISDNo = X86ISD::BLENDPD; - OpTy = MVT::v4f64; - break; + if (!Subtarget->hasAVX()) + return SDValue(); + ISDNo = X86ISD::BLENDPD; + OpTy = MVT::v4f64; + break; case MVT::v16i16: - if (!Subtarget->hasAVX2()) - return SDValue(); - ISDNo = X86ISD::BLENDPW; - OpTy = MVT::v16i16; - break; + if (!Subtarget->hasAVX2()) + return SDValue(); + ISDNo = X86ISD::BLENDPW; + OpTy = MVT::v16i16; + break; } assert(ISDNo && "Invalid Op Number"); unsigned MaskVals = 0; - for (int i = 0; i < MaskSize; ++i) { + for (unsigned i = 0; i != NumElems; ++i) { int EltIdx = SVOp->getMaskElt(i); - if (EltIdx == i || EltIdx == -1) + if (EltIdx == (int)i || EltIdx < 0) MaskVals |= (1<<i); Date: Mon, 23 Apr 2012 06:57:04 +0000 Subject: Tidy up by removing some 'else' after 'return' git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155336 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 104 +++++++++++++++++++++---------------- 1 file changed, 60 insertions(+), 44 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 0f62468..515682c 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3048,10 +3048,12 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, // X > -1 -> X == 0, jump !sign. RHS = DAG.getConstant(0, RHS.getValueType()); return X86::COND_NS; - } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { + } + if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { // X < 0 -> X == 0, jump on sign. 
return X86::COND_S; - } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { + } + if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { // X < 1 -> X <= 0 RHS = DAG.getConstant(0, RHS.getValueType()); return X86::COND_LE; @@ -4857,8 +4859,9 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl &Elts, LDBase->getPointerInfo(), LDBase->isVolatile(), LDBase->isNonTemporal(), LDBase->isInvariant(), LDBase->getAlignment()); - } else if (NumElems == 4 && LastLoadedElt == 1 && - DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { + } + if (NumElems == 4 && LastLoadedElt == 1 && + DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; SDValue ResNode = @@ -6081,7 +6084,9 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { } return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); - } else if (NumLo == 3 || NumHi == 3) { + } + + if (NumLo == 3 || NumHi == 3) { // Otherwise, we must have three elements from one vector, call it X, and // one element from the other, call it Y. First, use a shufps to build an // intermediate vector with the one element from Y and the element from X @@ -6117,17 +6122,17 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { Mask1[2] = HiIndex & 1 ? 6 : 4; Mask1[3] = HiIndex & 1 ? 4 : 6; return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); - } else { - Mask1[0] = HiIndex & 1 ? 2 : 0; - Mask1[1] = HiIndex & 1 ? 0 : 2; - Mask1[2] = PermMask[2]; - Mask1[3] = PermMask[3]; - if (Mask1[2] >= 0) - Mask1[2] += 4; - if (Mask1[3] >= 0) - Mask1[3] += 4; - return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); } + + Mask1[0] = HiIndex & 1 ? 2 : 0; + Mask1[1] = HiIndex & 1 ? 0 : 2; + Mask1[2] = PermMask[2]; + Mask1[3] = PermMask[3]; + if (Mask1[2] >= 0) + Mask1[2] += 4; + if (Mask1[3] >= 0) + Mask1[3] += 4; + return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); } // Break it into (shuffle shuffle_hi, shuffle_lo). @@ -6538,11 +6543,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { // new vector_shuffle with the corrected mask.p SmallVector NewMask(M.begin(), M.end()); NormalizeMask(NewMask, NumElems); - if (isUNPCKLMask(NewMask, VT, HasAVX2, true)) { + if (isUNPCKLMask(NewMask, VT, HasAVX2, true)) return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); - } else if (isUNPCKHMask(NewMask, VT, HasAVX2, true)) { + if (isUNPCKHMask(NewMask, VT, HasAVX2, true)) return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); - } } if (Commuted) { @@ -6688,7 +6692,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); - } else if (VT.getSizeInBits() == 16) { + } + + if (VT.getSizeInBits() == 16) { unsigned Idx = cast(Op.getOperand(1))->getZExtValue(); // If Idx is 0, it's cheaper to do a move instead of a pextrw. if (Idx == 0) @@ -6703,7 +6709,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); - } else if (VT == MVT::f32) { + } + + if (VT == MVT::f32) { // EXTRACTPS outputs to a GPR32 register which will require a movd to copy // the result back to FR32 register. 
It's only worth matching if the // result has a single use which is a store or a bitcast to i32. And in @@ -6723,7 +6731,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, Op.getOperand(0)), Op.getOperand(1)); return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); - } else if (VT == MVT::i32 || VT == MVT::i64) { + } + + if (VT == MVT::i32 || VT == MVT::i64) { // ExtractPS/pextrq works with constant index. if (isa(Op.getOperand(1))) return Op; @@ -6784,7 +6794,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); - } else if (VT.getSizeInBits() == 32) { + } + + if (VT.getSizeInBits() == 32) { unsigned Idx = cast(Op.getOperand(1))->getZExtValue(); if (Idx == 0) return Op; @@ -6796,7 +6808,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, DAG.getUNDEF(VVT), Mask); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, DAG.getIntPtrConstant(0)); - } else if (VT.getSizeInBits() == 64) { + } + + if (VT.getSizeInBits() == 64) { // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught // to match extract_elt for f64. @@ -6849,7 +6863,9 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, if (N2.getValueType() != MVT::i32) N2 = DAG.getIntPtrConstant(cast(N2)->getZExtValue()); return DAG.getNode(Opc, dl, VT, N0, N1, N2); - } else if (EltVT == MVT::f32 && isa(N2)) { + } + + if (EltVT == MVT::f32 && isa(N2)) { // Bits [7:6] of the constant are the source select. This will always be // zero here. The DAG Combiner may combine an extract_elt index into these // bits. For example (insert (extract, 3), 2) could be matched by putting @@ -6862,8 +6878,9 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, // Create this as a scalar to vector.. N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); - } else if ((EltVT == MVT::i32 || EltVT == MVT::i64) && - isa(N2)) { + } + + if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa(N2)) { // PINSR* works with constant index. return Op; } @@ -7673,12 +7690,11 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, // Handle final rounding. EVT DestVT = Op.getValueType(); - if (DestVT.bitsLT(MVT::f64)) { + if (DestVT.bitsLT(MVT::f64)) return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, DAG.getIntPtrConstant(0)); - } else if (DestVT.bitsGT(MVT::f64)) { + if (DestVT.bitsGT(MVT::f64)) return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); - } // Handle final rounding. return Sub; @@ -7699,10 +7715,9 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, EVT DstVT = Op.getValueType(); if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) return LowerUINT_TO_FP_i64(Op, DAG); - else if (SrcVT == MVT::i32 && X86ScalarSSEf64) + if (SrcVT == MVT::i32 && X86ScalarSSEf64) return LowerUINT_TO_FP_i32(Op, DAG); - else if (Subtarget->is64Bit() && - SrcVT == MVT::i64 && DstVT == MVT::f32) + if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32) return SDValue(); // Make a 64-bit buffer, and use it to build an FILD. @@ -7879,9 +7894,9 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), FIST, StackSlot, MachinePointerInfo(), false, false, false, 0); - else - // The node is the result. - return FIST; + + // The node is the result. 
+ return FIST; } SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, @@ -7896,9 +7911,9 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), FIST, StackSlot, MachinePointerInfo(), false, false, false, 0); - else - // The node is the result. - return FIST; + + // The node is the result. + return FIST; } SDValue X86TargetLowering::LowerFABS(SDValue Op, @@ -7948,12 +7963,12 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { MVT XORVT = VT.getSizeInBits() == 128 ? MVT::v2i64 : MVT::v4i64; return DAG.getNode(ISD::BITCAST, dl, VT, DAG.getNode(ISD::XOR, dl, XORVT, - DAG.getNode(ISD::BITCAST, dl, XORVT, - Op.getOperand(0)), - DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); - } else { - return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); + DAG.getNode(ISD::BITCAST, dl, XORVT, + Op.getOperand(0)), + DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); } + + return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); } SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { @@ -8415,7 +8430,8 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { EQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8)); return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ); - } else if (SetCCOpcode == ISD::SETONE) { + } + if (SetCCOpcode == ISD::SETONE) { SDValue ORD, NEQ; ORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8)); -- cgit v1.1 From 9d35240eee370542523bf68199d2cdb21ffbb42a Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 23 Apr 2012 07:24:41 +0000 Subject: Make getZeroVector and getOnesVector more alike as far as how they detect 128-bit versus 256-bit vectors. Be explicit about both sizes and use llvm_unreachable. Similar changes to getLegalSplat. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155337 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 515682c..dc94246 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4158,11 +4158,12 @@ static bool isZeroShuffle(ShuffleVectorSDNode *N) { static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, SelectionDAG &DAG, DebugLoc dl) { assert(VT.isVector() && "Expected a vector type"); + unsigned Size = VT.getSizeInBits(); // Always build SSE zero vectors as <4 x i32> bitcasted // to their dest type. This ensures they get CSE'd. 
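// Illustrative consequence, not from the patch: a zero v4f32 and a zero
// v4i32 both materialize as the same v4i32 build_vector of zero constants
// plus a bitcast, so CSE keeps a single zero node instead of one per type.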
SDValue Vec; - if (VT.getSizeInBits() == 128) { // SSE + if (Size == 128) { // SSE if (Subtarget->hasSSE2()) { // SSE2 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); @@ -4170,7 +4171,7 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); } - } else if (VT.getSizeInBits() == 256) { // AVX + } else if (Size == 256) { // AVX if (Subtarget->hasAVX2()) { // AVX2 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; @@ -4182,7 +4183,9 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); } - } + } else + llvm_unreachable("Unexpected vector type"); + return DAG.getNode(ISD::BITCAST, dl, VT, Vec); } @@ -4193,12 +4196,11 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, DebugLoc dl) { assert(VT.isVector() && "Expected a vector type"); - assert((VT.is128BitVector() || VT.is256BitVector()) - && "Expected a 128-bit or 256-bit vector type"); + unsigned Size = VT.getSizeInBits(); SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); SDValue Vec; - if (VT.getSizeInBits() == 256) { + if (Size == 256) { if (HasAVX2) { // AVX2 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); @@ -4206,9 +4208,10 @@ static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); } - } else { + } else if (Size == 128) { Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); - } + } else + llvm_unreachable("Unexpected vector type"); return DAG.getNode(ISD::BITCAST, dl, VT, Vec); } @@ -4285,15 +4288,14 @@ static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { EVT VT = V.getValueType(); DebugLoc dl = V.getDebugLoc(); - assert((VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256) - && "Vector size not supported"); + unsigned Size = VT.getSizeInBits(); - if (VT.getSizeInBits() == 128) { + if (Size == 128) { V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), &SplatMask[0]); - } else { + } else if (Size == 256) { // To use VPERMILPS to splat scalars, the second half of indicies must // refer to the higher part, which is a duplication of the lower one, // because VPERMILPS can only handle in-lane permutations. @@ -4303,7 +4305,8 @@ static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), &SplatMask[0]); - } + } else + llvm_unreachable("Vector size not supported"); return DAG.getNode(ISD::BITCAST, dl, VT, V); } -- cgit v1.1 From 708e44fc9678085d35ce522aaf5995494901d312 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 23 Apr 2012 07:36:33 +0000 Subject: Use MVT instead of EVT through all of LowerVECTOR_SHUFFLEtoBlend and not just the switch. 
Saves a little bit of binary size. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155339 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index dc94246..b9dba7b 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5402,7 +5402,7 @@ static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); DebugLoc dl = SVOp->getDebugLoc(); - EVT VT = SVOp->getValueType(0); + MVT VT = SVOp->getValueType(0).getSimpleVT(); unsigned NumElems = VT.getVectorNumElements(); if (!Subtarget->hasSSE41()) @@ -5411,7 +5411,7 @@ static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, unsigned ISDNo = 0; MVT OpTy; - switch (VT.getSimpleVT().SimpleTy) { + switch (VT.SimpleTy) { default: return SDValue(); case MVT::v8i16: ISDNo = X86ISD::BLENDPW; -- cgit v1.1 From a35407705da45effd3401fb42395355adaa6e0c2 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Mon, 23 Apr 2012 21:53:37 +0000 Subject: Optimize the vector UINT_TO_FP, SINT_TO_FP and FP_TO_SINT operations where the integer type is i8 (commonly used in graphics). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155397 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 56 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index b9dba7b..87c4805 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1221,7 +1221,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setTargetDAGCombine(ISD::ANY_EXTEND); setTargetDAGCombine(ISD::SIGN_EXTEND); setTargetDAGCombine(ISD::TRUNCATE); + setTargetDAGCombine(ISD::UINT_TO_FP); setTargetDAGCombine(ISD::SINT_TO_FP); + setTargetDAGCombine(ISD::FP_TO_SINT); if (Subtarget->is64Bit()) setTargetDAGCombine(ISD::MUL); if (Subtarget->hasBMI()) @@ -14985,9 +14987,43 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { return SDValue(); } +static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, + const X86TargetLowering *XTLI) { + SDValue Op0 = N->getOperand(0); + EVT InVT = Op0->getValueType(0); + if (!InVT.isSimple()) + return SDValue(); + + // UINT_TO_FP(v4i8) -> SINT_TO_FP(ZEXT(v4i8 to v4i32)) + MVT SrcVT = InVT.getSimpleVT(); + if (SrcVT == MVT::v8i8 || SrcVT == MVT::v4i8) { + DebugLoc dl = N->getDebugLoc(); + MVT DstVT = (SrcVT.getVectorNumElements() == 4 ? MVT::v4i32 : MVT::v8i32); + SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0); + // Notice that we use SINT_TO_FP because we know that the high bits + // are zero and SINT_TO_FP is better supported by the hardware. + return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); + } + + return SDValue(); +} + static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, const X86TargetLowering *XTLI) { SDValue Op0 = N->getOperand(0); + EVT InVT = Op0->getValueType(0); + if (!InVT.isSimple()) + return SDValue(); + + // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) + MVT SrcVT = InVT.getSimpleVT(); + if (SrcVT == MVT::v8i8 || SrcVT == MVT::v4i8) { + DebugLoc dl = N->getDebugLoc(); + MVT DstVT = (SrcVT.getVectorNumElements() == 4 ? 
MVT::v4i32 : MVT::v8i32); + SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); + return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); + } + // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have // a 32-bit target where SSE doesn't support i64->FP operations. if (Op0.getOpcode() == ISD::LOAD) { @@ -15006,6 +15042,24 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } +static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG, + const X86TargetLowering *XTLI) { + EVT InVT = N->getValueType(0); + if (!InVT.isSimple()) + return SDValue(); + + // v4i8 = FP_TO_SINT() -> v4i8 = TRUNCATE (V4i32 = FP_TO_SINT() + MVT VT = InVT.getSimpleVT(); + if (VT == MVT::v8i8 || VT == MVT::v4i8) { + DebugLoc dl = N->getDebugLoc(); + MVT DstVT = (VT.getVectorNumElements() == 4 ? MVT::v4i32 : MVT::v8i32); + SDValue I = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, N->getOperand(0)); + return DAG.getNode(ISD::TRUNCATE, dl, VT, I); + } + + return SDValue(); +} + // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, X86TargetLowering::DAGCombinerInfo &DCI) { @@ -15142,7 +15196,9 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); case ISD::LOAD: return PerformLOADCombine(N, DAG, Subtarget); case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); + case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG, this); case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); + case ISD::FP_TO_SINT: return PerformFP_TO_SINTCombine(N, DAG, this); case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); case X86ISD::FXOR: -- cgit v1.1 From 7fd5e16d3bc24a8ec54c7b48f68b5fed53bfd2e6 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 24 Apr 2012 06:02:29 +0000 Subject: Simplify code a bit and make it compile better. Remove unused parameters. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155428 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 87c4805..074f96f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14987,18 +14987,14 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { return SDValue(); } -static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, - const X86TargetLowering *XTLI) { +static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG) { SDValue Op0 = N->getOperand(0); EVT InVT = Op0->getValueType(0); - if (!InVT.isSimple()) - return SDValue(); // UINT_TO_FP(v4i8) -> SINT_TO_FP(ZEXT(v4i8 to v4i32)) - MVT SrcVT = InVT.getSimpleVT(); - if (SrcVT == MVT::v8i8 || SrcVT == MVT::v4i8) { + if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { DebugLoc dl = N->getDebugLoc(); - MVT DstVT = (SrcVT.getVectorNumElements() == 4 ? MVT::v4i32 : MVT::v8i32); + MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0); // Notice that we use SINT_TO_FP because we know that the high bits // are zero and SINT_TO_FP is better supported by the hardware. 
@@ -15012,14 +15008,11 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, const X86TargetLowering *XTLI) { SDValue Op0 = N->getOperand(0); EVT InVT = Op0->getValueType(0); - if (!InVT.isSimple()) - return SDValue(); // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) - MVT SrcVT = InVT.getSimpleVT(); - if (SrcVT == MVT::v8i8 || SrcVT == MVT::v4i8) { + if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { DebugLoc dl = N->getDebugLoc(); - MVT DstVT = (SrcVT.getVectorNumElements() == 4 ? MVT::v4i32 : MVT::v8i32); + MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); } @@ -15042,17 +15035,13 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } -static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG, - const X86TargetLowering *XTLI) { - EVT InVT = N->getValueType(0); - if (!InVT.isSimple()) - return SDValue(); +static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG) { + EVT VT = N->getValueType(0); // v4i8 = FP_TO_SINT() -> v4i8 = TRUNCATE (V4i32 = FP_TO_SINT() - MVT VT = InVT.getSimpleVT(); if (VT == MVT::v8i8 || VT == MVT::v4i8) { DebugLoc dl = N->getDebugLoc(); - MVT DstVT = (VT.getVectorNumElements() == 4 ? MVT::v4i32 : MVT::v8i32); + MVT DstVT = VT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; SDValue I = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, N->getOperand(0)); return DAG.getNode(ISD::TRUNCATE, dl, VT, I); } @@ -15196,9 +15185,9 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); case ISD::LOAD: return PerformLOADCombine(N, DAG, Subtarget); case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); - case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG, this); + case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG); case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); - case ISD::FP_TO_SINT: return PerformFP_TO_SINTCombine(N, DAG, this); + case ISD::FP_TO_SINT: return PerformFP_TO_SINTCombine(N, DAG); case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); case X86ISD::FXOR: -- cgit v1.1 From 3ef43cf3a2d7fd933f9f9b2960df9426b054d32a Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 24 Apr 2012 06:36:35 +0000 Subject: Remove dangling spaces. Fix some other formatting. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155429 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 074f96f..e16367a 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -13014,7 +13014,8 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, if (!DCI.isBeforeLegalizeOps()) return SDValue(); - if (!Subtarget->hasAVX()) return SDValue(); + if (!Subtarget->hasAVX()) + return SDValue(); EVT VT = N->getValueType(0); SDValue Op = N->getOperand(0); @@ -14856,7 +14857,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, if (!DCI.isBeforeLegalizeOps()) return SDValue(); - if (!Subtarget->hasAVX()) + if (!Subtarget->hasAVX()) return SDValue(); EVT VT = N->getValueType(0); @@ -14867,9 +14868,8 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) || (VT == MVT::v8i32 && OpVT == MVT::v8i16)) { - if (Subtarget->hasAVX2()) { + if (Subtarget->hasAVX2()) return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, Op); - } // Optimize vectors in AVX mode // Sign extend v8i16 to v8i32 and @@ -14882,21 +14882,23 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, unsigned NumElems = OpVT.getVectorNumElements(); SmallVector ShufMask1(NumElems, -1); - for (unsigned i = 0; i < NumElems/2; i++) ShufMask1[i] = i; + for (unsigned i = 0; i != NumElems/2; ++i) + ShufMask1[i] = i; SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT), &ShufMask1[0]); SmallVector ShufMask2(NumElems, -1); - for (unsigned i = 0; i < NumElems/2; i++) ShufMask2[i] = i + NumElems/2; + for (unsigned i = 0; i != NumElems/2; ++i) + ShufMask2[i] = i + NumElems/2; SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT), &ShufMask2[0]); - EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), + EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), VT.getVectorNumElements()/2); - OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); + OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi); return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); -- cgit v1.1 From 87ffdbcb7b93db35d8ff87dfb84d6ae623a5f49f Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Tue, 24 Apr 2012 11:27:53 +0000 Subject: AVX2: The BLENDPW instruction selects between vectors of v16i16 using an i8 immediate. We can't use it here because the shuffle code does not check that the lower part of the word is identical to the upper part. 
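For reference, VPBLENDW's control is a single 8-bit immediate, and on a 256-bit vector the hardware applies that same byte to each 128-bit lane, so a v16i16 blend is only encodable when the low eight mask bits match the high eight. A hedged sketch of the check that would be needed, where blendImmFitsV16i16 and MaskFromV2 are invented names:

    // Bit i of MaskFromV2 records which source lane i is taken from.
    // VPBLENDW on v16i16 replays one imm8 across both 128-bit lanes, so
    // the two halves of the 16-bit lane mask must agree to be encodable.
    static bool blendImmFitsV16i16(unsigned MaskFromV2) {
      unsigned Lo = MaskFromV2 & 0xFF;
      unsigned Hi = (MaskFromV2 >> 8) & 0xFF;
      return Lo == Hi;
    }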
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155440 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 6 ------ 1 file changed, 6 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e16367a..8a11b45 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5443,12 +5443,6 @@ static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, ISDNo = X86ISD::BLENDPD; OpTy = MVT::v4f64; break; - case MVT::v16i16: - if (!Subtarget->hasAVX2()) - return SDValue(); - ISDNo = X86ISD::BLENDPW; - OpTy = MVT::v16i16; - break; } assert(ISDNo && "Invalid Op Number"); -- cgit v1.1 From c16f851569eff1c2296e6addf341c9797e386f01 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 25 Apr 2012 06:39:39 +0000 Subject: Use vector_shuffles instead of target specific unpack nodes for AVX ZERO_EXTEND/ANY_EXTEND combine. These will be converted to target specific nodes during lowering. This is more consistent with other code. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155537 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 8a11b45..0f99844 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -12999,7 +12999,7 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, } -/// PerformTruncateCombine - Converts truncate operation to +/// DCI, PerformTruncateCombine - Converts truncate operation to /// a sequence of vector shuffle operations. /// It is possible when we truncate 256-bit vector to 128-bit vector @@ -14901,6 +14901,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, } static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> // (and (i32 x86isd::setcc_carry), 1) @@ -14938,28 +14939,29 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. // Concat upper and lower parts. 
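// Worked instance, illustrative only: to zero-extend v8i16 to v8i32
// without AVX2, unpckl(N0, zeros) interleaves the low four i16 lanes of
// N0 with zero i16s, which read back as those four lanes zero-extended to
// i32; unpckh does the same for the high four; concat_vectors of the two
// v4i32 halves then yields the v8i32 result.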
// - if (Subtarget->hasAVX()) { + if (!DCI.isBeforeLegalizeOps()) + return SDValue(); + + if (!Subtarget->hasAVX()) + return SDValue(); - if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || - ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { + if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || + ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { - if (Subtarget->hasAVX2()) - return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0); + if (Subtarget->hasAVX2()) + return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0); - SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl); - SDValue OpLo = getTargetShuffleNode(X86ISD::UNPCKL, dl, OpVT, N0, ZeroVec, - DAG); - SDValue OpHi = getTargetShuffleNode(X86ISD::UNPCKH, dl, OpVT, N0, ZeroVec, - DAG); + SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl); + SDValue OpLo = getUnpackl(DAG, dl, OpVT, N0, ZeroVec); + SDValue OpHi = getUnpackh(DAG, dl, OpVT, N0, ZeroVec); - EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), - VT.getVectorNumElements()/2); + EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), + VT.getVectorNumElements()/2); - OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); - OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); + OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); + OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); - return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); - } + return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); } return SDValue(); @@ -15192,7 +15194,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); case ISD::ANY_EXTEND: - case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, Subtarget); + case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget); case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget); case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI); case X86ISD::SETCC: return PerformSETCCCombine(N, DAG); -- cgit v1.1 From 17c836c4b51a14f07a5d5442cf2e984474a8f57d Mon Sep 17 00:00:00 2001 From: Benjamin Kramer Date: Fri, 27 Apr 2012 12:07:43 +0000 Subject: X86: Don't emit conditional floating point moves on when targeting pre-pentiumpro architectures. * Model FPSW (the FPU status word) as a register. * Add ISel patterns for the FUCOM*, FNSTSW and SAHF instructions. * During Legalize/Lowering, build a node sequence to transfer the comparison result from FPSW into EFLAGS. If you're wondering about the right-shift: That's an implicit sub-register extraction (%ax -> %ah) which is handled later on by the instruction selector. Fixes PR6679. Patch by Christoph Erhardt! git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155704 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 0f99844..257757b 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8214,6 +8214,30 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); } +/// Convert a comparison if required by the subtarget. 
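/// Roughly, per the commit message above rather than anything new: on
/// subtargets without FUCOMI/CMOV this reproduces the classic x87 idiom
/// "fucom; fnstsw %ax; sahf". The status word lands in %ax, the shift by
/// 8 selects %ah, and SAHF copies it into EFLAGS, after which the normal
/// integer SETCC/branch lowering applies unchanged.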
+SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, + SelectionDAG &DAG) const { + // If the subtarget does not support the FUCOMI instruction, floating-point + // comparisons have to be converted. + if (Subtarget->hasCMov() || + Cmp.getOpcode() != X86ISD::CMP || + !Cmp.getOperand(0).getValueType().isFloatingPoint() || + !Cmp.getOperand(1).getValueType().isFloatingPoint()) + return Cmp; + + // The instruction selector will select an FUCOM instruction instead of + // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence + // build an SDNode sequence that transfers the result from FPSW into EFLAGS: + // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) + DebugLoc dl = Cmp.getDebugLoc(); + SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); + SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); + SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, + DAG.getConstant(8, MVT::i8)); + SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); + return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); +} + /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node /// if it's possible. SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, @@ -8335,6 +8359,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { return SDValue(); SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); + EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, DAG.getConstant(X86CC, MVT::i8), EFLAGS); } @@ -8503,7 +8528,8 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { // isX86LogicalCmp - Return true if opcode is a X86 logical comparison. static bool isX86LogicalCmp(SDValue Op) { unsigned Opc = Op.getNode()->getOpcode(); - if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) + if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI || + Opc == X86ISD::SAHF) return true; if (Op.getResNo() == 1 && (Opc == X86ISD::ADD || @@ -8567,6 +8593,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue CmpOp0 = Cmp.getOperand(0); Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); + Cmp = ConvertCmpIfNecessary(Cmp, DAG); SDValue Res = // Res = 0 or -1. DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), @@ -8673,6 +8700,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { // a >= b ? -1 : 0 -> RES = setcc_carry // a >= b ? 
0 : -1 -> RES = ~setcc_carry if (Cond.getOpcode() == X86ISD::CMP) { + Cond = ConvertCmpIfNecessary(Cond, DAG); unsigned CondCode = cast(CC)->getZExtValue(); if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && @@ -8911,6 +8939,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond.getOperand(0), Cond.getOperand(1)); + Cmp = ConvertCmpIfNecessary(Cmp, DAG); CC = DAG.getConstant(X86::COND_NE, MVT::i8); Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain, Dest, CC, Cmp); @@ -8940,6 +8969,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond.getOperand(0), Cond.getOperand(1)); + Cmp = ConvertCmpIfNecessary(Cmp, DAG); CC = DAG.getConstant(X86::COND_NE, MVT::i8); Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain, Dest, CC, Cmp); @@ -8973,6 +9003,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { CC = DAG.getConstant(X86::COND_NE, MVT::i8); Cond = EmitTest(Cond, X86::COND_NE, DAG); } + Cond = ConvertCmpIfNecessary(Cond, DAG); return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain, Dest, CC, Cond); } @@ -11111,6 +11142,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; + case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; @@ -11179,6 +11211,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; + case X86ISD::SAHF: return "X86ISD::SAHF"; } } -- cgit v1.1 From b4a8aef96df2440a41a8e49ce40e43030bd66f29 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 27 Apr 2012 21:05:09 +0000 Subject: Tidy up spacing. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155733 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 257757b..65cd899 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14457,8 +14457,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, // 128-bit ones. If in the future the cost becomes only one memory access the // first version would be better. 
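// Shape of the split performed here, stated illustratively: a 256-bit
// store of concat_vectors(Value0, Value1) becomes a store of Value0 at
// the base address plus a store of Value1 at base + 16 bytes, with the
// two store chains tied back together so memory ordering is preserved.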
if (VT.getSizeInBits() == 256 && - StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && - StoredVal.getNumOperands() == 2) { + StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && + StoredVal.getNumOperands() == 2) { SDValue Value0 = StoredVal.getOperand(0); SDValue Value1 = StoredVal.getOperand(1); -- cgit v1.1 From a73b6fc51126d40e4b8830a13a9a4e9322c282a2 Mon Sep 17 00:00:00 2001 From: Chad Rosier Date: Fri, 27 Apr 2012 22:33:25 +0000 Subject: Add x86-specific DAG combine to simplify: x == -y --> x+y == 0 x != -y --> x+y != 0 On x86, the generated code goes from negl %esi cmpl %esi, %edi je .LBB0_2 to addl %esi, %edi je .L4 This case is correctly handled for ARM with "cmn". Patch by Manman Ren. rdar://11245199 PR12545 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155739 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 65cd899..f0dc8e4 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1223,6 +1223,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setTargetDAGCombine(ISD::TRUNCATE); setTargetDAGCombine(ISD::UINT_TO_FP); setTargetDAGCombine(ISD::SINT_TO_FP); + setTargetDAGCombine(ISD::SETCC); setTargetDAGCombine(ISD::FP_TO_SINT); if (Subtarget->is64Bit()) setTargetDAGCombine(ISD::MUL); @@ -15000,6 +15001,32 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } +// Optimize x == -y --> x+y == 0 +// x != -y --> x+y != 0 +static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { + ISD::CondCode CC = cast(N->getOperand(2))->get(); + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + + if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) + if (ConstantSDNode *C = dyn_cast(LHS.getOperand(0))) + if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { + SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), + LHS.getValueType(), RHS, LHS.getOperand(1)); + return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), + addV, DAG.getConstant(0, addV.getValueType()), CC); + } + if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) + if (ConstantSDNode *C = dyn_cast(RHS.getOperand(0))) + if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { + SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), + RHS.getValueType(), LHS, RHS.getOperand(1)); + return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), + addV, DAG.getConstant(0, addV.getValueType()), CC); + } + return SDValue(); +} + // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { unsigned X86CC = N->getConstantOperandVal(0); @@ -15230,6 +15257,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget); case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget); case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI); + case ISD::SETCC: return PerformISDSETCCCombine(N, DAG); case X86ISD::SETCC: return PerformSETCCCombine(N, DAG); case X86ISD::SHUFP: // Handle all target specific shuffles case X86ISD::PALIGN: -- cgit v1.1 From 66ddd153f6c3481cc4e1a771526157f41a9832b5 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 27 Apr 2012 22:54:43 +0000 Subject: Use 'unsigned' instead of 'int' in 
several places when retrieving number of vector elements. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155742 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index f0dc8e4..862a157 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -67,7 +67,7 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, EVT VT = Vec.getValueType(); assert(VT.getSizeInBits() == 256 && "Unexpected vector size!"); EVT ElVT = VT.getVectorElementType(); - int Factor = VT.getSizeInBits()/128; + unsigned Factor = VT.getSizeInBits()/128; EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT, VT.getVectorNumElements()/Factor); @@ -4793,7 +4793,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); int EltNo = (Offset - StartOffset) >> 2; - int NumElems = VT.getVectorNumElements(); + unsigned NumElems = VT.getVectorNumElements(); EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, @@ -4801,7 +4801,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, false, false, false, 0); SmallVector Mask; - for (int i = 0; i < NumElems; ++i) + for (unsigned i = 0; i != NumElems; ++i) Mask.push_back(EltNo); return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); @@ -8373,7 +8373,7 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { assert(VT.getSizeInBits() == 256 && Op.getOpcode() == ISD::SETCC && "Unsupported value type for operation"); - int NumElems = VT.getVectorNumElements(); + unsigned NumElems = VT.getVectorNumElements(); DebugLoc dl = Op.getDebugLoc(); SDValue CC = Op.getOperand(2); @@ -10177,7 +10177,7 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { assert(VT.getSizeInBits() == 256 && VT.isInteger() && "Unsupported value type for operation"); - int NumElems = VT.getVectorNumElements(); + unsigned NumElems = VT.getVectorNumElements(); DebugLoc dl = Op.getDebugLoc(); // Extract the LHS vectors @@ -10583,7 +10583,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, return SDValue(); if (!Subtarget->hasAVX2()) { // needs to be split - int NumElems = VT.getVectorNumElements(); + unsigned NumElems = VT.getVectorNumElements(); // Extract the LHS vectors SDValue LHS = Op.getOperand(0); @@ -12893,10 +12893,10 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N, /// inserting the result into the low part of a new 256-bit vector static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { EVT VT = SVOp->getValueType(0); - int NumElems = VT.getVectorNumElements(); + unsigned NumElems = VT.getVectorNumElements(); // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> - for (int i = 0, j = NumElems/2; i < NumElems/2; ++i, ++j) + for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j) if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || SVOp->getMaskElt(j) >= 0) return false; @@ -12909,10 +12909,10 @@ static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { /// inserting the result into the high part of a new 256-bit vector static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { EVT VT = SVOp->getValueType(0); - int NumElems = VT.getVectorNumElements(); + 
unsigned NumElems = VT.getVectorNumElements(); // vector_shuffle or - for (int i = NumElems/2, j = 0; i < NumElems; ++i, ++j) + for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j) if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || SVOp->getMaskElt(j) >= 0) return false; @@ -12929,7 +12929,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); EVT VT = SVOp->getValueType(0); - int NumElems = VT.getVectorNumElements(); + unsigned NumElems = VT.getVectorNumElements(); if (V1.getOpcode() == ISD::CONCAT_VECTORS && V2.getOpcode() == ISD::CONCAT_VECTORS) { @@ -12954,7 +12954,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, // To match the shuffle mask, the first half of the mask should // be exactly the first vector, and all the rest a splat with the // first element of the second one. - for (int i = 0; i < NumElems/2; ++i) + for (unsigned i = 0; i != NumElems/2; ++i) if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) return SDValue(); -- cgit v1.1 From d77d2feb7a5cb63f6ea55cd4695d38fc3b7c8560 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 29 Apr 2012 20:22:05 +0000 Subject: Simplify code a bit. No functional change intended. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155798 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 862a157..8da52b8 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -6956,14 +6956,13 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl); } - if (Op.getValueType() == MVT::v1i64 && + if (OpVT == MVT::v1i64 && Op.getOperand(0).getValueType() == MVT::i64) return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); - assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 && - "Expected an SSE type!"); - return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), + assert(OpVT.getSizeInBits() == 128 && "Expected an SSE type!"); + return DAG.getNode(ISD::BITCAST, dl, OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); } -- cgit v1.1 From 6610b1db55bedaa9e96bf9ff49a19b1a91bd5370 Mon Sep 17 00:00:00 2001 From: Jakub Staszak Date: Sun, 29 Apr 2012 20:52:53 +0000 Subject: Remove unneeded casts. No functionality change. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155800 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 8da52b8..6508e5a 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -701,8 +701,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // First set operation action for all vector types to either promote // (for widening) or expand (for scalarization). Then we will selectively // turn on ones that can be effectively codegen'd. 
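// Illustrative effect, not part of this diff: ISD::ADD on MVT::v16i8
// starts out Expand from this loop and becomes Legal only where a later
// subtarget-feature block explicitly flips it, so nothing the hardware
// cannot do is ever Legal by default.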
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { + for (int VT = MVT::FIRST_VECTOR_VALUETYPE; + VT <= MVT::LAST_VECTOR_VALUETYPE; ++VT) { setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); @@ -760,8 +760,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand); - for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) + for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE; + InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) setTruncStoreAction((MVT::SimpleValueType)VT, (MVT::SimpleValueType)InnerVT, Expand); setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); @@ -870,7 +870,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom); // Custom lower build_vector, vector_shuffle, and extract_vector_elt. - for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) { + for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { EVT VT = (MVT::SimpleValueType)i; // Do not attempt to custom lower non-power-of-2 vectors if (!isPowerOf2_32(VT.getVectorNumElements())) @@ -899,7 +899,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. - for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) { + for (int i = MVT::v16i8; i != MVT::v2i64; i++) { MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; EVT VT = SVT; @@ -1117,8 +1117,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } // Custom lower several nodes for 256-bit types. - for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { + for (int i = MVT::FIRST_VECTOR_VALUETYPE; + i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; EVT VT = SVT; @@ -1140,7 +1140,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64. - for (unsigned i = (unsigned)MVT::v32i8; i != (unsigned)MVT::v4i64; ++i) { + for (int i = MVT::v32i8; i != MVT::v4i64; ++i) { MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; EVT VT = SVT; @@ -1163,8 +1163,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion // of this type with custom code. 
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; - VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT++) { + for (int VT = MVT::FIRST_VECTOR_VALUETYPE; + VT != MVT::LAST_VECTOR_VALUETYPE; VT++) { setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, Custom); } -- cgit v1.1 From 7d1e3dcf71406eec040cff90a38917a4ec5c87cf Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 30 Apr 2012 05:17:10 +0000 Subject: No need to normalize index before calling Extract128BitVector git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155811 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 6508e5a..b1b6ec4 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4330,9 +4330,8 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { // Extract the 128-bit part containing the splat element and update // the splat element index when it refers to the higher register. if (Size == 256) { - unsigned Idx = (EltNo >= NumElems/2) ? NumElems/2 : 0; - V1 = Extract128BitVector(V1, Idx, DAG, dl); - if (Idx > 0) + V1 = Extract128BitVector(V1, EltNo, DAG, dl); + if (EltNo >= NumElems/2) EltNo -= NumElems/2; } @@ -6760,11 +6759,12 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, unsigned IdxVal = cast(Idx)->getZExtValue(); // Get the 128-bit vector. - bool Upper = IdxVal >= NumElems/2; - Vec = Extract128BitVector(Vec, Upper ? NumElems/2 : 0, DAG, dl); + Vec = Extract128BitVector(Vec, IdxVal, DAG, dl); + if (IdxVal >= NumElems/2) + IdxVal -= NumElems/2; return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, - Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : Idx); + DAG.getConstant(IdxVal, MVT::i32)); } assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length"); @@ -6906,16 +6906,15 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { // Get the desired 128-bit vector half. unsigned NumElems = VT.getVectorNumElements(); unsigned IdxVal = cast(N2)->getZExtValue(); - bool Upper = IdxVal >= NumElems/2; - unsigned Ins128Idx = Upper ? NumElems/2 : 0; - SDValue V = Extract128BitVector(N0, Ins128Idx, DAG, dl); + SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl); // Insert the element into the desired half. - V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, - N1, Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : N2); + bool Upper = IdxVal >= NumElems/2; + V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1, + DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal, MVT::i32)); // Insert the changed part back to the 256-bit vector - return Insert128BitVector(N0, V, Ins128Idx, DAG, dl); + return Insert128BitVector(N0, V, IdxVal, DAG, dl); } if (Subtarget->hasSSE41()) -- cgit v1.1 From 238ae31aa3409dd498e7f9f79d42b0b2fbc8144c Mon Sep 17 00:00:00 2001 From: Chad Rosier Date: Mon, 30 Apr 2012 17:47:15 +0000 Subject: Tidy up. No functional change intended. 
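[Illustrative sketch] The r155811 change above works because Extract128BitVector normalizes the element index itself: it only needs to know which 128-bit chunk the index falls in, and callers then rebase the index into that chunk. A minimal sketch of the index math (variable names assumed, not taken from the patch):

    // NumElems = elements in the 256-bit source; each 128-bit chunk holds half.
    unsigned ElemsPerChunk = NumElems / 2;
    unsigned ChunkStart = (IdxVal / ElemsPerChunk) * ElemsPerChunk; // 0 or NumElems/2
    // Extract128BitVector(V, IdxVal, ...) extracts the chunk starting at
    // ChunkStart; the caller then rebases:  IdxVal -= ChunkStart;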
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155832 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index b1b6ec4..83c357d 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3420,11 +3420,11 @@ static bool isMOVLPMask(ArrayRef Mask, EVT VT) { if (NumElems != 2 && NumElems != 4) return false; - for (unsigned i = 0; i != NumElems/2; ++i) + for (unsigned i = 0, e = NumElems/2; i != e; ++i) if (!isUndefOrEqual(Mask[i], i + NumElems)) return false; - for (unsigned i = NumElems/2; i != NumElems; ++i) + for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) if (!isUndefOrEqual(Mask[i], i)) return false; @@ -3440,12 +3440,12 @@ static bool isMOVLHPSMask(ArrayRef Mask, EVT VT) { || VT.getSizeInBits() > 128) return false; - for (unsigned i = 0; i != NumElems/2; ++i) + for (unsigned i = 0, e = NumElems/2; i != e; ++i) if (!isUndefOrEqual(Mask[i], i)) return false; - for (unsigned i = 0; i != NumElems/2; ++i) - if (!isUndefOrEqual(Mask[i + NumElems/2], i + NumElems)) + for (unsigned i = 0, e = NumElems/2; i != e; ++i) + if (!isUndefOrEqual(Mask[i + e], i + NumElems)) return false; return true; @@ -4109,7 +4109,7 @@ static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, for (unsigned i = 0, e = NumElems/2; i != e; ++i) if (!isUndefOrEqual(Mask[i], i)) return false; - for (unsigned i = NumElems/2; i != NumElems; ++i) + for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) if (!isUndefOrEqual(Mask[i], i+NumElems)) return false; return true; @@ -4257,9 +4257,8 @@ static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, SDValue V2) { unsigned NumElems = VT.getVectorNumElements(); - unsigned Half = NumElems/2; SmallVector Mask; - for (unsigned i = 0; i != Half; ++i) { + for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) { Mask.push_back(i + Half); Mask.push_back(i + NumElems + Half); } -- cgit v1.1 From 16a76519a5aa0e3a351cfde8e7236119ffd8b7fb Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Mon, 30 Apr 2012 22:51:25 +0000 Subject: X86: optimization for -(x != 0) This patch will optimize -(x != 0) on X86 FROM cmpl $0x01,%edi sbbl %eax,%eax notl %eax TO negl %edi sbbl %eax %eax git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155853 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 83c357d..660a1bd 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8589,6 +8589,22 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Y = isAllOnes(Op2) ? 
Op1 : Op2; SDValue CmpOp0 = Cmp.getOperand(0); + // further optimization for special cases + // (select (x != 0), -1, 0) -> neg & sbb + // (select (x == 0), 0, -1) -> neg & sbb + if (ConstantSDNode *YC = dyn_cast(Y)) + if (YC->isNullValue() && + (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { + SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); + SDValue Neg = DAG.getNode(ISD::SUB, DL, VTs, + DAG.getConstant(0, CmpOp0.getValueType()), + CmpOp0); + SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), + DAG.getConstant(X86::COND_B, MVT::i8), + SDValue(Neg.getNode(), 1)); + return Res; + } + Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); Cmp = ConvertCmpIfNecessary(Cmp, DAG); -- cgit v1.1 From 769ea2f93fa7fdd73f8388e863cf4dc9689d2e38 Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Tue, 1 May 2012 17:16:15 +0000 Subject: X86: optimization for max-like struct This patch will optimize the following cases on X86 (a > b) ? (a-b) : 0 (a >= b) ? (a-b) : 0 (b < a) ? (a-b) : 0 (b <= a) ? (a-b) : 0 FROM movl %edi, %ecx subl %esi, %ecx cmpl %edi, %esi movl $0, %eax cmovll %ecx, %eax TO xorl %eax, %eax subl %esi, %edi cmovll %eax, %edi movl %edi, %eax rdar: 10734411 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155919 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 40 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 660a1bd..8005e23 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8573,6 +8573,46 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { Cond = NewCond; } + // Handle the following cases related to max and min: + // (a > b) ? (a-b) : 0 + // (a >= b) ? (a-b) : 0 + // (b < a) ? (a-b) : 0 + // (b <= a) ? (a-b) : 0 + // Comparison is removed to use EFLAGS from SUB. + if (ConstantSDNode *C = dyn_cast(Op2)) + if (Cond.getOpcode() == X86ISD::SETCC && + Cond.getOperand(1).getOpcode() == X86ISD::CMP && + (Op1.getOpcode() == ISD::SUB || Op1.getOpcode() == X86ISD::SUB) && + C->getAPIntValue() == 0) { + SDValue Cmp = Cond.getOperand(1); + unsigned CC = cast(Cond.getOperand(0))->getZExtValue(); + if ((DAG.isEqualTo(Op1.getOperand(0), Cmp.getOperand(0)) && + DAG.isEqualTo(Op1.getOperand(1), Cmp.getOperand(1)) && + (CC == X86::COND_G || CC == X86::COND_GE || + CC == X86::COND_A || CC == X86::COND_AE)) || + (DAG.isEqualTo(Op1.getOperand(0), Cmp.getOperand(1)) && + DAG.isEqualTo(Op1.getOperand(1), Cmp.getOperand(0)) && + (CC == X86::COND_L || CC == X86::COND_LE || + CC == X86::COND_B || CC == X86::COND_BE))) { + + if (Op1.getOpcode() == ISD::SUB) { + SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i32); + SDValue New = DAG.getNode(X86ISD::SUB, DL, VTs, + Op1.getOperand(0), Op1.getOperand(1)); + DAG.ReplaceAllUsesWith(Op1, New); + Op1 = New; + } + + SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); + unsigned NewCC = (CC == X86::COND_G || CC == X86::COND_GE || + CC == X86::COND_L || + CC == X86::COND_LE) ? 
X86::COND_GE : X86::COND_AE; + SDValue Ops[] = { Op2, Op1, DAG.getConstant(NewCC, MVT::i8), + SDValue(Op1.getNode(), 1) }; + return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); + } + } + // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y -- cgit v1.1 From a9a568a79dbaf7315db863b4808d31ad9f5f91dc Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 2 May 2012 08:03:44 +0000 Subject: Add support for selecting AVX2 vpshuflw and vpshufhw. Add decoding support for AsmPrinter. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155982 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 46 ++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 12 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 8005e23..7ab4b26 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3196,8 +3196,8 @@ static bool isPSHUFDMask(ArrayRef Mask, EVT VT) { /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that /// is suitable for input to PSHUFHW. -static bool isPSHUFHWMask(ArrayRef Mask, EVT VT) { - if (VT != MVT::v8i16) +static bool isPSHUFHWMask(ArrayRef Mask, EVT VT, bool HasAVX2) { + if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) return false; // Lower quadword copied in order or undef. @@ -3206,16 +3206,27 @@ static bool isPSHUFHWMask(ArrayRef Mask, EVT VT) { // Upper quadword shuffled. for (unsigned i = 4; i != 8; ++i) - if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7)) + if (!isUndefOrInRange(Mask[i], 4, 8)) return false; + if (VT == MVT::v16i16) { + // Lower quadword copied in order or undef. + if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) + return false; + + // Upper quadword shuffled. + for (unsigned i = 12; i != 16; ++i) + if (!isUndefOrInRange(Mask[i], 12, 16)) + return false; + } + return true; } /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that /// is suitable for input to PSHUFLW. -static bool isPSHUFLWMask(ArrayRef Mask, EVT VT) { - if (VT != MVT::v8i16) +static bool isPSHUFLWMask(ArrayRef Mask, EVT VT, bool HasAVX2) { + if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) return false; // Upper quadword copied in order. @@ -3224,9 +3235,20 @@ static bool isPSHUFLWMask(ArrayRef Mask, EVT VT) { // Lower quadword shuffled. for (unsigned i = 0; i != 4; ++i) - if (Mask[i] >= 4) + if (!isUndefOrInRange(Mask[i], 0, 4)) return false; + if (VT == MVT::v16i16) { + // Upper quadword copied in order. + if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) + return false; + + // Lower quadword shuffled. 
+ for (unsigned i = 8; i != 12; ++i) + if (!isUndefOrInRange(Mask[i], 8, 12)) + return false; + } + return true; } @@ -4405,12 +4427,12 @@ static bool getTargetShuffleMask(SDNode *N, EVT VT, break; case X86ISD::PSHUFHW: ImmN = N->getOperand(N->getNumOperands()-1); - DecodePSHUFHWMask(cast(ImmN)->getZExtValue(), Mask); + DecodePSHUFHWMask(VT, cast(ImmN)->getZExtValue(), Mask); IsUnary = true; break; case X86ISD::PSHUFLW: ImmN = N->getOperand(N->getNumOperands()-1); - DecodePSHUFLWMask(cast(ImmN)->getZExtValue(), Mask); + DecodePSHUFLWMask(VT, cast(ImmN)->getZExtValue(), Mask); IsUnary = true; break; case X86ISD::MOVSS: @@ -6581,12 +6603,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); } - if (isPSHUFHWMask(M, VT)) + if (isPSHUFHWMask(M, VT, HasAVX2)) return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, getShufflePSHUFHWImmediate(SVOp), DAG); - if (isPSHUFLWMask(M, VT)) + if (isPSHUFLWMask(M, VT, HasAVX2)) return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, getShufflePSHUFLWImmediate(SVOp), DAG); @@ -11376,8 +11398,8 @@ X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl &M, isMOVLMask(M, VT) || isSHUFPMask(M, VT, Subtarget->hasAVX()) || isPSHUFDMask(M, VT) || - isPSHUFHWMask(M, VT) || - isPSHUFLWMask(M, VT) || + isPSHUFHWMask(M, VT, Subtarget->hasAVX2()) || + isPSHUFLWMask(M, VT, Subtarget->hasAVX2()) || isPALIGNRMask(M, VT, Subtarget) || isUNPCKLMask(M, VT, Subtarget->hasAVX2()) || isUNPCKHMask(M, VT, Subtarget->hasAVX2()) || -- cgit v1.1 From e2849851b29c0ac02d4428cd006163966dbf1bbf Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Wed, 2 May 2012 15:24:32 +0000 Subject: Revert r155853 The commit is intended to fix rdar://10961709. But it is the root cause of PR12720. Revert it for now. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155992 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 7ab4b26..bbe2808 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8651,22 +8651,6 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Y = isAllOnes(Op2) ? Op1 : Op2; SDValue CmpOp0 = Cmp.getOperand(0); - // further optimization for special cases - // (select (x != 0), -1, 0) -> neg & sbb - // (select (x == 0), 0, -1) -> neg & sbb - if (ConstantSDNode *YC = dyn_cast(Y)) - if (YC->isNullValue() && - (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { - SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); - SDValue Neg = DAG.getNode(ISD::SUB, DL, VTs, - DAG.getConstant(0, CmpOp0.getValueType()), - CmpOp0); - SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), - DAG.getConstant(X86::COND_B, MVT::i8), - SDValue(Neg.getNode(), 1)); - return Res; - } - Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); Cmp = ConvertCmpIfNecessary(Cmp, DAG); -- cgit v1.1 From c0f0a93936a23a0935aa946a65095388b50c087f Mon Sep 17 00:00:00 2001 From: Preston Gurd Date: Wed, 2 May 2012 22:02:02 +0000 Subject: For Intel Atom, use ILP scheduling always, instead of ILP for 64 bit and Hybrid for 32 bit, since benchmarks show ILP scheduling is better most of the time. 
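[Illustrative sketch] Since the first two arms of the scheduling cascade in this patch pick the same preference, the test could equally be folded into one condition; an equivalent sketch:

    if (Subtarget->isAtom() || Subtarget->is64Bit())
      setSchedulingPreference(Sched::ILP);
    else
      setSchedulingPreference(Sched::RegPressure);

Keeping Atom as a separate arm, as the patch does, leaves room to retune it independently later.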
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156028 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index bbe2808..bdbdaf5 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -167,11 +167,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // For 64-bit since we have so many registers use the ILP scheduler, for // 32-bit code use the register pressure specific scheduling. - // For 32 bit Atom, use Hybrid (register pressure + latency) scheduling. - if (Subtarget->is64Bit()) + // For Atom, always use ILP scheduling. + if (Subtarget->isAtom()) + setSchedulingPreference(Sched::ILP); + else if (Subtarget->is64Bit()) setSchedulingPreference(Sched::ILP); - else if (Subtarget->isAtom()) - setSchedulingPreference(Sched::Hybrid); else setSchedulingPreference(Sched::RegPressure); setStackPointerRegisterToSaveRestore(X86StackPtr); -- cgit v1.1 From 6b28d356c56d656e8e4d23c71de80162bb2eba5e Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Thu, 3 May 2012 07:12:59 +0000 Subject: Fix 256-bit vpshuflw and vpshufhw immediate encoding to handle undefs in the lower half correctly. Missed in r155982. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156059 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 51 +++++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 17 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index bdbdaf5..cacdb52 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3904,9 +3904,8 @@ static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { for (unsigned i = 0; i != NumElts; ++i) { int Elt = N->getMaskElt(i); if (Elt < 0) continue; - Elt %= NumLaneElts; - unsigned ShAmt = i << Shift; - if (ShAmt >= 8) ShAmt -= 8; + Elt &= NumLaneElts - 1; + unsigned ShAmt = (i << Shift) % 8; Mask |= Elt << ShAmt; } @@ -3916,30 +3915,48 @@ static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { + EVT VT = N->getValueType(0); + + assert((VT == MVT::v8i16 || VT == MVT::v16i16) && + "Unsupported vector type for PSHUFHW"); + + unsigned NumElts = VT.getVectorNumElements(); + unsigned Mask = 0; - // 8 nodes, but we only care about the last 4. - for (unsigned i = 7; i >= 4; --i) { - int Val = N->getMaskElt(i); - if (Val >= 0) - Mask |= (Val - 4); - if (i != 4) - Mask <<= 2; + for (unsigned l = 0; l != NumElts; l += 8) { + // 8 nodes per lane, but we only care about the last 4. + for (unsigned i = 0; i < 4; ++i) { + int Elt = N->getMaskElt(l+i+4); + if (Elt < 0) continue; + Elt &= 0x3; // only 2-bits. + Mask |= Elt << (i * 2); + } } + return Mask; } /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 
static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { + EVT VT = N->getValueType(0); + + assert((VT == MVT::v8i16 || VT == MVT::v16i16) && + "Unsupported vector type for PSHUFHW"); + + unsigned NumElts = VT.getVectorNumElements(); + unsigned Mask = 0; - // 8 nodes, but we only care about the first 4. - for (int i = 3; i >= 0; --i) { - int Val = N->getMaskElt(i); - if (Val >= 0) - Mask |= Val; - if (i != 0) - Mask <<= 2; + for (unsigned l = 0; l != NumElts; l += 8) { + // 8 nodes per lane, but we only care about the first 4. + for (unsigned i = 0; i < 4; ++i) { + int Elt = N->getMaskElt(l+i); + if (Elt < 0) continue; + Elt &= 0x3; // only 2-bits + Mask |= Elt << (i * 2); + } } + return Mask; } -- cgit v1.1 From b607264550b6d992cb299b0a9c832a6ed49d483c Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Thu, 3 May 2012 07:26:59 +0000 Subject: Use 'unsigned' instead of 'int' in a few places dealing with counts of vector elements. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156060 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index cacdb52..60904d0 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3176,8 +3176,8 @@ static bool isUndefOrEqual(int Val, int CmpVal) { /// from position Pos and ending in Pos+Size, falls within the specified /// sequential range (L, L+Pos]. or is undef. static bool isSequentialOrUndefInRange(ArrayRef Mask, - int Pos, int Size, int Low) { - for (int i = Pos, e = Pos+Size; i != e; ++i, ++Low) + unsigned Pos, unsigned Size, int Low) { + for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low) if (!isUndefOrEqual(Mask[i], Low)) return false; return true; @@ -10670,7 +10670,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); EVT ExtraEltVT = ExtraVT.getVectorElementType(); - int ExtraNumElems = ExtraVT.getVectorNumElements(); + unsigned ExtraNumElems = ExtraVT.getVectorNumElements(); ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, ExtraNumElems/2); SDValue Extra = DAG.getValueType(ExtraVT); -- cgit v1.1 From 11ac1f81a8d3cb9c58ec93e424429df9dccffe36 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 4 May 2012 04:08:44 +0000 Subject: Simplify shuffle narrowing code a bit. No functional change intended. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156154 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 60904d0..023a6c4 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5914,41 +5914,35 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, static SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, DebugLoc dl) { - EVT VT = SVOp->getValueType(0); - SDValue V1 = SVOp->getOperand(0); - SDValue V2 = SVOp->getOperand(1); + MVT VT = SVOp->getValueType(0).getSimpleVT(); unsigned NumElems = VT.getVectorNumElements(); - unsigned NewWidth = (NumElems == 4) ? 
2 : 4; - EVT NewVT; - switch (VT.getSimpleVT().SimpleTy) { + MVT NewVT; + unsigned Scale; + switch (VT.SimpleTy) { default: llvm_unreachable("Unexpected!"); - case MVT::v4f32: NewVT = MVT::v2f64; break; - case MVT::v4i32: NewVT = MVT::v2i64; break; - case MVT::v8i16: NewVT = MVT::v4i32; break; - case MVT::v16i8: NewVT = MVT::v4i32; break; + case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break; + case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break; + case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break; + case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break; } - int Scale = NumElems / NewWidth; SmallVector MaskVec; - for (unsigned i = 0; i < NumElems; i += Scale) { + for (unsigned i = 0; i != NumElems; i += Scale) { int StartIdx = -1; - for (int j = 0; j < Scale; ++j) { + for (unsigned j = 0; j != Scale; ++j) { int EltIdx = SVOp->getMaskElt(i+j); if (EltIdx < 0) continue; - if (StartIdx == -1) - StartIdx = EltIdx - (EltIdx % Scale); - if (EltIdx != StartIdx + j) + if (StartIdx < 0) + StartIdx = (EltIdx / Scale); + if (EltIdx != (int)(StartIdx*Scale + j)) return SDValue(); } - if (StartIdx == -1) - MaskVec.push_back(-1); - else - MaskVec.push_back(StartIdx / Scale); + MaskVec.push_back(StartIdx); } - V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1); - V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2); + SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0)); + SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1)); return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); } -- cgit v1.1 From f3640d7ec1373f964fb998138e54d81cc21c7a22 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 4 May 2012 04:44:49 +0000 Subject: Allow v16i16 and v32i8 shuffles to be rewritten as narrower shuffles. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156156 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 023a6c4..15dd7d9 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5920,10 +5920,12 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, unsigned Scale; switch (VT.SimpleTy) { default: llvm_unreachable("Unexpected!"); - case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break; - case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break; - case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break; - case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break; + case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break; + case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break; + case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break; + case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break; + case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break; + case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break; } SmallVector MaskVec; @@ -6370,7 +6372,8 @@ X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { // If the shuffle can be profitably rewritten as a narrower shuffle, then // do it! 
- if (VT == MVT::v8i16 || VT == MVT::v16i8) { + if (VT == MVT::v8i16 || VT == MVT::v16i8 || + VT == MVT::v16i16 || VT == MVT::v32i8) { SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); if (NewOp.getNode()) return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); -- cgit v1.1 From 5da8a803779810578a896d44bfde28fd7567b2d4 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 4 May 2012 05:49:51 +0000 Subject: Simplify broadcast lowering code. No functional change intended. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156157 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 15dd7d9..ad87194 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4933,6 +4933,9 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); DebugLoc dl = Op.getDebugLoc(); + assert((VT.is128BitVector() || VT.is256BitVector()) && + "Unsupported vector type for broadcast."); + SDValue Ld; bool ConstSplatVal; @@ -4984,7 +4987,6 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { } bool Is256 = VT.getSizeInBits() == 256; - bool Is128 = VT.getSizeInBits() == 128; // Handle the broadcasting a single constant scalar from the constant pool // into a vector. On Sandybridge it is still better to load a constant vector @@ -4994,9 +4996,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { assert(!CVT.isVector() && "Must not broadcast a vector type"); unsigned ScalarSize = CVT.getSizeInBits(); - if ((Is256 && (ScalarSize == 32 || ScalarSize == 64)) || - (Is128 && (ScalarSize == 32))) { - + if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) { const Constant *C = 0; if (ConstantSDNode *CI = dyn_cast(Ld)) C = CI->getConstantIntValue(); @@ -5025,23 +5025,13 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { unsigned ScalarSize = Ld.getValueType().getSizeInBits(); - // VBroadcast to YMM - if (Is256 && (ScalarSize == 32 || ScalarSize == 64)) - return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); - - // VBroadcast to XMM - if (Is128 && (ScalarSize == 32)) + if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); // The integer check is needed for the 64-bit into 128-bit so it doesn't match - // double since there is vbroadcastsd xmm + // double since there is no vbroadcastsd xmm if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) { - // VBroadcast to YMM - if (Is256 && (ScalarSize == 8 || ScalarSize == 16)) - return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); - - // VBroadcast to XMM - if (Is128 && (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) + if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64) return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); } -- cgit v1.1 From 6643d9c1800df550c071baad9ad770a59d4dd903 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 4 May 2012 06:18:33 +0000 Subject: Fix up some spacing. No functional change. 
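[Worked example] In RewriteAsNarrowerShuffle (r156154/r156156 above), a shuffle narrows only if every group of Scale mask entries addresses a consecutive, Scale-aligned run of source elements; each group then collapses to one wider-element index. For Scale == 2 on v8i16 (mask values chosen for illustration):

    // v8i16 mask <2,3, 6,7, -1,-1, 0,1>  -> groups (2,3) (6,7) (u,u) (0,1);
    // every defined group starts at an even index, so it becomes
    // v4i32 mask <1, 3, -1, 0>            // entry = group StartIdx / Scale
    // v8i16 mask <3,2, ...> fails: 3 then 2 is not consecutive -> SDValue()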
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156158 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index ad87194..a0a0dee 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5008,8 +5008,8 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { SDValue CP = DAG.getConstantPool(C, getPointerTy()); unsigned Alignment = cast(CP)->getAlignment(); Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP, - MachinePointerInfo::getConstantPool(), - false, false, false, Alignment); + MachinePointerInfo::getConstantPool(), + false, false, false, Alignment); return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); } @@ -5739,10 +5739,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, int EltIdx = MaskVals[i]; if (EltIdx < 0) continue; - SDValue ExtOp = (EltIdx < 8) - ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, - DAG.getIntPtrConstant(EltIdx)) - : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, + SDValue ExtOp = (EltIdx < 8) ? + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, + DAG.getIntPtrConstant(EltIdx)) : + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, DAG.getIntPtrConstant(EltIdx - 8)); NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, DAG.getIntPtrConstant(i)); -- cgit v1.1 From 31a207a3b748261e3556328bd8a80a75ffbb698d Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 4 May 2012 06:39:13 +0000 Subject: Fix some loops to match coding standards. No functional change intended. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156159 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index a0a0dee..7872a49 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -899,7 +899,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. - for (int i = MVT::v16i8; i != MVT::v2i64; i++) { + for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; EVT VT = SVT; @@ -5196,7 +5196,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { // Turn it into a shuffle of zero and zero-extended scalar to vector. Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); SmallVector MaskVec; - for (unsigned i = 0; i < NumElems; i++) + for (unsigned i = 0; i != NumElems; ++i) MaskVec.push_back(i == Idx ? 0 : 1); return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); } @@ -9106,7 +9106,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, const Function *F = MF.getFunction(); for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); - I != E; I++) + I != E; ++I) if (I->hasNestAttr()) report_fatal_error("Cannot use segmented stacks with functions that " "have nested arguments."); @@ -14489,7 +14489,8 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, // Redistribute the loaded elements into the different locations. 
SmallVector ShuffleVec(NumElems * SizeRatio, -1); - for (unsigned i = 0; i < NumElems; i++) ShuffleVec[i*SizeRatio] = i; + for (unsigned i = 0; i != NumElems; ++i) + ShuffleVec[i*SizeRatio] = i; SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), @@ -14568,7 +14569,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); SmallVector ShuffleVec(NumElems * SizeRatio, -1); - for (unsigned i = 0; i < NumElems; i++ ) ShuffleVec[i] = i * SizeRatio; + for (unsigned i = 0; i != NumElems; ++i) + ShuffleVec[i] = i * SizeRatio; // Can't shuffle using an illegal type if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); @@ -14599,7 +14601,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, SDValue Ptr = St->getBasePtr(); // Perform one or more big stores into memory. - for (unsigned i = 0; i < (ToSz*NumElems)/StoreType.getSizeInBits() ; i++) { + for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StoreType, ShuffWide, DAG.getIntPtrConstant(i)); -- cgit v1.1 From aaf723dd2bccc052d2dd28e3cc4db76f2a3e2fb0 Mon Sep 17 00:00:00 2001 From: Benjamin Kramer Date: Sat, 5 May 2012 12:49:14 +0000 Subject: Add a new target hook "predictableSelectIsExpensive". This will be used to determine whether it's profitable to turn a select into a branch when the branch is likely to be predicted. Currently enabled for everything but Atom on X86 and Cortex-A9 devices on ARM. I'm not entirely happy with the name of this flag, suggestions welcome ;) git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156233 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 3 +++ 1 file changed, 3 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 7872a49..24d95a9 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1243,6 +1243,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setPrefLoopAlignment(4); // 2^4 bytes. benefitFromCodePlacementOpt = true; + // Predictable cmov don't hurt on atom because it's in-order. + predictableSelectIsExpensive = !Subtarget->isAtom(); + setPrefFunctionAlignment(4); // 2^4 bytes. } -- cgit v1.1 From bdcbcb3ae7c035459a2c6adad7e95dc377f22f22 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 6 May 2012 18:54:26 +0000 Subject: Add VPERMQ/VPERMPD to the list of target specific shuffles that can be looked through for DAG combine purposes. 
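[Illustrative sketch] VPERMQ/VPERMPD select each of the four 64-bit result elements with a 2-bit field of the immediate, so the decode this combine looks through is essentially (a sketch of what DecodeVPERMMask computes, not its exact code):

    // e.g. vpermq $0x1b  (binary 00 01 10 11)  decodes to mask <3,2,1,0>
    for (unsigned i = 0; i != 4; ++i)
      Mask.push_back((Imm >> (2 * i)) & 0x3);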
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156266 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 24d95a9..e282617 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2914,6 +2914,7 @@ static bool isTargetShuffle(unsigned Opcode) { case X86ISD::UNPCKH: case X86ISD::VPERMILP: case X86ISD::VPERM2X128: + case X86ISD::VPERMI: return true; } } @@ -4455,6 +4456,11 @@ static bool getTargetShuffleMask(SDNode *N, EVT VT, DecodePSHUFLWMask(VT, cast(ImmN)->getZExtValue(), Mask); IsUnary = true; break; + case X86ISD::VPERMI: + ImmN = N->getOperand(N->getNumOperands()-1); + DecodeVPERMMask(cast(ImmN)->getZExtValue(), Mask); + IsUnary = true; + break; case X86ISD::MOVSS: case X86ISD::MOVSD: { // The index 0 always comes from the first element of the second source, -- cgit v1.1 From d978c54e607fbcf426db20727d5fed71e1def2f6 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 6 May 2012 19:46:21 +0000 Subject: Use MVT instead of EVT as the argument to all the shuffle decode functions. Simplify some of the decode functions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156268 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e282617..3e63138 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4417,7 +4417,7 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the /// target specific opcode. Returns true if the Mask could be calculated. /// Sets IsUnary to true if only uses one source. -static bool getTargetShuffleMask(SDNode *N, EVT VT, +static bool getTargetShuffleMask(SDNode *N, MVT VT, SmallVectorImpl &Mask, bool &IsUnary) { unsigned NumElems = VT.getVectorNumElements(); SDValue ImmN; @@ -4518,20 +4518,21 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG, // Recurse into target specific vector shuffles to find scalars. if (isTargetShuffle(Opcode)) { - unsigned NumElems = VT.getVectorNumElements(); + MVT ShufVT = V.getValueType().getSimpleVT(); + unsigned NumElems = ShufVT.getVectorNumElements(); SmallVector ShuffleMask; SDValue ImmN; bool IsUnary; - if (!getTargetShuffleMask(N, VT, ShuffleMask, IsUnary)) + if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary)) return SDValue(); int Elt = ShuffleMask[Index]; if (Elt < 0) - return DAG.getUNDEF(VT.getVectorElementType()); + return DAG.getUNDEF(ShufVT.getVectorElementType()); SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0) - : N->getOperand(1); + : N->getOperand(1); return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1); } @@ -13266,7 +13267,8 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, SmallVector ShuffleMask; bool UnaryShuffle; - if (!getTargetShuffleMask(InVec.getNode(), VT, ShuffleMask, UnaryShuffle)) + if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask, + UnaryShuffle)) return SDValue(); // Select the input vector, guarding against out of range extract vector. 
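[Illustrative sketch] Passing MVT instead of EVT is safe here because target-specific shuffle nodes are in practice only created for legal types, and legal types are always "simple" (a plain enum with no LLVMContext behind it); getSimpleVT() asserts exactly that:

    EVT VT = V.getValueType();
    assert(VT.isSimple() && "Target shuffles only exist for legal types");
    MVT SVT = VT.getSimpleVT();   // cheap value type, no context needed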
-- cgit v1.1 From ed57984483b9268c30c71031fca07e71b985f169 Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Mon, 7 May 2012 18:06:23 +0000 Subject: X86: optimization for -(x != 0) This patch will optimize -(x != 0) on X86 FROM cmpl $0x01,%edi sbbl %eax,%eax notl %eax TO negl %edi sbbl %eax %eax In order to generate negl, I added patterns in Target/X86/X86InstrCompiler.td: def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>; rdar: 10961709 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156312 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 3e63138..8404c7a 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8665,6 +8665,22 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Y = isAllOnes(Op2) ? Op1 : Op2; SDValue CmpOp0 = Cmp.getOperand(0); + // Apply further optimizations for special cases + // (select (x != 0), -1, 0) -> neg & sbb + // (select (x == 0), 0, -1) -> neg & sbb + if (ConstantSDNode *YC = dyn_cast(Y)) + if (YC->isNullValue() && + (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { + SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); + SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, + DAG.getConstant(0, CmpOp0.getValueType()), + CmpOp0); + SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), + DAG.getConstant(X86::COND_B, MVT::i8), + SDValue(Neg.getNode(), 1)); + return Res; + } + Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); Cmp = ConvertCmpIfNecessary(Cmp, DAG); -- cgit v1.1 From 42726835e38dd1b41c587187ddd04f5c9276ff08 Mon Sep 17 00:00:00 2001 From: Chad Rosier Date: Mon, 7 May 2012 18:47:44 +0000 Subject: Fix a regression from r147481. This combine should only happen if there is a single use. rdar://11360370 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156316 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 8404c7a..32fb5ce 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -13050,16 +13050,18 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
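[Worked example] The neg/sbb pairing in r156312 above works because NEG sets the carry flag exactly when its operand is non-zero, and SBB of a register with itself evaluates to 0 - 0 - CF:

    // int f(int x) { return -(x != 0); }   // 0 when x == 0, else -1
    //   negl %edi          # CF = (x != 0)
    //   sbbl %eax, %eax    # eax = -CF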
if (LoadSDNode *Ld = dyn_cast(V1.getOperand(0))) { - SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); - SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; - SDValue ResNode = - DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, - Ld->getMemoryVT(), - Ld->getPointerInfo(), - Ld->getAlignment(), - false/*isVolatile*/, true/*ReadMem*/, - false/*WriteMem*/); - return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); + if (Ld->hasNUsesOfValue(1, 0)) { + SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); + SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; + SDValue ResNode = + DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, + Ld->getMemoryVT(), + Ld->getPointerInfo(), + Ld->getAlignment(), + false/*isVolatile*/, true/*ReadMem*/, + false/*WriteMem*/); + return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); + } } // Emit a zeroed vector and insert the desired subvector on its -- cgit v1.1 From 5fc2187a025bb77b9023239edf12868d833630fe Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Thu, 10 May 2012 12:22:05 +0000 Subject: Generate AVX/AVX2 shuffles even when there is a memory op somewhere else in the program. Starting r155461 we are able to select patterns for vbroadcast even when the load op is used by other users. Fix PR11900. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156539 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 4 ---- 1 file changed, 4 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 32fb5ce..4a312f4 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5029,10 +5029,6 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { if (!ISD::isNormalLoad(Ld.getNode())) return SDValue(); - // Reject loads that have uses of the chain result - if (Ld->hasAnyUseOfValue(1)) - return SDValue(); - unsigned ScalarSize = Ld.getValueType().getSizeInBits(); if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) -- cgit v1.1 From b210651654e64bec5eb14214a61da52c451a4044 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Thu, 10 May 2012 12:39:13 +0000 Subject: AVX2: Add an additional broadcast idiom. 
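[Illustrative sketch] The restriction deleted in r156539 above had rejected broadcasts whenever the load's chain result had other users, which blocked the PR11900 pattern. At the source level, the shape that can now select a memory-form broadcast is roughly (AVX intrinsics assumed for illustration):

    #include <immintrin.h>
    float Sink;
    __m256 splat_and_use(const float *P) {
      Sink = *P;                  // another user of the same load
      return _mm256_set1_ps(*P);  // may still select vbroadcastss (mem)
    }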
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156540 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4a312f4..a7e72e05 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4980,8 +4980,11 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { return SDValue(); SDValue Sc = Op.getOperand(0); - if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR) - return SDValue(); + if (Sc.getOpcode() == ISD::SCALAR_TO_VECTOR) + Ld = Sc.getOperand(0); + else if (Sc.getOpcode() == ISD::BUILD_VECTOR) + Ld = Sc.getOperand(0); + else return SDValue(); Ld = Sc.getOperand(0); ConstSplatVal = (Ld.getOpcode() == ISD::Constant || -- cgit v1.1 From b88e8dd31d1364d288d970f9187342a33b8de149 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Thu, 10 May 2012 12:50:02 +0000 Subject: Fix merge-typo and cleanup git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156541 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index a7e72e05..688c25c 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4980,11 +4980,9 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { return SDValue(); SDValue Sc = Op.getOperand(0); - if (Sc.getOpcode() == ISD::SCALAR_TO_VECTOR) - Ld = Sc.getOperand(0); - else if (Sc.getOpcode() == ISD::BUILD_VECTOR) - Ld = Sc.getOperand(0); - else return SDValue(); + if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && + Sc.getOpcode() != ISD::BUILD_VECTOR) + return SDValue(); Ld = Sc.getOperand(0); ConstSplatVal = (Ld.getOpcode() == ISD::Constant || -- cgit v1.1 From 228756c744a1f877f7150c8fc91e074ff58c9d66 Mon Sep 17 00:00:00 2001 From: Hans Wennborg Date: Fri, 11 May 2012 10:11:01 +0000 Subject: Implement initial-exec TLS model for 32-bit PIC x86 This fixes a TODO from 2007 :) Previously, LLVM would emit the wrong code here (see the update to test/CodeGen/X86/tls-pie.ll). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156611 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 42 +++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 16 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 688c25c..75e0588 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -7286,11 +7286,10 @@ LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, X86::RAX, X86II::MO_TLSGD); } -// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or -// "local exec" model. +// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model. static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT, TLSModel::Model model, - bool is64Bit) { + bool is64Bit, bool isPIC) { DebugLoc dl = GA->getDebugLoc(); // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 
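[Illustrative sketch] For reference, the 32-bit PIC initial-exec sequence this TLS patch enables looks roughly like the following (register choice illustrative):

    // __thread int x;  int get() { return x; }   built -fPIC, IE model:
    //   movl %gs:0, %eax                # thread pointer
    //   addl x@gotntpoff(%ebx), %eax    # + x's TP offset, loaded via the GOT
    //   movl (%eax), %eax               # the value of x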
@@ -7308,25 +7307,36 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, unsigned WrapperKind = X86ISD::Wrapper; if (model == TLSModel::LocalExec) { OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF; - } else if (is64Bit) { - assert(model == TLSModel::InitialExec); - OperandFlags = X86II::MO_GOTTPOFF; - WrapperKind = X86ISD::WrapperRIP; + } else if (model == TLSModel::InitialExec) { + if (is64Bit) { + OperandFlags = X86II::MO_GOTTPOFF; + WrapperKind = X86ISD::WrapperRIP; + } else { + OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF; + } } else { - assert(model == TLSModel::InitialExec); - OperandFlags = X86II::MO_INDNTPOFF; + llvm_unreachable("Unexpected model"); } - // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial - // exec) + // emit "addl x@ntpoff,%eax" (local exec) + // or "addl x@indntpoff,%eax" (initial exec) + // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic) SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0), GA->getOffset(), OperandFlags); SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); - if (model == TLSModel::InitialExec) - Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, - MachinePointerInfo::getGOT(), false, false, false, 0); + if (model == TLSModel::InitialExec) { + if (isPIC && !is64Bit) { + Offset = DAG.getNode(ISD::ADD, dl, PtrVT, + DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), + Offset); + } else { + Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, + MachinePointerInfo::getGOT(), false, false, false, + 0); + } + } // The address of the thread local variable is the add of the thread // pointer with the offset of the variable. @@ -7341,7 +7351,6 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { if (Subtarget->isTargetELF()) { // TODO: implement the "local dynamic" model - // TODO: implement the "initial exec"model for pic executables // If GV is an alias then use the aliasee for determining // thread-localness. @@ -7360,7 +7369,8 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { case TLSModel::InitialExec: case TLSModel::LocalExec: return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, - Subtarget->is64Bit()); + Subtarget->is64Bit(), + getTargetMachine().getRelocationModel() == Reloc::PIC_); } llvm_unreachable("Unknown TLS model."); } -- cgit v1.1 From b82b5abf78549119a88a106e161f32bcf04a2d41 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 18 May 2012 06:42:06 +0000 Subject: Simplify handling of v16i8 shuffles and fix a missed optimization. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157043 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 38 ++++++++------------------------------ 1 file changed, 8 insertions(+), 30 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 75e0588..e1f777b 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5770,21 +5770,11 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, DebugLoc dl = SVOp->getDebugLoc(); ArrayRef MaskVals = SVOp->getMask(); + bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; + // If we have SSSE3, case 1 is generated when all result bytes come from // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is // present, fall back to case 3. 
- // FIXME: kill V2Only once shuffles are canonizalized by getNode. - bool V1Only = true; - bool V2Only = true; - for (unsigned i = 0; i < 16; ++i) { - int EltIdx = MaskVals[i]; - if (EltIdx < 0) - continue; - if (EltIdx < 16) - V2Only = false; - else - V1Only = false; - } // If SSSE3, use 1 pshufb instruction per vector with elements in the result. if (TLI.getSubtarget()->hasSSSE3()) { @@ -5796,23 +5786,16 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, // Otherwise, we have elements from both input vectors, and must zero out // elements that come from V2 in the first mask, and V1 in the second mask // so that we can OR them together. - bool TwoInputs = !(V1Only || V2Only); for (unsigned i = 0; i != 16; ++i) { int EltIdx = MaskVals[i]; - if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) { - pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); - continue; - } + if (EltIdx < 0 || EltIdx >= 16) + EltIdx = 0x80; pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); } - // If all the elements are from V2, assign it to V1 and return after - // building the first pshufb. - if (V2Only) - V1 = V2; V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, &pshufbMask[0], 16)); - if (!TwoInputs) + if (V2IsUndef) return V1; // Calculate the shuffle mask for the second input, shuffle it, and @@ -5820,10 +5803,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, pshufbMask.clear(); for (unsigned i = 0; i != 16; ++i) { int EltIdx = MaskVals[i]; - if (EltIdx < 16) { - pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); - continue; - } + EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16; pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); } V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, @@ -5837,7 +5817,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, // the 16 different words that comprise the two doublequadword input vectors. V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); - SDValue NewV = V2Only ? V2 : V1; + SDValue NewV = V1; for (int i = 0; i != 8; ++i) { int Elt0 = MaskVals[i*2]; int Elt1 = MaskVals[i*2+1]; @@ -5847,9 +5827,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, continue; // This word of the result is already in the correct place, skip it. - if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1)) - continue; - if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17)) + if ((Elt0 == i*2) && (Elt1 == i*2+1)) continue; SDValue Elt0Src = Elt0 < 16 ? V1 : V2; -- cgit v1.1 From be97ae9e89ac45981ccd3faf5d47a14279b2f604 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 18 May 2012 07:07:36 +0000 Subject: Simplify code a bit. No functional change intended. 
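[Illustrative sketch] The v16i8 rewrite above (r157043) leans on PSHUFB's built-in zeroing: a mask byte with the high bit set writes zero, so a two-input byte shuffle becomes two PSHUFBs plus an OR. The per-byte semantics, as pseudocode:

    // within each 128-bit lane:
    //   dst[i] = (m[i] & 0x80) ? 0 : src[m[i] & 15]
    // two inputs: mask A uses 0x80 where the byte comes from V2, mask B
    // uses 0x80 where it comes from V1; result = pshufb(V1,A) | pshufb(V2,B)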
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157044 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e1f777b..439cd4f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5644,13 +5644,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, bool TwoInputs = V1Used && V2Used; for (unsigned i = 0; i != 8; ++i) { int EltIdx = MaskVals[i] * 2; - if (TwoInputs && (EltIdx >= 16)) { - pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); - continue; - } - pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8)); + int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx; + int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1; + pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); } V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, @@ -5664,13 +5661,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, pshufbMask.clear(); for (unsigned i = 0; i != 8; ++i) { int EltIdx = MaskVals[i] * 2; - if (EltIdx < 16) { - pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); - continue; - } - pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8)); + int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16; + int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15; + pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); } V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, -- cgit v1.1 From 4fc8a5de44c2fc3ce82d5467bd96dfe25aa3a0e9 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Sat, 19 May 2012 19:57:37 +0000 Subject: Add support for additional in-reg vbroadcast patterns git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157127 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 439cd4f..203c873 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5026,12 +5026,18 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { } } + bool IsLoad = ISD::isNormalLoad(Ld.getNode()); + unsigned ScalarSize = Ld.getValueType().getSizeInBits(); + + // Handle AVX2 in-register broadcasts. + if (!IsLoad && Subtarget->hasAVX2() && + (ScalarSize == 32 || (Is256 && ScalarSize == 64))) + return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); + // The scalar source must be a normal load. 
- if (!ISD::isNormalLoad(Ld.getNode())) + if (!IsLoad) return SDValue(); - unsigned ScalarSize = Ld.getValueType().getSizeInBits(); - if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); -- cgit v1.1 From 87d35e8c715f5116b072ef8fd742c0cfb6fb5ce4 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Sat, 19 May 2012 20:30:08 +0000 Subject: On Haswell, prefer storing YMM registers using a single instruction. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157129 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 203c873..2810f42 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14532,13 +14532,12 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI = DAG.getTargetLoweringInfo(); // If we are saving a concatenation of two XMM registers, perform two stores. - // This is better in Sandy Bridge cause one 256-bit mem op is done via two - // 128-bit ones. If in the future the cost becomes only one memory access the - // first version would be better. - if (VT.getSizeInBits() == 256 && + // On Sandy Bridge, 256-bit memory operations are executed by two + // 128-bit ports. However, on Haswell it is better to issue a single 256-bit + // memory operation. + if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2() && StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && StoredVal.getNumOperands() == 2) { - SDValue Value0 = StoredVal.getOperand(0); SDValue Value1 = StoredVal.getOperand(1); -- cgit v1.1 From 8ae97baef204508272b60f63da0ccf97ecf09b01 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 21 May 2012 06:40:16 +0000 Subject: Allow 256-bit shuffles to still be split even if only half of the shuffle comes from two 128-bit pieces.
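For intuition, a minimal sketch of the per-lane bookkeeping this change builds on (hypothetical helper, not the committed code): each 128-bit lane of the result may draw from at most two 128-bit pieces of the two inputs, and once a third piece is needed the lane is now built element by element instead of giving up on the whole shuffle.

    #include <cstddef>

    // Returns true if a lane's mask references more than two 128-bit input
    // pieces, i.e. the new UseBuildVector fallback is required.
    static bool laneNeedsBuildVector(const int *LaneMask, size_t NumLaneElems) {
      int InputUsed[2] = { -1, -1 };       // 128-bit pieces seen so far
      for (size_t i = 0; i != NumLaneElems; ++i) {
        int Idx = LaneMask[i];
        if (Idx < 0)
          continue;                        // undef element, any source works
        int Input = Idx / static_cast<int>(NumLaneElems); // which piece
        size_t Op = 0;
        for (; Op != 2; ++Op) {
          if (InputUsed[Op] == Input)
            break;                         // already recorded
          if (InputUsed[Op] < 0) {
            InputUsed[Op] = Input;         // record a new piece
            break;
          }
        }
        if (Op == 2)
          return true;                     // third distinct piece found
      }
      return false;
    }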
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157175 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 59 ++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 15 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 2810f42..4e4cf27 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4061,13 +4061,14 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, SmallVector MaskVec; for (unsigned i = 0; i != NumElems; ++i) { - int idx = SVOp->getMaskElt(i); - if (idx < 0) - MaskVec.push_back(idx); - else if (idx < (int)NumElems) - MaskVec.push_back(idx + NumElems); - else - MaskVec.push_back(idx - NumElems); + int Idx = SVOp->getMaskElt(i); + if (Idx >= 0) { + if (Idx < (int)NumElems) + Idx += NumElems; + else + Idx -= NumElems; + } + MaskVec.push_back(Idx); } return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), SVOp->getOperand(0), &MaskVec[0]); @@ -5970,14 +5971,15 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { DebugLoc dl = SVOp->getDebugLoc(); MVT EltVT = VT.getVectorElementType().getSimpleVT(); EVT NVT = MVT::getVectorVT(EltVT, NumLaneElems); - SDValue Shufs[2]; + SDValue Output[2]; SmallVector Mask; for (unsigned l = 0; l < 2; ++l) { // Build a shuffle mask for the output, discovering on the fly which // input vectors to use as shuffle operands (recorded in InputUsed). // If building a suitable shuffle vector proves too hard, then bail - // out with useBuildVector set. + // out with UseBuildVector set. + bool UseBuildVector = false; int InputUsed[2] = { -1, -1 }; // Not yet discovered. unsigned LaneStart = l * NumLaneElems; for (unsigned i = 0; i != NumLaneElems; ++i) { @@ -6009,17 +6011,44 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { } if (OpNo >= array_lengthof(InputUsed)) { - // More than two input vectors used! Give up. - return SDValue(); + // More than two input vectors used! Give up on trying to create a + // shuffle vector. Insert all elements into a BUILD_VECTOR instead. + UseBuildVector = true; + break; } // Add the mask index for the new shuffle vector. Mask.push_back(Idx + OpNo * NumLaneElems); } - if (InputUsed[0] < 0) { + if (UseBuildVector) { + SmallVector SVOps; + for (unsigned i = 0; i != NumLaneElems; ++i) { + // The mask element. This indexes into the input. + int Idx = SVOp->getMaskElt(i+LaneStart); + if (Idx < 0) { + SVOps.push_back(DAG.getUNDEF(EltVT)); + continue; + } + + // The input vector this mask element indexes into. + int Input = Idx / NumElems; + + // Turn the index into an offset from the start of the input vector. + Idx -= Input * NumElems; + + // Extract the vector element by hand. + SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, + SVOp->getOperand(Input), + DAG.getIntPtrConstant(Idx))); + } + + // Construct the output using a BUILD_VECTOR. + Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0], + SVOps.size()); + } else if (InputUsed[0] < 0) { // No input vectors were used! The result is undefined. 
- Shufs[l] = DAG.getUNDEF(NVT); + Output[l] = DAG.getUNDEF(NVT); } else { SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2), (InputUsed[0] % 2) * NumLaneElems, @@ -6029,14 +6058,14 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2), (InputUsed[1] % 2) * NumLaneElems, DAG, dl); // At least one input vector was used. Create a new shuffle vector. - Shufs[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]); + Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]); } Mask.clear(); } // Concatenate the result back - return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Shufs[0], Shufs[1]); + return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]); } /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with -- cgit v1.1 From 85b9e56bac85b30ede70e46ef9f60e4dec3b88f3 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 22 May 2012 06:09:38 +0000 Subject: Fix constant used for pshufb mask when lowering v16i8 shuffles. Bug introduced in r157043. Fixes PR12908. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157236 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4e4cf27..50a2d78 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5805,7 +5805,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, for (unsigned i = 0; i != 16; ++i) { int EltIdx = MaskVals[i]; EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16; - pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); } V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, DAG.getNode(ISD::BUILD_VECTOR, dl, -- cgit v1.1 From d2ea0e10cbd158c93fb870cdd03001b9cd1156b8 Mon Sep 17 00:00:00 2001 From: Justin Holewinski Date: Fri, 25 May 2012 16:35:28 +0000 Subject: Change interface for TargetLowering::LowerCallTo and TargetLowering::LowerCall to pass around a struct instead of a large set of individual values. This cleans up the interface and allows more information to be added to the struct for future targets without requiring changes to each and every target. 
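The shape of the new interface, as a hedged sketch (hypothetical stand-in types, not the real TargetLowering::CallLoweringInfo): a parameter object replaces the long argument list, so adding a field later touches the struct and its users rather than every target's override.

    #include <vector>

    struct CallInfo {               // stand-in for CallLoweringInfo
      void *Chain = nullptr;        // stands in for SDValue Chain
      void *Callee = nullptr;       // stands in for SDValue Callee
      bool IsVarArg = false;
      bool IsTailCall = false;      // targets may clear this by reference
      std::vector<int> Outs;        // outgoing argument descriptions
      std::vector<int> Ins;         // incoming result descriptions
    };

    // Each target unpacks only the fields it needs, as the X86 diff below
    // does with CLI.DAG, CLI.Outs, CLI.IsTailCall and friends.
    void lowerCall(CallInfo &CLI) {
      bool &isTailCall = CLI.IsTailCall;
      if (CLI.IsVarArg)
        isTailCall = false;         // example of a target-side adjustment
    }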
NV_CONTRIB git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157479 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 50a2d78..58af729 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2130,14 +2130,19 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, } SDValue -X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, - CallingConv::ID CallConv, bool isVarArg, - bool doesNotRet, bool &isTailCall, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, +X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { + SelectionDAG &DAG = CLI.DAG; + DebugLoc &dl = CLI.DL; + SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; + SmallVector<SDValue, 32> &OutVals = CLI.OutVals; + SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + CallingConv::ID CallConv = CLI.CallConv; + bool &isTailCall = CLI.IsTailCall; + bool isVarArg = CLI.IsVarArg; + MachineFunction &MF = DAG.getMachineFunction(); bool Is64Bit = Subtarget->is64Bit(); bool IsWin64 = Subtarget->isTargetWin64(); -- cgit v1.1 From ee66b417efcf4469dcd9f9ab3f503d27ec66f804 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Thu, 31 May 2012 17:28:20 +0000 Subject: Add support for return value promotion in X86 calling conventions. Patch by Yiannis Tsiouris! git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157757 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 58af729..f3b66e4 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1504,6 +1504,16 @@ X86TargetLowering::LowerReturn(SDValue Chain, SDValue ValToCopy = OutVals[i]; EVT ValVT = ValToCopy.getValueType(); + // Promote values to the appropriate types + if (VA.getLocInfo() == CCValAssign::SExt) + ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); + else if (VA.getLocInfo() == CCValAssign::ZExt) + ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); + else if (VA.getLocInfo() == CCValAssign::AExt) + ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); + else if (VA.getLocInfo() == CCValAssign::BCvt) + ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); + // If this is x86-64, and we disabled SSE, we can't return FP values, // or SSE or MMX vectors. if ((ValVT == MVT::f32 || ValVT == MVT::f64 || -- cgit v1.1 From f0234fcbc9be9798c10dedc3e3c134b7afbc6511 Mon Sep 17 00:00:00 2001 From: Hans Wennborg Date: Fri, 1 Jun 2012 16:27:21 +0000 Subject: Implement the local-dynamic TLS model for x86 (PR3985) This implements codegen support for accesses to thread-local variables using the local-dynamic model, and adds a clean-up pass so that the base address for the TLS block can be re-used between local-dynamic accesses on an execution path.
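A source-level illustration of what the model buys (illustrative example, not from the commit): for internal-linkage thread-locals accessed on the same path, local-dynamic computes the module's TLS base once and then addresses each variable at a constant x@dtpoff offset, instead of one general-dynamic call per variable.

    // Internal linkage makes these candidates for the local-dynamic model.
    static __thread int x;
    static __thread int y;

    int sum_tls() {
      // One TLSBASEADDR (__tls_get_addr call) can serve both accesses; the
      // clean-up pass removes the redundant second base computation.
      return x + y;
    }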
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157818 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 57 +++++++++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 7 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index f3b66e4..8d0e843 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -7263,7 +7263,7 @@ X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { static SDValue GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, - unsigned char OperandFlags) { + unsigned char OperandFlags, bool LocalDynamic = false) { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); DebugLoc dl = GA->getDebugLoc(); @@ -7271,12 +7271,16 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, GA->getValueType(0), GA->getOffset(), OperandFlags); + + X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR + : X86ISD::TLSADDR; + if (InFlag) { SDValue Ops[] = { Chain, TGA, *InFlag }; - Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3); + Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 3); } else { SDValue Ops[] = { Chain, TGA }; - Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2); + Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 2); } // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. @@ -7308,6 +7312,45 @@ LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, X86::RAX, X86II::MO_TLSGD); } +static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, + SelectionDAG &DAG, + const EVT PtrVT, + bool is64Bit) { + DebugLoc dl = GA->getDebugLoc(); + + // Get the start address of the TLS block for this module. + X86MachineFunctionInfo* MFI = DAG.getMachineFunction() + .getInfo(); + MFI->incNumLocalDynamicTLSAccesses(); + + SDValue Base; + if (is64Bit) { + Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX, + X86II::MO_TLSLD, /*LocalDynamic=*/true); + } else { + SDValue InFlag; + SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, + DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag); + InFlag = Chain.getValue(1); + Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, + X86II::MO_TLSLDM, /*LocalDynamic=*/true); + } + + // Note: the CleanupLocalDynamicTLSPass will remove redundant computations + // of Base. + + // Build x@dtpoff. + unsigned char OperandFlags = X86II::MO_DTPOFF; + unsigned WrapperKind = X86ISD::Wrapper; + SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, + GA->getValueType(0), + GA->getOffset(), OperandFlags); + SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); + + // Add x@dtpoff with the base. + return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base); +} + // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model. static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT, TLSModel::Model model, @@ -7372,8 +7415,6 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { const GlobalValue *GV = GA->getGlobal(); if (Subtarget->isTargetELF()) { - // TODO: implement the "local dynamic" model - // If GV is an alias then use the aliasee for determining // thread-localness. 
if (const GlobalAlias *GA = dyn_cast(GV)) @@ -7383,11 +7424,12 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { switch (model) { case TLSModel::GeneralDynamic: - case TLSModel::LocalDynamic: // not implemented if (Subtarget->is64Bit()) return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); - + case TLSModel::LocalDynamic: + return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(), + Subtarget->is64Bit()); case TLSModel::InitialExec: case TLSModel::LocalExec: return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, @@ -11257,6 +11299,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; case X86ISD::FRCP: return "X86ISD::FRCP"; case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; + case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; -- cgit v1.1 From d9b0b025612992a0b724eeca8bdf10b1d7a5c355 Mon Sep 17 00:00:00 2001 From: Benjamin Kramer Date: Sat, 2 Jun 2012 10:20:22 +0000 Subject: Fix typos found by http://github.com/lyda/misspell-check git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157885 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 8d0e843..4baa1a6 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3191,7 +3191,7 @@ static bool isUndefOrEqual(int Val, int CmpVal) { return false; } -/// isSequentialOrUndefInRange - Return true if every element in Mask, begining +/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning /// from position Pos and ending in Pos+Size, falls within the specified /// sequential range (L, L+Pos]. or is undef. static bool isSequentialOrUndefInRange(ArrayRef Mask, @@ -6333,7 +6333,7 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); if (NumElems == 4) - // If we don't care about the second element, procede to use movss. + // If we don't care about the second element, proceed to use movss. if (SVOp->getMaskElt(1) != -1) return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); } -- cgit v1.1 From 87253c2ebdae320ee24a2cbf10f8de9b3acba763 Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Thu, 7 Jun 2012 00:42:47 +0000 Subject: X86: replace SUB with CMP if possible This patch will optimize the following movq %rdi, %rax subq %rsi, %rax cmovsq %rsi, %rdi movq %rdi, %rax to cmpq %rsi, %rdi cmovsq %rsi, %rdi movq %rdi, %rax Perform this optimization if the actual result of SUB is not used. 
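For illustration (hypothetical source, not from the commit), a function whose subtraction result feeds only the comparison, which is the situation this transformation targets:

    // The value of a - b is consumed only by the sign test, so the SUB that
    // computes it can be emitted as a flag-only CMP once the combine fires.
    long smin(long a, long b) {
      long d = a - b;       // result unused except for the comparison below
      return d < 0 ? a : b; // lowers to cmp + cmov rather than sub + cmov
    }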
rdar: 11540023 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158126 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4baa1a6..57f61ab 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8271,7 +8271,13 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, // Otherwise use a regular EFLAGS-setting instruction. switch (Op.getNode()->getOpcode()) { default: llvm_unreachable("unexpected operator!"); - case ISD::SUB: Opcode = X86ISD::SUB; break; + case ISD::SUB: + // If the only use of SUB is EFLAGS, use CMP instead. + if (Op.hasOneUse()) + Opcode = X86ISD::CMP; + else + Opcode = X86ISD::SUB; + break; case ISD::OR: Opcode = X86ISD::OR; break; case ISD::XOR: Opcode = X86ISD::XOR; break; case ISD::AND: Opcode = X86ISD::AND; break; @@ -8297,6 +8303,13 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, DAG.getConstant(0, Op.getValueType())); + if (Opcode == X86ISD::CMP) { + SDValue New = DAG.getNode(Opcode, dl, MVT::i32, Op.getOperand(0), + Op.getOperand(1)); + DAG.ReplaceAllUsesWith(Op, New); + return SDValue(New.getNode(), 0); + } + SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); SmallVector<SDValue, 4> Ops; for (unsigned i = 0; i != NumOperands; ++i) -- cgit v1.1 From e6fc9d40b37495056fa9fcd2fea188cb98726035 Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Thu, 7 Jun 2012 19:27:33 +0000 Subject: PR13046: we can't replace usage of SUB with CMP in the lowering phase. It will cause an assertion failure later on. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158160 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 57f61ab..7b2f13b 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -8306,7 +8306,8 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, if (Opcode == X86ISD::CMP) { SDValue New = DAG.getNode(Opcode, dl, MVT::i32, Op.getOperand(0), Op.getOperand(1)); - DAG.ReplaceAllUsesWith(Op, New); + // We can't replace usage of SUB with CMP. + // The SUB node will be removed later because there is no use of it. return SDValue(New.getNode(), 0); } -- cgit v1.1 From bdcae3825633082774ec702fd05cc556ed683ca6 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Thu, 7 Jun 2012 20:53:48 +0000 Subject: Do not optimize the used bits of the x86 vselect condition operand, when the condition operand is a vector of 1-bit predicates. This may happen on MIC devices.
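A scalar sketch of the guard being added (simplified model, not the committed code): the combine assumes each condition element is a byte-or-wider sign mask and demands only its top bit, an assumption that is meaningless for vectors of 1-bit predicates.

    #include <cassert>
    #include <cstdint>

    // Returns the demanded bits for one vselect condition element, or 0 to
    // signal that the combine must be skipped (i1 mask-register conditions).
    uint64_t vselectCondDemandedBits(unsigned BitWidth) {
      if (BitWidth == 1)
        return 0;                     // mask registers: leave untouched
      assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
      return 1ULL << (BitWidth - 1);  // only the sign bit is consumed
    }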
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158168 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 7b2f13b..029b800 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -13521,8 +13521,6 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - - DebugLoc DL = N->getDebugLoc(); SDValue Cond = N->getOperand(0); // Get the LHS/RHS of the select. @@ -13804,9 +13802,13 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // to simplify previous instructions. const TargetLowering &TLI = DAG.getTargetLoweringInfo(); if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() && - !DCI.isBeforeLegalize() && - TLI.isOperationLegal(ISD::VSELECT, VT)) { + !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) { unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits(); + + // Don't optimize vector selects that map to mask-registers. + if (BitWidth == 1) + return SDValue(); + assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1); -- cgit v1.1 From 9236362a64ce1609222512fe3f17eeb886a3ddea Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Thu, 7 Jun 2012 22:39:10 +0000 Subject: X86: optimize generated code for integer ABS This patch will generate the following for integer ABS: movl %edi, %eax negl %eax cmovll %edi, %eax INSTEAD OF movl %edi, %ecx sarl $31, %ecx leal (%rdi,%rcx), %eax xorl %ecx, %eax There exists a target-independent DAG combine for integer ABS, which converts integer ABS to sar+add+xor. For X86, we match this pattern back to neg+cmov. This is implemented in PerformXorCombine. rdar://10695237 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158175 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 46 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 029b800..99bb470 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1227,8 +1227,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setTargetDAGCombine(ISD::FP_TO_SINT); if (Subtarget->is64Bit()) setTargetDAGCombine(ISD::MUL); - if (Subtarget->hasBMI()) - setTargetDAGCombine(ISD::XOR); + setTargetDAGCombine(ISD::XOR); computeRegisterProperties(); @@ -14507,6 +14506,41 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } +// Generate NEG and CMOV for integer abs. +static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { + EVT VT = N->getValueType(0); + + // Since X86 does not have CMOV for 8-bit integer, we don't convert + // 8-bit integer abs to NEG and CMOV. + if (VT.isInteger() && VT.getSizeInBits() == 8) + return SDValue(); + + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + DebugLoc DL = N->getDebugLoc(); + + // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) + // and change it to SUB and CMOV. 
+ if (VT.isInteger() && N->getOpcode() == ISD::XOR && + N0.getOpcode() == ISD::ADD && + N0.getOperand(1) == N1 && + N1.getOpcode() == ISD::SRA && + N1.getOperand(0) == N0.getOperand(0)) + if (ConstantSDNode *Y1C = dyn_cast(N1.getOperand(1))) + if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { + // Generate SUB & CMOV. + SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), + DAG.getConstant(0, VT), N0.getOperand(0)); + + SDValue Ops[] = { N0.getOperand(0), Neg, + DAG.getConstant(X86::COND_GE, MVT::i8), + SDValue(Neg.getNode(), 1) }; + return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), + Ops, array_lengthof(Ops)); + } + return SDValue(); +} + // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, @@ -14514,6 +14548,14 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, if (DCI.isBeforeLegalizeOps()) return SDValue(); + SDValue RV = performIntegerAbsCombine(N, DAG); + if (RV.getNode()) + return RV; + + // Try forming BMI if it is available. + if (!Subtarget->hasBMI()) + return SDValue(); + EVT VT = N->getValueType(0); if (VT != MVT::i32 && VT != MVT::i64) -- cgit v1.1 From 45d53b866e63dd4899c2ea691e3539da8dbb3909 Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Fri, 8 Jun 2012 18:58:26 +0000 Subject: Enable optimization for integer ABS on X86 if Subtarget has CMOV. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158220 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 99bb470..b27af48 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14548,9 +14548,11 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, if (DCI.isBeforeLegalizeOps()) return SDValue(); - SDValue RV = performIntegerAbsCombine(N, DAG); - if (RV.getNode()) - return RV; + if (Subtarget->hasCMov()) { + SDValue RV = performIntegerAbsCombine(N, DAG); + if (RV.getNode()) + return RV; + } // Try forming BMI if it is available. if (!Subtarget->hasBMI()) -- cgit v1.1 From c29106b36f97d0f2dc806d1e8bf8d44fc466b9d3 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sat, 9 Jun 2012 16:46:13 +0000 Subject: Replace XOP vpcom intrinsics with fewer intrinsics that take the immediate as an argument. 
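For reference, a sketch of the collapse (intrinsic spellings abbreviated; the predicate values are the CC constants visible in the lowering code removed below):

    // One intrinsic per predicate  ->  one intrinsic plus immediate
    //   vpcomltb(a, b)             ->  vpcomb(a, b, 0)   // lt
    //   vpcomleb(a, b)             ->  vpcomb(a, b, 1)   // le
    //   vpcomgtb(a, b)             ->  vpcomb(a, b, 2)   // gt
    //   vpcomgeb(a, b)             ->  vpcomb(a, b, 3)   // ge
    //   vpcomeqb(a, b)             ->  vpcomb(a, b, 4)   // eq
    //   vpcomneb(a, b)             ->  vpcomb(a, b, 5)   // ne
    //   vpcomfalseb(a, b)          ->  vpcomb(a, b, 6)   // false
    //   vpcomtrueb(a, b)           ->  vpcomb(a, b, 7)   // true
    // The same collapse applies to the w/d/q and unsigned (vpcomu*) forms.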
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158278 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 194 ++++--------------------------------- 1 file changed, 20 insertions(+), 174 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index b27af48..c33579d 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -9535,193 +9535,39 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); } // XOP comparison intrinsics - case Intrinsic::x86_xop_vpcomltb: - case Intrinsic::x86_xop_vpcomltw: - case Intrinsic::x86_xop_vpcomltd: - case Intrinsic::x86_xop_vpcomltq: - case Intrinsic::x86_xop_vpcomltub: - case Intrinsic::x86_xop_vpcomltuw: - case Intrinsic::x86_xop_vpcomltud: - case Intrinsic::x86_xop_vpcomltuq: - case Intrinsic::x86_xop_vpcomleb: - case Intrinsic::x86_xop_vpcomlew: - case Intrinsic::x86_xop_vpcomled: - case Intrinsic::x86_xop_vpcomleq: - case Intrinsic::x86_xop_vpcomleub: - case Intrinsic::x86_xop_vpcomleuw: - case Intrinsic::x86_xop_vpcomleud: - case Intrinsic::x86_xop_vpcomleuq: - case Intrinsic::x86_xop_vpcomgtb: - case Intrinsic::x86_xop_vpcomgtw: - case Intrinsic::x86_xop_vpcomgtd: - case Intrinsic::x86_xop_vpcomgtq: - case Intrinsic::x86_xop_vpcomgtub: - case Intrinsic::x86_xop_vpcomgtuw: - case Intrinsic::x86_xop_vpcomgtud: - case Intrinsic::x86_xop_vpcomgtuq: - case Intrinsic::x86_xop_vpcomgeb: - case Intrinsic::x86_xop_vpcomgew: - case Intrinsic::x86_xop_vpcomged: - case Intrinsic::x86_xop_vpcomgeq: - case Intrinsic::x86_xop_vpcomgeub: - case Intrinsic::x86_xop_vpcomgeuw: - case Intrinsic::x86_xop_vpcomgeud: - case Intrinsic::x86_xop_vpcomgeuq: - case Intrinsic::x86_xop_vpcomeqb: - case Intrinsic::x86_xop_vpcomeqw: - case Intrinsic::x86_xop_vpcomeqd: - case Intrinsic::x86_xop_vpcomeqq: - case Intrinsic::x86_xop_vpcomequb: - case Intrinsic::x86_xop_vpcomequw: - case Intrinsic::x86_xop_vpcomequd: - case Intrinsic::x86_xop_vpcomequq: - case Intrinsic::x86_xop_vpcomneb: - case Intrinsic::x86_xop_vpcomnew: - case Intrinsic::x86_xop_vpcomned: - case Intrinsic::x86_xop_vpcomneq: - case Intrinsic::x86_xop_vpcomneub: - case Intrinsic::x86_xop_vpcomneuw: - case Intrinsic::x86_xop_vpcomneud: - case Intrinsic::x86_xop_vpcomneuq: - case Intrinsic::x86_xop_vpcomfalseb: - case Intrinsic::x86_xop_vpcomfalsew: - case Intrinsic::x86_xop_vpcomfalsed: - case Intrinsic::x86_xop_vpcomfalseq: - case Intrinsic::x86_xop_vpcomfalseub: - case Intrinsic::x86_xop_vpcomfalseuw: - case Intrinsic::x86_xop_vpcomfalseud: - case Intrinsic::x86_xop_vpcomfalseuq: - case Intrinsic::x86_xop_vpcomtrueb: - case Intrinsic::x86_xop_vpcomtruew: - case Intrinsic::x86_xop_vpcomtrued: - case Intrinsic::x86_xop_vpcomtrueq: - case Intrinsic::x86_xop_vpcomtrueub: - case Intrinsic::x86_xop_vpcomtrueuw: - case Intrinsic::x86_xop_vpcomtrueud: - case Intrinsic::x86_xop_vpcomtrueuq: { - unsigned CC = 0; - unsigned Opc = 0; + case Intrinsic::x86_xop_vpcomb: + case Intrinsic::x86_xop_vpcomw: + case Intrinsic::x86_xop_vpcomd: + case Intrinsic::x86_xop_vpcomq: + case Intrinsic::x86_xop_vpcomub: + case Intrinsic::x86_xop_vpcomuw: + case Intrinsic::x86_xop_vpcomud: + case Intrinsic::x86_xop_vpcomuq: { + unsigned CC; + unsigned Opc; switch (IntNo) { default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
- case Intrinsic::x86_xop_vpcomltb: - case Intrinsic::x86_xop_vpcomltw: - case Intrinsic::x86_xop_vpcomltd: - case Intrinsic::x86_xop_vpcomltq: + case Intrinsic::x86_xop_vpcomb: + case Intrinsic::x86_xop_vpcomw: + case Intrinsic::x86_xop_vpcomd: + case Intrinsic::x86_xop_vpcomq: CC = 0; Opc = X86ISD::VPCOM; break; - case Intrinsic::x86_xop_vpcomltub: - case Intrinsic::x86_xop_vpcomltuw: - case Intrinsic::x86_xop_vpcomltud: - case Intrinsic::x86_xop_vpcomltuq: + case Intrinsic::x86_xop_vpcomub: + case Intrinsic::x86_xop_vpcomuw: + case Intrinsic::x86_xop_vpcomud: + case Intrinsic::x86_xop_vpcomuq: CC = 0; Opc = X86ISD::VPCOMU; break; - case Intrinsic::x86_xop_vpcomleb: - case Intrinsic::x86_xop_vpcomlew: - case Intrinsic::x86_xop_vpcomled: - case Intrinsic::x86_xop_vpcomleq: - CC = 1; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomleub: - case Intrinsic::x86_xop_vpcomleuw: - case Intrinsic::x86_xop_vpcomleud: - case Intrinsic::x86_xop_vpcomleuq: - CC = 1; - Opc = X86ISD::VPCOMU; - break; - case Intrinsic::x86_xop_vpcomgtb: - case Intrinsic::x86_xop_vpcomgtw: - case Intrinsic::x86_xop_vpcomgtd: - case Intrinsic::x86_xop_vpcomgtq: - CC = 2; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomgtub: - case Intrinsic::x86_xop_vpcomgtuw: - case Intrinsic::x86_xop_vpcomgtud: - case Intrinsic::x86_xop_vpcomgtuq: - CC = 2; - Opc = X86ISD::VPCOMU; - break; - case Intrinsic::x86_xop_vpcomgeb: - case Intrinsic::x86_xop_vpcomgew: - case Intrinsic::x86_xop_vpcomged: - case Intrinsic::x86_xop_vpcomgeq: - CC = 3; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomgeub: - case Intrinsic::x86_xop_vpcomgeuw: - case Intrinsic::x86_xop_vpcomgeud: - case Intrinsic::x86_xop_vpcomgeuq: - CC = 3; - Opc = X86ISD::VPCOMU; - break; - case Intrinsic::x86_xop_vpcomeqb: - case Intrinsic::x86_xop_vpcomeqw: - case Intrinsic::x86_xop_vpcomeqd: - case Intrinsic::x86_xop_vpcomeqq: - CC = 4; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomequb: - case Intrinsic::x86_xop_vpcomequw: - case Intrinsic::x86_xop_vpcomequd: - case Intrinsic::x86_xop_vpcomequq: - CC = 4; - Opc = X86ISD::VPCOMU; - break; - case Intrinsic::x86_xop_vpcomneb: - case Intrinsic::x86_xop_vpcomnew: - case Intrinsic::x86_xop_vpcomned: - case Intrinsic::x86_xop_vpcomneq: - CC = 5; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomneub: - case Intrinsic::x86_xop_vpcomneuw: - case Intrinsic::x86_xop_vpcomneud: - case Intrinsic::x86_xop_vpcomneuq: - CC = 5; - Opc = X86ISD::VPCOMU; - break; - case Intrinsic::x86_xop_vpcomfalseb: - case Intrinsic::x86_xop_vpcomfalsew: - case Intrinsic::x86_xop_vpcomfalsed: - case Intrinsic::x86_xop_vpcomfalseq: - CC = 6; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomfalseub: - case Intrinsic::x86_xop_vpcomfalseuw: - case Intrinsic::x86_xop_vpcomfalseud: - case Intrinsic::x86_xop_vpcomfalseuq: - CC = 6; - Opc = X86ISD::VPCOMU; - break; - case Intrinsic::x86_xop_vpcomtrueb: - case Intrinsic::x86_xop_vpcomtruew: - case Intrinsic::x86_xop_vpcomtrued: - case Intrinsic::x86_xop_vpcomtrueq: - CC = 7; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomtrueub: - case Intrinsic::x86_xop_vpcomtrueuw: - case Intrinsic::x86_xop_vpcomtrueud: - case Intrinsic::x86_xop_vpcomtrueuq: - CC = 7; - Opc = X86ISD::VPCOMU; - break; } SDValue LHS = Op.getOperand(1); SDValue RHS = Op.getOperand(2); - return DAG.getNode(Opc, dl, Op.getValueType(), LHS, RHS, - DAG.getConstant(CC, MVT::i8)); + SDValue Imm = Op.getOperand(3); + return DAG.getNode(Opc, dl, 
Op.getValueType(), LHS, RHS, Imm); } // Arithmetic intrinsics. -- cgit v1.1 From 2a5dc43bd97487ea33a1af4e686661ad90f192ad Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sat, 9 Jun 2012 17:02:24 +0000 Subject: Use XOP vpcom intrinsics in patterns instead of a target specific SDNode type. Remove the custom lowering code that selected the SDNode type. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158279 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 36 ------------------------------------ 1 file changed, 36 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c33579d..74902c6 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -9534,42 +9534,6 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const DAG.getConstant(X86CC, MVT::i8), Cond); return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); } - // XOP comparison intrinsics - case Intrinsic::x86_xop_vpcomb: - case Intrinsic::x86_xop_vpcomw: - case Intrinsic::x86_xop_vpcomd: - case Intrinsic::x86_xop_vpcomq: - case Intrinsic::x86_xop_vpcomub: - case Intrinsic::x86_xop_vpcomuw: - case Intrinsic::x86_xop_vpcomud: - case Intrinsic::x86_xop_vpcomuq: { - unsigned CC; - unsigned Opc; - - switch (IntNo) { - default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. - case Intrinsic::x86_xop_vpcomb: - case Intrinsic::x86_xop_vpcomw: - case Intrinsic::x86_xop_vpcomd: - case Intrinsic::x86_xop_vpcomq: - CC = 0; - Opc = X86ISD::VPCOM; - break; - case Intrinsic::x86_xop_vpcomub: - case Intrinsic::x86_xop_vpcomuw: - case Intrinsic::x86_xop_vpcomud: - case Intrinsic::x86_xop_vpcomuq: - CC = 0; - Opc = X86ISD::VPCOMU; - break; - } - - SDValue LHS = Op.getOperand(1); - SDValue RHS = Op.getOperand(2); - SDValue Imm = Op.getOperand(3); - return DAG.getNode(Opc, dl, Op.getValueType(), LHS, RHS, Imm); - } - // Arithmetic intrinsics. case Intrinsic::x86_sse2_pmulu_dq: case Intrinsic::x86_avx2_pmulu_dq: -- cgit v1.1 From d6b43a317e71246380db55a50b799b062b53cdce Mon Sep 17 00:00:00 2001 From: Rafael Espindola Date: Tue, 19 Jun 2012 00:48:28 +0000 Subject: Move the support for using .init_array from ARM to the generic TargetLoweringObjectFileELF. Use this to support it on X86. Unlike ARM, on X86 it is not easy to find out if .init_array should be used or not, so the decision is made via TargetOptions and defaults to off. Add a command line option to llc that enables it. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158692 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 ++ 1 file changed, 2 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 74902c6..dec8d07 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -140,6 +140,8 @@ static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { return new TargetLoweringObjectFileMachO(); } + if (Subtarget->isTargetLinux()) + return new X86LinuxTargetObjectFile(); if (Subtarget->isTargetELF()) return new TargetLoweringObjectFileELF(); if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) -- cgit v1.1 From 703c38bf584b39275ba517982677491607f46d20 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 20 Jun 2012 05:39:26 +0000 Subject: Don't insert 128-bit UNDEF into 256-bit vectors. 
Just keep the 256-bit vector. Original patch by Elena Demikhovsky. Tweaked by me to allow possibility of covering more cases. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158792 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index dec8d07..2576821 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -99,6 +99,10 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, DebugLoc dl) { + // Inserting UNDEF is Result + if (Vec.getOpcode() == ISD::UNDEF) + return Result; + EVT VT = Vec.getValueType(); assert(VT.getSizeInBits() == 128 && "Unexpected vector size!"); @@ -114,9 +118,8 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, * ElemsPerChunk); SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32); - Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, - VecIdx); - return Result; + return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, + VecIdx); } /// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128 -- cgit v1.1 From ce0a5cda8aa547d5219da70a68bef40d5ed8392c Mon Sep 17 00:00:00 2001 From: Rafael Espindola Date: Sat, 23 Jun 2012 00:30:03 +0000 Subject: Handle aliases to tls variables in all architectures, not just x86. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159058 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 5 ----- 1 file changed, 5 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 2576821..4426f30 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -7419,11 +7419,6 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { const GlobalValue *GV = GA->getGlobal(); if (Subtarget->isTargetELF()) { - // If GV is an alias then use the aliasee for determining - // thread-localness. 
- if (const GlobalAlias *GA = dyn_cast(GV)) - GV = GA->resolveAliasedGlobal(false); - TLSModel::Model model = getTargetMachine().getTLSModel(GV); switch (model) { -- cgit v1.1 From b49998d76cb4e414d13d60116adf13b085d85dc1 Mon Sep 17 00:00:00 2001 From: Pete Cooper Date: Sun, 24 Jun 2012 00:05:44 +0000 Subject: DAG legalisation can now handle illegal fma vector types by scalarisation git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159092 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 ++ 1 file changed, 2 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4426f30..35366ce 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -715,6 +715,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Custom); setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); @@ -10893,6 +10894,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); case ISD::ADD: return LowerADD(Op, DAG); case ISD::SUB: return LowerSUB(Op, DAG); + case ISD::FMA: return SDValue(); } } -- cgit v1.1 From 6e2db65266c238368f744bbc42b8f4239fd1e76e Mon Sep 17 00:00:00 2001 From: Pete Cooper Date: Sun, 24 Jun 2012 00:08:36 +0000 Subject: Remove code i'd been testing with but didn't mean to commit. Oops git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159094 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 -- 1 file changed, 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 35366ce..4426f30 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -715,7 +715,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); - setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Custom); setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); @@ -10894,7 +10893,6 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); case ISD::ADD: return LowerADD(Op, DAG); case ISD::SUB: return LowerSUB(Op, DAG); - case ISD::FMA: return SDValue(); } } -- cgit v1.1 From 82d58b147f25f97b522f8916a47b40f2d81bfa56 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Sun, 24 Jun 2012 15:53:01 +0000 Subject: %RCX is not a function live-out in eh.return functions. The function live-out registers must be live at all function returns, and %RCX is only used by eh.return. When a function also has a normal return, only %RAX holds a return value. This fixes PR13188. 
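An illustrative function shape (hypothetical example using the GCC-style builtin) with both kinds of return, which is where treating %RCX as a blanket live-out went wrong:

    extern "C" void *compute(void);

    // One path returns normally, so only %RAX carries a return value; the
    // other performs eh.return, which is the only user of %RCX.
    extern "C" void *maybe_unwind(long offset, void *handler, int normal) {
      if (normal)
        return compute();
      __builtin_eh_return(offset, handler); // does not return
    }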
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159116 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 -- 1 file changed, 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4426f30..8ee6440 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -9824,7 +9824,6 @@ SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, } SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { - MachineFunction &MF = DAG.getMachineFunction(); SDValue Chain = Op.getOperand(0); SDValue Offset = Op.getOperand(1); SDValue Handler = Op.getOperand(2); @@ -9841,7 +9840,6 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), false, false, 0); Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); - MF.getRegInfo().addLiveOut(StoreAddrReg); return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, -- cgit v1.1 From 52d418df5d67ffdf4c9782c5fa8d3fdbd2478631 Mon Sep 17 00:00:00 2001 From: Eli Friedman Date: Mon, 25 Jun 2012 23:42:33 +0000 Subject: Make some ugly hacks for inline asm operands which name a specific register a bit more thorough. PR13196. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159176 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 8ee6440..ee7a635 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -16030,12 +16030,15 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, // wrong class. This can happen with constraints like {xmm0} where the // target independent register mapper will just pick the first match it can // find, ignoring the required type. - if (VT == MVT::f32) + + if (VT == MVT::f32 || VT == MVT::i32) Res.second = &X86::FR32RegClass; - else if (VT == MVT::f64) + else if (VT == MVT::f64 || VT == MVT::i64) Res.second = &X86::FR64RegClass; else if (X86::VR128RegClass.hasType(VT)) Res.second = &X86::VR128RegClass; + else if (X86::VR256RegClass.hasType(VT)) + Res.second = &X86::VR256RegClass; } return Res; -- cgit v1.1 From 1596373671e4df54e53e79dc613545d5cf9d83bb Mon Sep 17 00:00:00 2001 From: Elena Demikhovsky Date: Tue, 26 Jun 2012 08:04:10 +0000 Subject: Shuffle optimization for AVX/AVX2. The current patch optimizes frequently used shuffle patterns and gives the following instruction sequence reduction.
Before: vshufps $-35, %xmm1, %xmm0, %xmm2 ## xmm2 = xmm0[1,3],xmm1[1,3] vpermilps $-40, %xmm2, %xmm2 ## xmm2 = xmm2[0,2,1,3] vextractf128 $1, %ymm1, %xmm1 vextractf128 $1, %ymm0, %xmm0 vshufps $-35, %xmm1, %xmm0, %xmm0 ## xmm0 = xmm0[1,3],xmm1[1,3] vpermilps $-40, %xmm0, %xmm0 ## xmm0 = xmm0[0,2,1,3] vinsertf128 $1, %xmm0, %ymm2, %ymm0 After: vshufps $13, %ymm0, %ymm1, %ymm1 ## ymm1 = ymm1[1,3],ymm0[0,0],ymm1[5,7],ymm0[4,4] vshufps $13, %ymm0, %ymm0, %ymm0 ## ymm0 = ymm0[1,3,0,0,5,7,4,4] vunpcklps %ymm1, %ymm0, %ymm0 ## ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159188 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 52 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index ee7a635..e84ec29 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3496,6 +3496,53 @@ static bool isMOVLHPSMask(ArrayRef Mask, EVT VT) { return true; } +// +// Some special combinations that can be optimized. +// +static +SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, + SelectionDAG &DAG) { + EVT VT = SVOp->getValueType(0); + unsigned NumElts = VT.getVectorNumElements(); + DebugLoc dl = SVOp->getDebugLoc(); + + if (VT != MVT::v8i32 && VT != MVT::v8f32) + return SDValue(); + + ArrayRef Mask = SVOp->getMask(); + + // These are the special masks that may be optimized. + static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; + static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; + bool MatchEvenMask = true; + bool MatchOddMask = true; + for (int i=0; i<8; ++i) { + if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) + MatchEvenMask = false; + if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) + MatchOddMask = false; + } + static const int CompactionMaskEven[] = {0, 2, -1, -1, 4, 6, -1, -1}; + static const int CompactionMaskOdd [] = {1, 3, -1, -1, 5, 7, -1, -1}; + + const int *CompactionMask; + if (MatchEvenMask) + CompactionMask = CompactionMaskEven; + else if (MatchOddMask) + CompactionMask = CompactionMaskOdd; + else + return SDValue(); + + SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); + + SDValue Op0 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(0), + UndefNode, CompactionMask); + SDValue Op1 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(1), + UndefNode, CompactionMask); + static const int UnpackMask[] = {0, 8, 1, 9, 4, 12, 5, 13}; + return DAG.getVectorShuffle(VT, dl, Op0, Op1, UnpackMask); +} + /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand /// specifies a shuffle of elements that is suitable for input to UNPCKL. static bool isUNPCKLMask(ArrayRef Mask, EVT VT, @@ -5982,6 +6029,11 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT, /// which could not be matched by any known target speficic shuffle static SDValue LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { + + SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG); + if (NewOp.getNode()) + return NewOp; + EVT VT = SVOp->getValueType(0); unsigned NumElems = VT.getVectorNumElements(); -- cgit v1.1 From a44489d5b5687e98c39947b7b64187a3ad5faf0e Mon Sep 17 00:00:00 2001 From: Bill Wendling Date: Tue, 26 Jun 2012 10:05:06 +0000 Subject: Rename to match other X86_64* names. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159196 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e84ec29..77d99b7 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -139,7 +139,7 @@ static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { if (Subtarget->isTargetEnvMacho()) { if (is64Bit) - return new X8664_MachoTargetObjectFile(); + return new X86_64MachoTargetObjectFile(); return new TargetLoweringObjectFileMachO(); } -- cgit v1.1 From fcb09468334ab9f1d50e75e07615479d2e398802 Mon Sep 17 00:00:00 2001 From: Elena Demikhovsky Date: Tue, 26 Jun 2012 10:50:07 +0000 Subject: Removed unused variable git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159197 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 1 - 1 file changed, 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 77d99b7..1a60ce3 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3503,7 +3503,6 @@ static SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { EVT VT = SVOp->getValueType(0); - unsigned NumElts = VT.getVectorNumElements(); DebugLoc dl = SVOp->getDebugLoc(); if (VT != MVT::v8i32 && VT != MVT::v8f32) return SDValue(); -- cgit v1.1 From 94e3b388e561ce980c861e092bf378bf40202268 Mon Sep 17 00:00:00 2001 From: Rafael Espindola Date: Fri, 29 Jun 2012 04:22:35 +0000 Subject: In the initial exec mode we always do a load to find the address of a variable. Before this patch, in PIC 32-bit code, we would add the global base register and not load from that address. This is a really old bug, but before the introduction of the TLS attributes we would never select initial exec for PIC code. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159409 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 1a60ce3..e6a0df7 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -7451,11 +7451,11 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, Offset = DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), Offset); - } else { - Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, - MachinePointerInfo::getGOT(), false, false, false, - 0); } + + Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, + MachinePointerInfo::getGOT(), false, false, false, + 0); } // The address of the thread local variable is the add of the thread -- cgit v1.1 From 8f40f7b8676ae7931baaecb1046a21f09471384b Mon Sep 17 00:00:00 2001 From: Elena Demikhovsky Date: Sun, 1 Jul 2012 06:12:26 +0000 Subject: Optimization of shuffle nodes that can fit the register form of the VBROADCAST instruction on AVX2.
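An intrinsics-level illustration of the register form this enables (hypothetical example, not from the commit):

    #include <immintrin.h>

    // Broadcast lane 0 of a value that is already in a register; with AVX2
    // this is a single vbroadcastss from an XMM source, with no load.
    __m256 splat_lane0(__m256 v) {
      __m128 lo = _mm256_castps256_ps128(v); // the lower 128 bits
      return _mm256_broadcastss_ps(lo);      // register-form broadcast
    }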
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159504 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e6a0df7..ba66593 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -5047,8 +5047,16 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { SDValue Sc = Op.getOperand(0); if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && - Sc.getOpcode() != ISD::BUILD_VECTOR) - return SDValue(); + Sc.getOpcode() != ISD::BUILD_VECTOR) { + + if (!Subtarget->hasAVX2()) + return SDValue(); + + // Use the register form of the broadcast instruction available on AVX2. + if (VT.is256BitVector()) + Sc = Extract128BitVector(Sc, 0, DAG, dl); + return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); + } Ld = Sc.getOperand(0); ConstSplatVal = (Ld.getOpcode() == ISD::Constant || -- cgit v1.1 From b8720787015dc73d8a050b063366be6c3ad01946 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Wed, 4 Jul 2012 19:28:31 +0000 Subject: Ensure CopyToReg nodes are always glued to the call instruction. The CopyToReg nodes that set up the argument registers before a call must be glued to the call instruction. Otherwise, the scheduler may emit the physreg copies long before the call, causing long live ranges for the fixed registers. Besides disabling good register allocation, that can also expose problems when EmitInstrWithCustomInserter() splits a basic block during the live range of a physreg. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159721 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 52 ++++++++++---------------------------- 1 file changed, 13 insertions(+), 39 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index ba66593..c8c5dae 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2304,27 +2304,12 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0], MemOpChains.size()); - // Build a sequence of copy-to-reg nodes chained together with token chain - // and flag operands which copy the outgoing args into registers. - SDValue InFlag; - // Tail call byval lowering might overwrite argument registers so in case of - // tail call optimization the copies to registers are lowered later. - if (!isTailCall) - for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { - Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, - RegsToPass[i].second, InFlag); - InFlag = Chain.getValue(1); - } - if (Subtarget->isPICStyleGOT()) { // ELF / PIC requires GOT in the EBX register before function calls via PLT // GOT pointer. if (!isTailCall) { - Chain = DAG.getCopyToReg(Chain, dl, X86::EBX, - DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), - InFlag); - InFlag = Chain.getValue(1); + RegsToPass.push_back(std::make_pair(unsigned(X86::EBX), + DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()))); } else { // If we are tail calling and generating PIC/GOT style code load the // address of the callee into ECX. 
The value in ecx is used as target of @@ -2362,12 +2347,10 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, assert((Subtarget->hasSSE1() || !NumXMMRegs) && "SSE registers cannot be used when SSE is disabled"); - Chain = DAG.getCopyToReg(Chain, dl, X86::AL, - DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); - InFlag = Chain.getValue(1); + RegsToPass.push_back(std::make_pair(unsigned(X86::AL), + DAG.getConstant(NumXMMRegs, MVT::i8))); } - // For tail calls lower the arguments to the 'real' stack slot. if (isTailCall) { // Force all the incoming stack arguments to be loaded from the stack @@ -2381,8 +2364,6 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVector MemOpChains2; SDValue FIN; int FI = 0; - // Do not flag preceding copytoreg stuff together with the following stuff. - InFlag = SDValue(); if (getTargetMachine().Options.GuaranteedTailCallOpt) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; @@ -2422,19 +2403,20 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains2[0], MemOpChains2.size()); - // Copy arguments to their registers. - for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { - Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, - RegsToPass[i].second, InFlag); - InFlag = Chain.getValue(1); - } - InFlag =SDValue(); - // Store the return address to the appropriate stack slot. Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, FPDiff, dl); } + // Build a sequence of copy-to-reg nodes chained together with token chain + // and flag operands which copy the outgoing args into registers. + SDValue InFlag; + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, + RegsToPass[i].second, InFlag); + InFlag = Chain.getValue(1); + } + if (getTargetMachine().getCodeModel() == CodeModel::Large) { assert(Is64Bit && "Large code model is only legal in 64-bit mode."); // In the 64-bit large code model, we have to make all calls @@ -2536,14 +2518,6 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Ops.push_back(DAG.getRegister(RegsToPass[i].first, RegsToPass[i].second.getValueType())); - // Add an implicit use GOT pointer in EBX. - if (!isTailCall && Subtarget->isPICStyleGOT()) - Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); - - // Add an implicit use of AL for non-Windows x86 64-bit vararg functions. - if (Is64Bit && isVarArg && !IsWin64) - Ops.push_back(DAG.getRegister(X86::AL, MVT::i8)); - // Add a register mask operand representing the call-preserved registers. const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); -- cgit v1.1 From 85dccf18ea0e0b7258d1c5f186b616e022dbebf1 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Wed, 4 Jul 2012 23:53:27 +0000 Subject: Make X86 call and return instructions non-variadic. Function argument and return value registers aren't part of the encoding, so they should be implicit operands. 
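A hedged sketch of the resulting operand style (assumes LLVM's X86 target internals are in scope; simplified from the morestack call in the diff below): registers that carry arguments or results become implicit uses and defs instead of trailing explicit operands, since they are not part of the encoding.

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Target/TargetInstrInfo.h"

    using namespace llvm;

    // Emit a call whose argument/result registers are implicit operands.
    static void emitHelperCall(MachineBasicBlock *MBB, DebugLoc DL,
                               const TargetInstrInfo *TII,
                               const uint32_t *RegMask, unsigned CallOpc,
                               unsigned ArgReg, unsigned RetReg) {
      BuildMI(MBB, DL, TII->get(CallOpc))
          .addExternalSymbol("__helper")             // hypothetical callee
          .addRegMask(RegMask)
          .addReg(ArgReg, RegState::Implicit)        // use, not encoded
          .addReg(RetReg, RegState::ImplicitDefine); // def, not encoded
    }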
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159728 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c8c5dae..4f07650 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -12344,8 +12344,9 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) .addReg(sizeVReg); BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) - .addExternalSymbol("__morestack_allocate_stack_space").addReg(X86::RDI) + .addExternalSymbol("__morestack_allocate_stack_space") .addRegMask(RegMask) + .addReg(X86::RDI, RegState::Implicit) .addReg(X86::RAX, RegState::ImplicitDefine); } else { BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) -- cgit v1.1 From 2dd83eb1ab3b7d7cdef2e244317caefd78be8a45 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Tue, 10 Jul 2012 13:25:08 +0000 Subject: Improve the loading of load-anyext vectors by allowing the codegen to load multiple scalars and insert them into a vector. Next, we shuffle the elements into the correct places, as before. Also fix a small dagcombine bug in SimplifyBinOpWithSameOpcodeHands, when the migration of bitcasts happened too late in the SelectionDAG process. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159991 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 81 +++++++++++++++++++++++++------------- 1 file changed, 54 insertions(+), 27 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4f07650..0df18cb0 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14425,7 +14425,8 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, - const X86Subtarget *Subtarget) { + TargetLowering::DAGCombinerInfo &DCI, + const X86Subtarget *Subtarget) { LoadSDNode *Ld = cast<LoadSDNode>(N); EVT RegVT = Ld->getValueType(0); EVT MemVT = Ld->getMemoryVT(); @@ -14447,47 +14448,73 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, unsigned RegSz = RegVT.getSizeInBits(); unsigned MemSz = MemVT.getSizeInBits(); assert(RegSz > MemSz && "Register size must be greater than the mem size"); - // All sizes must be a power of two - if (!isPowerOf2_32(RegSz * MemSz * NumElems)) return SDValue(); - // Attempt to load the original value using a single load op. - // Find a scalar type which is equal to the loaded word size. + // All sizes must be a power of two. + if (!isPowerOf2_32(RegSz * MemSz * NumElems)) + return SDValue(); + + // Attempt to load the original value using scalar loads. + // Find the largest scalar type that divides the total loaded size. MVT SclrLoadTy = MVT::i8; for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { MVT Tp = (MVT::SimpleValueType)tp; - if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() == MemSz) { + if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) { SclrLoadTy = Tp; - break; } } - // Proceed if a load word is found.
- if (SclrLoadTy.getSizeInBits() != MemSz) return SDValue(); + // Calculate the number of scalar loads that we need to perform + // in order to load our vector from memory. + unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); + // Represent our vector as a sequence of elements which are the + // largest scalar that we can load. EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, RegSz/SclrLoadTy.getSizeInBits()); + // Represent the data using the same element type that is stored in + // memory. In practice, we ''widen'' MemVT. EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), RegSz/MemVT.getScalarType().getSizeInBits()); - // Can't shuffle using an illegal type. - if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); - // Perform a single load. - SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), - Ld->getBasePtr(), - Ld->getPointerInfo(), Ld->isVolatile(), - Ld->isNonTemporal(), Ld->isInvariant(), - Ld->getAlignment()); + assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() && + "Invalid vector type"); - // Insert the word loaded into a vector. - SDValue ScalarInVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, - LoadUnitVecVT, ScalarLoad); + // We can't shuffle using an illegal type. + if (!TLI.isTypeLegal(WideVecVT)) + return SDValue(); + + SmallVector<SDValue, 8> Chains; + SDValue Ptr = Ld->getBasePtr(); + SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8, + TLI.getPointerTy()); + SDValue Res = DAG.getUNDEF(LoadUnitVecVT); + + for (unsigned i = 0; i < NumLoads; ++i) { + // Perform a single load. + SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), + Ptr, Ld->getPointerInfo(), + Ld->isVolatile(), Ld->isNonTemporal(), + Ld->isInvariant(), Ld->getAlignment()); + Chains.push_back(ScalarLoad.getValue(1)); + // Create the first element type using SCALAR_TO_VECTOR in order to avoid + // another round of DAGCombining. + if (i == 0) + Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); + else + Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, + ScalarLoad, DAG.getIntPtrConstant(i)); + + Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); + } + + SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], + Chains.size()); // Bitcast the loaded value to a vector of the original element type, in // the size of the target vector type. - SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, - ScalarInVector); + SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res); unsigned SizeRatio = RegSz/MemSz; // Redistribute the loaded elements into the different locations. @@ -14503,8 +14530,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); // Replace the original load with the new sequence // and return the new chain. - DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Shuff); - return SDValue(ScalarLoad.getNode(), 1); + return DCI.CombineTo(N, Shuff, TF, true); } return SDValue(); @@ -14574,8 +14600,9 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, for (unsigned i = 0; i != NumElems; ++i) ShuffleVec[i] = i * SizeRatio; - // Can't shuffle using an illegal type.
+ if (!TLI.isTypeLegal(WideVecVT)) + return SDValue(); SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, DAG.getUNDEF(WideVecVT), @@ -15308,7 +15335,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget); case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); - case ISD::LOAD: return PerformLOADCombine(N, DAG, Subtarget); + case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget); case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG); case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); -- cgit v1.1 From 5cd95e1478ddb8f3f1efde56a1cd2db47b312d72 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Wed, 11 Jul 2012 13:27:05 +0000 Subject: When ext-loading and trunc-storing vectors to memory, on x86 32bit systems, allow loads/stores of 64bit values from xmm registers. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160044 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 0df18cb0..4dccd40 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14464,6 +14464,11 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, } } + // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. + if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 && + (64 <= MemSz)) + SclrLoadTy = MVT::f64; + // Calculate the number of scalar loads that we need to perform // in order to load our vector from memory. unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); @@ -14615,13 +14620,18 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { MVT Tp = (MVT::SimpleValueType)tp; - if (TLI.isTypeLegal(Tp) && StoreType.getSizeInBits() < NumElems * ToSz) + if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) StoreType = Tp; } + // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. + if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && + (64 <= NumElems * ToSz)) + StoreType = MVT::f64; + // Bitcast the original vector into a vector of store-size units EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), - StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); + StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); SmallVector<SDValue, 8> Chains; -- cgit v1.1 From b9bee0499553358e64c34cfcbd32380ac7fb452e Mon Sep 17 00:00:00 2001 From: Benjamin Kramer Date: Thu, 12 Jul 2012 09:31:43 +0000 Subject: Add intrinsics for Ivy Bridge's rdrand instruction. The rdrand/cmov sequence is the same as the one emitted by both GCC and ICC. Fixes PR13284.
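From user code, these intrinsics are reached through the usual <immintrin.h> wrappers rather than called directly; a minimal usage sketch (compile with -mrdrnd; the function name is illustrative):

    #include <immintrin.h>

    // _rdrand32_step stores a hardware random number to *out and returns 1
    // when the value is valid (CF=1), or 0 when no entropy was ready.
    int get_random(unsigned int *out) {
      return _rdrand32_step(out);
    }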
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160117 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4dccd40..c242aaa 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1176,6 +1176,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // We want to custom lower some of our intrinsics. setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); + setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't @@ -9810,6 +9811,38 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const } } +SDValue +X86TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const { + DebugLoc dl = Op.getDebugLoc(); + unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); + switch (IntNo) { + default: return SDValue(); // Don't custom lower most intrinsics. + + // RDRAND intrinsics. + case Intrinsic::x86_rdrand_16: + case Intrinsic::x86_rdrand_32: + case Intrinsic::x86_rdrand_64: { + // Emit the node with the right value type. + SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, + DAG.getVTList(Op->getValueType(0), MVT::Glue)); + + // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise + // return the value from Rand, which is always 0, casted to i32. + SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), + DAG.getConstant(1, Op->getValueType(1)), + DAG.getConstant(X86::COND_B, MVT::i32), + SDValue(Result.getNode(), 1) }; + SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, + DAG.getVTList(Op->getValueType(1), MVT::Glue), + Ops, 4); + + // Return { result, isValid, chain }. + return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, + Op.getOperand(0)); + } + } +} + SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); @@ -10894,6 +10927,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::VAARG: return LowerVAARG(Op, DAG); case ISD::VACOPY: return LowerVACOPY(Op, DAG); case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); + case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::FRAME_TO_ARGS_OFFSET: @@ -11228,6 +11262,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; case X86ISD::SAHF: return "X86ISD::SAHF"; + case X86ISD::RDRAND: return "X86ISD::RDRAND"; } } -- cgit v1.1 From feae00a68e819e661ff6fddd15be703670247c10 Mon Sep 17 00:00:00 2001 From: Benjamin Kramer Date: Thu, 12 Jul 2012 18:14:57 +0000 Subject: Give the rdrand instructions a SideEffect flag and a chain so MachineCSE and MachineLICM don't touch them. I already had the necessary things in place for IR-level passes but missed the machine passes.
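The flag matters because every RDRAND must actually execute; a short sketch of code that would miscompile if MachineCSE merged the two identical-looking instructions, or if MachineLICM hoisted one out of a loop:

    #include <immintrin.h>

    void two_samples(unsigned int *a, unsigned int *b) {
      _rdrand32_step(a);
      _rdrand32_step(b);  // needs a second RDRAND; CSE would wrongly reuse *a
    }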
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160137 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c242aaa..19cf5bf 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -9823,8 +9823,8 @@ X86TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const { case Intrinsic::x86_rdrand_32: case Intrinsic::x86_rdrand_64: { // Emit the node with the right value type. - SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, - DAG.getVTList(Op->getValueType(0), MVT::Glue)); + SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); + SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, VTs, Op.getOperand(0)); // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise // return the value from Rand, which is always 0, casted to i32. @@ -9838,7 +9838,7 @@ X86TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const { // Return { result, isValid, chain }. return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, - Op.getOperand(0)); + SDValue(Result.getNode(), 2)); } } } -- cgit v1.1 From 65f489fd7d876c3e624938cd46d2475c7f365a8a Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Sat, 14 Jul 2012 22:26:05 +0000 Subject: AVX: Fix a bug in getTargetVShiftNode. The shift amount has to be a 128bit vector with the same element type as the input vector. This is needed because of the patterns we have for the VP[SLL/SRA/SRL][W/D/Q] instructions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160222 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 19cf5bf..f74a187 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -9458,7 +9458,13 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, ShOps[2] = DAG.getUNDEF(MVT::i32); ShOps[3] = DAG.getUNDEF(MVT::i32); ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4); - ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt); + + // The return type has to be a 128-bit type with the same element + // type as the input type. + MVT EltVT = VT.getVectorElementType().getSimpleVT(); + EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits()); + + ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt); return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); } -- cgit v1.1 From d896e242993fd04c013dda8987c232cdcab63dd4 Mon Sep 17 00:00:00 2001 From: Nadav Rotem Date: Sun, 15 Jul 2012 20:27:43 +0000 Subject: Teach getTargetVShiftNode about TargetConstant nodes. 
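The gist of the fix, as a sketch of the pattern in the diff below (names match the function being patched; the exact matching behavior of TargetConstant is summarized from the commit's own comment): a TargetConstant can reach getTargetVShiftNode, so the shift amount is re-materialized as a regular Constant node of the type the shift patterns expect:

    // isa<ConstantSDNode> is true for TargetConstant as well, so rebuild the
    // amount through DAG.getConstant to guarantee a plain Constant operand.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShAmt))
      ShAmt = DAG.getConstant(C->getZExtValue(), MVT::i32);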
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160234 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index f74a187..c55a1ef 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -9433,12 +9433,15 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); if (isa<ConstantSDNode>(ShAmt)) { + // Constant may be a TargetConstant. Use a regular constant. + uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue(); switch (Opc) { default: llvm_unreachable("Unknown target vector shift node"); case X86ISD::VSHLI: case X86ISD::VSRLI: case X86ISD::VSRAI: - return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); + return DAG.getNode(Opc, dl, VT, SrcOp, + DAG.getConstant(ShiftAmt, MVT::i32)); } } -- cgit v1.1 From 98819c9d1e3b929e9ebab0e8cd3edb31aad21bd8 Mon Sep 17 00:00:00 2001 From: Evan Cheng Date: Mon, 16 Jul 2012 19:35:43 +0000 Subject: For something like uint32_t hi(uint64_t res) { uint32_t hi = res >> 32; return !hi; } the LLVM IR looks like this: define i32 @hi(i64 %res) nounwind uwtable ssp { entry: %lnot = icmp ult i64 %res, 4294967296 %lnot.ext = zext i1 %lnot to i32 ret i32 %lnot.ext } The optimizer has optimized away the right shift and truncate, but the resulting constant is too large to fit in the 32-bit immediate field. The x86 code is worse as a result: movabsq $4294967296, %rax ## imm = 0x100000000 cmpq %rax, %rdi sbbl %eax, %eax andl $1, %eax This patch teaches the x86 lowering code to handle ult against a large immediate with trailing zeros. It will issue a right shift and a truncate followed by a comparison against a shifted immediate. shrq $32, %rdi testl %edi, %edi sete %al movzbl %al, %eax It also handles a ugt comparison against a large immediate with trailing bits set. i.e. X > 0x0ffffffff -> (X >> 32) >= 1 rdar://11866926 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160312 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 44 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c55a1ef..1d72aad 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3059,6 +3059,50 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, RHS = DAG.getConstant(0, RHS.getValueType()); return X86::COND_LE; } + if (SetCCOpcode == ISD::SETULT || SetCCOpcode == ISD::SETUGE) { + unsigned TrailZeros = RHSC->getAPIntValue().countTrailingZeros(); + if (TrailZeros >= 32) { + // The constant doesn't fit in cmp immediate field. Right shift LHS by + // the # of trailing zeros and truncate it to 32-bit. Then compare + // against shifted RHS.
+ assert(LHS.getValueType() == MVT::i64 && "Expecting a 64-bit cmp!"); + DebugLoc dl = LHS.getDebugLoc(); + LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, + DAG.getNode(ISD::SRL, dl, MVT::i64, LHS, + DAG.getConstant(TrailZeros, MVT::i8))); + uint64_t C = RHSC->getZExtValue() >> TrailZeros; + + if (SetCCOpcode == ISD::SETULT) { + // X < 0x300000000 -> (X >> 32) < 3 + // X < 0x100000000 -> (X >> 32) == 0 + // X < 0x200000000 -> (X >> 33) == 0 + if (C == 1) { + RHS = DAG.getConstant(0, MVT::i32); + return X86::COND_E; + } + RHS = DAG.getConstant(C, MVT::i32); + return X86::COND_B; + } else /* SetCCOpcode == ISD::SETUGE */ { + // X >= 0x100000000 -> (X >> 32) >= 1 + RHS = DAG.getConstant(C, MVT::i32); + return X86::COND_AE; + } + } + } + if (SetCCOpcode == ISD::SETUGT) { + unsigned TrailOnes = RHSC->getAPIntValue().countTrailingOnes(); + if (TrailOnes >= 32 && !RHSC->isAllOnesValue()) { + assert(LHS.getValueType() == MVT::i64 && "Expecting a 64-bit cmp!"); + DebugLoc dl = LHS.getDebugLoc(); + LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, + DAG.getNode(ISD::SRL, dl, MVT::i64, LHS, + DAG.getConstant(TrailOnes, MVT::i8))); + uint64_t C = (RHSC->getZExtValue()+1) >> TrailOnes; + // X > 0x0ffffffff -> (X >> 32) >= 1 + RHS = DAG.getConstant(C, MVT::i32); + return X86::COND_AE; + } + } } switch (SetCCOpcode) { -- cgit v1.1 From 70e10d3fe4c5df189348f64fce56254a5a32b51c Mon Sep 17 00:00:00 2001 From: Evan Cheng Date: Tue, 17 Jul 2012 06:53:39 +0000 Subject: This is another case where the instcombine demanded-bits optimization created large immediates. Add dag combine logic to recover in case the large immediate doesn't fit in the cmp immediate operand field. For int foo(unsigned long l) { return (l >> 47) == 1; } we produce %shr.mask = and i64 %l, -140737488355328 %cmp = icmp eq i64 %shr.mask, 140737488355328 %conv = zext i1 %cmp to i32 ret i32 %conv which codegens to movq $0xffff800000000000,%rax andq %rdi,%rax movq $0x0000800000000000,%rcx cmpq %rcx,%rax sete %al movzbl %al,%eax ret TargetLowering::SimplifySetCC would transform (X & -256) == 256 -> (X >> 8) == 1 if the immediate fails the isLegalICmpImmediate() test. For x86, that means any immediate that is not a signed 32-bit immediate. Based on a patch by Eli Friedman. PR10328 rdar://9758774 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160346 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 1d72aad..347f197 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -11384,6 +11384,14 @@ bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { return true; } +bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { + return Imm == (int32_t)Imm; +} + +bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { + return Imm == (int32_t)Imm; +} + bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { if (!VT1.isInteger() || !VT2.isInteger()) return false; -- cgit v1.1 From f5c0539092996771824893309f311378e719e32e Mon Sep 17 00:00:00 2001 From: Evan Cheng Date: Tue, 17 Jul 2012 08:31:11 +0000 Subject: Implement r160312 as a target independent dag combine.
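Taken together with r160312, the recovery now happens in two target-independent steps; a worked sketch with the numbers from the example above (a hypothetical free function restating the hook added in the diff):

    // x86's cmp takes a sign-extended 32-bit immediate, so a 64-bit constant
    // is legal only if it round-trips through int32_t.
    bool isLegalICmpImmediate(int64_t Imm) { return Imm == (int32_t)Imm; }

    // 140737488355328 == 1LL << 47 fails the test, so the generic combine
    // rewrites (X & -(1LL << 47)) == (1LL << 47) into (X >> 47) == 1,
    // which needs no 64-bit immediate at all.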
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160354 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 44 -------------------------------------- 1 file changed, 44 deletions(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 347f197..4f642ec 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3059,50 +3059,6 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, RHS = DAG.getConstant(0, RHS.getValueType()); return X86::COND_LE; } - if (SetCCOpcode == ISD::SETULT || SetCCOpcode == ISD::SETUGE) { - unsigned TrailZeros = RHSC->getAPIntValue().countTrailingZeros(); - if (TrailZeros >= 32) { - // The constant doesn't fit in cmp immediate field. Right shift LHS by - // the # of trailing zeros and truncate it to 32-bit. Then compare - // against shifted RHS. - assert(LHS.getValueType() == MVT::i64 && "Expecting a 64-bit cmp!"); - DebugLoc dl = LHS.getDebugLoc(); - LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, - DAG.getNode(ISD::SRL, dl, MVT::i64, LHS, - DAG.getConstant(TrailZeros, MVT::i8))); - uint64_t C = RHSC->getZExtValue() >> TrailZeros; - - if (SetCCOpcode == ISD::SETULT) { - // X < 0x300000000 -> (X >> 32) < 3 - // X < 0x100000000 -> (X >> 32) == 0 - // X < 0x200000000 -> (X >> 33) == 0 - if (C == 1) { - RHS = DAG.getConstant(0, MVT::i32); - return X86::COND_E; - } - RHS = DAG.getConstant(C, MVT::i32); - return X86::COND_B; - } else /* SetCCOpcode == ISD::SETUGE */ { - // X >= 0x100000000 -> (X >> 32) >= 1 - RHS = DAG.getConstant(C, MVT::i32); - return X86::COND_AE; - } - } - } - if (SetCCOpcode == ISD::SETUGT) { - unsigned TrailOnes = RHSC->getAPIntValue().countTrailingOnes(); - if (TrailOnes >= 32 && !RHSC->isAllOnesValue()) { - assert(LHS.getValueType() == MVT::i64 && "Expecting a 64-bit cmp!"); - DebugLoc dl = LHS.getDebugLoc(); - LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, - DAG.getNode(ISD::SRL, dl, MVT::i64, LHS, - DAG.getConstant(TrailOnes, MVT::i8))); - uint64_t C = (RHSC->getZExtValue()+1) >> TrailOnes; - // X > 0x0ffffffff -> (X >> 32) >= 1 - RHS = DAG.getConstant(C, MVT::i32); - return X86::COND_AE; - } - } } switch (SetCCOpcode) { -- cgit v1.1 From a9e13ba3c8230073dd1157e4a17ef52906ac6cb8 Mon Sep 17 00:00:00 2001 From: Evan Cheng Date: Tue, 17 Jul 2012 18:54:11 +0000 Subject: Back out r160101 and instead implement a dag combine to recover from instcombine transformation. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160387 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 1 + 1 file changed, 1 insertion(+) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 4f642ec..e9c60ae 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -11345,6 +11345,7 @@ bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { } bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { + // Can also use sub to handle negated immediates. 
return Imm == (int32_t)Imm; } -- cgit v1.1 From c8e41c591741b3da1077f7000274ad040bef8002 Mon Sep 17 00:00:00 2001 From: Sylvestre Ledru Date: Mon, 23 Jul 2012 08:51:15 +0000 Subject: Fix a typo (the the => the) git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160621 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/Target/X86/X86ISelLowering.cpp') diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e9c60ae..4ccb0a3 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1673,7 +1673,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, SDValue Val; // If this is a call to a function that returns an fp value on the floating - // point stack, we must guarantee the the value is popped from the stack, so + // point stack, we must guarantee the value is popped from the stack, so // a CopyFromReg is not good enough - the copy instruction may be eliminated // if the return value is not used. We use the FpPOP_RETVAL instruction // instead. -- cgit v1.1