Diffstat (limited to 'lib/Target/X86/X86FastISel.cpp')
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 99
1 file changed, 54 insertions(+), 45 deletions(-)
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 69752c5..585b7a5 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -183,37 +183,37 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
   case MVT::i1:
   case MVT::i8:
     Opc = X86::MOV8rm;
-    RC  = X86::GR8RegisterClass;
+    RC  = &X86::GR8RegClass;
     break;
   case MVT::i16:
     Opc = X86::MOV16rm;
-    RC  = X86::GR16RegisterClass;
+    RC  = &X86::GR16RegClass;
     break;
   case MVT::i32:
     Opc = X86::MOV32rm;
-    RC  = X86::GR32RegisterClass;
+    RC  = &X86::GR32RegClass;
     break;
   case MVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
-    RC  = X86::GR64RegisterClass;
+    RC  = &X86::GR64RegClass;
     break;
   case MVT::f32:
     if (X86ScalarSSEf32) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
-      RC  = X86::FR32RegisterClass;
+      RC  = &X86::FR32RegClass;
     } else {
       Opc = X86::LD_Fp32m;
-      RC  = X86::RFP32RegisterClass;
+      RC  = &X86::RFP32RegClass;
     }
     break;
   case MVT::f64:
     if (X86ScalarSSEf64) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
-      RC  = X86::FR64RegisterClass;
+      RC  = &X86::FR64RegClass;
     } else {
       Opc = X86::LD_Fp64m;
-      RC  = X86::RFP64RegisterClass;
+      RC  = &X86::RFP64RegClass;
     }
     break;
   case MVT::f80:
@@ -240,7 +240,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
   default: return false;
   case MVT::i1: {
     // Mask out all but lowest bit.
-    unsigned AndResult = createResultReg(X86::GR8RegisterClass);
+    unsigned AndResult = createResultReg(&X86::GR8RegClass);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
             DL, TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
     Val = AndResult;
@@ -547,13 +547,13 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
 
       if (TLI.getPointerTy() == MVT::i64) {
         Opc = X86::MOV64rm;
-        RC  = X86::GR64RegisterClass;
+        RC  = &X86::GR64RegClass;
 
         if (Subtarget->isPICStyleRIPRel())
           StubAM.Base.Reg = X86::RIP;
       } else {
         Opc = X86::MOV32rm;
-        RC  = X86::GR32RegisterClass;
+        RC  = &X86::GR32RegClass;
       }
 
       LoadReg = createResultReg(RC);
@@ -743,7 +743,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
     CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
-		   I->getContext());
+                   I->getContext());
     CCInfo.AnalyzeReturn(Outs, RetCC_X86);
 
     const Value *RV = Ret->getOperand(0);
@@ -1258,7 +1258,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
     if (V->getType()->isFloatTy()) {
       unsigned OpReg = getRegForValue(V);
       if (OpReg == 0) return false;
-      unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
+      unsigned ResultReg = createResultReg(&X86::FR64RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
               TII.get(X86::CVTSS2SDrr), ResultReg)
         .addReg(OpReg);
@@ -1277,7 +1277,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
     if (V->getType()->isDoubleTy()) {
       unsigned OpReg = getRegForValue(V);
       if (OpReg == 0) return false;
-      unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
+      unsigned ResultReg = createResultReg(&X86::FR32RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
               TII.get(X86::CVTSD2SSrr), ResultReg)
         .addReg(OpReg);
@@ -1314,8 +1314,9 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
   if (!Subtarget->is64Bit()) {
     // If we're on x86-32; we can't extract an i8 from a general register.
     // First issue a copy to GR16_ABCD or GR32_ABCD.
-    const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
-      ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
+    const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
+      (const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
+      (const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
     unsigned CopyReg = createResultReg(CopyRC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
             CopyReg).addReg(InputReg);
@@ -1423,7 +1424,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
     return DoSelectCall(&I, "memset");
   }
   case Intrinsic::stackprotector: {
-    // Emit code inline code to store the stack guard onto the stack.
+    // Emit code to store the stack guard onto the stack.
     EVT PtrTy = TLI.getPointerTy();
 
     const Value *Op1 = I.getArgOperand(0); // The guard's value.
@@ -1484,7 +1485,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
       return false;
 
     // The call to CreateRegs builds two sequential registers, to store the
-    // both the the returned values.
+    // both the returned values.
     unsigned ResultReg = FuncInfo.CreateRegs(I.getType());
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
       .addReg(Reg1).addReg(Reg2);
@@ -1548,12 +1549,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   // Check whether the function can return without sret-demotion.
   SmallVector<ISD::OutputArg, 4> Outs;
-  SmallVector<uint64_t, 4> Offsets;
   GetReturnInfo(I->getType(), CS.getAttributes().getRetAttributes(),
-                Outs, TLI, &Offsets);
+                Outs, TLI);
   bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
-					   *FuncInfo.MF, FTy->isVarArg(),
-					   Outs, FTy->getContext());
+                                           *FuncInfo.MF, FTy->isVarArg(),
+                                           Outs, FTy->getContext());
   if (!CanLowerReturn)
     return false;
@@ -1667,7 +1667,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
-		 I->getParent()->getContext());
+                 I->getParent()->getContext());
 
   // Allocate shadow area for Win64
   if (Subtarget->isTargetWin64())
@@ -1693,7 +1693,6 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     // Promote the value if needed.
     switch (VA.getLocInfo()) {
-    default: llvm_unreachable("Unknown loc info!");
     case CCValAssign::Full: break;
     case CCValAssign::SExt: {
       assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
@@ -1737,6 +1736,14 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
       ArgVT = VA.getLocVT();
       break;
     }
+    case CCValAssign::VExt:
+      // VExt has not been implemented, so this should be impossible to reach
+      // for now. However, fallback to Selection DAG isel once implemented.
+      return false;
+    case CCValAssign::Indirect:
+      // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
+      // support this.
+      return false;
     }
 
     if (VA.isRegLoc()) {
@@ -1838,25 +1845,27 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     MIB.addGlobalAddress(GV, 0, OpFlags);
   }
 
+  // Add a register mask with the call-preserved registers.
+  // Proper defs for return values will be added by setPhysRegsDeadExcept().
+  MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+
   // Add an implicit use GOT pointer in EBX.
   if (Subtarget->isPICStyleGOT())
-    MIB.addReg(X86::EBX);
+    MIB.addReg(X86::EBX, RegState::Implicit);
 
   if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
-    MIB.addReg(X86::AL);
+    MIB.addReg(X86::AL, RegState::Implicit);
 
   // Add implicit physical register uses to the call.
   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
-    MIB.addReg(RegArgs[i]);
-
-  // Add a register mask with the call-preserved registers.
-  // Proper defs for return values will be added by setPhysRegsDeadExcept().
-  MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+    MIB.addReg(RegArgs[i], RegState::Implicit);
 
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
   unsigned NumBytesCallee = 0;
   if (!Subtarget->is64Bit() && !Subtarget->isTargetWindows() &&
+      !(CS.getCallingConv() == CallingConv::Fast ||
+        CS.getCallingConv() == CallingConv::GHC) &&
       CS.paramHasAttr(1, Attribute::StructRet))
     NumBytesCallee = 4;
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
@@ -1889,7 +1898,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     SmallVector<unsigned, 4> UsedRegs;
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
-		      I->getParent()->getContext());
+                      I->getParent()->getContext());
     unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
     CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
     for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -1903,7 +1912,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
         RVLocs[i].getLocReg() == X86::ST1)) {
       if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
         CopyVT = MVT::f80;
-        CopyReg = createResultReg(X86::RFP80RegisterClass);
+        CopyReg = createResultReg(&X86::RFP80RegClass);
       }
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
               TII.get(X86::FpPOP_RETVAL), CopyReg);
@@ -2001,37 +2010,37 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
   default: return false;
   case MVT::i8:
     Opc = X86::MOV8rm;
-    RC  = X86::GR8RegisterClass;
+    RC  = &X86::GR8RegClass;
     break;
   case MVT::i16:
     Opc = X86::MOV16rm;
-    RC  = X86::GR16RegisterClass;
+    RC  = &X86::GR16RegClass;
     break;
   case MVT::i32:
     Opc = X86::MOV32rm;
-    RC  = X86::GR32RegisterClass;
+    RC  = &X86::GR32RegClass;
     break;
   case MVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
-    RC  = X86::GR64RegisterClass;
+    RC  = &X86::GR64RegClass;
     break;
   case MVT::f32:
     if (X86ScalarSSEf32) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
-      RC  = X86::FR32RegisterClass;
+      RC  = &X86::FR32RegClass;
     } else {
       Opc = X86::LD_Fp32m;
-      RC  = X86::RFP32RegisterClass;
+      RC  = &X86::RFP32RegClass;
     }
     break;
   case MVT::f64:
     if (X86ScalarSSEf64) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
-      RC  = X86::FR64RegisterClass;
+      RC  = &X86::FR64RegClass;
     } else {
       Opc = X86::LD_Fp64m;
-      RC  = X86::RFP64RegisterClass;
+      RC  = &X86::RFP64RegClass;
    }
     break;
   case MVT::f80:
@@ -2124,19 +2133,19 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
   case MVT::f32:
     if (X86ScalarSSEf32) {
       Opc = X86::FsFLD0SS;
-      RC  = X86::FR32RegisterClass;
+      RC  = &X86::FR32RegClass;
     } else {
       Opc = X86::LD_Fp032;
-      RC  = X86::RFP32RegisterClass;
+      RC  = &X86::RFP32RegClass;
     }
     break;
   case MVT::f64:
     if (X86ScalarSSEf64) {
       Opc = X86::FsFLD0SD;
-      RC  = X86::FR64RegisterClass;
+      RC  = &X86::FR64RegClass;
     } else {
       Opc = X86::LD_Fp064;
-      RC  = X86::RFP64RegisterClass;
+      RC  = &X86::RFP64RegClass;
     }
     break;
   case MVT::f80:
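Most of the churn in this diff is a mechanical API migration: the TableGen-generated register classes that used to be exported as pointers named X86::GR32RegisterClass are now exported as concrete TargetRegisterClass objects named X86::GR32RegClass, so target code takes the object's address. A minimal sketch of the new idiom, assuming an LLVM tree of this vintage and using the file's own createResultReg helper:

    // New style: register classes are exported objects, so take their address.
    unsigned Opc = X86::MOV32rm;
    const TargetRegisterClass *RC = &X86::GR32RegClass; // was X86::GR32RegisterClass
    unsigned ResultReg = createResultReg(RC);           // allocate a vreg of that class

The GR16_ABCD/GR32_ABCD hunk additionally adds explicit (const TargetRegisterClass*) casts, presumably so the two arms of the ?: operator share a common pointer type after the migration.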
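The new CCValAssign::VExt and CCValAssign::Indirect cases replace the old default: llvm_unreachable. Returning false here is FastISel's standard bail-out, not an error: the driver abandons fast selection for that instruction and re-selects it with SelectionDAG, which is safer than asserting on a location kind fast-isel cannot handle. A rough sketch of that contract, with names taken from this file (simplified, not the full dispatch):

    // FastISel selection contract (sketch): return true if the instruction
    // was selected here, false to punt it to the SelectionDAG fallback path.
    bool X86FastISel::TargetSelectInstruction(const Instruction *I) {
      switch (I->getOpcode()) {
      default: break;
      case Instruction::Load:
        return X86SelectLoad(I); // false => SelectionDAG re-selects the load
      }
      return false;
    }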
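The call-lowering hunk changes how operands are attached to the emitted CALL: the call-preserved register mask now goes on right after the callee operand, and EBX, AL, and the argument registers carry RegState::Implicit instead of being added as bare operands, which would otherwise look like explicit operands the CALL's instruction description does not declare. A simplified sketch of the resulting builder sequence (CallOpc, GV, and OpFlags stand in for values computed earlier in DoSelectCall):

    MachineInstrBuilder MIB =
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc));
    MIB.addGlobalAddress(GV, 0, OpFlags); // the callee
    // Regmask operand: every register not set in the mask is clobbered by
    // the call; proper defs for return values are patched in afterwards.
    MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
    // Registers read by the call are implicit uses, not explicit operands.
    for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
      MIB.addReg(RegArgs[i], RegState::Implicit);

The CALLSEQ_END change in the same hunk is a convention fix: on x86-32 (outside Windows), a callee that receives a hidden sret pointer pops those 4 bytes itself (it returns with ret $4), so the caller must record 4 callee-popped bytes; as I understand it, fastcc and GHC functions do not follow that rule, hence the new exclusion.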