path: root/lib/Target/Mips/MipsISelLowering.cpp
Diffstat (limited to 'lib/Target/Mips/MipsISelLowering.cpp')
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp  447
1 file changed, 280 insertions(+), 167 deletions(-)
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index ff2bfb3..9253b2e 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -70,7 +70,7 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
if (!isShiftedMask_64(I))
return false;
- Size = CountPopulation_64(I);
+ Size = countPopulation(I);
Pos = countTrailingZeros(I);
return true;
}
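
For context, the helper touched by this hunk recognizes contiguous bit masks (used when matching EXT/INS patterns). A minimal standalone sketch of the same computation with the renamed countPopulation entry point, assuming the llvm/Support/MathExtras.h helpers and not part of the patch itself:

#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Illustration only: decompose a shifted mask such as 0x0000FF00 into its
// starting bit position and width, mirroring the helper updated above.
// e.g. I = 0x0000FF00 -> Pos = 8, Size = 8.
static bool isShiftedMaskExample(uint64_t I, uint64_t &Pos, uint64_t &Size) {
  if (!llvm::isShiftedMask_64(I))
    return false;
  Size = llvm::countPopulation(I);    // renamed from CountPopulation_64
  Pos = llvm::countTrailingZeros(I);
  return true;
}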
@@ -203,7 +203,7 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
const MipsSubtarget &STI)
- : TargetLowering(TM), Subtarget(STI) {
+ : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
// Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent);
@@ -215,12 +215,15 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
ZeroOrNegativeOneBooleanContent);
// Load extented operations for i1 types must be promoted
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ }
// MIPS doesn't have extending float->double load/store
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ for (MVT VT : MVT::fp_valuetypes())
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// Used by legalize types to correctly generate the setcc result.
@@ -258,6 +261,9 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::LOAD, MVT::i64, Custom);
setOperationAction(ISD::STORE, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
}
if (!Subtarget.isGP64bit()) {
@@ -368,9 +374,9 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
if (Subtarget.isGP64bit()) {
- setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
setTruncStoreAction(MVT::i64, MVT::i32, Custom);
}
@@ -387,14 +393,12 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
// The arguments on the stack are defined in terms of 4-byte slots on O32
// and 8-byte slots on N32/N64.
- setMinStackArgumentAlignment(
- (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4);
+ setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? 8 : 4);
- setStackPointerRegisterToSaveRestore(Subtarget.isABI_N64() ? Mips::SP_64
- : Mips::SP);
+ setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
- setExceptionPointerRegister(Subtarget.isABI_N64() ? Mips::A0_64 : Mips::A0);
- setExceptionSelectorRegister(Subtarget.isABI_N64() ? Mips::A1_64 : Mips::A1);
+ setExceptionPointerRegister(ABI.IsN64() ? Mips::A0_64 : Mips::A0);
+ setExceptionSelectorRegister(ABI.IsN64() ? Mips::A1_64 : Mips::A1);
MaxStoresPerMemcpy = 16;
@@ -933,18 +937,35 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case Mips::DIVU:
case Mips::MOD:
case Mips::MODU:
- return insertDivByZeroTrap(
- MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), false);
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false);
case Mips::PseudoDSDIV:
case Mips::PseudoDUDIV:
case Mips::DDIV:
case Mips::DDIVU:
case Mips::DMOD:
case Mips::DMODU:
- return insertDivByZeroTrap(
- MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), true);
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true);
case Mips::SEL_D:
return emitSEL_D(MI, BB);
+
+ case Mips::PseudoSELECT_I:
+ case Mips::PseudoSELECT_I64:
+ case Mips::PseudoSELECT_S:
+ case Mips::PseudoSELECT_D32:
+ case Mips::PseudoSELECT_D64:
+ return emitPseudoSELECT(MI, BB, false, Mips::BNE);
+ case Mips::PseudoSELECTFP_F_I:
+ case Mips::PseudoSELECTFP_F_I64:
+ case Mips::PseudoSELECTFP_F_S:
+ case Mips::PseudoSELECTFP_F_D32:
+ case Mips::PseudoSELECTFP_F_D64:
+ return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
+ case Mips::PseudoSELECTFP_T_I:
+ case Mips::PseudoSELECTFP_T_I64:
+ case Mips::PseudoSELECTFP_T_S:
+ case Mips::PseudoSELECTFP_T_D32:
+ case Mips::PseudoSELECTFP_T_D64:
+ return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
}
}
@@ -959,8 +980,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL, SC, AND, NOR, ZERO, BEQ;
@@ -1043,8 +1063,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
unsigned SrcReg) const {
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
if (Subtarget.hasMips32r2() && Size == 1) {
@@ -1080,8 +1099,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Dest = MI->getOperand(0).getReg();
@@ -1178,7 +1196,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
// beq success,$0,loopMBB
BB = loopMBB;
- BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ unsigned LL = isMicroMips ? Mips::LL_MM : Mips::LL;
+ BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@@ -1201,7 +1220,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- BuildMI(BB, DL, TII->get(Mips::SC), Success)
+ unsigned SC = isMicroMips ? Mips::SC_MM : Mips::SC;
+ BuildMI(BB, DL, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
@@ -1231,8 +1251,7 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL, SC, ZERO, BNE, BEQ;
@@ -1314,8 +1333,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Dest = MI->getOperand(0).getReg();
@@ -1412,7 +1430,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ unsigned LL = isMicroMips ? Mips::LL_MM : Mips::LL;
+ BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::BNE))
@@ -1428,7 +1447,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- BuildMI(BB, DL, TII->get(Mips::SC), Success)
+ unsigned SC = isMicroMips ? Mips::SC_MM : Mips::SC;
+ BuildMI(BB, DL, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
@@ -1450,10 +1470,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
- const TargetInstrInfo *TII =
- getTargetMachine().getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock::iterator II(MI);
@@ -1497,8 +1515,7 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
false, 0);
Chain = Addr.getValue(1);
- if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) ||
- Subtarget.isABI_N64()) {
+ if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) || ABI.IsN64()) {
// For PIC, the sequence is:
// BRIND(load(Jumptable + index) + RelocBase)
// RelocBase can be JumpTable, GOT or some sort of global base.
@@ -1580,32 +1597,29 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = N->getGlobal();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64()) {
- const MipsTargetObjectFile &TLOF =
- (const MipsTargetObjectFile&)getObjFileLowering();
-
- if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine()))
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64()) {
+ const MipsTargetObjectFile *TLOF =
+ static_cast<const MipsTargetObjectFile *>(
+ getTargetMachine().getObjFileLowering());
+ if (TLOF->IsGlobalInSmallSection(GV, getTargetMachine()))
// %gp_rel relocation
- return getAddrGPRel(N, Ty, DAG);
+ return getAddrGPRel(N, SDLoc(N), Ty, DAG);
// %hi/%lo relocation
- return getAddrNonPIC(N, Ty, DAG);
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
}
if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
if (LargeGOT)
- return getAddrGlobalLargeGOT(N, Ty, DAG, MipsII::MO_GOT_HI16,
+ return getAddrGlobalLargeGOT(N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16,
MipsII::MO_GOT_LO16, DAG.getEntryNode(),
MachinePointerInfo::getGOT());
- return getAddrGlobal(N, Ty, DAG,
- (Subtarget.isABI_N32() || Subtarget.isABI_N64())
- ? MipsII::MO_GOT_DISP
- : MipsII::MO_GOT16,
+ return getAddrGlobal(N, SDLoc(N), Ty, DAG,
+ (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP
+ : MipsII::MO_GOT16,
DAG.getEntryNode(), MachinePointerInfo::getGOT());
}
@@ -1614,12 +1628,10 @@ SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64())
- return getAddrNonPIC(N, Ty, DAG);
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64())
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
SDValue MipsTargetLowering::
@@ -1707,12 +1719,10 @@ lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64())
- return getAddrNonPIC(N, Ty, DAG);
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64())
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
SDValue MipsTargetLowering::
@@ -1721,20 +1731,19 @@ lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget.isABI_N64()) {
- const MipsTargetObjectFile &TLOF =
- (const MipsTargetObjectFile&)getObjFileLowering();
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64()) {
+ const MipsTargetObjectFile *TLOF =
+ static_cast<const MipsTargetObjectFile *>(
+ getTargetMachine().getObjFileLowering());
- if (TLOF.IsConstantInSmallSection(N->getConstVal(), getTargetMachine()))
+ if (TLOF->IsConstantInSmallSection(N->getConstVal(), getTargetMachine()))
// %gp_rel relocation
- return getAddrGPRel(N, Ty, DAG);
+ return getAddrGPRel(N, SDLoc(N), Ty, DAG);
- return getAddrNonPIC(N, Ty, DAG);
+ return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
}
- return getAddrLocal(N, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
@@ -1760,8 +1769,7 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
unsigned Align = Node->getConstantOperandVal(3);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc DL(Node);
- unsigned ArgSlotSizeInBytes =
- (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4;
+ unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
SDValue VAListLoad = DAG.getLoad(getPointerTy(), DL, Chain, VAListPtr,
MachinePointerInfo(SV), false, false, false,
@@ -1924,9 +1932,8 @@ lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
SDLoc DL(Op);
- SDValue FrameAddr =
- DAG.getCopyFromReg(DAG.getEntryNode(), DL,
- Subtarget.isABI_N64() ? Mips::FP_64 : Mips::FP, VT);
+ SDValue FrameAddr = DAG.getCopyFromReg(
+ DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
}
@@ -1942,7 +1949,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT VT = Op.getSimpleValueType();
- unsigned RA = Subtarget.isABI_N64() ? Mips::RA_64 : Mips::RA;
+ unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
MFI->setReturnAddressIsTaken(true);
// Return RA, which contains the return address. Mark it an implicit live-in.
@@ -1964,12 +1971,12 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
SDLoc DL(Op);
- EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
+ EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
// Store stack offset in V1, store jump target in V0. Glue CopyToReg and
// EH_RETURN nodes, so that instructions are emitted back-to-back.
- unsigned OffsetReg = Subtarget.isABI_N64() ? Mips::V1_64 : Mips::V1;
- unsigned AddrReg = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
+ unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
+ unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
@@ -1991,10 +1998,11 @@ SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
+ MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
+
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
-
- // if shamt < 32:
+ // if shamt < (VT.bits):
// lo = (shl lo, shamt)
// hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
// else:
@@ -2002,18 +2010,17 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
// hi = (shl lo, shamt[4:0])
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
DAG.getConstant(-1, MVT::i32));
- SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo,
- DAG.getConstant(1, MVT::i32));
- SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, ShiftRight1Lo,
- Not);
- SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, Shamt);
- SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
- SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, MVT::i32, Lo, Shamt);
+ SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
+ DAG.getConstant(1, VT));
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
DAG.getConstant(0x20, MVT::i32));
- Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
- DAG.getConstant(0, MVT::i32), ShiftLeftLo);
- Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or);
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
+ DAG.getConstant(0, VT), ShiftLeftLo);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
SDValue Ops[2] = {Lo, Hi};
return DAG.getMergeValues(Ops, DL);
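
The block comment above documents the expansion this hunk generalizes. A hedged, self-contained C++ sketch of the same arithmetic, written with 32-bit halves purely for illustration (the patch emits the identical pattern with 64-bit halves when lowering 128-bit shifts on GP64 targets):

#include <cstdint>

// Sketch of the double-word shift-left expansion. Hardware shifts mask the
// amount to the word width, which the (Shamt & 31) and (~Shamt & 31) terms
// model here.
static void shiftLeftParts(uint32_t &Lo, uint32_t &Hi, unsigned Shamt) {
  uint32_t ShiftLeftLo = Lo << (Shamt & 31);
  uint32_t Or = (Hi << (Shamt & 31)) | ((Lo >> 1) >> (~Shamt & 31));
  if (Shamt & 0x20) {       // shamt >= 32: low word is cleared,
    Hi = ShiftLeftLo;       // high word takes the shifted low word
    Lo = 0;
  } else {
    Hi = Or;
    Lo = ShiftLeftLo;
  }
}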
@@ -2024,8 +2031,9 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
SDLoc DL(Op);
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
+ MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
- // if shamt < 32:
+ // if shamt < (VT.bits):
// lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
// if isSRA:
// hi = (sra hi, shamt)
@@ -2040,21 +2048,19 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
// hi = 0
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
DAG.getConstant(-1, MVT::i32));
- SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
- DAG.getConstant(1, MVT::i32));
- SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, ShiftLeft1Hi, Not);
- SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, Shamt);
- SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
- SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, DL, MVT::i32,
- Hi, Shamt);
+ SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
+ DAG.getConstant(1, VT));
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
+ DL, VT, Hi, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
DAG.getConstant(0x20, MVT::i32));
- SDValue Shift31 = DAG.getNode(ISD::SRA, DL, MVT::i32, Hi,
- DAG.getConstant(31, MVT::i32));
- Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftRightHi, Or);
- Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
- IsSRA ? Shift31 : DAG.getConstant(0, MVT::i32),
- ShiftRightHi);
+ SDValue Shift31 = DAG.getNode(ISD::SRA, DL, VT, Hi, DAG.getConstant(31, VT));
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
+ IsSRA ? Shift31 : DAG.getConstant(0, VT), ShiftRightHi);
SDValue Ops[2] = {Lo, Hi};
return DAG.getMergeValues(Ops, DL);
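
Likewise for the right-shift expansion, a 32-bit illustration of the arithmetic the comment above describes (IsSRA selects an arithmetic rather than logical shift of the high word; a sketch under the assumption that signed right shift is arithmetic, not the emitted DAG):

#include <cstdint>

// Sketch of the double-word shift-right expansion described above.
static void shiftRightParts(uint32_t &Lo, uint32_t &Hi, unsigned Shamt,
                            bool IsSRA) {
  uint32_t Or = ((Hi << 1) << (~Shamt & 31)) | (Lo >> (Shamt & 31));
  uint32_t ShiftRightHi =
      IsSRA ? uint32_t(int32_t(Hi) >> (Shamt & 31)) : Hi >> (Shamt & 31);
  uint32_t SignFill = IsSRA ? uint32_t(int32_t(Hi) >> 31) : 0;
  if (Shamt & 0x20) {       // shamt >= 32: low word takes the shifted high
    Lo = ShiftRightHi;      // word, high word becomes the sign/zero fill
    Hi = SignFill;
  } else {
    Lo = Or;
    Hi = ShiftRightHi;
  }
}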
@@ -2266,9 +2272,9 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, const MCPhysReg *F64Regs) {
-
- static const unsigned IntRegsSize = 4, FloatRegsSize = 2;
+ CCState &State, ArrayRef<MCPhysReg> F64Regs) {
+ const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
+ State.getMachineFunction().getSubtarget());
static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
@@ -2278,6 +2284,19 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
return true;
// Promote i8 and i16
+ if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
+ if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
+ LocVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExtUpper;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExtUpper;
+ else
+ LocInfo = CCValAssign::AExtUpper;
+ }
+ }
+
+ // Promote i8 and i16
if (LocVT == MVT::i8 || LocVT == MVT::i16) {
LocVT = MVT::i32;
if (ArgFlags.isSExt())
@@ -2293,39 +2312,39 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
// f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
// is true: function is vararg, argument is 3rd or higher, there is previous
// argument which is not f32 or f64.
- bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1
- || State.getFirstUnallocated(F32Regs, FloatRegsSize) != ValNo;
+ bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
+ State.getFirstUnallocated(F32Regs) != ValNo;
unsigned OrigAlign = ArgFlags.getOrigAlign();
bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);
if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
// If this is the first part of an i64 arg,
// the allocated register must be either A0 or A2.
if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
LocVT = MVT::i32;
} else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
// Allocate int register and shadow next int register. If first
// available register is Mips::A1 or Mips::A3, shadow it too.
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
if (Reg == Mips::A1 || Reg == Mips::A3)
- Reg = State.AllocateReg(IntRegs, IntRegsSize);
- State.AllocateReg(IntRegs, IntRegsSize);
+ Reg = State.AllocateReg(IntRegs);
+ State.AllocateReg(IntRegs);
LocVT = MVT::i32;
} else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
// we are guaranteed to find an available float register
if (ValVT == MVT::f32) {
- Reg = State.AllocateReg(F32Regs, FloatRegsSize);
+ Reg = State.AllocateReg(F32Regs);
// Shadow int register
- State.AllocateReg(IntRegs, IntRegsSize);
+ State.AllocateReg(IntRegs);
} else {
- Reg = State.AllocateReg(F64Regs, FloatRegsSize);
+ Reg = State.AllocateReg(F64Regs);
// Shadow int registers
- unsigned Reg2 = State.AllocateReg(IntRegs, IntRegsSize);
+ unsigned Reg2 = State.AllocateReg(IntRegs);
if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
- State.AllocateReg(IntRegs, IntRegsSize);
- State.AllocateReg(IntRegs, IntRegsSize);
+ State.AllocateReg(IntRegs);
+ State.AllocateReg(IntRegs);
}
} else
llvm_unreachable("Cannot handle this ValVT.");
@@ -2407,8 +2426,8 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
// used for the function (that is, Mips linker doesn't generate lazy binding
// stub for a function whose address is taken in the program).
if (IsPICCall && !InternalLinkage && IsCallReloc) {
- unsigned GPReg = Subtarget.isABI_N64() ? Mips::GP_64 : Mips::GP;
- EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
+ unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
+ EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
}
@@ -2431,8 +2450,7 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
if (Subtarget.inMips16HardFloat()) {
@@ -2468,7 +2486,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
+ const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
@@ -2480,7 +2498,6 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Allocate the reserved argument area. It seems strange to do this from the
// caller side but removing it breaks the frame size calculation.
- const MipsABIInfo &ABI = Subtarget.getABI();
CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(), Callee.getNode());
@@ -2511,8 +2528,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
SDValue StackPtr = DAG.getCopyFromReg(
- Chain, DL, Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP,
- getPointerTy());
+ Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP, getPointerTy());
// With EABI is it possible to have 16 args on registers.
std::deque< std::pair<unsigned, SDValue> > RegsToPass;
@@ -2626,9 +2642,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
- bool IsPICCall =
- (Subtarget.isABI_N64() || IsPIC); // true if calls are translated to
- // jalr $25
+ bool IsPICCall = (ABI.IsN64() || IsPIC); // true if calls are translated to
+ // jalr $25
bool GlobalOrExternal = false, InternalLinkage = false, IsCallReloc = false;
SDValue CalleeLo;
EVT Ty = Callee.getValueType();
@@ -2639,15 +2654,14 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InternalLinkage = Val->hasInternalLinkage();
if (InternalLinkage)
- Callee = getAddrLocal(G, Ty, DAG,
- Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
else if (LargeGOT) {
- Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16,
+ Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Val));
IsCallReloc = true;
} else {
- Callee = getAddrGlobal(G, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
FuncInfo->callPtrInfo(Val));
IsCallReloc = true;
}
@@ -2659,16 +2673,16 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *Sym = S->getSymbol();
- if (!Subtarget.isABI_N64() && !IsPIC) // !N64 && static
- Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(),
- MipsII::MO_NO_FLAG);
+ if (!ABI.IsN64() && !IsPIC) // !N64 && static
+ Callee =
+ DAG.getTargetExternalSymbol(Sym, getPointerTy(), MipsII::MO_NO_FLAG);
else if (LargeGOT) {
- Callee = getAddrGlobalLargeGOT(S, Ty, DAG, MipsII::MO_CALL_HI16,
+ Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Sym));
IsCallReloc = true;
} else { // N64 || PIC
- Callee = getAddrGlobal(S, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
FuncInfo->callPtrInfo(Sym));
IsCallReloc = true;
}
@@ -2844,7 +2858,6 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
- const MipsABIInfo &ABI = Subtarget.getABI();
CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
Function::const_arg_iterator FuncArg =
DAG.getMachineFunction().getFunction()->arg_begin();
@@ -2858,13 +2871,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx);
- CurArgIdx = Ins[i].OrigArgIndex;
+ if (Ins[i].isOrigArg()) {
+ std::advance(FuncArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+ CurArgIdx = Ins[i].getOrigArgIndex();
+ }
EVT ValVT = VA.getValVT();
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool IsRegLoc = VA.isRegLoc();
if (Flags.isByVal()) {
+ assert(Ins[i].isOrigArg() && "Byval arguments cannot be implicit");
unsigned FirstByValReg, LastByValReg;
unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
@@ -2897,7 +2913,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
(RegVT == MVT::i64 && ValVT == MVT::f64) ||
(RegVT == MVT::f64 && ValVT == MVT::i64))
ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
- else if (Subtarget.isABI_O32() && RegVT == MVT::i32 &&
+ else if (ABI.IsO32() && RegVT == MVT::i32 &&
ValVT == MVT::f64) {
unsigned Reg2 = addLiveIn(DAG.getMachineFunction(),
getNextIntArgReg(ArgReg), RC);
@@ -2912,7 +2928,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
} else { // VA.isRegLoc()
MVT LocVT = VA.getLocVT();
- if (Subtarget.isABI_O32()) {
+ if (ABI.IsO32()) {
// We ought to be able to use LocVT directly but O32 sets it to i32
// when allocating floating point values to integer registers.
// This shouldn't influence how we load the value into registers unless
@@ -2949,7 +2965,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned Reg = MipsFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(
- getRegClassFor(Subtarget.isABI_N64() ? MVT::i64 : MVT::i32));
+ getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
MipsFI->setSRetReturnReg(Reg);
}
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
@@ -3066,7 +3082,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
- unsigned V0 = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
+ unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
Flag = Chain.getValue(1);
@@ -3201,7 +3217,7 @@ parsePhysicalReg(StringRef C, std::string &Prefix,
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
const TargetRegisterInfo *TRI =
- getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ Subtarget.getRegisterInfo();
const TargetRegisterClass *RC;
std::string Prefix;
unsigned long long Reg;
@@ -3275,9 +3291,10 @@ parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
-std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
-{
+std::pair<unsigned, const TargetRegisterClass *>
+MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
@@ -3333,7 +3350,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
if (R.second)
return R;
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
@@ -3477,7 +3494,7 @@ bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
}
unsigned MipsTargetLowering::getJumpTableEncoding() const {
- if (Subtarget.isABI_N64())
+ if (ABI.IsN64())
return MachineJumpTableInfo::EK_GPRel64BlockAddress;
return TargetLowering::getJumpTableEncoding();
@@ -3495,7 +3512,6 @@ void MipsTargetLowering::copyByValRegs(
unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
int FrameObjOffset;
- const MipsABIInfo &ABI = Subtarget.getABI();
ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();
if (RegAreaSize)
@@ -3547,7 +3563,7 @@ void MipsTargetLowering::passByValArg(
unsigned NumRegs = LastReg - FirstReg;
if (NumRegs) {
- const ArrayRef<MCPhysReg> ArgRegs = Subtarget.getABI().GetByValArgRegs();
+ const ArrayRef<MCPhysReg> ArgRegs = ABI.GetByValArgRegs();
bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
unsigned I = 0;
@@ -3630,8 +3646,8 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
SDValue Chain, SDLoc DL,
SelectionDAG &DAG,
CCState &State) const {
- const ArrayRef<MCPhysReg> ArgRegs = Subtarget.getABI().GetVarArgRegs();
- unsigned Idx = State.getFirstUnallocated(ArgRegs.data(), ArgRegs.size());
+ const ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
+ unsigned Idx = State.getFirstUnallocated(ArgRegs);
unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
const TargetRegisterClass *RC = getRegClassFor(RegTy);
@@ -3646,7 +3662,6 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
VaArgOffset =
RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
else {
- const MipsABIInfo &ABI = Subtarget.getABI();
VaArgOffset =
(int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
(int)(RegSizeInBytes * (ArgRegs.size() - Idx));
@@ -3677,8 +3692,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
unsigned Align) const {
- MachineFunction &MF = State->getMachineFunction();
- const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
+ const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
assert(Size && "Byval argument's size shouldn't be 0.");
@@ -3689,10 +3703,10 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
if (State->getCallingConv() != CallingConv::Fast) {
unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
- const ArrayRef<MCPhysReg> IntArgRegs = Subtarget.getABI().GetByValArgRegs();
+ const ArrayRef<MCPhysReg> IntArgRegs = ABI.GetByValArgRegs();
// FIXME: The O32 case actually describes no shadow registers.
const MCPhysReg *ShadowRegs =
- Subtarget.isABI_O32() ? IntArgRegs.data() : Mips64DPRegs;
+ ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;
// We used to check the size as well but we can't do that anymore since
// CCState::HandleByVal() rounds up the size after calling this function.
@@ -3700,7 +3714,7 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
"Byval argument's alignment should be a multiple of"
"RegSizeInBytes.");
- FirstReg = State->getFirstUnallocated(IntArgRegs.data(), IntArgRegs.size());
+ FirstReg = State->getFirstUnallocated(IntArgRegs);
// If Align > RegSizeInBytes, the first arg register must be even.
// FIXME: This condition happens to do the right thing but it's not the
@@ -3720,3 +3734,102 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
}
+
+MachineBasicBlock *
+MipsTargetLowering::emitPseudoSELECT(MachineInstr *MI, MachineBasicBlock *BB,
+ bool isFPCmp, unsigned Opc) const {
+ assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
+ "Subtarget already supports SELECT nodes with the use of"
+ "conditional-move instructions.");
+
+ const TargetInstrInfo *TII =
+ Subtarget.getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ // To "insert" a SELECT instruction, we actually have to insert the
+ // diamond control-flow pattern. The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = BB;
+ ++It;
+
+ // thisMBB:
+ // ...
+ // TrueVal = ...
+ // setcc r1, r2, r3
+ // bNE r1, r0, copy1MBB
+ // fallthrough --> copy0MBB
+ MachineBasicBlock *thisMBB = BB;
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, copy0MBB);
+ F->insert(It, sinkMBB);
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Next, add the true and fallthrough blocks as its successors.
+ BB->addSuccessor(copy0MBB);
+ BB->addSuccessor(sinkMBB);
+
+ if (isFPCmp) {
+ // bc1[tf] cc, sinkMBB
+ BuildMI(BB, DL, TII->get(Opc))
+ .addReg(MI->getOperand(1).getReg())
+ .addMBB(sinkMBB);
+ } else {
+ // bne rs, $0, sinkMBB
+ BuildMI(BB, DL, TII->get(Opc))
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(Mips::ZERO)
+ .addMBB(sinkMBB);
+ }
+
+ // copy0MBB:
+ // %FalseValue = ...
+ // # fallthrough to sinkMBB
+ BB = copy0MBB;
+
+ // Update machine-CFG edges
+ BB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
+ // ...
+ BB = sinkMBB;
+
+ BuildMI(*BB, BB->begin(), DL,
+ TII->get(Mips::PHI), MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB)
+ .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+
+ return BB;
+}
+
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned MipsTargetLowering::getRegisterByName(const char* RegName,
+ EVT VT) const {
+ // Named registers is expected to be fairly rare. For now, just support $28
+ // since the linux kernel uses it.
+ if (Subtarget.isGP64bit()) {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("$28", Mips::GP_64)
+ .Default(0);
+ if (Reg)
+ return Reg;
+ } else {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("$28", Mips::GP)
+ .Default(0);
+ if (Reg)
+ return Reg;
+ }
+ report_fatal_error("Invalid register name global variable");
+}