author     Stephen Hines <srhines@google.com>  2014-12-01 14:51:49 -0800
committer  Stephen Hines <srhines@google.com>  2014-12-02 16:08:10 -0800
commit     37ed9c199ca639565f6ce88105f9e39e898d82d0 (patch)
tree       8fb36d3910e3ee4c4e1b7422f4f017108efc52f5 /lib/Target/Mips/MipsISelLowering.cpp
parent     d2327b22152ced7bc46dc629fc908959e8a52d03 (diff)
Update aosp/master LLVM for rebase to r222494.
Change-Id: Ic787f5e0124df789bd26f3f24680f45e678eef2d
Diffstat (limited to 'lib/Target/Mips/MipsISelLowering.cpp')
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp | 1079
1 file changed, 544 insertions(+), 535 deletions(-)
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index b7af2d4..ff2bfb3 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -14,6 +14,7 @@
#include "MipsISelLowering.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
@@ -24,6 +25,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -56,15 +58,6 @@ EnableMipsFastISel("mips-fast-isel", cl::Hidden,
cl::desc("Allow mips-fast-isel to be used"),
cl::init(false));
-static const MCPhysReg O32IntRegs[4] = {
- Mips::A0, Mips::A1, Mips::A2, Mips::A3
-};
-
-static const MCPhysReg Mips64IntRegs[8] = {
- Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
- Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64
-};
-
static const MCPhysReg Mips64DPRegs[8] = {
Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
@@ -208,16 +201,16 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
}
}
-MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
- : TargetLowering(TM, new MipsTargetObjectFile()),
- Subtarget(&TM.getSubtarget<MipsSubtarget>()) {
+MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
+ const MipsSubtarget &STI)
+ : TargetLowering(TM), Subtarget(STI) {
// Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
// does. Integer booleans still use 0 and 1.
- if (Subtarget->hasMips32r6())
+ if (Subtarget.hasMips32r6())
setBooleanContents(ZeroOrOneBooleanContent,
ZeroOrNegativeOneBooleanContent);
@@ -251,12 +244,11 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SETCC, MVT::f32, Custom);
setOperationAction(ISD::SETCC, MVT::f64, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
- setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
- if (Subtarget->isGP64bit()) {
+ if (Subtarget.isGP64bit()) {
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
@@ -268,14 +260,14 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
}
- if (!Subtarget->isGP64bit()) {
+ if (!Subtarget.isGP64bit()) {
setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
setOperationAction(ISD::ADD, MVT::i32, Custom);
- if (Subtarget->isGP64bit())
+ if (Subtarget.isGP64bit())
setOperationAction(ISD::ADD, MVT::i64, Custom);
setOperationAction(ISD::SDIV, MVT::i32, Expand);
@@ -299,7 +291,7 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
- if (Subtarget->hasCnMips()) {
+ if (Subtarget.hasCnMips()) {
setOperationAction(ISD::CTPOP, MVT::i32, Legal);
setOperationAction(ISD::CTPOP, MVT::i64, Legal);
} else {
@@ -317,10 +309,10 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
- if (!Subtarget->hasMips32r2())
+ if (!Subtarget.hasMips32r2())
setOperationAction(ISD::ROTR, MVT::i32, Expand);
- if (!Subtarget->hasMips64r2())
+ if (!Subtarget.hasMips64r2())
setOperationAction(ISD::ROTR, MVT::i64, Expand);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
@@ -343,7 +335,8 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
- setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
@@ -358,23 +351,23 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
setInsertFencesForAtomic(true);
- if (!Subtarget->hasMips32r2()) {
+ if (!Subtarget.hasMips32r2()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
}
// MIPS16 lacks MIPS32's clz and clo instructions.
- if (!Subtarget->hasMips32() || Subtarget->inMips16Mode())
+ if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
- if (!Subtarget->hasMips64())
+ if (!Subtarget.hasMips64())
setOperationAction(ISD::CTLZ, MVT::i64, Expand);
- if (!Subtarget->hasMips32r2())
+ if (!Subtarget.hasMips32r2())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- if (!Subtarget->hasMips64r2())
+ if (!Subtarget.hasMips64r2())
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
- if (Subtarget->isGP64bit()) {
+ if (Subtarget.isGP64bit()) {
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom);
setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom);
@@ -390,24 +383,30 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM)
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::ADD);
- setMinFunctionAlignment(Subtarget->isGP64bit() ? 3 : 2);
+ setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2);
+
+ // The arguments on the stack are defined in terms of 4-byte slots on O32
+ // and 8-byte slots on N32/N64.
+ setMinStackArgumentAlignment(
+ (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4);
- setStackPointerRegisterToSaveRestore(Subtarget->isABI_N64() ? Mips::SP_64
- : Mips::SP);
+ setStackPointerRegisterToSaveRestore(Subtarget.isABI_N64() ? Mips::SP_64
+ : Mips::SP);
- setExceptionPointerRegister(Subtarget->isABI_N64() ? Mips::A0_64 : Mips::A0);
- setExceptionSelectorRegister(Subtarget->isABI_N64() ? Mips::A1_64 : Mips::A1);
+ setExceptionPointerRegister(Subtarget.isABI_N64() ? Mips::A0_64 : Mips::A0);
+ setExceptionSelectorRegister(Subtarget.isABI_N64() ? Mips::A1_64 : Mips::A1);
MaxStoresPerMemcpy = 16;
- isMicroMips = Subtarget->inMicroMipsMode();
+ isMicroMips = Subtarget.inMicroMipsMode();
}
-const MipsTargetLowering *MipsTargetLowering::create(MipsTargetMachine &TM) {
- if (TM.getSubtargetImpl()->inMips16Mode())
- return llvm::createMips16TargetLowering(TM);
+const MipsTargetLowering *MipsTargetLowering::create(const MipsTargetMachine &TM,
+ const MipsSubtarget &STI) {
+ if (STI.inMips16Mode())
+ return llvm::createMips16TargetLowering(TM, STI);
- return llvm::createMipsSETargetLowering(TM);
+ return llvm::createMipsSETargetLowering(TM, STI);
}
// Create a fast isel object.
@@ -427,7 +426,7 @@ EVT MipsTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -537,7 +536,7 @@ static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -616,11 +615,11 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
// Pattern match EXT.
// $dst = and ((sra or srl) $src , pos), (2**size - 1)
// => ext $dst, $src, size, pos
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
return SDValue();
SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
@@ -656,12 +655,12 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
// Pattern match INS.
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
// where mask1 = (2**size - 1) << pos, mask0 = ~mask1
// => ins $dst, $src, size, pos, $src1
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
return SDValue();
SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
@@ -710,7 +709,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
// (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
if (DCI.isBeforeLegalizeOps())
@@ -791,6 +790,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::SELECT_CC: return lowerSELECT_CC(Op, DAG);
case ISD::SETCC: return lowerSETCC(Op, DAG);
case ISD::VASTART: return lowerVASTART(Op, DAG);
+ case ISD::VAARG: return lowerVAARG(Op, DAG);
case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
@@ -933,16 +933,16 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case Mips::DIVU:
case Mips::MOD:
case Mips::MODU:
- return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(),
- false);
+ return insertDivByZeroTrap(
+ MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), false);
case Mips::PseudoDSDIV:
case Mips::PseudoDUDIV:
case Mips::DDIV:
case Mips::DDIVU:
case Mips::DMOD:
case Mips::DMODU:
- return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(),
- true);
+ return insertDivByZeroTrap(
+ MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), true);
case Mips::SEL_D:
return emitSEL_D(MI, BB);
}
@@ -959,7 +959,8 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getTargetMachine().getSubtargetImpl()->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL, SC, AND, NOR, ZERO, BEQ;
@@ -968,16 +969,16 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
LL = Mips::LL_MM;
SC = Mips::SC_MM;
} else {
- LL = Subtarget->hasMips32r6() ? Mips::LL : Mips::LL_R6;
- SC = Subtarget->hasMips32r6() ? Mips::SC : Mips::SC_R6;
+ LL = Subtarget.hasMips32r6() ? Mips::LL_R6 : Mips::LL;
+ SC = Subtarget.hasMips32r6() ? Mips::SC_R6 : Mips::SC;
}
AND = Mips::AND;
NOR = Mips::NOR;
ZERO = Mips::ZERO;
BEQ = Mips::BEQ;
} else {
- LL = Subtarget->hasMips64r6() ? Mips::LLD : Mips::LLD_R6;
- SC = Subtarget->hasMips64r6() ? Mips::SCD : Mips::SCD_R6;
+ LL = Subtarget.hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
+ SC = Subtarget.hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
AND = Mips::AND64;
NOR = Mips::NOR64;
ZERO = Mips::ZERO_64;
@@ -1042,15 +1043,16 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
unsigned SrcReg) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getTargetMachine().getSubtargetImpl()->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- if (Subtarget->hasMips32r2() && Size == 1) {
+ if (Subtarget.hasMips32r2() && Size == 1) {
BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
return BB;
}
- if (Subtarget->hasMips32r2() && Size == 2) {
+ if (Subtarget.hasMips32r2() && Size == 2) {
BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
return BB;
}
@@ -1078,7 +1080,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getTargetMachine().getSubtargetImpl()->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Dest = MI->getOperand(0).getReg();
@@ -1140,7 +1143,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- if (Subtarget->isLittle()) {
+ if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
unsigned Off = RegInfo.createVirtualRegister(RC);
@@ -1228,7 +1231,8 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getTargetMachine().getSubtargetImpl()->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL, SC, ZERO, BNE, BEQ;
@@ -1310,7 +1314,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getTargetMachine().getSubtargetImpl()->getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned Dest = MI->getOperand(0).getReg();
@@ -1380,7 +1385,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- if (Subtarget->isLittle()) {
+ if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
unsigned Off = RegInfo.createVirtualRegister(RC);
@@ -1445,8 +1450,10 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const TargetInstrInfo *TII =
+ getTargetMachine().getSubtargetImpl()->getInstrInfo();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock::iterator II(MI);
@@ -1487,11 +1494,11 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
Addr = DAG.getExtLoad(ISD::SEXTLOAD, DL, PTy, Chain, Addr,
MachinePointerInfo::getJumpTable(), MemVT, false, false,
- 0);
+ false, 0);
Chain = Addr.getValue(1);
if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) ||
- Subtarget->isABI_N64()) {
+ Subtarget.isABI_N64()) {
// For PIC, the sequence is:
// BRIND(load(Jumptable + index) + RelocBase)
// RelocBase can be JumpTable, GOT or some sort of global base.
@@ -1509,7 +1516,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Dest = Op.getOperand(2);
SDLoc DL(Op);
- assert(!Subtarget->hasMips32r6() && !Subtarget->hasMips64r6());
+ assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
// Return if flag is not set by a floating point comparison.
@@ -1529,7 +1536,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue MipsTargetLowering::
lowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
- assert(!Subtarget->hasMips32r6() && !Subtarget->hasMips64r6());
+ assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
// Return if flag is not set by a floating point comparison.
@@ -1555,7 +1562,7 @@ lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
}
SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- assert(!Subtarget->hasMips32r6() && !Subtarget->hasMips64r6());
+ assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
SDValue Cond = createFPCmp(DAG, Op);
assert(Cond.getOpcode() == MipsISD::FPCmp &&
@@ -1569,26 +1576,18 @@ SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
- // FIXME there isn't actually debug info here
- SDLoc DL(Op);
EVT Ty = Op.getValueType();
GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = N->getGlobal();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget->isABI_N64()) {
+ !Subtarget.isABI_N64()) {
const MipsTargetObjectFile &TLOF =
(const MipsTargetObjectFile&)getObjFileLowering();
- // %gp_rel relocation
- if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
- SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
- MipsII::MO_GPREL);
- SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, DL,
- DAG.getVTList(MVT::i32), GA);
- SDValue GPReg = DAG.getRegister(Mips::GP, MVT::i32);
- return DAG.getNode(ISD::ADD, DL, MVT::i32, GPReg, GPRelNode);
- }
+ if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine()))
+ // %gp_rel relocation
+ return getAddrGPRel(N, Ty, DAG);
// %hi/%lo relocation
return getAddrNonPIC(N, Ty, DAG);
@@ -1596,7 +1595,7 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
return getAddrLocal(N, Ty, DAG,
- Subtarget->isABI_N32() || Subtarget->isABI_N64());
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
if (LargeGOT)
return getAddrGlobalLargeGOT(N, Ty, DAG, MipsII::MO_GOT_HI16,
@@ -1604,7 +1603,7 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
MachinePointerInfo::getGOT());
return getAddrGlobal(N, Ty, DAG,
- (Subtarget->isABI_N32() || Subtarget->isABI_N64())
+ (Subtarget.isABI_N32() || Subtarget.isABI_N64())
? MipsII::MO_GOT_DISP
: MipsII::MO_GOT16,
DAG.getEntryNode(), MachinePointerInfo::getGOT());
@@ -1616,11 +1615,11 @@ SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
EVT Ty = Op.getValueType();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget->isABI_N64())
+ !Subtarget.isABI_N64())
return getAddrNonPIC(N, Ty, DAG);
return getAddrLocal(N, Ty, DAG,
- Subtarget->isABI_N32() || Subtarget->isABI_N64());
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
}
SDValue MipsTargetLowering::
@@ -1709,34 +1708,33 @@ lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
EVT Ty = Op.getValueType();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget->isABI_N64())
+ !Subtarget.isABI_N64())
return getAddrNonPIC(N, Ty, DAG);
return getAddrLocal(N, Ty, DAG,
- Subtarget->isABI_N32() || Subtarget->isABI_N64());
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
}
SDValue MipsTargetLowering::
lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
- // gp_rel relocation
- // FIXME: we should reference the constant pool using small data sections,
- // but the asm printer currently doesn't support this feature without
- // hacking it. This feature should come soon so we can uncomment the
- // stuff below.
- //if (IsInSmallSection(C->getType())) {
- // SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
- // SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
- // ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
EVT Ty = Op.getValueType();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
- !Subtarget->isABI_N64())
+ !Subtarget.isABI_N64()) {
+ const MipsTargetObjectFile &TLOF =
+ (const MipsTargetObjectFile&)getObjFileLowering();
+
+ if (TLOF.IsConstantInSmallSection(N->getConstVal(), getTargetMachine()))
+ // %gp_rel relocation
+ return getAddrGPRel(N, Ty, DAG);
+
return getAddrNonPIC(N, Ty, DAG);
+ }
return getAddrLocal(N, Ty, DAG,
- Subtarget->isABI_N32() || Subtarget->isABI_N64());
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
}
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
@@ -1754,6 +1752,65 @@ SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo(SV), false, false, 0);
}
+SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ EVT VT = Node->getValueType(0);
+ SDValue Chain = Node->getOperand(0);
+ SDValue VAListPtr = Node->getOperand(1);
+ unsigned Align = Node->getConstantOperandVal(3);
+ const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
+ SDLoc DL(Node);
+ unsigned ArgSlotSizeInBytes =
+ (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4;
+
+ SDValue VAListLoad = DAG.getLoad(getPointerTy(), DL, Chain, VAListPtr,
+ MachinePointerInfo(SV), false, false, false,
+ 0);
+ SDValue VAList = VAListLoad;
+
+ // Re-align the pointer if necessary.
+ // It should only ever be necessary for 64-bit types on O32 since the minimum
+ // argument alignment is the same as the maximum type alignment for N32/N64.
+ //
+ // FIXME: We currently align too often. The code generator doesn't notice
+ // when the pointer is still aligned from the last va_arg (or pair of
+ // va_args for the i64 on O32 case).
+ if (Align > getMinStackArgumentAlignment()) {
+ assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
+
+ VAList = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(Align - 1,
+ VAList.getValueType()));
+
+ VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(-(int64_t)Align,
+ VAList.getValueType()));
+ }
+
+ // Increment the pointer, VAList, to the next vaarg.
+ unsigned ArgSizeInBytes = getDataLayout()->getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
+ SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes, ArgSlotSizeInBytes),
+ VAList.getValueType()));
+ // Store the incremented VAList to the legalized pointer
+ Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
+ MachinePointerInfo(SV), false, false, 0);
+
+ // In big-endian mode we must adjust the pointer when the load size is smaller
+ // than the argument slot size. We must also reduce the known alignment to
+ // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
+ // the correct half of the slot, and reduce the alignment from 8 (slot
+ // alignment) down to 4 (type alignment).
+ if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
+ unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
+ VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
+ DAG.getIntPtrConstant(Adjustment));
+ }
+ // Load the actual argument out of the pointer VAList
+ return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo(), false, false,
+ false, 0);
+}
+
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
bool HasExtractInsert) {
EVT TyX = Op.getOperand(0).getValueType();
@@ -1851,10 +1908,10 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
- if (Subtarget->isGP64bit())
- return lowerFCOPYSIGN64(Op, DAG, Subtarget->hasExtractInsert());
+ if (Subtarget.isGP64bit())
+ return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
- return lowerFCOPYSIGN32(Op, DAG, Subtarget->hasExtractInsert());
+ return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
}
SDValue MipsTargetLowering::
@@ -1869,7 +1926,7 @@ lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue FrameAddr =
DAG.getCopyFromReg(DAG.getEntryNode(), DL,
- Subtarget->isABI_N64() ? Mips::FP_64 : Mips::FP, VT);
+ Subtarget.isABI_N64() ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
}
@@ -1885,7 +1942,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT VT = Op.getSimpleValueType();
- unsigned RA = Subtarget->isABI_N64() ? Mips::RA_64 : Mips::RA;
+ unsigned RA = Subtarget.isABI_N64() ? Mips::RA_64 : Mips::RA;
MFI->setReturnAddressIsTaken(true);
// Return RA, which contains the return address. Mark it an implicit live-in.
@@ -1907,12 +1964,12 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
SDLoc DL(Op);
- EVT Ty = Subtarget->isABI_N64() ? MVT::i64 : MVT::i32;
+ EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
// Store stack offset in V1, store jump target in V0. Glue CopyToReg and
// EH_RETURN nodes, so that instructions are emitted back-to-back.
- unsigned OffsetReg = Subtarget->isABI_N64() ? Mips::V1_64 : Mips::V1;
- unsigned AddrReg = Subtarget->isABI_N64() ? Mips::V0_64 : Mips::V0;
+ unsigned OffsetReg = Subtarget.isABI_N64() ? Mips::V1_64 : Mips::V1;
+ unsigned AddrReg = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
@@ -2025,7 +2082,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
LoadSDNode *LD = cast<LoadSDNode>(Op);
EVT MemVT = LD->getMemoryVT();
- if (Subtarget->systemSupportsUnalignedAccess())
+ if (Subtarget.systemSupportsUnalignedAccess())
return Op;
// Return if load is aligned or if MemVT is neither i32 nor i64.
@@ -2033,7 +2090,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
((MemVT != MVT::i32) && (MemVT != MVT::i64)))
return SDValue();
- bool IsLittle = Subtarget->isLittle();
+ bool IsLittle = Subtarget.isLittle();
EVT VT = Op.getValueType();
ISD::LoadExtType ExtType = LD->getExtensionType();
SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
@@ -2151,10 +2208,10 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
EVT MemVT = SD->getMemoryVT();
// Lower unaligned integer stores.
- if (!Subtarget->systemSupportsUnalignedAccess() &&
+ if (!Subtarget.systemSupportsUnalignedAccess() &&
(SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
((MemVT == MVT::i32) || (MemVT == MVT::i64)))
- return lowerUnalignedIntStore(SD, DAG, Subtarget->isLittle());
+ return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
return lowerFP_TO_SINT_STORE(SD, DAG);
}
@@ -2201,7 +2258,7 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
// an argument. Otherwise, passed in A1, A2, A3 and stack.
// f64 - Only passed in two aliased f32 registers if no int reg has been used
// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
-// not used, it must be shadowed. If only A3 is avaiable, shadow it and
+// not used, it must be shadowed. If only A3 is available, shadow it and
// go to stack.
//
// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
@@ -2299,6 +2356,10 @@ static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
}
+static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+ CCState &State) LLVM_ATTRIBUTE_UNUSED;
+
#include "MipsGenCallingConv.inc"
//===----------------------------------------------------------------------===//
@@ -2333,15 +2394,21 @@ void MipsTargetLowering::
getOpndList(SmallVectorImpl<SDValue> &Ops,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
- CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
+ bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
+ SDValue Chain) const {
// Insert node "GP copy globalreg" before call to function.
//
// R_MIPS_CALL* operators (emitted when non-internal functions are called
// in PIC mode) allow symbols to be resolved via lazy binding.
// The lazy binding stub requires GP to point to the GOT.
- if (IsPICCall && !InternalLinkage) {
- unsigned GPReg = Subtarget->isABI_N64() ? Mips::GP_64 : Mips::GP;
- EVT Ty = Subtarget->isABI_N64() ? MVT::i64 : MVT::i32;
+ // Note that we don't need GP to point to the GOT for indirect calls
+ // (when R_MIPS_CALL* is not used for the call) because Mips linker generates
+ // lazy binding stub for a function only when R_MIPS_CALL* are the only relocs
+ // used for the function (that is, Mips linker doesn't generate lazy binding
+ // stub for a function whose address is taken in the program).
+ if (IsPICCall && !InternalLinkage && IsCallReloc) {
+ unsigned GPReg = Subtarget.isABI_N64() ? Mips::GP_64 : Mips::GP;
+ EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
}
@@ -2364,10 +2431,11 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
- if (Subtarget->inMips16HardFloat()) {
+ if (Subtarget.inMips16HardFloat()) {
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
llvm::StringRef Sym = G->getGlobal()->getName();
Function *F = G->getGlobal()->getParent()->getFunction(Sym);
@@ -2400,31 +2468,30 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
+ const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC::SpecialCallingConvType SpecialCallingConv =
- getSpecialCallingConv(Callee);
- MipsCC MipsCCInfo(CallConv, Subtarget->isABI_O32(), Subtarget->isFP64bit(),
- CCInfo, SpecialCallingConv);
+ MipsCCState CCInfo(
+ CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
+ MipsCCState::getSpecialCallingConvForCallee(Callee.getNode(), Subtarget));
- MipsCCInfo.analyzeCallOperands(Outs, IsVarArg,
- Subtarget->mipsSEUsesSoftFloat(),
- Callee.getNode(), CLI.getArgs());
+ // Allocate the reserved argument area. It seems strange to do this from the
+ // caller side but removing it breaks the frame size calculation.
+ const MipsABIInfo &ABI = Subtarget.getABI();
+ CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
+
+ CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(), Callee.getNode());
// Get a count of how many bytes are to be pushed on the stack.
unsigned NextStackOffset = CCInfo.getNextStackOffset();
// Check if it's really possible to do a tail call.
if (IsTailCall)
- IsTailCall =
- isEligibleForTailCallOptimization(MipsCCInfo, NextStackOffset,
- *MF.getInfo<MipsFunctionInfo>());
+ IsTailCall = isEligibleForTailCallOptimization(
+ CCInfo, NextStackOffset, *MF.getInfo<MipsFunctionInfo>());
if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
@@ -2444,13 +2511,14 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
SDValue StackPtr = DAG.getCopyFromReg(
- Chain, DL, Subtarget->isABI_N64() ? Mips::SP_64 : Mips::SP,
+ Chain, DL, Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP,
getPointerTy());
// With EABI is it possible to have 16 args on registers.
std::deque< std::pair<unsigned, SDValue> > RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
- MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin();
+
+ CCInfo.rewindByValRegsInfo();
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@@ -2458,23 +2526,30 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CCValAssign &VA = ArgLocs[i];
MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ bool UseUpperBits = false;
// ByVal Arg.
if (Flags.isByVal()) {
+ unsigned FirstByValReg, LastByValReg;
+ unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
+ CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
+
assert(Flags.getByValSize() &&
"ByVal args of size 0 should have been ignored by front-end.");
- assert(ByValArg != MipsCCInfo.byval_end());
+ assert(ByValIdx < CCInfo.getInRegsParamsCount());
assert(!IsTailCall &&
"Do not tail-call optimize if there is a byval argument.");
passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
- MipsCCInfo, *ByValArg, Flags, Subtarget->isLittle());
- ++ByValArg;
+ FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
+ VA);
+ CCInfo.nextInRegsParam();
continue;
}
// Promote the value if needed.
switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
+ default:
+ llvm_unreachable("Unknown loc info!");
case CCValAssign::Full:
if (VA.isRegLoc()) {
if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
@@ -2486,7 +2561,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Arg, DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
Arg, DAG.getConstant(1, MVT::i32));
- if (!Subtarget->isLittle())
+ if (!Subtarget.isLittle())
std::swap(Lo, Hi);
unsigned LocRegLo = VA.getLocReg();
unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
@@ -2496,17 +2571,37 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
}
break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
+ break;
+ case CCValAssign::SExtUpper:
+ UseUpperBits = true;
+ // Fallthrough
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
break;
+ case CCValAssign::ZExtUpper:
+ UseUpperBits = true;
+ // Fallthrough
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
break;
+ case CCValAssign::AExtUpper:
+ UseUpperBits = true;
+ // Fallthrough
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
break;
}
+ if (UseUpperBits) {
+ unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
+ unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
+ Arg = DAG.getNode(
+ ISD::SHL, DL, VA.getLocVT(), Arg,
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ }
+
// Arguments that can be passed on register must be kept at
// RegsToPass vector
if (VA.isRegLoc()) {
@@ -2532,9 +2627,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
bool IsPICCall =
- (Subtarget->isABI_N64() || IsPIC); // true if calls are translated to
+ (Subtarget.isABI_N64() || IsPIC); // true if calls are translated to
// jalr $25
- bool GlobalOrExternal = false, InternalLinkage = false;
+ bool GlobalOrExternal = false, InternalLinkage = false, IsCallReloc = false;
SDValue CalleeLo;
EVT Ty = Callee.getValueType();
@@ -2545,14 +2640,17 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (InternalLinkage)
Callee = getAddrLocal(G, Ty, DAG,
- Subtarget->isABI_N32() || Subtarget->isABI_N64());
- else if (LargeGOT)
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
+ else if (LargeGOT) {
Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Val));
- else
+ IsCallReloc = true;
+ } else {
Callee = getAddrGlobal(G, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
FuncInfo->callPtrInfo(Val));
+ IsCallReloc = true;
+ }
} else
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0,
MipsII::MO_NO_FLAG);
@@ -2561,16 +2659,19 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *Sym = S->getSymbol();
- if (!Subtarget->isABI_N64() && !IsPIC) // !N64 && static
+ if (!Subtarget.isABI_N64() && !IsPIC) // !N64 && static
Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(),
MipsII::MO_NO_FLAG);
- else if (LargeGOT)
+ else if (LargeGOT) {
Callee = getAddrGlobalLargeGOT(S, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Sym));
- else // N64 || PIC
+ IsCallReloc = true;
+ } else { // N64 || PIC
Callee = getAddrGlobal(S, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
FuncInfo->callPtrInfo(Sym));
+ IsCallReloc = true;
+ }
GlobalOrExternal = true;
}
@@ -2579,7 +2680,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal, InternalLinkage,
- CLI, Callee, Chain);
+ IsCallReloc, CLI, Callee, Chain);
if (IsTailCall)
return DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
@@ -2594,39 +2695,68 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Handle result values, copying them out of physregs into vregs that we
// return.
- return LowerCallResult(Chain, InFlag, CallConv, IsVarArg,
- Ins, DL, DAG, InVals, CLI.Callee.getNode(), CLI.RetTy);
+ return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
+ InVals, CLI);
}
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
-SDValue
-MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- const SDNode *CallNode,
- const Type *RetTy) const {
+SDValue MipsTargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ TargetLowering::CallLoweringInfo &CLI) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, Subtarget->isABI_O32(), Subtarget->isFP64bit(),
- CCInfo);
-
- MipsCCInfo.analyzeCallResult(Ins, Subtarget->mipsSEUsesSoftFloat(),
- CallNode, RetTy);
+ MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+ CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
RVLocs[i].getLocVT(), InFlag);
Chain = Val.getValue(1);
InFlag = Val.getValue(2);
- if (RVLocs[i].getValVT() != RVLocs[i].getLocVT())
- Val = DAG.getNode(ISD::BITCAST, DL, RVLocs[i].getValVT(), Val);
+ if (VA.isUpperBitsInLoc()) {
+ unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
+ unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
+ unsigned Shift =
+ VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
+ Val = DAG.getNode(
+ Shift, DL, VA.getLocVT(), Val,
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ }
+
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::BCvt:
+ Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
+ break;
+ case CCValAssign::AExt:
+ case CCValAssign::AExtUpper:
+ Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
+ break;
+ case CCValAssign::ZExt:
+ case CCValAssign::ZExtUpper:
+ Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
+ DAG.getValueType(VA.getValVT()));
+ Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
+ break;
+ case CCValAssign::SExt:
+ case CCValAssign::SExtUpper:
+ Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
+ DAG.getValueType(VA.getValVT()));
+ Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
+ break;
+ }
InVals.push_back(Val);
}
@@ -2634,6 +2764,60 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
return Chain;
}
+static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
+ EVT ArgVT, SDLoc DL, SelectionDAG &DAG) {
+ MVT LocVT = VA.getLocVT();
+ EVT ValVT = VA.getValVT();
+
+ // Shift into the upper bits if necessary.
+ switch (VA.getLocInfo()) {
+ default:
+ break;
+ case CCValAssign::AExtUpper:
+ case CCValAssign::SExtUpper:
+ case CCValAssign::ZExtUpper: {
+ unsigned ValSizeInBits = ArgVT.getSizeInBits();
+ unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
+ unsigned Opcode =
+ VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
+ Val = DAG.getNode(
+ Opcode, DL, VA.getLocVT(), Val,
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ break;
+ }
+ }
+
+ // If this is an value smaller than the argument slot size (32-bit for O32,
+ // 64-bit for N32/N64), it has been promoted in some way to the argument slot
+ // size. Extract the value and insert any appropriate assertions regarding
+ // sign/zero extension.
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::AExtUpper:
+ case CCValAssign::AExt:
+ Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
+ break;
+ case CCValAssign::SExtUpper:
+ case CCValAssign::SExt:
+ Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
+ Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
+ break;
+ case CCValAssign::ZExtUpper:
+ case CCValAssign::ZExt:
+ Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
+ Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
+ break;
+ case CCValAssign::BCvt:
+ Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
+ break;
+ }
+
+ return Val;
+}
+
//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -2658,20 +2842,19 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, Subtarget->isABI_O32(), Subtarget->isFP64bit(),
- CCInfo);
+ MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+ const MipsABIInfo &ABI = Subtarget.getABI();
+ CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
Function::const_arg_iterator FuncArg =
DAG.getMachineFunction().getFunction()->arg_begin();
- bool UseSoftFloat = Subtarget->mipsSEUsesSoftFloat();
- MipsCCInfo.analyzeFormalArguments(Ins, UseSoftFloat, FuncArg);
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
- MipsCCInfo.hasByValArg());
+ CCInfo.getInRegsParamsCount() > 0);
unsigned CurArgIdx = 0;
- MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin();
+ CCInfo.rewindByValRegsInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
@@ -2682,12 +2865,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
bool IsRegLoc = VA.isRegLoc();
if (Flags.isByVal()) {
+ unsigned FirstByValReg, LastByValReg;
+ unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
+ CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
+
assert(Flags.getByValSize() &&
"ByVal args of size 0 should have been ignored by front-end.");
- assert(ByValArg != MipsCCInfo.byval_end());
+ assert(ByValIdx < CCInfo.getInRegsParamsCount());
copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
- MipsCCInfo, *ByValArg);
- ++ByValArg;
+ FirstByValReg, LastByValReg, VA, CCInfo);
+ CCInfo.nextInRegsParam();
continue;
}
@@ -2702,20 +2889,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
- // If this is an 8 or 16-bit value, it has been passed promoted
- // to 32 bits. Insert an assert[sz]ext to capture this, then
- // truncate to the right size.
- if (VA.getLocInfo() != CCValAssign::Full) {
- unsigned Opcode = 0;
- if (VA.getLocInfo() == CCValAssign::SExt)
- Opcode = ISD::AssertSext;
- else if (VA.getLocInfo() == CCValAssign::ZExt)
- Opcode = ISD::AssertZext;
- if (Opcode)
- ArgValue = DAG.getNode(Opcode, DL, RegVT, ArgValue,
- DAG.getValueType(ValVT));
- ArgValue = DAG.getNode(ISD::TRUNCATE, DL, ValVT, ArgValue);
- }
+ ArgValue = UnpackFromArgumentSlot(ArgValue, VA, Ins[i].ArgVT, DL, DAG);
// Handle floating point arguments passed in integer registers and
// long double arguments passed in floating point registers.
@@ -2723,12 +2897,12 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
(RegVT == MVT::i64 && ValVT == MVT::f64) ||
(RegVT == MVT::f64 && ValVT == MVT::i64))
ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
- else if (Subtarget->isABI_O32() && RegVT == MVT::i32 &&
+ else if (Subtarget.isABI_O32() && RegVT == MVT::i32 &&
ValVT == MVT::f64) {
unsigned Reg2 = addLiveIn(DAG.getMachineFunction(),
getNextIntArgReg(ArgReg), RC);
SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
- if (!Subtarget->isLittle())
+ if (!Subtarget.isLittle())
std::swap(ArgValue, ArgValue2);
ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
ArgValue, ArgValue2);
@@ -2736,21 +2910,34 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
InVals.push_back(ArgValue);
} else { // VA.isRegLoc()
+ MVT LocVT = VA.getLocVT();
+
+ if (Subtarget.isABI_O32()) {
+ // We ought to be able to use LocVT directly but O32 sets it to i32
+ // when allocating floating point values to integer registers.
+ // This shouldn't influence how we load the value into registers unless
+ // we are targetting softfloat.
+ if (VA.getValVT().isFloatingPoint() && !Subtarget.abiUsesSoftFloat())
+ LocVT = VA.getValVT();
+ }
// sanity check
assert(VA.isMemLoc());
// The stack pointer offset is relative to the caller stack frame.
- int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
+ int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
VA.getLocMemOffset(), true);
// Create load nodes to retrieve arguments from the stack
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- SDValue Load = DAG.getLoad(ValVT, DL, Chain, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, false, 0);
- InVals.push_back(Load);
- OutChains.push_back(Load.getValue(1));
+ SDValue ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, false, 0);
+ OutChains.push_back(ArgValue.getValue(1));
+
+ ArgValue = UnpackFromArgumentSlot(ArgValue, VA, Ins[i].ArgVT, DL, DAG);
+
+ InVals.push_back(ArgValue);
}
}
@@ -2762,7 +2949,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned Reg = MipsFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(
- getRegClassFor(Subtarget->isABI_N64() ? MVT::i64 : MVT::i32));
+ getRegClassFor(Subtarget.isABI_N64() ? MVT::i64 : MVT::i32));
MipsFI->setSRetReturnReg(Reg);
}
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
@@ -2772,7 +2959,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
}
if (IsVarArg)
- writeVarArgRegs(OutChains, MipsCCInfo, Chain, DL, DAG);
+ writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
// All stores are grouped in one node to allow the matching between
// the size of Ins and InVals. This only happens when on varg functions
@@ -2794,8 +2981,7 @@ MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(),
- RVLocs, Context);
+ MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
return CCInfo.CheckReturn(Outs, RetCC_Mips);
}
@@ -2811,14 +2997,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
MachineFunction &MF = DAG.getMachineFunction();
// CCState - Info about the registers and stack slot.
- CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), RVLocs,
- *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, Subtarget->isABI_O32(), Subtarget->isFP64bit(),
- CCInfo);
+ MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
// Analyze return values.
- MipsCCInfo.analyzeReturn(Outs, Subtarget->mipsSEUsesSoftFloat(),
- MF.getFunction()->getReturnType());
+ CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
SDValue Flag;
SmallVector<SDValue, 4> RetOps(1, Chain);
@@ -2828,9 +3010,43 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
SDValue Val = OutVals[i];
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
+ bool UseUpperBits = false;
- if (RVLocs[i].getValVT() != RVLocs[i].getLocVT())
- Val = DAG.getNode(ISD::BITCAST, DL, RVLocs[i].getLocVT(), Val);
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::BCvt:
+ Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
+ break;
+ case CCValAssign::AExtUpper:
+ UseUpperBits = true;
+ // Fallthrough
+ case CCValAssign::AExt:
+ Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
+ break;
+ case CCValAssign::ZExtUpper:
+ UseUpperBits = true;
+ // Fallthrough
+ case CCValAssign::ZExt:
+ Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
+ break;
+ case CCValAssign::SExtUpper:
+ UseUpperBits = true;
+ // Fallthrough
+ case CCValAssign::SExt:
+ Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
+ break;
+ }
+
+ if (UseUpperBits) {
+ unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
+ unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
+ Val = DAG.getNode(
+ ISD::SHL, DL, VA.getLocVT(), Val,
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ }
Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
@@ -2850,7 +3066,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
- unsigned V0 = Subtarget->isABI_N64() ? Mips::V0_64 : Mips::V0;
+ unsigned V0 = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
Flag = Chain.getValue(1);
@@ -2928,7 +3144,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
weight = CW_Register;
break;
case 'f': // FPU or MSA register
- if (Subtarget->hasMSA() && type->isVectorTy() &&
+ if (Subtarget.hasMSA() && type->isVectorTy() &&
cast<VectorType>(type)->getBitWidth() == 128)
weight = CW_Register;
else if (type->isFloatTy())
@@ -2962,7 +3178,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
/// that is returned indicates whether parsing was successful. The second flag
/// is true if the numeric part exists.
static std::pair<bool, bool>
-parsePhysicalReg(const StringRef &C, std::string &Prefix,
+parsePhysicalReg(StringRef C, std::string &Prefix,
unsigned long long &Reg) {
if (C.front() != '{' || C.back() != '}')
return std::make_pair(false, false);
@@ -2983,8 +3199,9 @@ parsePhysicalReg(const StringRef &C, std::string &Prefix,
}
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
-parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
const TargetRegisterClass *RC;
std::string Prefix;
unsigned long long Reg;
@@ -3034,7 +3251,7 @@ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
// If the size of FP registers is 64-bit or Reg is an even number, select
// the 64-bit register class. Otherwise, select the 32-bit register class.
if (VT == MVT::Other)
- VT = (Subtarget->isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
+ VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
RC = getRegClassFor(VT);
@@ -3067,13 +3284,13 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
case 'y': // Same as 'r'. Exists for compatibility.
case 'r':
if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
- if (Subtarget->inMips16Mode())
+ if (Subtarget.inMips16Mode())
return std::make_pair(0U, &Mips::CPU16RegsRegClass);
return std::make_pair(0U, &Mips::GPR32RegClass);
}
- if (VT == MVT::i64 && !Subtarget->isGP64bit())
+ if (VT == MVT::i64 && !Subtarget.isGP64bit())
return std::make_pair(0U, &Mips::GPR32RegClass);
- if (VT == MVT::i64 && Subtarget->isGP64bit())
+ if (VT == MVT::i64 && Subtarget.isGP64bit())
return std::make_pair(0U, &Mips::GPR64RegClass);
// This will generate an error message
return std::make_pair(0U, nullptr);
@@ -3088,8 +3305,8 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
return std::make_pair(0U, &Mips::MSA128DRegClass);
else if (VT == MVT::f32)
return std::make_pair(0U, &Mips::FGR32RegClass);
- else if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
- if (Subtarget->isFP64bit())
+ else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
+ if (Subtarget.isFP64bit())
return std::make_pair(0U, &Mips::FGR64RegClass);
return std::make_pair(0U, &Mips::AFGR64RegClass);
}
@@ -3245,7 +3462,7 @@ EVT MipsTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- if (Subtarget->hasMips64())
+ if (Subtarget.hasMips64())
return MVT::i64;
return MVT::i32;
@@ -3260,291 +3477,33 @@ bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
}
unsigned MipsTargetLowering::getJumpTableEncoding() const {
- if (Subtarget->isABI_N64())
+ if (Subtarget.isABI_N64())
return MachineJumpTableInfo::EK_GPRel64BlockAddress;
return TargetLowering::getJumpTableEncoding();
}
-/// This function returns true if CallSym is a long double emulation routine.
-static bool isF128SoftLibCall(const char *CallSym) {
- const char *const LibCalls[] =
- {"__addtf3", "__divtf3", "__eqtf2", "__extenddftf2", "__extendsftf2",
- "__fixtfdi", "__fixtfsi", "__fixtfti", "__fixunstfdi", "__fixunstfsi",
- "__fixunstfti", "__floatditf", "__floatsitf", "__floattitf",
- "__floatunditf", "__floatunsitf", "__floatuntitf", "__getf2", "__gttf2",
- "__letf2", "__lttf2", "__multf3", "__netf2", "__powitf2", "__subtf3",
- "__trunctfdf2", "__trunctfsf2", "__unordtf2",
- "ceill", "copysignl", "cosl", "exp2l", "expl", "floorl", "fmal", "fmodl",
- "log10l", "log2l", "logl", "nearbyintl", "powl", "rintl", "sinl", "sqrtl",
- "truncl"};
-
- const char *const *End = LibCalls + array_lengthof(LibCalls);
-
- // Check that LibCalls is sorted alphabetically.
- MipsTargetLowering::LTStr Comp;
-
-#ifndef NDEBUG
- for (const char *const *I = LibCalls; I < End - 1; ++I)
- assert(Comp(*I, *(I + 1)));
-#endif
-
- return std::binary_search(LibCalls, End, CallSym, Comp);
-}
-
-/// This function returns true if Ty is fp128 or i128 which was originally a
-/// fp128.
-static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
- if (Ty->isFP128Ty())
- return true;
-
- const ExternalSymbolSDNode *ES =
- dyn_cast_or_null<const ExternalSymbolSDNode>(CallNode);
-
- // If the Ty is i128 and the function being called is a long double emulation
- // routine, then the original type is f128.
- return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
-}
-
-MipsTargetLowering::MipsCC::SpecialCallingConvType
- MipsTargetLowering::getSpecialCallingConv(SDValue Callee) const {
- MipsCC::SpecialCallingConvType SpecialCallingConv =
- MipsCC::NoSpecialCallingConv;
- if (Subtarget->inMips16HardFloat()) {
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- llvm::StringRef Sym = G->getGlobal()->getName();
- Function *F = G->getGlobal()->getParent()->getFunction(Sym);
- if (F && F->hasFnAttribute("__Mips16RetHelper")) {
- SpecialCallingConv = MipsCC::Mips16RetHelperConv;
- }
- }
- }
- return SpecialCallingConv;
-}
-
-MipsTargetLowering::MipsCC::MipsCC(
- CallingConv::ID CC, bool IsO32_, bool IsFP64_, CCState &Info,
- MipsCC::SpecialCallingConvType SpecialCallingConv_)
- : CCInfo(Info), CallConv(CC), IsO32(IsO32_), IsFP64(IsFP64_),
- SpecialCallingConv(SpecialCallingConv_){
- // Pre-allocate reserved argument area.
- CCInfo.AllocateStack(reservedArgArea(), 1);
-}
-
-
-void MipsTargetLowering::MipsCC::
-analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Args,
- bool IsVarArg, bool IsSoftFloat, const SDNode *CallNode,
- std::vector<ArgListEntry> &FuncArgs) {
- assert((CallConv != CallingConv::Fast || !IsVarArg) &&
- "CallingConv::Fast shouldn't be used for vararg functions.");
-
- unsigned NumOpnds = Args.size();
- llvm::CCAssignFn *FixedFn = fixedArgFn(), *VarFn = varArgFn();
-
- for (unsigned I = 0; I != NumOpnds; ++I) {
- MVT ArgVT = Args[I].VT;
- ISD::ArgFlagsTy ArgFlags = Args[I].Flags;
- bool R;
-
- if (ArgFlags.isByVal()) {
- handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags);
- continue;
- }
-
- if (IsVarArg && !Args[I].IsFixed)
- R = VarFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
- else {
- MVT RegVT = getRegVT(ArgVT, FuncArgs[Args[I].OrigArgIndex].Ty, CallNode,
- IsSoftFloat);
- R = FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo);
- }
-
- if (R) {
-#ifndef NDEBUG
- dbgs() << "Call operand #" << I << " has unhandled type "
- << EVT(ArgVT).getEVTString();
-#endif
- llvm_unreachable(nullptr);
- }
- }
-}
-
-void MipsTargetLowering::MipsCC::
-analyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Args,
- bool IsSoftFloat, Function::const_arg_iterator FuncArg) {
- unsigned NumArgs = Args.size();
- llvm::CCAssignFn *FixedFn = fixedArgFn();
- unsigned CurArgIdx = 0;
-
- for (unsigned I = 0; I != NumArgs; ++I) {
- MVT ArgVT = Args[I].VT;
- ISD::ArgFlagsTy ArgFlags = Args[I].Flags;
- std::advance(FuncArg, Args[I].OrigArgIndex - CurArgIdx);
- CurArgIdx = Args[I].OrigArgIndex;
-
- if (ArgFlags.isByVal()) {
- handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags);
- continue;
- }
-
- MVT RegVT = getRegVT(ArgVT, FuncArg->getType(), nullptr, IsSoftFloat);
-
- if (!FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo))
- continue;
-
-#ifndef NDEBUG
- dbgs() << "Formal Arg #" << I << " has unhandled type "
- << EVT(ArgVT).getEVTString();
-#endif
- llvm_unreachable(nullptr);
- }
-}
-
-template<typename Ty>
-void MipsTargetLowering::MipsCC::
-analyzeReturn(const SmallVectorImpl<Ty> &RetVals, bool IsSoftFloat,
- const SDNode *CallNode, const Type *RetTy) const {
- CCAssignFn *Fn;
-
- if (IsSoftFloat && originalTypeIsF128(RetTy, CallNode))
- Fn = RetCC_F128Soft;
- else
- Fn = RetCC_Mips;
-
- for (unsigned I = 0, E = RetVals.size(); I < E; ++I) {
- MVT VT = RetVals[I].VT;
- ISD::ArgFlagsTy Flags = RetVals[I].Flags;
- MVT RegVT = this->getRegVT(VT, RetTy, CallNode, IsSoftFloat);
-
- if (Fn(I, VT, RegVT, CCValAssign::Full, Flags, this->CCInfo)) {
-#ifndef NDEBUG
- dbgs() << "Call result #" << I << " has unhandled type "
- << EVT(VT).getEVTString() << '\n';
-#endif
- llvm_unreachable(nullptr);
- }
- }
-}
-
-void MipsTargetLowering::MipsCC::
-analyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsSoftFloat,
- const SDNode *CallNode, const Type *RetTy) const {
- analyzeReturn(Ins, IsSoftFloat, CallNode, RetTy);
-}
-
-void MipsTargetLowering::MipsCC::
-analyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsSoftFloat,
- const Type *RetTy) const {
- analyzeReturn(Outs, IsSoftFloat, nullptr, RetTy);
-}
-
-void MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
- MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags) {
- assert(ArgFlags.getByValSize() && "Byval argument's size shouldn't be 0.");
-
- struct ByValArgInfo ByVal;
- unsigned RegSize = regSize();
- unsigned ByValSize = RoundUpToAlignment(ArgFlags.getByValSize(), RegSize);
- unsigned Align = std::min(std::max(ArgFlags.getByValAlign(), RegSize),
- RegSize * 2);
-
- if (useRegsForByval())
- allocateRegs(ByVal, ByValSize, Align);
-
- // Allocate space on caller's stack.
- ByVal.Address = CCInfo.AllocateStack(ByValSize - RegSize * ByVal.NumRegs,
- Align);
- CCInfo.addLoc(CCValAssign::getMem(ValNo, ValVT, ByVal.Address, LocVT,
- LocInfo));
- ByValArgs.push_back(ByVal);
-}
-
-unsigned MipsTargetLowering::MipsCC::numIntArgRegs() const {
- return IsO32 ? array_lengthof(O32IntRegs) : array_lengthof(Mips64IntRegs);
-}
-
-unsigned MipsTargetLowering::MipsCC::reservedArgArea() const {
- return (IsO32 && (CallConv != CallingConv::Fast)) ? 16 : 0;
-}
-
-const MCPhysReg *MipsTargetLowering::MipsCC::intArgRegs() const {
- return IsO32 ? O32IntRegs : Mips64IntRegs;
-}
-
-llvm::CCAssignFn *MipsTargetLowering::MipsCC::fixedArgFn() const {
- if (CallConv == CallingConv::Fast)
- return CC_Mips_FastCC;
-
- if (SpecialCallingConv == Mips16RetHelperConv)
- return CC_Mips16RetHelper;
- return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN;
-}
-
-llvm::CCAssignFn *MipsTargetLowering::MipsCC::varArgFn() const {
- return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN_VarArg;
-}
-
-const MCPhysReg *MipsTargetLowering::MipsCC::shadowRegs() const {
- return IsO32 ? O32IntRegs : Mips64DPRegs;
-}
-
-void MipsTargetLowering::MipsCC::allocateRegs(ByValArgInfo &ByVal,
- unsigned ByValSize,
- unsigned Align) {
- unsigned RegSize = regSize(), NumIntArgRegs = numIntArgRegs();
- const MCPhysReg *IntArgRegs = intArgRegs(), *ShadowRegs = shadowRegs();
- assert(!(ByValSize % RegSize) && !(Align % RegSize) &&
- "Byval argument's size and alignment should be a multiple of"
- "RegSize.");
-
- ByVal.FirstIdx = CCInfo.getFirstUnallocated(IntArgRegs, NumIntArgRegs);
-
- // If Align > RegSize, the first arg register must be even.
- if ((Align > RegSize) && (ByVal.FirstIdx % 2)) {
- CCInfo.AllocateReg(IntArgRegs[ByVal.FirstIdx], ShadowRegs[ByVal.FirstIdx]);
- ++ByVal.FirstIdx;
- }
-
- // Mark the registers allocated.
- for (unsigned I = ByVal.FirstIdx; ByValSize && (I < NumIntArgRegs);
- ByValSize -= RegSize, ++I, ++ByVal.NumRegs)
- CCInfo.AllocateReg(IntArgRegs[I], ShadowRegs[I]);
-}
-
-MVT MipsTargetLowering::MipsCC::getRegVT(MVT VT, const Type *OrigTy,
- const SDNode *CallNode,
- bool IsSoftFloat) const {
- if (IsSoftFloat || IsO32)
- return VT;
-
- // Check if the original type was fp128.
- if (originalTypeIsF128(OrigTy, CallNode)) {
- assert(VT == MVT::i64);
- return MVT::f64;
- }
-
- return VT;
-}
-
-void MipsTargetLowering::
-copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
- SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
- SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
- const MipsCC &CC, const ByValArgInfo &ByVal) const {
+void MipsTargetLowering::copyByValRegs(
+ SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains, SelectionDAG &DAG,
+ const ISD::ArgFlagsTy &Flags, SmallVectorImpl<SDValue> &InVals,
+ const Argument *FuncArg, unsigned FirstReg, unsigned LastReg,
+ const CCValAssign &VA, MipsCCState &State) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned RegAreaSize = ByVal.NumRegs * CC.regSize();
+ unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
+ unsigned NumRegs = LastReg - FirstReg;
+ unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
int FrameObjOffset;
+ const MipsABIInfo &ABI = Subtarget.getABI();
+ ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();
if (RegAreaSize)
- FrameObjOffset = (int)CC.reservedArgArea() -
- (int)((CC.numIntArgRegs() - ByVal.FirstIdx) * CC.regSize());
+ FrameObjOffset =
+ (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
+ (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
else
- FrameObjOffset = ByVal.Address;
+ FrameObjOffset = VA.getLocMemOffset();
// Create frame object.
EVT PtrTy = getPointerTy();
@@ -3552,17 +3511,17 @@ copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
InVals.push_back(FIN);
- if (!ByVal.NumRegs)
+ if (!NumRegs)
return;
// Copy arg registers.
- MVT RegTy = MVT::getIntegerVT(CC.regSize() * 8);
+ MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
const TargetRegisterClass *RC = getRegClassFor(RegTy);
- for (unsigned I = 0; I < ByVal.NumRegs; ++I) {
- unsigned ArgReg = CC.intArgRegs()[ByVal.FirstIdx + I];
+ for (unsigned I = 0; I < NumRegs; ++I) {
+ unsigned ArgReg = ByValArgRegs[FirstReg + I];
unsigned VReg = addLiveIn(MF, ArgReg, RC);
- unsigned Offset = I * CC.regSize();
+ unsigned Offset = I * GPRSizeInBytes;
SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
DAG.getConstant(Offset, PtrTy));
SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
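The replacement code above derives the byval frame-object offset from the ABI's callee-allocated argument area rather than from the old MipsCC bookkeeping. A worked example of just that arithmetic, under assumed O32 numbers (16-byte reserved area, four byval argument registers, 4-byte GPRs) instead of the real MipsABIInfo queries:

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed O32 figures mirroring the arithmetic above.
  const unsigned GPRSizeInBytes = 4;      // 32-bit GPRs
  const unsigned ReservedArgArea = 16;    // callee-allocated area for a0-a3
  const unsigned NumByValArgRegs = 4;     // a0..a3 can carry byval words

  unsigned FirstReg = 1;                  // byval starts in a1
  unsigned LastReg = 4;                   // ...and uses a1-a3
  unsigned NumRegs = LastReg - FirstReg;  // 3 registers
  unsigned ByValSize = 20;                // 20-byte byval struct

  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;           // 12
  unsigned FrameObjSize = std::max(ByValSize, RegAreaSize);  // 20
  int FrameObjOffset =
      (int)ReservedArgArea -
      (int)((NumByValArgRegs - FirstReg) * GPRSizeInBytes);  // 16 - 12 = 4 (a1's home slot)

  std::printf("RegAreaSize=%u FrameObjSize=%u FrameObjOffset=%d\n",
              RegAreaSize, FrameObjSize, FrameObjOffset);
}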
@@ -3573,34 +3532,34 @@ copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
}
// Copy byVal arg to registers and stack.
-void MipsTargetLowering::
-passByValArg(SDValue Chain, SDLoc DL,
- std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
- MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
- const MipsCC &CC, const ByValArgInfo &ByVal,
- const ISD::ArgFlagsTy &Flags, bool isLittle) const {
+void MipsTargetLowering::passByValArg(
+ SDValue Chain, SDLoc DL,
+ std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
+ SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
+ MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
+ unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
+ const CCValAssign &VA) const {
unsigned ByValSizeInBytes = Flags.getByValSize();
unsigned OffsetInBytes = 0; // From beginning of struct
- unsigned RegSizeInBytes = CC.regSize();
+ unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
unsigned Alignment = std::min(Flags.getByValAlign(), RegSizeInBytes);
EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
+ unsigned NumRegs = LastReg - FirstReg;
- if (ByVal.NumRegs) {
- const MCPhysReg *ArgRegs = CC.intArgRegs();
- bool LeftoverBytes = (ByVal.NumRegs * RegSizeInBytes > ByValSizeInBytes);
+ if (NumRegs) {
+ const ArrayRef<MCPhysReg> ArgRegs = Subtarget.getABI().GetByValArgRegs();
+ bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
unsigned I = 0;
// Copy words to registers.
- for (; I < ByVal.NumRegs - LeftoverBytes;
- ++I, OffsetInBytes += RegSizeInBytes) {
+ for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
DAG.getConstant(OffsetInBytes, PtrTy));
SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
MachinePointerInfo(), false, false, false,
Alignment);
MemOpChains.push_back(LoadVal.getValue(1));
- unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
+ unsigned ArgReg = ArgRegs[FirstReg + I];
RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
}
@@ -3610,9 +3569,6 @@ passByValArg(SDValue Chain, SDLoc DL,
// Copy the remainder of the byval argument with sub-word loads and shifts.
if (LeftoverBytes) {
- assert((ByValSizeInBytes > OffsetInBytes) &&
- (ByValSizeInBytes < OffsetInBytes + RegSizeInBytes) &&
- "Size of the remainder should be smaller than RegSizeInBytes.");
SDValue Val;
for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
@@ -3627,7 +3583,8 @@ passByValArg(SDValue Chain, SDLoc DL,
DAG.getConstant(OffsetInBytes, PtrTy));
SDValue LoadVal = DAG.getExtLoad(
ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
- MVT::getIntegerVT(LoadSizeInBytes * 8), false, false, Alignment);
+ MVT::getIntegerVT(LoadSizeInBytes * 8), false, false, false,
+ Alignment);
MemOpChains.push_back(LoadVal.getValue(1));
// Shift the loaded value.
@@ -3651,7 +3608,7 @@ passByValArg(SDValue Chain, SDLoc DL,
Alignment = std::min(Alignment, LoadSizeInBytes);
}
- unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
+ unsigned ArgReg = ArgRegs[FirstReg + I];
RegsToPass.push_back(std::make_pair(ArgReg, Val));
return;
}
@@ -3662,7 +3619,7 @@ passByValArg(SDValue Chain, SDLoc DL,
SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
DAG.getConstant(OffsetInBytes, PtrTy));
SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
- DAG.getIntPtrConstant(ByVal.Address));
+ DAG.getIntPtrConstant(VA.getLocMemOffset()));
Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy),
Alignment, /*isVolatile=*/false, /*AlwaysInline=*/false,
MachinePointerInfo(), MachinePointerInfo());
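passByValArg, as rewritten above, first moves whole register-sized words of the byval argument into argument registers, assembles a trailing partial word from smaller zero-extended loads and shifts, and memcpy's whatever did not fit in registers onto the stack. A sketch of only that splitting arithmetic, with assumed O32 sizes and a hypothetical helper name:

#include <cstdio>

// Mirrors the splitting arithmetic only, not the DAG construction. The
// caller never hands over more registers than the struct needs, so
// NumRegs <= ceil(ByValSize / RegSize) is assumed here.
static void splitByVal(unsigned ByValSize, unsigned NumRegs,
                       unsigned RegSize = 4) {
  bool Leftover = NumRegs * RegSize > ByValSize;   // struct ends mid-register?
  unsigned FullWords = Leftover ? NumRegs - 1 : NumRegs;
  unsigned SubWordBytes = Leftover ? ByValSize - FullWords * RegSize : 0;
  unsigned MemCpyBytes = ByValSize - FullWords * RegSize - SubWordBytes;
  std::printf("size=%u regs=%u -> %u full words, %u sub-word bytes, "
              "%u bytes memcpy'd to the stack\n",
              ByValSize, NumRegs, FullWords, SubWordBytes, MemCpyBytes);
}

int main() {
  splitByVal(10, 3); // 2 full words, 2 bytes via sub-word loads, 0 memcpy'd
  splitByVal(10, 2); // 2 full words, 0 sub-word bytes, 2 bytes memcpy'd
}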
@@ -3670,14 +3627,13 @@ passByValArg(SDValue Chain, SDLoc DL,
}
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
- const MipsCC &CC, SDValue Chain,
- SDLoc DL, SelectionDAG &DAG) const {
- unsigned NumRegs = CC.numIntArgRegs();
- const MCPhysReg *ArgRegs = CC.intArgRegs();
- const CCState &CCInfo = CC.getCCInfo();
- unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumRegs);
- unsigned RegSize = CC.regSize();
- MVT RegTy = MVT::getIntegerVT(RegSize * 8);
+ SDValue Chain, SDLoc DL,
+ SelectionDAG &DAG,
+ CCState &State) const {
+ const ArrayRef<MCPhysReg> ArgRegs = Subtarget.getABI().GetVarArgRegs();
+ unsigned Idx = State.getFirstUnallocated(ArgRegs.data(), ArgRegs.size());
+ unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
+ MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
const TargetRegisterClass *RC = getRegClassFor(RegTy);
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -3686,28 +3642,81 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
// Offset of the first variable argument from stack pointer.
int VaArgOffset;
- if (NumRegs == Idx)
- VaArgOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), RegSize);
- else
- VaArgOffset = (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx));
+ if (ArgRegs.size() == Idx)
+ VaArgOffset =
+ RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
+ else {
+ const MipsABIInfo &ABI = Subtarget.getABI();
+ VaArgOffset =
+ (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
+ (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
+ }
// Record the frame index of the first variable argument
// which is a value necessary to VASTART.
- int FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true);
+ int FI = MFI->CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
MipsFI->setVarArgsFrameIndex(FI);
// Copy the integer registers that have not been used for argument passing
// to the argument register save area. For O32, the save area is allocated
// in the caller's stack frame, while for N32/64, it is allocated in the
// callee's stack frame.
- for (unsigned I = Idx; I < NumRegs; ++I, VaArgOffset += RegSize) {
+ for (unsigned I = Idx; I < ArgRegs.size();
+ ++I, VaArgOffset += RegSizeInBytes) {
unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
- FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true);
+ FI = MFI->CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
MachinePointerInfo(), false, false, 0);
- cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue((Value*)nullptr);
+ cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
+ (Value *)nullptr);
OutChains.push_back(Store);
}
}
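writeVarArgRegs now places the first variable argument either just past the fixed outgoing arguments (when every argument register is already used) or at the home slot of the first unused argument register inside the callee-allocated area. A worked O32 example of the VaArgOffset arithmetic, using assumed figures rather than the real ABI/CCState queries:

#include <cstdio>

int main() {
  // Assumed O32 figures mirroring the VaArgOffset arithmetic above.
  const unsigned RegSizeInBytes = 4;
  const unsigned NumVarArgRegs = 4;          // a0-a3
  const unsigned CalleeAllocdArgArea = 16;   // reserved argument area

  // Case 1: two fixed arguments used a0/a1, so a2/a3 still hold varargs.
  unsigned Idx = 2;
  int VaArgOffset =
      (int)CalleeAllocdArgArea - (int)(RegSizeInBytes * (NumVarArgRegs - Idx));
  std::printf("Idx=%u -> VaArgOffset=%d (a2's home slot)\n", Idx, VaArgOffset); // 8

  // Case 2: all argument registers are taken; varargs start past the
  // fixed outgoing arguments, rounded up to the register size.
  unsigned NextStackOffset = 22;
  int RoundedUp = (int)((NextStackOffset + RegSizeInBytes - 1) /
                        RegSizeInBytes * RegSizeInBytes);
  std::printf("all regs used -> VaArgOffset=%d\n", RoundedUp); // 24
}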
+
+void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
+ unsigned Align) const {
+ MachineFunction &MF = State->getMachineFunction();
+ const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
+
+ assert(Size && "Byval argument's size shouldn't be 0.");
+
+ Align = std::min(Align, TFL->getStackAlignment());
+
+ unsigned FirstReg = 0;
+ unsigned NumRegs = 0;
+
+ if (State->getCallingConv() != CallingConv::Fast) {
+ unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
+ const ArrayRef<MCPhysReg> IntArgRegs = Subtarget.getABI().GetByValArgRegs();
+ // FIXME: The O32 case actually describes no shadow registers.
+ const MCPhysReg *ShadowRegs =
+ Subtarget.isABI_O32() ? IntArgRegs.data() : Mips64DPRegs;
+
+ // We used to check the size as well but we can't do that anymore since
+ // CCState::HandleByVal() rounds up the size after calling this function.
+ assert(!(Align % RegSizeInBytes) &&
+               "Byval argument's alignment should be a multiple of "
+ "RegSizeInBytes.");
+
+ FirstReg = State->getFirstUnallocated(IntArgRegs.data(), IntArgRegs.size());
+
+ // If Align > RegSizeInBytes, the first arg register must be even.
+ // FIXME: This condition happens to do the right thing but it's not the
+ // right way to test it. We want to check that the stack frame offset
+ // of the register is aligned.
+ if ((Align > RegSizeInBytes) && (FirstReg % 2)) {
+ State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
+ ++FirstReg;
+ }
+
+ // Mark the registers allocated.
+ Size = RoundUpToAlignment(Size, RegSizeInBytes);
+ for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
+ Size -= RegSizeInBytes, ++I, ++NumRegs)
+ State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
+ }
+
+ State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
+}
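HandleByVal above decides how many argument registers a byval parameter consumes: it rounds the size up to the register size, skips to an even register when the alignment exceeds the register size, and claims registers until the size is covered or the registers run out. A sketch of that bookkeeping under assumed O32 numbers, independent of CCState:

#include <cstdio>

int main() {
  // Assumed O32 figures mirroring HandleByVal's bookkeeping above.
  const unsigned RegSizeInBytes = 4;
  const unsigned NumIntArgRegs = 4;     // a0-a3

  unsigned Size = 9;                    // 9-byte byval argument
  unsigned Align = 8;                   // double-word aligned
  unsigned FirstUnallocated = 1;        // a0 already taken by a fixed argument

  unsigned FirstReg = FirstUnallocated;
  // Alignment > register size: the first register used must be even (skip a1).
  if (Align > RegSizeInBytes && (FirstReg % 2))
    ++FirstReg;                         // FirstReg = 2 (a2)

  unsigned NumRegs = 0;
  unsigned Rounded =
      (Size + RegSizeInBytes - 1) / RegSizeInBytes * RegSizeInBytes;  // 12
  for (unsigned I = FirstReg, Left = Rounded; Left > 0 && I < NumIntArgRegs;
       Left -= RegSizeInBytes, ++I)
    ++NumRegs;                          // claims a2 and a3 -> NumRegs = 2

  std::printf("FirstReg=%u NumRegs=%u (remaining %u bytes go on the stack)\n",
              FirstReg, NumRegs, Rounded - NumRegs * RegSizeInBytes);
}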