Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/ARM/ARMExpandPseudoInsts.cpp | 8
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp | 416
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp | 1
-rw-r--r--  lib/Target/ARM/ARMInstrFormats.td | 60
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.td | 35
-rw-r--r--  lib/Target/ARM/ARMInstrNEON.td | 172
-rw-r--r--  lib/Target/ARM/ARMInstrThumb2.td | 2
-rw-r--r--  lib/Target/ARM/ARMInstrVFP.td | 17
-rw-r--r--  lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 22
-rw-r--r--  lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 170
-rw-r--r--  lib/Target/ARM/Disassembler/ARMDisassembler.cpp | 6
-rw-r--r--  lib/Target/ARM/Disassembler/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/ARM/MCTargetDesc/CMakeLists.txt | 2
-rw-r--r--  lib/Target/ARM/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/CBackend/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/CellSPU/MCTargetDesc/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/CellSPU/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/CppBackend/TargetInfo/CMakeLists.txt | 1
-rw-r--r--  lib/Target/CppBackend/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/LLVMBuild.txt | 32
-rw-r--r--  lib/Target/MBlaze/Disassembler/CMakeLists.txt | 1
-rw-r--r--  lib/Target/MBlaze/Disassembler/LLVMBuild.txt | 8
-rw-r--r--  lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp | 3
-rw-r--r--  lib/Target/MBlaze/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/MSP430/MCTargetDesc/CMakeLists.txt | 2
-rw-r--r--  lib/Target/MSP430/MCTargetDesc/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/MSP430/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/Mips/CMakeLists.txt | 1
-rw-r--r--  lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp | 5
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp | 147
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h | 82
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp | 213
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp | 5
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h | 5
-rw-r--r--  lib/Target/Mips/Makefile | 2
-rw-r--r--  lib/Target/Mips/Mips64InstrInfo.td | 82
-rw-r--r--  lib/Target/Mips/MipsAsmPrinter.cpp | 1
-rw-r--r--  lib/Target/Mips/MipsCallingConv.td | 24
-rw-r--r--  lib/Target/Mips/MipsCodeEmitter.cpp | 57
-rw-r--r--  lib/Target/Mips/MipsFrameLowering.cpp | 24
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp | 446
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.h | 80
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.td | 136
-rw-r--r--  lib/Target/Mips/MipsMCInstLower.cpp | 1
-rw-r--r--  lib/Target/Mips/MipsMachineFunction.h | 11
-rw-r--r--  lib/Target/Mips/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/PTX/CMakeLists.txt | 1
-rw-r--r--  lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp | 18
-rw-r--r--  lib/Target/PTX/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/PTX/MCTargetDesc/CMakeLists.txt | 2
-rw-r--r--  lib/Target/PTX/PTXAsmPrinter.cpp | 30
-rw-r--r--  lib/Target/PTX/PTXAsmPrinter.h | 2
-rw-r--r--  lib/Target/PTX/PTXISelLowering.cpp | 107
-rw-r--r--  lib/Target/PTX/PTXInstrInfo.td | 6
-rw-r--r--  lib/Target/PTX/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/PowerPC/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/Sparc/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/X86/CMakeLists.txt | 2
-rw-r--r--  lib/Target/X86/MCTargetDesc/CMakeLists.txt | 1
-rw-r--r--  lib/Target/X86/TargetInfo/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 285
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp | 193
-rw-r--r--  lib/Target/X86/X86InstrInfo.h | 5
-rw-r--r--  lib/Target/X86/X86InstrSSE.td | 138
-rw-r--r--  lib/Target/XCore/MCTargetDesc/LLVMBuild.txt | 2
-rw-r--r--  lib/Target/XCore/TargetInfo/LLVMBuild.txt | 2
66 files changed, 2421 insertions(+), 683 deletions(-)
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 5f7b8b2..fb7d96a 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -272,16 +272,16 @@ static const NEONLdStTableEntry NEONLdStTable[] = {
{ ARM::VST1d64TPseudo, ARM::VST1d64T, false, false, false, SingleSpc, 3, 1 ,true},
{ ARM::VST1d64TPseudo_UPD, ARM::VST1d64T_UPD, false, true, true, SingleSpc, 3, 1 ,true},
-{ ARM::VST1q16Pseudo, ARM::VST1q16, false, false, false, SingleSpc, 2, 4 ,true},
+{ ARM::VST1q16Pseudo, ARM::VST1q16, false, false, false, SingleSpc, 2, 4 ,false},
{ ARM::VST1q16PseudoWB_fixed, ARM::VST1q16wb_fixed, false, true, false, SingleSpc, 2, 4 ,false},
{ ARM::VST1q16PseudoWB_register, ARM::VST1q16wb_register, false, true, true, SingleSpc, 2, 4 ,false},
-{ ARM::VST1q32Pseudo, ARM::VST1q32, false, false, false, SingleSpc, 2, 2 ,true},
+{ ARM::VST1q32Pseudo, ARM::VST1q32, false, false, false, SingleSpc, 2, 2 ,false},
{ ARM::VST1q32PseudoWB_fixed, ARM::VST1q32wb_fixed, false, true, false, SingleSpc, 2, 2 ,false},
{ ARM::VST1q32PseudoWB_register, ARM::VST1q32wb_register, false, true, true, SingleSpc, 2, 2 ,false},
-{ ARM::VST1q64Pseudo, ARM::VST1q64, false, false, false, SingleSpc, 2, 1 ,true},
+{ ARM::VST1q64Pseudo, ARM::VST1q64, false, false, false, SingleSpc, 2, 1 ,false},
{ ARM::VST1q64PseudoWB_fixed, ARM::VST1q64wb_fixed, false, true, false, SingleSpc, 2, 1 ,false},
{ ARM::VST1q64PseudoWB_register, ARM::VST1q64wb_register, false, true, true, SingleSpc, 2, 1 ,false},
-{ ARM::VST1q8Pseudo, ARM::VST1q8, false, false, false, SingleSpc, 2, 8 ,true},
+{ ARM::VST1q8Pseudo, ARM::VST1q8, false, false, false, SingleSpc, 2, 8 ,false},
{ ARM::VST1q8PseudoWB_fixed, ARM::VST1q8wb_fixed, false, true, false, SingleSpc, 2, 8 ,false},
{ ARM::VST1q8PseudoWB_register, ARM::VST1q8wb_register, false, true, true, SingleSpc, 2, 8 ,false},
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 030fab1..4df084f 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -148,6 +148,8 @@ class ARMFastISel : public FastISel {
virtual bool TargetSelectInstruction(const Instruction *I);
virtual unsigned TargetMaterializeConstant(const Constant *C);
virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
+ virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI);
#include "ARMGenFastISel.inc"
@@ -164,7 +166,8 @@ class ARMFastISel : public FastISel {
bool SelectFPToSI(const Instruction *I);
bool SelectSDiv(const Instruction *I);
bool SelectSRem(const Instruction *I);
- bool SelectCall(const Instruction *I);
+ bool SelectCall(const Instruction *I, const char *IntrMemName);
+ bool SelectIntrinsicCall(const IntrinsicInst &I);
bool SelectSelect(const Instruction *I);
bool SelectRet(const Instruction *I);
bool SelectTrunc(const Instruction *I);
@@ -176,10 +179,14 @@ class ARMFastISel : public FastISel {
bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
bool isZExt);
- bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
+ bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, bool isZExt,
+ bool allocReg);
+
bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
bool ARMComputeAddress(const Value *Obj, Address &Addr);
- void ARMSimplifyAddress(Address &Addr, EVT VT);
+ void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
+ bool ARMIsMemCpySmall(uint64_t Len);
+ bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
unsigned ARMMaterializeInt(const Constant *C, EVT VT);
@@ -212,7 +219,7 @@ class ARMFastISel : public FastISel {
const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
void AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags);
+ unsigned Flags, bool useAM3);
};
} // end anonymous namespace
@@ -563,9 +570,9 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
// Use MVN to emit negative constants.
if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
unsigned Imm = (unsigned)~(CI->getSExtValue());
- bool EncodeImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
(ARM_AM::getSOImmVal(Imm) != -1);
- if (EncodeImm) {
+ if (UseImm) {
unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -723,7 +730,7 @@ bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
// If this is a type than can be sign or zero-extended to a basic operation
// go ahead and accept it now.
- if (VT == MVT::i8 || VT == MVT::i16)
+ if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
return true;
return false;
@@ -852,7 +859,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
return Addr.Base.Reg != 0;
}
-void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
+void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
@@ -860,21 +867,22 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
switch (VT.getSimpleVT().SimpleTy) {
default:
assert(false && "Unhandled load/store type!");
- case MVT::i16:
- if (isThumb2)
- // Integer loads/stores handle 12-bit offsets.
- needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
- else
- // ARM i16 integer loads/stores handle +/-imm8 offsets.
- // FIXME: Negative offsets require special handling.
- if (Addr.Offset > 255 || Addr.Offset < 0)
- needsLowering = true;
break;
case MVT::i1:
case MVT::i8:
+ case MVT::i16:
case MVT::i32:
- // Integer loads/stores handle 12-bit offsets.
- needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
+ if (!useAM3) {
+ // Integer loads/stores handle 12-bit offsets.
+ needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
+ // Handle negative offsets.
+ if (needsLowering && isThumb2)
+ needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
+ Addr.Offset > -256);
+ } else {
+ // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
+ needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
+ }
break;
case MVT::f32:
case MVT::f64:
@@ -910,7 +918,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags) {
+ unsigned Flags, bool useAM3) {
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
@@ -930,41 +938,78 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
// Now add the rest of the operands.
MIB.addFrameIndex(FI);
- // ARM halfword load/stores need an additional operand.
- if (!isThumb2 && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ // ARM halfword load/stores and signed byte loads need an additional operand.
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
MIB.addMemOperand(MMO);
} else {
// Now add the rest of the operands.
MIB.addReg(Addr.Base.Reg);
- // ARM halfword load/stores need an additional operand.
- if (!isThumb2 && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ // ARM halfword load/stores and signed byte loads need an additional operand.
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
}
AddOptionalDefs(MIB);
}
-bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
-
+bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+ bool isZExt = true, bool allocReg = true) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
unsigned Opc;
- TargetRegisterClass *RC;
+ bool useAM3 = false;
+ TargetRegisterClass *RC;
switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
+ case MVT::i1:
case MVT::i8:
- Opc = isThumb2 ? ARM::t2LDRBi12 : ARM::LDRBi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
+ else
+ Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
+ } else {
+ if (isZExt) {
+ Opc = ARM::LDRBi12;
+ } else {
+ Opc = ARM::LDRSB;
+ useAM3 = true;
+ }
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::i16:
- Opc = isThumb2 ? ARM::t2LDRHi12 : ARM::LDRH;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
+ else
+ Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
+ } else {
+ Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
+ useAM3 = true;
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::i32:
- Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = ARM::t2LDRi8;
+ else
+ Opc = ARM::t2LDRi12;
+ } else {
+ Opc = ARM::LDRi12;
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::f32:
@@ -977,13 +1022,15 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
break;
}
// Simplify this down to something we can handle.
- ARMSimplifyAddress(Addr, VT);
+ ARMSimplifyAddress(Addr, VT, useAM3);
// Create the base instruction, then add the operands.
- ResultReg = createResultReg(RC);
+ if (allocReg)
+ ResultReg = createResultReg(RC);
+ assert (ResultReg > 255 && "Expected an allocated virtual register.");
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg);
- AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
return true;
}
@@ -1009,6 +1056,7 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
unsigned StrOpc;
+ bool useAM3 = false;
switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
@@ -1022,13 +1070,35 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
SrcReg = Res;
} // Fallthrough here.
case MVT::i8:
- StrOpc = isThumb2 ? ARM::t2STRBi12 : ARM::STRBi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRBi8;
+ else
+ StrOpc = ARM::t2STRBi12;
+ } else {
+ StrOpc = ARM::STRBi12;
+ }
break;
case MVT::i16:
- StrOpc = isThumb2 ? ARM::t2STRHi12 : ARM::STRH;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRHi8;
+ else
+ StrOpc = ARM::t2STRHi12;
+ } else {
+ StrOpc = ARM::STRH;
+ useAM3 = true;
+ }
break;
case MVT::i32:
- StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRi8;
+ else
+ StrOpc = ARM::t2STRi12;
+ } else {
+ StrOpc = ARM::STRi12;
+ }
break;
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
@@ -1040,13 +1110,13 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
break;
}
// Simplify this down to something we can handle.
- ARMSimplifyAddress(Addr, VT);
+ ARMSimplifyAddress(Addr, VT, useAM3);
// Create the base instruction, then add the operands.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg, getKillRegState(true));
- AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
return true;
}
@@ -1231,25 +1301,25 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// Check to see if the 2nd operand is a constant that we can encode directly
// in the compare.
- int EncodedImm = 0;
- bool EncodeImm = false;
+ int Imm = 0;
+ bool UseImm = false;
bool isNegativeImm = false;
if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
SrcVT == MVT::i1) {
const APInt &CIVal = ConstInt->getValue();
- EncodedImm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
- if (EncodedImm < 0) {
+ Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
+ if (Imm < 0) {
isNegativeImm = true;
- EncodedImm = -EncodedImm;
+ Imm = -Imm;
}
- EncodeImm = isThumb2 ? (ARM_AM::getT2SOImmVal(EncodedImm) != -1) :
- (ARM_AM::getSOImmVal(EncodedImm) != -1);
+ UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ (ARM_AM::getSOImmVal(Imm) != -1);
}
} else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
if (ConstFP->isZero() && !ConstFP->isNegative())
- EncodeImm = true;
+ UseImm = true;
}
unsigned CmpOpc;
@@ -1260,11 +1330,11 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// TODO: Verify compares.
case MVT::f32:
isICmp = false;
- CmpOpc = EncodeImm ? ARM::VCMPEZS : ARM::VCMPES;
+ CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
break;
case MVT::f64:
isICmp = false;
- CmpOpc = EncodeImm ? ARM::VCMPEZD : ARM::VCMPED;
+ CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
break;
case MVT::i1:
case MVT::i8:
@@ -1273,12 +1343,12 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// Intentional fall-through.
case MVT::i32:
if (isThumb2) {
- if (!EncodeImm)
+ if (!UseImm)
CmpOpc = ARM::t2CMPrr;
else
CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
} else {
- if (!EncodeImm)
+ if (!UseImm)
CmpOpc = ARM::CMPrr;
else
CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
@@ -1290,7 +1360,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
if (SrcReg1 == 0) return false;
unsigned SrcReg2;
- if (!EncodeImm) {
+ if (!UseImm) {
SrcReg2 = getRegForValue(Src2Value);
if (SrcReg2 == 0) return false;
}
@@ -1301,14 +1371,14 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
if (ResultReg == 0) return false;
SrcReg1 = ResultReg;
- if (!EncodeImm) {
+ if (!UseImm) {
ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
if (ResultReg == 0) return false;
SrcReg2 = ResultReg;
}
}
- if (!EncodeImm) {
+ if (!UseImm) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(CmpOpc))
.addReg(SrcReg1).addReg(SrcReg2));
@@ -1319,7 +1389,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
if (isICmp)
- MIB.addImm(EncodedImm);
+ MIB.addImm(Imm);
AddOptionalDefs(MIB);
}
@@ -1490,17 +1560,49 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
if (CondReg == 0) return false;
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- unsigned Op2Reg = getRegForValue(I->getOperand(2));
- if (Op2Reg == 0) return false;
- unsigned CmpOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
+ // Check to see if we can use an immediate in the conditional move.
+ int Imm = 0;
+ bool UseImm = false;
+ bool isNegativeImm = false;
+ if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
+ assert (VT == MVT::i32 && "Expecting an i32.");
+ Imm = (int)ConstInt->getValue().getZExtValue();
+ if (Imm < 0) {
+ isNegativeImm = true;
+ Imm = ~Imm;
+ }
+ UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ (ARM_AM::getSOImmVal(Imm) != -1);
+ }
+
+ unsigned Op2Reg;
+ if (!UseImm) {
+ Op2Reg = getRegForValue(I->getOperand(2));
+ if (Op2Reg == 0) return false;
+ }
+
+ unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(CondReg).addImm(1));
+ .addReg(CondReg).addImm(0));
+
+ unsigned MovCCOpc;
+ if (!UseImm) {
+ MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
+ } else {
+ if (!isNegativeImm) {
+ MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
+ } else {
+ MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
+ }
+ }
unsigned ResultReg = createResultReg(RC);
- unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
- .addReg(Op1Reg).addReg(Op2Reg)
- .addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ if (!UseImm)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
+ else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1964,12 +2066,13 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
return true;
}
-bool ARMFastISel::SelectCall(const Instruction *I) {
+bool ARMFastISel::SelectCall(const Instruction *I,
+ const char *IntrMemName = 0) {
const CallInst *CI = cast<CallInst>(I);
const Value *Callee = CI->getCalledValue();
- // Can't handle inline asm or worry about intrinsics yet.
- if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;
+ // Can't handle inline asm.
+ if (isa<InlineAsm>(Callee)) return false;
// Only handle global variable Callees.
const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
@@ -2011,8 +2114,12 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
ArgFlags.reserve(CS.arg_size());
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
- unsigned Arg = getRegForValue(*i);
+ // If we're lowering a memory intrinsic instead of a regular call, skip the
+ // last two arguments, which shouldn't be passed to the underlying function.
+ if (IntrMemName && e-i <= 2)
+ break;
+ unsigned Arg = getRegForValue(*i);
if (Arg == 0)
return false;
ISD::ArgFlagsTy Flags;
@@ -2054,17 +2161,26 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
MachineInstrBuilder MIB;
unsigned CallOpc = ARMSelectCallOp(GV);
// Explicitly adding the predicate here.
- if(isThumb2)
- // Explicitly adding the predicate here.
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc)))
- .addGlobalAddress(GV, 0, 0);
- else
+ if(isThumb2) {
// Explicitly adding the predicate here.
MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc))
- .addGlobalAddress(GV, 0, 0));
-
+ TII.get(CallOpc)));
+ if (!IntrMemName)
+ MIB.addGlobalAddress(GV, 0, 0);
+ else
+ MIB.addExternalSymbol(IntrMemName, 0);
+ } else {
+ if (!IntrMemName)
+ // Explicitly adding the predicate here.
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, 0));
+ else
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addExternalSymbol(IntrMemName, 0));
+ }
+
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
@@ -2079,6 +2195,98 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
return true;
}
+bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
+ return Len <= 16;
+}
+
+bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len) {
+ // Make sure we don't bloat code by inlining very large memcpy's.
+ if (!ARMIsMemCpySmall(Len))
+ return false;
+
+ // We don't care about alignment here since we just emit integer accesses.
+ while (Len) {
+ MVT VT;
+ if (Len >= 4)
+ VT = MVT::i32;
+ else if (Len >= 2)
+ VT = MVT::i16;
+ else {
+ assert(Len == 1);
+ VT = MVT::i8;
+ }
+
+ bool RV;
+ unsigned ResultReg;
+ RV = ARMEmitLoad(VT, ResultReg, Src);
+ assert (RV = true && "Should be able to handle this load.");
+ RV = ARMEmitStore(VT, ResultReg, Dest);
+ assert (RV = true && "Should be able to handle this store.");
+
+ unsigned Size = VT.getSizeInBits()/8;
+ Len -= Size;
+ Dest.Offset += Size;
+ Src.Offset += Size;
+ }
+
+ return true;
+}
+
+bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
+ // FIXME: Handle more intrinsics.
+ switch (I.getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ const MemTransferInst &MTI = cast<MemTransferInst>(I);
+ // Don't handle volatile.
+ if (MTI.isVolatile())
+ return false;
+
+ // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
+ // we would emit dead code because we don't currently handle memmoves.
+ bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
+ if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
+ // Small memcpy's are common enough that we want to do them without a call
+ // if possible.
+ uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
+ if (ARMIsMemCpySmall(Len)) {
+ Address Dest, Src;
+ if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
+ !ARMComputeAddress(MTI.getRawSource(), Src))
+ return false;
+ if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
+ return true;
+ }
+ }
+
+ if (!MTI.getLength()->getType()->isIntegerTy(32))
+ return false;
+
+ if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
+ return false;
+
+ const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
+ return SelectCall(&I, IntrMemName);
+ }
+ case Intrinsic::memset: {
+ const MemSetInst &MSI = cast<MemSetInst>(I);
+ // Don't handle volatile.
+ if (MSI.isVolatile())
+ return false;
+
+ if (!MSI.getLength()->getType()->isIntegerTy(32))
+ return false;
+
+ if (MSI.getDestAddressSpace() > 255)
+ return false;
+
+ return SelectCall(&I, "memset");
+ }
+ }
+ return false;
+}
+
bool ARMFastISel::SelectTrunc(const Instruction *I) {
// The high bits for a type smaller than the register size are assumed to be
// undefined.
@@ -2150,8 +2358,6 @@ unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
bool ARMFastISel::SelectIntExt(const Instruction *I) {
// On ARM, in general, integer casts don't involve legal types; this code
// handles promotable integers.
- // FIXME: We could save an instruction in many cases by special-casing
- // load instructions.
Type *DestTy = I->getType();
Value *Src = I->getOperand(0);
Type *SrcTy = Src->getType();
@@ -2202,6 +2408,8 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
case Instruction::SRem:
return SelectSRem(I);
case Instruction::Call:
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
+ return SelectIntrinsicCall(*II);
return SelectCall(I);
case Instruction::Select:
return SelectSelect(I);
@@ -2217,6 +2425,52 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
return false;
}
+/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+/// vreg is being provided by the specified load instruction. If possible,
+/// try to fold the load as an operand to the instruction, returning true if
+/// successful.
+bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) {
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(LI->getType(), VT))
+ return false;
+
+ // Combine load followed by zero- or sign-extend.
+ // ldrb r1, [r0] ldrb r1, [r0]
+ // uxtb r2, r1 =>
+ // mov r3, r2 mov r3, r1
+ bool isZExt = true;
+ switch(MI->getOpcode()) {
+ default: return false;
+ case ARM::SXTH:
+ case ARM::t2SXTH:
+ isZExt = false;
+ case ARM::UXTH:
+ case ARM::t2UXTH:
+ if (VT != MVT::i16)
+ return false;
+ break;
+ case ARM::SXTB:
+ case ARM::t2SXTB:
+ isZExt = false;
+ case ARM::UXTB:
+ case ARM::t2UXTB:
+ if (VT != MVT::i8)
+ return false;
+ break;
+ }
+ // See if we can handle this address.
+ Address Addr;
+ if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
+
+ unsigned ResultReg = MI->getOperand(0).getReg();
+ if (!ARMEmitLoad(VT, ResultReg, Addr, isZExt, false))
+ return false;
+ MI->eraseFromParent();
+ return true;
+}
+
namespace llvm {
llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
// Completely untested on non-darwin.
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 6f2b3b8..b55ef70 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -127,6 +127,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, VT.getSimpleVT(), Expand);
if (VT.isInteger()) {
setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td
index c5bf607..06ee2c8 100644
--- a/lib/Target/ARM/ARMInstrFormats.td
+++ b/lib/Target/ARM/ARMInstrFormats.td
@@ -174,7 +174,7 @@ def s_cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 CPSR))> {
// ARM special operands for disassembly only.
//
-def SetEndAsmOperand : AsmOperandClass {
+def SetEndAsmOperand : ImmAsmOperand {
let Name = "SetEndImm";
let ParserMethod = "parseSetEndImm";
}
@@ -820,7 +820,7 @@ class AMiscA1I<bits<8> opcod, bits<4> opc7_4, dag oops, dag iops,
}
// PKH instructions
-def PKHLSLAsmOperand : AsmOperandClass {
+def PKHLSLAsmOperand : ImmAsmOperand {
let Name = "PKHLSLImm";
let ParserMethod = "parsePKHLSLImm";
}
@@ -1991,3 +1991,59 @@ class NVDupLane<bits<4> op19_16, bit op6, dag oops, dag iops,
class NEONFPPat<dag pattern, dag result> : Pat<pattern, result> {
list<Predicate> Predicates = [HasNEON,UseNEONForFP];
}
+
+// VFP/NEON Instruction aliases for type suffices.
+class VFPDataTypeInstAlias<string opc, string dt, string asm, dag Result> :
+ InstAlias<!strconcat(opc, dt, asm), Result>;
+multiclass VFPDT8ReqInstAlias<string opc, string asm, dag Result> {
+ def I8 : VFPDataTypeInstAlias<opc, ".i8", asm, Result>;
+ def S8 : VFPDataTypeInstAlias<opc, ".s8", asm, Result>;
+ def U8 : VFPDataTypeInstAlias<opc, ".u8", asm, Result>;
+ def F8 : VFPDataTypeInstAlias<opc, ".p8", asm, Result>;
+}
+// VFPDT8ReqInstAlias plus plain ".8"
+multiclass VFPDT8InstAlias<string opc, string asm, dag Result> {
+ def _8 : VFPDataTypeInstAlias<opc, ".8", asm, Result>;
+ defm : VFPDT8ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDT16ReqInstAlias<string opc, string asm, dag Result> {
+ def I16 : VFPDataTypeInstAlias<opc, ".i16", asm, Result>;
+ def S16 : VFPDataTypeInstAlias<opc, ".s16", asm, Result>;
+ def U16 : VFPDataTypeInstAlias<opc, ".u16", asm, Result>;
+ def F16 : VFPDataTypeInstAlias<opc, ".p16", asm, Result>;
+}
+// VFPDT16ReqInstAlias plus plain ".16"
+multiclass VFPDT16InstAlias<string opc, string asm, dag Result> {
+ def _16 : VFPDataTypeInstAlias<opc, ".16", asm, Result>;
+ defm : VFPDT16ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDT32ReqInstAlias<string opc, string asm, dag Result> {
+ def I32 : VFPDataTypeInstAlias<opc, ".i32", asm, Result>;
+ def S32 : VFPDataTypeInstAlias<opc, ".s32", asm, Result>;
+ def U32 : VFPDataTypeInstAlias<opc, ".u32", asm, Result>;
+ def F32 : VFPDataTypeInstAlias<opc, ".f32", asm, Result>;
+ def F : VFPDataTypeInstAlias<opc, ".f", asm, Result>;
+}
+// VFPDT32ReqInstAlias plus plain ".32"
+multiclass VFPDT32InstAlias<string opc, string asm, dag Result> {
+ def _32 : VFPDataTypeInstAlias<opc, ".32", asm, Result>;
+ defm : VFPDT32ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDT64ReqInstAlias<string opc, string asm, dag Result> {
+ def I64 : VFPDataTypeInstAlias<opc, ".i64", asm, Result>;
+ def S64 : VFPDataTypeInstAlias<opc, ".s64", asm, Result>;
+ def U64 : VFPDataTypeInstAlias<opc, ".u64", asm, Result>;
+ def F64 : VFPDataTypeInstAlias<opc, ".f64", asm, Result>;
+ def D : VFPDataTypeInstAlias<opc, ".d", asm, Result>;
+}
+// VFPDT64ReqInstAlias plus plain ".64"
+multiclass VFPDT64InstAlias<string opc, string asm, dag Result> {
+ def _64 : VFPDataTypeInstAlias<opc, ".64", asm, Result>;
+ defm : VFPDT64ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDTAnyInstAlias<string opc, string asm, dag Result> {
+ defm : VFPDT8InstAlias<opc, asm, Result>;
+ defm : VFPDT16InstAlias<opc, asm, Result>;
+ defm : VFPDT32InstAlias<opc, asm, Result>;
+ defm : VFPDT64InstAlias<opc, asm, Result>;
+}
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index af1f490..770703c 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -284,14 +284,6 @@ def lo16AllZero : PatLeaf<(i32 imm), [{
return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
}], hi16>;
-/// imm0_65535 - An immediate is in the range [0.65535].
-def Imm0_65535AsmOperand: AsmOperandClass { let Name = "Imm0_65535"; }
-def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
- return Imm >= 0 && Imm < 65536;
-}]> {
- let ParserMatchClass = Imm0_65535AsmOperand;
-}
-
class BinOpWithFlagFrag<dag res> :
PatFrag<(ops node:$LHS, node:$RHS, node:$FLAG), res>;
class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
@@ -326,6 +318,9 @@ def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fsub node:$lhs, node:$rhs),[{
// Operand Definitions.
//
+// Immediate operands with a shared generic asm render method.
+class ImmAsmOperand : AsmOperandClass { let RenderMethod = "addImmOperands"; }
+
// Branch target.
// FIXME: rename brtarget to t2_brtarget
def brtarget : Operand<OtherVT> {
@@ -496,7 +491,7 @@ def shift_so_reg_imm : Operand<i32>, // reg reg imm
// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits.
-def SOImmAsmOperand: AsmOperandClass { let Name = "ARMSOImm"; }
+def SOImmAsmOperand: ImmAsmOperand { let Name = "ARMSOImm"; }
def so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getSOImmVal(Imm) != -1;
}]> {
@@ -521,7 +516,7 @@ def arm_i32imm : PatLeaf<(imm), [{
}]>;
/// imm0_7 predicate - Immediate in the range [0,7].
-def Imm0_7AsmOperand: AsmOperandClass { let Name = "Imm0_7"; }
+def Imm0_7AsmOperand: ImmAsmOperand { let Name = "Imm0_7"; }
def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 8;
}]> {
@@ -529,7 +524,7 @@ def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_15 predicate - Immediate in the range [0,15].
-def Imm0_15AsmOperand: AsmOperandClass { let Name = "Imm0_15"; }
+def Imm0_15AsmOperand: ImmAsmOperand { let Name = "Imm0_15"; }
def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 16;
}]> {
@@ -537,7 +532,7 @@ def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
-def Imm0_31AsmOperand: AsmOperandClass { let Name = "Imm0_31"; }
+def Imm0_31AsmOperand: ImmAsmOperand { let Name = "Imm0_31"; }
def imm0_31 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
@@ -545,7 +540,7 @@ def imm0_31 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_32 predicate - True if the 32-bit immediate is in the range [0,32].
-def Imm0_32AsmOperand: AsmOperandClass { let Name = "Imm0_32"; }
+def Imm0_32AsmOperand: ImmAsmOperand { let Name = "Imm0_32"; }
def imm0_32 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
@@ -553,25 +548,33 @@ def imm0_32 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_255 predicate - Immediate in the range [0,255].
-def Imm0_255AsmOperand : AsmOperandClass { let Name = "Imm0_255"; }
+def Imm0_255AsmOperand : ImmAsmOperand { let Name = "Imm0_255"; }
def imm0_255 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 256; }]> {
let ParserMatchClass = Imm0_255AsmOperand;
}
+/// imm0_65535 - An immediate is in the range [0.65535].
+def Imm0_65535AsmOperand: ImmAsmOperand { let Name = "Imm0_65535"; }
+def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm >= 0 && Imm < 65536;
+}]> {
+ let ParserMatchClass = Imm0_65535AsmOperand;
+}
+
// imm0_65535_expr - For movt/movw - 16-bit immediate that can also reference
// a relocatable expression.
//
// FIXME: This really needs a Thumb version separate from the ARM version.
// While the range is the same, and can thus use the same match class,
// the encoding is different so it should have a different encoder method.
-def Imm0_65535ExprAsmOperand: AsmOperandClass { let Name = "Imm0_65535Expr"; }
+def Imm0_65535ExprAsmOperand: ImmAsmOperand { let Name = "Imm0_65535Expr"; }
def imm0_65535_expr : Operand<i32> {
let EncoderMethod = "getHiLo16ImmOpValue";
let ParserMatchClass = Imm0_65535ExprAsmOperand;
}
/// imm24b - True if the 32-bit immediate is encodable in 24 bits.
-def Imm24bitAsmOperand: AsmOperandClass { let Name = "Imm24bit"; }
+def Imm24bitAsmOperand: ImmAsmOperand { let Name = "Imm24bit"; }
def imm24b : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm <= 0xffffff;
}]> {
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index d3c4486b..49cc254 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -1238,9 +1238,8 @@ class VST1D<bits<4> op7_4, string Dt>
let DecoderMethod = "DecodeVSTInstruction";
}
class VST1Q<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b1010,op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2), IIC_VST1x2,
- "vst1", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
+ : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListTwoD:$Vd),
+ IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVSTInstruction";
@@ -5180,3 +5179,170 @@ def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
+
+
+//===----------------------------------------------------------------------===//
+// Assembler aliases
+//
+
+// VAND/VEOR/VORR accept but do not require a type suffix.
+defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
+ (VANDd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
+ (VANDq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
+ (VEORd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
+ (VEORq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
+ (VORRd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
+ (VORRq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+
+// VLD1 requires a size suffix, but also accepts type specific variants.
+// Load one D register.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d8 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d16 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d32 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d64 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d8wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d16wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d32wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d64wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+
+// Load two D registers.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q8 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q16 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q32 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q64 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q8wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q16wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q32wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q64wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+
+// Load three D registers.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d8T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d16T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d32T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d64T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d8Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d16Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d32Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d64Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+
+
+// Load four D registers.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d8Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d16Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d32Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d64Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d8Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d16Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d32Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d64Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+
+// VST1 requires a size suffix, but also accepts type specific variants.
+// Store one D register.
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d8 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d16 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d32 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d64 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d8wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d16wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d32wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d64wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+
+// Store two D registers.
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q8 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q16 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q32 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q64 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q8wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q16wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q32wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q64wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+
+// FIXME: The three and four register VST1 instructions haven't been moved
+// to the VecList* encoding yet, so we can't do assembly parsing support
+// for them. Uncomment these when that happens.
+// Load three D registers.
+//defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d8T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+//defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d16T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+//defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d32T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+//defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d64T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+
+// Load four D registers.
+//defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d8Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
+//defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d16Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
+//defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d32Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
+//defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d64Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index 0a28226..03077c0 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -65,7 +65,7 @@ def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
// t2_so_imm - Match a 32-bit immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit
// immediate splatted into multiple bytes of the word.
-def t2_so_imm_asmoperand : AsmOperandClass { let Name = "T2SOImm"; }
+def t2_so_imm_asmoperand : ImmAsmOperand { let Name = "T2SOImm"; }
def t2_so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getT2SOImmVal(Imm) != -1;
}]> {
diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td
index e746cf2..488c508 100644
--- a/lib/Target/ARM/ARMInstrVFP.td
+++ b/lib/Target/ARM/ARMInstrVFP.td
@@ -69,11 +69,11 @@ def vfp_f64imm : Operand<f64>,
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
- IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
+ IIC_fpLoad64, "vldr", "\t$Dd, $addr",
[(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;
def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
- IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
+ IIC_fpLoad32, "vldr", "\t$Sd, $addr",
[(set SPR:$Sd, (load addrmode5:$addr))]> {
// Some single precision VFP instructions may be executed on both NEON and VFP
// pipelines.
@@ -83,11 +83,11 @@ def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
- IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
+ IIC_fpStore64, "vstr", "\t$Dd, $addr",
[(store (f64 DPR:$Dd), addrmode5:$addr)]>;
def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
- IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
+ IIC_fpStore32, "vstr", "\t$Sd, $addr",
[(store SPR:$Sd, addrmode5:$addr)]> {
// Some single precision VFP instructions may be executed on both NEON and VFP
// pipelines.
@@ -1163,3 +1163,12 @@ def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
+// VLDR/VSTR accept an optional type suffix.
+defm : VFPDT32InstAlias<"vldr${p}", "$Sd, $addr",
+ (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
+defm : VFPDT32InstAlias<"vstr${p}", "$Sd, $addr",
+ (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
+defm : VFPDT64InstAlias<"vldr${p}", "$Dd, $addr",
+ (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
+defm : VFPDT64InstAlias<"vstr${p}", "$Dd, $addr",
+ (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 4c3be89..c8728f4 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -32,6 +32,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Debug.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -1504,6 +1505,23 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
return AddedRegPressure.size() <= MemRegs.size() * 2;
}
+
+/// Copy Op0 and Op1 operands into a new array assigned to MI.
+static void concatenateMemOperands(MachineInstr *MI, MachineInstr *Op0,
+ MachineInstr *Op1) {
+ assert(MI->memoperands_empty() && "expected a new machineinstr");
+ size_t numMemRefs = (Op0->memoperands_end() - Op0->memoperands_begin())
+ + (Op1->memoperands_end() - Op1->memoperands_begin());
+
+ MachineFunction *MF = MI->getParent()->getParent();
+ MachineSDNode::mmo_iterator MemBegin = MF->allocateMemRefsArray(numMemRefs);
+ MachineSDNode::mmo_iterator MemEnd =
+ std::copy(Op0->memoperands_begin(), Op0->memoperands_end(), MemBegin);
+ MemEnd =
+ std::copy(Op1->memoperands_begin(), Op1->memoperands_end(), MemEnd);
+ MI->setMemRefs(MemBegin, MemEnd);
+}
+
bool
ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
DebugLoc &dl,
@@ -1698,6 +1716,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
+ concatenateMemOperands(MIB, Op0, Op1);
+ DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumLDRDFormed;
} else {
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
@@ -1710,6 +1730,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
+ concatenateMemOperands(MIB, Op0, Op1);
+ DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumSTRDFormed;
}
MBB->erase(Op0);
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index e782975..1d66d12 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -714,7 +714,7 @@ public:
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
bool isPostIdxReg() const {
- return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
+ return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false) const {
if (!isMemory())
@@ -1101,7 +1101,8 @@ public:
void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
- assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
+ assert(isRegShiftedReg() &&
+ "addRegShiftedRegOperands() on non RegShiftedReg!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
Inst.addOperand(MCOperand::CreateImm(
@@ -1110,7 +1111,8 @@ public:
void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
- assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!");
+ assert(isRegShiftedImm() &&
+ "addRegShiftedImmOperands() on non RegShiftedImm!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
Inst.addOperand(MCOperand::CreateImm(
ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
@@ -1189,26 +1191,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
}
- void addImm0_255Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_7Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_15Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_31Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addImm1_16Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate-1, and we store in the instruction
@@ -1225,26 +1207,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
}
- void addImm0_32Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm24bitOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate, except for 32, which encodes as
@@ -1254,11 +1216,6 @@ public:
Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
}
- void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// An ASR value of 32 encodes as 0, so that's how we want to add it to
@@ -1268,16 +1225,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
}
- void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The operand is actually a t2_so_imm, but we have its bitwise
@@ -1294,11 +1241,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
}
- void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
@@ -1486,8 +1428,9 @@ public:
void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
- unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
- Memory.ShiftImm, Memory.ShiftType);
+ unsigned Val =
+ ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
+ Memory.ShiftImm, Memory.ShiftType);
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
Inst.addOperand(MCOperand::CreateImm(Val));
@@ -2410,6 +2353,29 @@ static unsigned getNextRegister(unsigned Reg) {
}
}
+// Return the low-subreg of a given Q register.
+static unsigned getDRegFromQReg(unsigned QReg) {
+ switch (QReg) {
+ default: llvm_unreachable("expected a Q register!");
+ case ARM::Q0: return ARM::D0;
+ case ARM::Q1: return ARM::D2;
+ case ARM::Q2: return ARM::D4;
+ case ARM::Q3: return ARM::D6;
+ case ARM::Q4: return ARM::D8;
+ case ARM::Q5: return ARM::D10;
+ case ARM::Q6: return ARM::D12;
+ case ARM::Q7: return ARM::D14;
+ case ARM::Q8: return ARM::D16;
+ case ARM::Q9: return ARM::D19;
+ case ARM::Q10: return ARM::D20;
+ case ARM::Q11: return ARM::D22;
+ case ARM::Q12: return ARM::D24;
+ case ARM::Q13: return ARM::D26;
+ case ARM::Q14: return ARM::D28;
+ case ARM::Q15: return ARM::D30;
+ }
+}
+
/// Parse a register list.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -2425,6 +2391,16 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (Reg == -1)
return Error(RegLoc, "register expected");
+ // The reglist instructions have at most 16 registers, so reserve
+ // space for that many.
+ SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
+
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ Reg = getDRegFromQReg(Reg);
+ Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ ++Reg;
+ }
const MCRegisterClass *RC;
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
@@ -2435,10 +2411,7 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
else
return Error(RegLoc, "invalid register in register list");
- // The reglist instructions have at most 16 registers, so reserve
- // space for that many.
- SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
- // Store the first register.
+ // Store the register.
Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
// This starts immediately after the first register token in the list,
@@ -2452,6 +2425,9 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
int EndReg = tryParseRegister();
if (EndReg == -1)
return Error(EndLoc, "register expected");
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
+ EndReg = getDRegFromQReg(EndReg) + 1;
// If the register is the same as the start reg, there's nothing
// more to do.
if (Reg == EndReg)
@@ -2476,6 +2452,12 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Reg = tryParseRegister();
if (Reg == -1)
return Error(RegLoc, "register expected");
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ bool isQReg = false;
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ Reg = getDRegFromQReg(Reg);
+ isQReg = true;
+ }
// The register must be in the same register class as the first.
if (!RC->contains(Reg))
return Error(RegLoc, "invalid register in register list");
@@ -2489,6 +2471,8 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Reg != OldReg + 1)
return Error(RegLoc, "non-contiguous register range");
Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ if (isQReg)
+ Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
}
SMLoc E = Parser.getTok().getLoc();
@@ -2500,29 +2484,6 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return false;
}
-// Return the low-subreg of a given Q register.
-static unsigned getDRegFromQReg(unsigned QReg) {
- switch (QReg) {
- default: llvm_unreachable("expected a Q register!");
- case ARM::Q0: return ARM::D0;
- case ARM::Q1: return ARM::D2;
- case ARM::Q2: return ARM::D4;
- case ARM::Q3: return ARM::D6;
- case ARM::Q4: return ARM::D8;
- case ARM::Q5: return ARM::D10;
- case ARM::Q6: return ARM::D12;
- case ARM::Q7: return ARM::D14;
- case ARM::Q8: return ARM::D16;
- case ARM::Q9: return ARM::D18;
- case ARM::Q10: return ARM::D20;
- case ARM::Q11: return ARM::D22;
- case ARM::Q12: return ARM::D24;
- case ARM::Q13: return ARM::D26;
- case ARM::Q14: return ARM::D28;
- case ARM::Q15: return ARM::D30;
- }
-}
-
// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -4161,6 +4122,22 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
return false;
}
+static bool isDataTypeToken(StringRef Tok) {
+ return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
+ Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
+ Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
+ Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
+ Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
+ Tok == ".f" || Tok == ".d";
+}
+
+// FIXME: This bit should probably be handled via an explicit match class
+// in the .td files that matches the suffix instead of having it be
+// a literal string token the way it is now.
+static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
+ return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
+}
+
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -4265,9 +4242,12 @@ bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
Next = Name.find('.', Start + 1);
StringRef ExtraToken = Name.slice(Start, Next);
- // For now, we're only parsing Thumb1 (for the most part), so
- // just ignore ".n" qualifiers. We'll use them to restrict
- // matching when we do Thumb2.
+ // Some NEON instructions have an optional datatype suffix that is
+ // completely ignored. Check for that.
+ if (isDataTypeToken(ExtraToken) &&
+ doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
+ continue;
+
if (ExtraToken != ".n") {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
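
For reference, the Q-register handling added above relies only on the fixed even/odd pairing of the register file: Qn aliases D2n and D2n+1, which is all getDRegFromQReg tabulates. A minimal standalone sketch of the same expansion, using plain register numbers rather than the ARM::* enums (the names here are illustrative, not LLVM API):

#include <cassert>
#include <vector>

// Standalone sketch, not the LLVM enums: Q registers are numbered 0-15 and
// D registers 0-31, with Qn aliasing the pair (D2n, D2n+1).
static unsigned lowDRegOfQReg(unsigned QRegNo) {
  assert(QRegNo < 16 && "expected a Q register number");
  return 2 * QRegNo;
}

// Expand a Q register into its two D sub-registers for a register list,
// mirroring what the parser now does when it sees e.g. {q1, q2}.
static void appendQRegToList(std::vector<unsigned> &DRegList, unsigned QRegNo) {
  unsigned Lo = lowDRegOfQReg(QRegNo);
  DRegList.push_back(Lo);      // D2n
  DRegList.push_back(Lo + 1);  // D2n+1
}

int main() {
  std::vector<unsigned> List;
  appendQRegToList(List, 1); // q1 -> {d2, d3}
  appendQRegToList(List, 2); // q2 -> {d4, d5}
  assert(List.size() == 4 && List[0] == 2 && List[3] == 5);
  return 0;
}
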
diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 6927d2d..0b9b5d0 100644
--- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -10,13 +10,13 @@
#define DEBUG_TYPE "arm-disassembler"
#include "ARM.h"
-#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/MC/EDInstInfo.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
@@ -2267,10 +2267,6 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
// Second input register
switch (Inst.getOpcode()) {
- case ARM::VST1q8:
- case ARM::VST1q16:
- case ARM::VST1q32:
- case ARM::VST1q64:
case ARM::VST1d8T:
case ARM::VST1d16T:
case ARM::VST1d32T:
diff --git a/lib/Target/ARM/Disassembler/LLVMBuild.txt b/lib/Target/ARM/Disassembler/LLVMBuild.txt
index dff57b4..baa9bc3 100644
--- a/lib/Target/ARM/Disassembler/LLVMBuild.txt
+++ b/lib/Target/ARM/Disassembler/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = ARMDisassembler
parent = ARM
-required_libraries = ARMDesc ARMInfo MC Support
+required_libraries = ARMCodeGen ARMDesc ARMInfo MC Support
add_to_library_groups = ARM
diff --git a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
index adc37cb..f529314 100644
--- a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
@@ -12,8 +12,8 @@ add_dependencies(LLVMARMDesc ARMCommonTableGen)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/..)
add_llvm_library_dependencies(LLVMARMDesc
- LLVMARMInfo
LLVMARMAsmPrinter
+ LLVMARMInfo
LLVMMC
LLVMSupport
)
diff --git a/lib/Target/ARM/TargetInfo/LLVMBuild.txt b/lib/Target/ARM/TargetInfo/LLVMBuild.txt
index 7d7504f..046c1fc 100644
--- a/lib/Target/ARM/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/ARM/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = ARMInfo
parent = ARM
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = ARM
diff --git a/lib/Target/CBackend/TargetInfo/LLVMBuild.txt b/lib/Target/CBackend/TargetInfo/LLVMBuild.txt
index 943fe2d..35752b7 100644
--- a/lib/Target/CBackend/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/CBackend/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = CBackendInfo
parent = CBackend
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = CBackend
diff --git a/lib/Target/CellSPU/MCTargetDesc/LLVMBuild.txt b/lib/Target/CellSPU/MCTargetDesc/LLVMBuild.txt
index b5147ae..abc44a2 100644
--- a/lib/Target/CellSPU/MCTargetDesc/LLVMBuild.txt
+++ b/lib/Target/CellSPU/MCTargetDesc/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = CellSPUDesc
parent = CellSPU
-required_libraries = CellSPUInfo MC Support
+required_libraries = CellSPUInfo MC
add_to_library_groups = CellSPU
diff --git a/lib/Target/CellSPU/TargetInfo/LLVMBuild.txt b/lib/Target/CellSPU/TargetInfo/LLVMBuild.txt
index 7525359..0710cc3 100644
--- a/lib/Target/CellSPU/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/CellSPU/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = CellSPUInfo
parent = CellSPU
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = CellSPU
diff --git a/lib/Target/CppBackend/TargetInfo/CMakeLists.txt b/lib/Target/CppBackend/TargetInfo/CMakeLists.txt
index 7165d8f..738b215 100644
--- a/lib/Target/CppBackend/TargetInfo/CMakeLists.txt
+++ b/lib/Target/CppBackend/TargetInfo/CMakeLists.txt
@@ -6,5 +6,6 @@ add_llvm_library(LLVMCppBackendInfo
add_llvm_library_dependencies(LLVMCppBackendInfo
LLVMMC
+ LLVMSupport
LLVMTarget
)
diff --git a/lib/Target/CppBackend/TargetInfo/LLVMBuild.txt b/lib/Target/CppBackend/TargetInfo/LLVMBuild.txt
index b130fee..67a23ba 100644
--- a/lib/Target/CppBackend/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/CppBackend/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = CppBackendInfo
parent = CppBackend
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = CppBackend
diff --git a/lib/Target/LLVMBuild.txt b/lib/Target/LLVMBuild.txt
index 60f5230..358cbc8 100644
--- a/lib/Target/LLVMBuild.txt
+++ b/lib/Target/LLVMBuild.txt
@@ -15,39 +15,39 @@
;
;===------------------------------------------------------------------------===;
-[component_0]
-type = Library
-name = Target
-parent = Libraries
-required_libraries = Core MC Support
-
; This is a special group whose required libraries are extended (by llvm-build)
-; with every built target, which makes it easy for tools to include every
-; target.
-[component_1]
+; with the best execution engine (the native JIT, if available, or the
+; interpreter).
+[component_0]
type = LibraryGroup
-name = all-targets
+name = Engine
parent = Libraries
; This is a special group whose required libraries are extended (by llvm-build)
; with the configured native target, if any.
-[component_2]
+[component_1]
type = LibraryGroup
name = Native
parent = Libraries
; This is a special group whose required libraries are extended (by llvm-build)
; with the configured native code generator, if any.
-[component_3]
+[component_2]
type = LibraryGroup
name = NativeCodeGen
parent = Libraries
+; The component for the actual target library itself.
+[component_3]
+type = Library
+name = Target
+parent = Libraries
+required_libraries = Core MC Support
+
; This is a special group whose required libraries are extended (by llvm-build)
-; with the best execution engine (the native JIT, if available, or the
-; interpreter).
+; with every built target, which makes it easy for tools to include every
+; target.
[component_4]
type = LibraryGroup
-name = Engine
+name = all-targets
parent = Libraries
-
diff --git a/lib/Target/MBlaze/Disassembler/CMakeLists.txt b/lib/Target/MBlaze/Disassembler/CMakeLists.txt
index 112c64c..e0a53ee 100644
--- a/lib/Target/MBlaze/Disassembler/CMakeLists.txt
+++ b/lib/Target/MBlaze/Disassembler/CMakeLists.txt
@@ -14,7 +14,6 @@ set_property(
endif()
add_llvm_library_dependencies(LLVMMBlazeDisassembler
- LLVMMBlazeCodeGen
LLVMMBlazeDesc
LLVMMBlazeInfo
LLVMMC
diff --git a/lib/Target/MBlaze/Disassembler/LLVMBuild.txt b/lib/Target/MBlaze/Disassembler/LLVMBuild.txt
index b2b3a3a..c5c4f80 100644
--- a/lib/Target/MBlaze/Disassembler/LLVMBuild.txt
+++ b/lib/Target/MBlaze/Disassembler/LLVMBuild.txt
@@ -19,12 +19,6 @@
type = Library
name = MBlazeDisassembler
parent = MBlaze
-; Strictly speaking, we probably shouldn't have a dependency on
-; MBlazeCodeGen. However, given the current factoring we end up including
-; MBlazeGenRegisterInfo.inc in the disassembler. Those generated headers end up
-; referencing external variables through GPRRegClass, SPRRegClass, and
-; CRCRegClass. These aren't actually used, but some compilers may generate
-; references to them.
-required_libraries = MBlazeCodeGen MBlazeDesc MBlazeInfo MC Support
+required_libraries = MBlazeDesc MBlazeInfo MC Support
add_to_library_groups = MBlaze
diff --git a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp b/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
index c3a3833..3087317 100644
--- a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
+++ b/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
@@ -13,13 +13,12 @@
//===----------------------------------------------------------------------===//
#include "MBlaze.h"
-#include "MBlazeInstrInfo.h"
#include "MBlazeDisassembler.h"
#include "llvm/MC/EDInstInfo.h"
#include "llvm/MC/MCDisassembler.h"
-#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/TargetRegistry.h"
diff --git a/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt b/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt
index 488c2c7..938a1d9 100644
--- a/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = MBlazeInfo
parent = MBlaze
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = MBlaze
diff --git a/lib/Target/MSP430/MCTargetDesc/CMakeLists.txt b/lib/Target/MSP430/MCTargetDesc/CMakeLists.txt
index 04bd03e..c2dd448 100644
--- a/lib/Target/MSP430/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/MSP430/MCTargetDesc/CMakeLists.txt
@@ -7,6 +7,8 @@ add_llvm_library_dependencies(LLVMMSP430Desc
LLVMMC
LLVMMSP430AsmPrinter
LLVMMSP430Info
+ LLVMSupport
+ LLVMTarget
)
add_dependencies(LLVMMSP430Desc MSP430CommonTableGen)
diff --git a/lib/Target/MSP430/MCTargetDesc/LLVMBuild.txt b/lib/Target/MSP430/MCTargetDesc/LLVMBuild.txt
index 5d41082..1890e9d 100644
--- a/lib/Target/MSP430/MCTargetDesc/LLVMBuild.txt
+++ b/lib/Target/MSP430/MCTargetDesc/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = MSP430Desc
parent = MSP430
-required_libraries = MC MSP430AsmPrinter MSP430Info Support
+required_libraries = MC MSP430AsmPrinter MSP430Info Support Target
add_to_library_groups = MSP430
diff --git a/lib/Target/MSP430/TargetInfo/LLVMBuild.txt b/lib/Target/MSP430/TargetInfo/LLVMBuild.txt
index 3bcc826..a745ea8 100644
--- a/lib/Target/MSP430/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/MSP430/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = MSP430Info
parent = MSP430
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = MSP430
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index e81ba6f..53656d4d 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -3,6 +3,7 @@ set(LLVM_TARGET_DEFINITIONS Mips.td)
tablegen(LLVM MipsGenRegisterInfo.inc -gen-register-info)
tablegen(LLVM MipsGenInstrInfo.inc -gen-instr-info)
tablegen(LLVM MipsGenCodeEmitter.inc -gen-emitter)
+tablegen(LLVM MipsGenMCCodeEmitter.inc -gen-emitter -mc-emitter)
tablegen(LLVM MipsGenAsmWriter.inc -gen-asm-writer)
tablegen(LLVM MipsGenDAGISel.inc -gen-dag-isel)
tablegen(LLVM MipsGenCallingConv.inc -gen-callingconv)
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index 4a815f3..f544d39 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -118,7 +118,10 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
OS << Offset;
}
- if (Kind != MCSymbolRefExpr::VK_None)
+ if ((Kind == MCSymbolRefExpr::VK_Mips_GPOFF_HI) ||
+ (Kind == MCSymbolRefExpr::VK_Mips_GPOFF_LO))
+ OS << ")))";
+ else if (Kind != MCSymbolRefExpr::VK_None)
OS << ')';
}
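
For context on the printer change: the N32/N64 %gp_rel offsets print as a nested three-level expression, which is why the GPOFF_HI/LO kinds need three closing parentheses instead of one. A small sketch of the intended output shape (the symbol name and helper function are made up for illustration):

#include <cassert>
#include <string>

// Illustrative only: build the textual form the printer is aiming for,
// e.g. "%hi(%neg(%gp_rel(foo)))" for the GPOFF_HI flavor.
static std::string gpOffExpr(const std::string &Sym, bool HiPart) {
  std::string Prefix = HiPart ? "%hi" : "%lo";
  return Prefix + "(%neg(%gp_rel(" + Sym + ")))";
}

int main() {
  assert(gpOffExpr("foo", true) == "%hi(%neg(%gp_rel(foo)))");
  assert(gpOffExpr("foo", false) == "%lo(%neg(%gp_rel(foo)))");
  return 0;
}
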
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index f190ec4..4f017d0 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -1,5 +1,21 @@
+//===-- MipsAsmBackend.cpp - Mips Asm Backend ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MipsAsmBackend and MipsELFObjectWriter classes.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#include "MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
@@ -8,7 +24,6 @@
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ELF.h"
@@ -16,7 +31,50 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+
+ // Add/subtract and shift
+ switch (Kind) {
+ default:
+ break;
+ case Mips::fixup_Mips_PC16:
+ // So far we are only using this type for branches.
+ // For branches we start 1 instruction after the branch
+ // so the displacement will be one instruction size less.
+ Value -= 4;
+ // The displacement is then divided by 4 to give us an 18 bit
+ // address range.
+ Value >>= 2;
+ break;
+ case Mips::fixup_Mips_26:
+ // So far we are only using this type for jumps.
+ // The displacement is then divided by 4 to give us a 28 bit
+ // address range.
+ Value >>= 2;
+ break;
+ }
+
+ // Mask off value for placement as an operand
+ switch (Kind) {
+ default:
+ break;
+ case FK_Data_4:
+ Value &= 0xffffffff;
+ break;
+ case Mips::fixup_Mips_26:
+ Value &= 0x03ffffff;
+ break;
+ case Mips::fixup_Mips_LO16:
+ case Mips::fixup_Mips_PC16:
+ Value &= 0x0000ffff;
+ break;
+ }
+
+ return Value;
+}
+
namespace {
+
class MipsELFObjectWriter : public MCELFObjectTargetWriter {
public:
MipsELFObjectWriter(bool is64Bit, Triple::OSType OSType, uint16_t EMachine,
@@ -27,18 +85,75 @@ public:
class MipsAsmBackend : public MCAsmBackend {
public:
- MipsAsmBackend(const Target &T)
- : MCAsmBackend() {}
-
- unsigned getNumFixupKinds() const {
- return 1; //tbd
- }
+ MipsAsmBackend(const Target &T) : MCAsmBackend() {}
/// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const {
+ unsigned Kind = (unsigned)Fixup.getKind();
+ Value = adjustFixupValue(Kind, Value);
+
+ if (!Value)
+ return; // Doesn't change encoding.
+
+ unsigned Offset = Fixup.getOffset();
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case Mips::fixup_Mips_GOT16: // This will be fixed up at link time
+ break;
+ case FK_Data_4:
+ case Mips::fixup_Mips_26:
+ case Mips::fixup_Mips_LO16:
+ case Mips::fixup_Mips_PC16:
+ // For each byte of the fragment that the fixup touches, mask in
+ // the fixup value. The Value has been "split up" into the appropriate
+ // bitfields above.
+ for (unsigned i = 0; i != 4; ++i) // FIXME - Need to support 2 and 8 bytes
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+ break;
+ }
+ }
+
+ unsigned getNumFixupKinds() const { return Mips::NumTargetFixupKinds; }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[Mips::NumTargetFixupKinds] = {
+ // This table *must* be in the order that the fixup_* kinds are defined in
+ // MipsFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_Mips_NONE", 0, 0, 0 },
+ { "fixup_Mips_16", 0, 16, 0 },
+ { "fixup_Mips_32", 0, 32, 0 },
+ { "fixup_Mips_REL32", 0, 32, 0 },
+ { "fixup_Mips_26", 0, 26, 0 },
+ { "fixup_Mips_HI16", 0, 16, 0 },
+ { "fixup_Mips_LO16", 0, 16, 0 },
+ { "fixup_Mips_GPREL16", 0, 16, 0 },
+ { "fixup_Mips_LITERAL", 0, 16, 0 },
+ { "fixup_Mips_GOT16", 0, 16, 0 },
+ { "fixup_Mips_PC16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_CALL16", 0, 16, 0 },
+ { "fixup_Mips_GPREL32", 0, 32, 0 },
+ { "fixup_Mips_SHIFT5", 6, 5, 0 },
+ { "fixup_Mips_SHIFT6", 6, 5, 0 },
+ { "fixup_Mips_64", 0, 64, 0 },
+ { "fixup_Mips_TLSGD", 0, 16, 0 },
+ { "fixup_Mips_GOTTPREL", 0, 16, 0 },
+ { "fixup_Mips_TPREL_HI", 0, 16, 0 },
+ { "fixup_Mips_TPREL_LO", 0, 16, 0 },
+ { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
}
/// @name Target Relaxation Interfaces
@@ -52,24 +167,24 @@ public:
return false;
}
- /// RelaxInstruction - Relax the instruction in the given fragment to the next
- /// wider instruction.
+ /// RelaxInstruction - Relax the instruction in the given fragment
+ /// to the next wider instruction.
///
- /// \param Inst - The instruction to relax, which may be the same as the
- /// output.
+ /// \param Inst - The instruction to relax, which may be the same
+ /// as the output.
/// \param Res [output] - On return, the relaxed instruction.
void RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
}
/// @}
- /// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given
- /// output. If the target cannot generate such a sequence, it should return an
- /// error.
+ /// WriteNopData - Write an (optimal) nop sequence of Count bytes
+ /// to the given output. If the target cannot generate such a sequence,
+ /// it should return an error.
///
/// \return - True on success.
bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
- return false;
+ return true;
}
};
@@ -106,7 +221,7 @@ public:
return new MipsELFObjectWriter(false, OSType, ELF::EM_MIPS, false);
}
};
-}
+} // namespace
MCAsmBackend *llvm::createMipsAsmBackend(const Target &T, StringRef TT) {
Triple TheTriple(TT);
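
A worked illustration of the two branch-related cases in adjustFixupValue above: a PC16 displacement is measured from the instruction after the branch (hence the subtraction of 4) and then scaled down by 4, while a 26-bit jump target is only scaled; both are then masked to their field width before ApplyFixup ORs the bytes in. This standalone sketch uses hypothetical kind tags, not the real Mips::fixup_* enum:

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for the fixup kinds used above.
enum FixupKind { PC16, Jump26, Lo16 };

// Same shape as adjustFixupValue in the patch: adjust, then mask to the
// operand's bit width.
static uint64_t adjustValue(FixupKind Kind, uint64_t Value) {
  switch (Kind) {
  case PC16:
    Value -= 4;   // branch offsets are taken from the next instruction
    Value >>= 2;  // word-aligned, so drop the two low bits
    break;
  case Jump26:
    Value >>= 2;  // jump targets are also word-aligned
    break;
  case Lo16:
    break;
  }
  switch (Kind) {
  case PC16:
  case Lo16:
    return Value & 0xffff;      // 16-bit field
  case Jump26:
    return Value & 0x03ffffff;  // 26-bit field
  }
  return Value;
}

int main() {
  // A branch whose target is 32 bytes past the branch instruction:
  // (32 - 4) >> 2 == 7 instructions of displacement.
  assert(adjustValue(PC16, 32) == 7);
  // A jump to byte offset 0x100 encodes as 0x40 words.
  assert(adjustValue(Jump26, 0x100) == 0x40);
  return 0;
}
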
diff --git a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index f7a6fa9..cebfde0 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -19,6 +19,88 @@
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
+
+/// MipsII - This namespace holds all of the target specific flags that
+/// instruction info tracks.
+///
+namespace MipsII {
+ /// Target Operand Flag enum.
+ enum TOF {
+ //===------------------------------------------------------------------===//
+ // Mips Specific MachineOperand flags.
+
+ MO_NO_FLAG,
+
+ /// MO_GOT - Represents the offset into the global offset table at which
+ /// the address the relocation entry symbol resides during execution.
+ MO_GOT,
+
+ /// MO_GOT_CALL - Represents the offset into the global offset table at
+ /// which the address of a call site relocation entry symbol resides
+ /// during execution. This is different from the above since this flag
+ /// can only be present in call instructions.
+ MO_GOT_CALL,
+
+ /// MO_GPREL - Represents the offset from the current gp value to be used
+ /// for the relocatable object file being produced.
+ MO_GPREL,
+
+ /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
+ /// address.
+ MO_ABS_HI,
+ MO_ABS_LO,
+
+ /// MO_TLSGD - Represents the offset into the global offset table at which
+ // the module ID and TSL block offset reside during execution (General
+ // Dynamic TLS).
+ MO_TLSGD,
+
+ /// MO_GOTTPREL - Represents the offset from the thread pointer (Initial
+ // Exec TLS).
+ MO_GOTTPREL,
+
+ /// MO_TPREL_HI/LO - Represents the hi and low part of the offset from
+ // the thread pointer (Local Exec TLS).
+ MO_TPREL_HI,
+ MO_TPREL_LO,
+
+ // N32/64 Flags.
+ MO_GPOFF_HI,
+ MO_GPOFF_LO,
+ MO_GOT_DISP,
+ MO_GOT_PAGE,
+ MO_GOT_OFST
+ };
+
+ enum {
+ //===------------------------------------------------------------------===//
+ // Instruction encodings. These are the standard/most common forms for
+ // Mips instructions.
+ //
+
+ // Pseudo - This represents an instruction that is a pseudo instruction
+ // or one that has not been implemented yet. It is illegal to code generate
+ // it, but tolerated for intermediate implementation stages.
+ Pseudo = 0,
+
+ /// FrmR - This form is for instructions of the format R.
+ FrmR = 1,
+ /// FrmI - This form is for instructions of the format I.
+ FrmI = 2,
+ /// FrmJ - This form is for instructions of the format J.
+ FrmJ = 3,
+ /// FrmFR - This form is for instructions of the format FR.
+ FrmFR = 4,
+ /// FrmFI - This form is for instructions of the format FI.
+ FrmFI = 5,
+ /// FrmOther - This form is for instructions that have no specific format.
+ FrmOther = 6,
+
+ FormMask = 15
+ };
+}
+
+
/// getMipsRegisterNumbering - Given the enum value for some register,
/// return the number that it corresponds to.
inline static unsigned getMipsRegisterNumbering(unsigned RegEnum)
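
The Frm* values and FormMask above are meant to be read out of an instruction's TSFlags with a simple mask, which is how the new MC code emitter rejects pseudo instructions. A minimal sketch of that usage, with a hypothetical TSFlags value rather than a real MCInstrDesc:

#include <cassert>
#include <cstdint>

// Mirror of the encoding-format enum added above.
enum MipsFormat {
  Pseudo = 0, FrmR = 1, FrmI = 2, FrmJ = 3,
  FrmFR = 4, FrmFI = 5, FrmOther = 6,
  FormMask = 15
};

// The format lives in the low four bits of TSFlags, so extracting it is a
// plain mask, exactly what EncodeInstruction does before rejecting pseudos.
static MipsFormat getFormat(uint64_t TSFlags) {
  return static_cast<MipsFormat>(TSFlags & FormMask);
}

int main() {
  uint64_t HypotheticalTSFlags = FrmI; // e.g. an I-format load/store
  assert(getFormat(HypotheticalTSFlags) == FrmI);
  assert(getFormat(Pseudo) == Pseudo);  // pseudos must not reach the emitter
  return 0;
}
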
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index d66de23..1115fec 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -12,16 +12,18 @@
//===----------------------------------------------------------------------===//
//
#define DEBUG_TYPE "mccodeemitter"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsFixupKinds.h"
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
-#include "MCTargetDesc/MipsMCTargetDesc.h"
using namespace llvm;
@@ -31,22 +33,217 @@ class MipsMCCodeEmitter : public MCCodeEmitter {
void operator=(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT
const MCInstrInfo &MCII;
const MCSubtargetInfo &STI;
+ MCContext &Ctx;
public:
MipsMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx)
- : MCII(mcii), STI(sti) {}
+ MCContext &ctx) : MCII(mcii), STI(sti) , Ctx(ctx) {}
~MipsMCCodeEmitter() {}
- void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ void EmitByte(unsigned char C, raw_ostream &OS) const {
+ OS << (char)C;
+ }
+
+ void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const {
+ // Output the instruction encoding in little endian byte order.
+ for (unsigned i = 0; i != Size; ++i) {
+ EmitByte(Val & 255, OS);
+ Val >>= 8;
+ }
}
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ unsigned getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getJumpTargetOpValue - Return binary encoding of the jump
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getBranchTargetOpValue - Return binary encoding of the branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getMachineOpValue - Return binary encoding of operand. If the machine
+ // operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
}; // class MipsMCCodeEmitter
} // namespace
MCCodeEmitter *llvm::createMipsMCCodeEmitter(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
- MCContext &Ctx) {
+ MCContext &Ctx)
+{
return new MipsMCCodeEmitter(MCII, STI, Ctx);
}
+
+/// EncodeInstruction - Emit the instruction.
+/// Size the instruction (currently only 4 bytes).
+void MipsMCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const
+{
+ uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+
+ // Check for unimplemented opcodes.
+ // Unfortunately in MIPS both NOT and SLL will come in with Binary == 0
+ // so we have to check for them specially.
+ unsigned Opcode = MI.getOpcode();
+ if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary)
+ llvm_unreachable("unimplemented opcode in EncodeInstruction()");
+
+ const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+ uint64_t TSFlags = Desc.TSFlags;
+
+ // Pseudo instructions don't get encoded and shouldn't be here
+ // in the first place!
+ if ((TSFlags & MipsII::FormMask) == MipsII::Pseudo)
+ llvm_unreachable("Pseudo opcode found in EncodeInstruction()");
+
+ // For now all instructions are 4 bytes
+ int Size = 4; // FIXME: Have Desc.getSize() return the correct value!
+
+ EmitInstruction(Binary, Size, OS);
+}
+
+/// getBranchTargetOpValue - Return binary encoding of the branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_Mips_PC16)));
+ return 0;
+}
+
+/// getJumpTargetOpValue - Return binary encoding of the jump
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_Mips_26)));
+ return 0;
+}
+
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MO.isReg()) {
+ unsigned Reg = MO.getReg();
+ unsigned RegNo = getMipsRegisterNumbering(Reg);
+ return RegNo;
+ } else if (MO.isImm()) {
+ return static_cast<unsigned>(MO.getImm());
+ } else if (MO.isFPImm()) {
+ return static_cast<unsigned>(APFloat(MO.getFPImm())
+ .bitcastToAPInt().getHiBits(32).getLimitedValue());
+ } else if (MO.isExpr()) {
+ const MCExpr *Expr = MO.getExpr();
+ MCExpr::ExprKind Kind = Expr->getKind();
+ if (Kind == MCExpr::SymbolRef) {
+ Mips::Fixups FixupKind = Mips::fixup_Mips_NONE;
+ MCSymbolRefExpr::VariantKind SymRefKind =
+ cast<MCSymbolRefExpr>(Expr)->getKind();
+ switch(SymRefKind) {
+ case MCSymbolRefExpr::VK_Mips_GPREL:
+ FixupKind = Mips::fixup_Mips_GPREL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_CALL:
+ FixupKind = Mips::fixup_Mips_CALL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT:
+ FixupKind = Mips::fixup_Mips_GOT16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ FixupKind = Mips::fixup_Mips_HI16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_LO:
+ FixupKind = Mips::fixup_Mips_LO16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TLSGD:
+ FixupKind = Mips::fixup_Mips_TLSGD;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOTTPREL:
+ FixupKind = Mips::fixup_Mips_GOTTPREL;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_HI:
+ FixupKind = Mips::fixup_Mips_TPREL_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_LO:
+ FixupKind = Mips::fixup_Mips_TPREL_LO;
+ break;
+ default:
+ return 0;
+ } // switch
+ Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(FixupKind)));
+ } // if SymbolRef
+ // All of the information is in the fixup.
+ return 0;
+ }
+ llvm_unreachable("Unable to encode MCOperand!");
+ // Not reached
+ return 0;
+}
+
+/// getMemEncoding - Return binary encoding of memory related operand.
+/// If the offset operand requires relocation, record the relocation.
+unsigned
+MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups);
+
+ return (OffBits & 0xFFFF) | RegBits;
+}
+
+unsigned
+MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // FIXME: implement
+ return 0;
+}
+
+unsigned
+MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // FIXME: implement
+ return 0;
+}
+
+#include "MipsGenMCCodeEmitter.inc"
+
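
Two of the helpers above are easy to check in isolation: EmitInstruction writes the 32-bit encoding least-significant byte first, and getMemEncoding packs the base register into bits 20-16 with the 16-bit offset in bits 15-0. A self-contained sketch of both, using plain integers rather than the MC layer:

#include <cassert>
#include <cstdint>
#include <string>

// Little-endian emission of a fixed-size encoding, as in EmitInstruction.
static std::string emitLE(uint64_t Val, unsigned Size) {
  std::string Out;
  for (unsigned i = 0; i != Size; ++i) {
    Out.push_back(char(Val & 0xff));
    Val >>= 8;
  }
  return Out;
}

// Base register in bits 20-16, 16-bit offset in bits 15-0, mirroring
// getMemEncoding.
static unsigned encodeMem(unsigned BaseRegNo, int16_t Offset) {
  return (BaseRegNo << 16) | (uint16_t(Offset) & 0xffff);
}

int main() {
  std::string Bytes = emitLE(0x12345678, 4);
  assert((unsigned char)Bytes[0] == 0x78 && (unsigned char)Bytes[3] == 0x12);

  // e.g. 8($29): base register 29 (sp), offset 8.
  assert(encodeMem(29, 8) == ((29u << 16) | 8u));
  // Negative offsets are stored as their low 16 bits.
  assert(encodeMem(29, -4) == ((29u << 16) | 0xfffcu));
  return 0;
}
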
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 1f9e3dd..e6040e4 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MipsMCTargetDesc.h"
#include "MipsMCAsmInfo.h"
+#include "MipsMCTargetDesc.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/MCCodeGenInfo.h"
@@ -140,6 +140,9 @@ extern "C" void LLVMInitializeMipsTargetMC() {
TargetRegistry::RegisterMCAsmBackend(TheMips64Target, createMipsAsmBackend);
TargetRegistry::RegisterMCAsmBackend(TheMips64elTarget, createMipsAsmBackend);
+ TargetRegistry::RegisterMCCodeEmitter(TheMipsTarget, createMipsMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheMipselTarget, createMipsMCCodeEmitter);
+
// Register the MC subtarget info.
TargetRegistry::RegisterMCSubtargetInfo(TheMipsTarget,
createMipsMCSubtargetInfo);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index 7a0042a..fc43d2d 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -16,12 +16,14 @@
namespace llvm {
class MCAsmBackend;
-class MCInstrInfo;
class MCCodeEmitter;
class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
class MCSubtargetInfo;
class StringRef;
class Target;
+class raw_ostream;
extern Target TheMipsTarget;
extern Target TheMipselTarget;
@@ -33,6 +35,7 @@ MCCodeEmitter *createMipsMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createMipsAsmBackend(const Target &T, StringRef TT);
+
} // End llvm namespace
// Defines symbolic names for Mips registers. This defines a mapping from
diff --git a/lib/Target/Mips/Makefile b/lib/Target/Mips/Makefile
index d72693c..94f7c18 100644
--- a/lib/Target/Mips/Makefile
+++ b/lib/Target/Mips/Makefile
@@ -15,7 +15,7 @@ TARGET = Mips
BUILT_SOURCES = MipsGenRegisterInfo.inc MipsGenInstrInfo.inc \
MipsGenAsmWriter.inc MipsGenCodeEmitter.inc \
MipsGenDAGISel.inc MipsGenCallingConv.inc \
- MipsGenSubtargetInfo.inc
+ MipsGenSubtargetInfo.inc MipsGenMCCodeEmitter.inc
DIRS = InstPrinter TargetInfo MCTargetDesc
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index 9a769e8..3c97241 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -51,12 +51,58 @@ class shift_rotate_imm64_32<bits<6> func, bits<5> isRotate, string instr_asm,
shift_rotate_imm<func, isRotate, instr_asm, OpNode, imm32_63, shamt,
CPU64Regs>;
+// Jump and Link (Call)
+let isCall=1, hasDelaySlot=1,
+ // All calls clobber the non-callee saved registers...
+ Defs = [AT, V0, V1, A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ K0, K1, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9], Uses = [GP] in {
+ class JumpLink64<bits<6> op, string instr_asm>:
+ FJ<op, (outs), (ins calltarget64:$target, variable_ops),
+ !strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)],
+ IIBranch>;
+
+ class JumpLinkReg64<bits<6> op, bits<6> func, string instr_asm>:
+ FR<op, func, (outs), (ins CPU64Regs:$rs, variable_ops),
+ !strconcat(instr_asm, "\t$rs"),
+ [(MipsJmpLink CPU64Regs:$rs)], IIBranch> {
+ let rt = 0;
+ let rd = 31;
+ let shamt = 0;
+ }
+
+ class BranchLink64<string instr_asm>:
+ FI<0x1, (outs), (ins CPU64Regs:$rs, brtarget:$imm16, variable_ops),
+ !strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch>;
+}
+
// Mul, Div
class Mult64<bits<6> func, string instr_asm, InstrItinClass itin>:
Mult<func, instr_asm, itin, CPU64Regs, [HI64, LO64]>;
class Div64<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
Div<op, func, instr_asm, itin, CPU64Regs, [HI64, LO64]>;
+multiclass Atomic2Ops64<PatFrag Op, string Opstr> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPU64Regs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>, Requires<[IsN64]>;
+}
+
+multiclass AtomicCmpSwap64<PatFrag Op, string Width> {
+ def #NAME# : AtomicCmpSwap<Op, Width, CPU64Regs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : AtomicCmpSwap<Op, Width, CPU64Regs, CPU64Regs>,
+ Requires<[IsN64]>;
+}
+
+let usesCustomInserter = 1, Predicates = [HasMips64] in {
+ defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64, "load_add_64">;
+ defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64, "load_sub_64">;
+ defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64, "load_and_64">;
+ defm ATOMIC_LOAD_OR_I64 : Atomic2Ops64<atomic_load_or_64, "load_or_64">;
+ defm ATOMIC_LOAD_XOR_I64 : Atomic2Ops64<atomic_load_xor_64, "load_xor_64">;
+ defm ATOMIC_LOAD_NAND_I64 : Atomic2Ops64<atomic_load_nand_64, "load_nand_64">;
+ defm ATOMIC_SWAP_I64 : Atomic2Ops64<atomic_swap_64, "swap_64">;
+ defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64, "64">;
+}
+
//===----------------------------------------------------------------------===//
// Instruction definition
//===----------------------------------------------------------------------===//
@@ -122,7 +168,15 @@ defm USW64 : StoreM64<0x2b, "usw", truncstorei32_u, 1>;
defm ULD : LoadM64<0x37, "uld", load_u, 1>;
defm USD : StoreM64<0x3f, "usd", store_u, 1>;
+/// Load-linked, Store-conditional
+def LLD : LLBase<0x34, "lld", CPU64Regs, mem>, Requires<[NotN64]>;
+def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>, Requires<[IsN64]>;
+def SCD : SCBase<0x3c, "scd", CPU64Regs, mem>, Requires<[NotN64]>;
+def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>, Requires<[IsN64]>;
+
/// Jump and Branch Instructions
+def JAL64 : JumpLink64<0x03, "jal">;
+def JALR64 : JumpLinkReg64<0x00, 0x09, "jalr">;
def BEQ64 : CBranch<0x04, "beq", seteq, CPU64Regs>;
def BNE64 : CBranch<0x05, "bne", setne, CPU64Regs>;
def BGEZ64 : CBranchZero<0x01, 1, "bgez", setge, CPU64Regs>;
@@ -145,6 +199,12 @@ def MFLO64 : MoveFromLOHI<0x12, "mflo", CPU64Regs, [LO64]>;
def DCLZ : CountLeading0<0x24, "dclz", CPU64Regs>;
def DCLO : CountLeading1<0x25, "dclo", CPU64Regs>;
+def LEA_ADDiu64 : EffectiveAddress<"addiu\t$rt, $addr", CPU64Regs, mem_ea_64>;
+
+let Uses = [SP_64] in
+def DynAlloc64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>,
+ Requires<[IsN64]>;
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
@@ -155,11 +215,20 @@ def : Pat<(i64 immSExt16:$in),
def : Pat<(i64 immZExt16:$in),
(ORi64 ZERO_64, imm:$in)>;
-// zextloadi32_u
-def : Pat<(zextloadi32_u addr:$a), (DSRL32 (DSLL32 (ULW64_P8 addr:$a), 0), 0)>,
- Requires<[IsN64]>;
-def : Pat<(zextloadi32_u addr:$a), (DSRL32 (DSLL32 (ULW64 addr:$a), 0), 0)>,
- Requires<[NotN64]>;
+// Arbitrary immediates
+def : Pat<(i64 imm:$imm),
+ (ORi64 (LUi64 (HI16 imm:$imm)), (LO16 imm:$imm))>;
+
+// extended loads
+let Predicates = [NotN64] in {
+ def : Pat<(extloadi32_a addr:$a), (DSRL32 (DSLL32 (LW64 addr:$a), 0), 0)>;
+ def : Pat<(zextloadi32_u addr:$a), (DSRL32 (DSLL32 (ULW64 addr:$a), 0), 0)>;
+}
+let Predicates = [IsN64] in {
+ def : Pat<(extloadi32_a addr:$a), (DSRL32 (DSLL32 (LW64_P8 addr:$a), 0), 0)>;
+ def : Pat<(zextloadi32_u addr:$a),
+ (DSRL32 (DSLL32 (ULW64_P8 addr:$a), 0), 0)>;
+}
// hi/lo relocs
def : Pat<(i64 (MipsLo tglobaladdr:$in)), (DADDiu ZERO_64, tglobaladdr:$in)>;
@@ -174,6 +243,9 @@ defm : SetgtPats<CPU64Regs, SLT64, SLTu64>;
defm : SetgePats<CPU64Regs, SLT64, SLTu64>;
defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>;
+// select MipsDynAlloc
+def : Pat<(MipsDynAlloc addr:$f), (DynAlloc64 addr:$f)>, Requires<[IsN64]>;
+
// truncate
def : Pat<(i32 (trunc CPU64Regs:$src)),
(SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>, Requires<[IsN64]>;
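
The new "Arbitrary immediates" pattern above materializes a constant as a LUi of the high half followed by an ORi of the low half. The 32-bit composition it relies on is simple enough to show standalone; this sketch deliberately covers only the plain 32-bit case and says nothing about how the 64-bit pattern extends it:

#include <cassert>
#include <cstdint>

// HI16/LO16 as used by the lui/ori materialization pattern: lui places the
// high half in bits 31-16, ori then fills in the low half.
static uint32_t hi16(uint32_t Imm) { return (Imm >> 16) & 0xffff; }
static uint32_t lo16(uint32_t Imm) { return Imm & 0xffff; }

// Value produced by the two-instruction sequence.
static uint32_t materialize(uint32_t Imm) {
  uint32_t AfterLui = hi16(Imm) << 16; // lui  rt, %hi(imm)
  return AfterLui | lo16(Imm);         // ori  rt, rt, %lo(imm)
}

int main() {
  assert(materialize(0x00000000u) == 0x00000000u);
  assert(materialize(0x0001ffffu) == 0x0001ffffu);
  assert(materialize(0xdeadbeefu) == 0xdeadbeefu);
  return 0;
}
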
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index d7b7f06..186a5e3 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -20,6 +20,7 @@
#include "MipsMCInstLower.h"
#include "MipsMCSymbolRefExpr.h"
#include "InstPrinter/MipsInstPrinter.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index 0ae4ef6..3d973ce 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -35,8 +35,9 @@ def RetCC_MipsO32 : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_MipsN : CallingConv<[
- // FIXME: Handle byval, complex and float double parameters.
-
+ // Handles byval parameters.
+ CCIfByVal<CCCustom<"CC_Mips64Byval">>,
+
// Promote i8/i16/i32 arguments to i64.
CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
@@ -63,6 +64,25 @@ def CC_MipsN : CallingConv<[
CCIfType<[f32], CCAssignToStack<4, 8>>
]>;
+// N32/64 variable arguments.
+// All arguments are passed in integer registers.
+def CC_MipsN_VarArg : CallingConv<[
+ // Handles byval parameters.
+ CCIfByVal<CCCustom<"CC_Mips64Byval">>,
+
+ // Promote i8/i16/i32 arguments to i64.
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+
+ CCIfType<[i64, f64], CCAssignToReg<[A0_64, A1_64, A2_64, A3_64,
+ T0_64, T1_64, T2_64, T3_64]>>,
+
+ CCIfType<[f32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,
+
+ // All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
+ CCIfType<[i64, f64], CCAssignToStack<8, 8>>,
+ CCIfType<[f32], CCAssignToStack<4, 8>>
+]>;
+
def RetCC_MipsN : CallingConv<[
// FIXME: Handle complex and float double return values.
diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp
index dc4ecd6..a8f29ae 100644
--- a/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -18,18 +18,20 @@
#include "MipsRelocations.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/PassManager.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/ADT/Statistic.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -37,8 +39,6 @@
#include <iomanip>
#endif
-#include "llvm/CodeGen/MachineOperand.h"
-
using namespace llvm;
STATISTIC(NumEmitted, "Number of machine instructions emitted");
@@ -66,9 +66,9 @@ class MipsCodeEmitter : public MachineFunctionPass {
public:
MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) :
MachineFunctionPass(ID), JTI(0),
- II((const MipsInstrInfo *) tm.getInstrInfo()),
- TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
+ II((const MipsInstrInfo *) tm.getInstrInfo()),
+ TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
}
bool runOnMachineFunction(MachineFunction &MF);
@@ -91,7 +91,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
/// Routines that handle operands which add machine relocations which are
/// fixed up by the relocation stage.
void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub) const;
+ bool MayNeedFarStub) const;
void emitExternalSymbolAddress(const char *ES, unsigned Reloc) const;
void emitConstPoolAddress(unsigned CPI, unsigned Reloc) const;
void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const;
@@ -105,6 +105,9 @@ class MipsCodeEmitter : public MachineFunctionPass {
unsigned getRelocation(const MachineInstr &MI,
const MachineOperand &MO) const;
+ unsigned getJumpTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
+
+ unsigned getBranchTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeExtEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeInsEncoding(const MachineInstr &MI, unsigned OpNo) const;
@@ -165,23 +168,34 @@ unsigned MipsCodeEmitter::getRelocation(const MachineInstr &MI,
return Mips::reloc_mips_lo;
}
+unsigned MipsCodeEmitter::getJumpTargetOpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ // FIXME: implement
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ // FIXME: implement
+ return 0;
+}
+
unsigned MipsCodeEmitter::getMemEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
assert(MI.getOperand(OpNo).isReg());
unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo)) << 16;
- return
- (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
+ return (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
}
unsigned MipsCodeEmitter::getSizeExtEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// size is encoded as size-1.
return getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
}
unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// size is encoded as pos+size-1.
return getMachineOpValue(MI, MI.getOperand(OpNo-1)) +
getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
@@ -190,7 +204,7 @@ unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO) const {
+ const MachineOperand &MO) const {
if (MO.isReg())
return MipsRegisterInfo::getRegisterNumbering(MO.getReg());
else if (MO.isImm())
@@ -217,9 +231,10 @@ unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
}
void MipsCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub) const {
+ bool MayNeedFarStub) const {
MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- const_cast<GlobalValue *>(GV), 0, MayNeedFarStub));
+ const_cast<GlobalValue *>(GV), 0,
+ MayNeedFarStub));
}
void MipsCodeEmitter::emitGlobalAddressUnaligned(const GlobalValue *GV,
@@ -248,7 +263,7 @@ emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const {
}
void MipsCodeEmitter::emitMachineBasicBlock(MachineBasicBlock *BB,
- unsigned Reloc) const {
+ unsigned Reloc) const {
MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
Reloc, BB));
}
@@ -395,7 +410,7 @@ void MipsCodeEmitter::emitWordLE(unsigned Word) {
/// createMipsJITCodeEmitterPass - Return a pass that emits the collected Mips
/// code to the specified MCE object.
FunctionPass *llvm::createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
- JITCodeEmitter &JCE) {
+ JITCodeEmitter &JCE) {
return new MipsCodeEmitter(TM, JCE);
}
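
The size encodings in the JIT emitter above mirror the MC versions: for ext the size operand is emitted as size-1, and for ins it is emitted as pos+size-1 (the index of the field's most significant bit). A tiny standalone sketch:

#include <cassert>

// Sketch of the two field encodings used by the code emitter above.
static unsigned encodeExtSize(unsigned Size) { return Size - 1; }
static unsigned encodeInsSize(unsigned Pos, unsigned Size) {
  return Pos + Size - 1;
}

int main() {
  assert(encodeExtSize(8) == 7);      // extract an 8-bit field
  assert(encodeInsSize(4, 8) == 11);  // insert 8 bits starting at bit 4
  return 0;
}
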
diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp
index 71f3116..19bb1a5 100644
--- a/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/lib/Target/Mips/MipsFrameLowering.cpp
@@ -14,6 +14,7 @@
#include "MipsFrameLowering.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -149,6 +150,11 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned NewReg = 0;
int NewImm = 0;
bool ATUsed;
+ unsigned GP = STI.isABI_N64() ? Mips::GP_64 : Mips::GP;
+ unsigned T9 = STI.isABI_N64() ? Mips::T9_64 : Mips::T9;
+ unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
+ unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
// First, compute final stack size.
unsigned RegSize = STI.isGP32bit() ? 4 : 8;
@@ -157,7 +163,6 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
(MFI->getObjectOffset(MipsFI->getGPFI()) + RegSize) :
MipsFI->getMaxCallFrameSize();
unsigned StackSize = AlignOffset(LocalVarAreaOffset, StackAlign) +
- AlignOffset(MipsFI->getRegSaveAreaSize(), StackAlign) +
AlignOffset(MFI->getStackSize(), StackAlign);
// Update stack size
@@ -165,10 +170,25 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(Mips::NOREORDER));
- // TODO: check need from GP here.
+ // Emit instructions that set $gp using the value of $t9.
+ // O32 uses the directive .cpload while N32/64 requires three instructions to
+ // do this.
+ // TODO: Do not emit these instructions if no instructions use $gp.
if (isPIC && STI.isABI_O32())
BuildMI(MBB, MBBI, dl, TII.get(Mips::CPLOAD))
.addReg(RegInfo->getPICCallReg());
+ else if (STI.isABI_N64() || (isPIC && STI.isABI_N32())) {
+ // lui $28,%hi(%neg(%gp_rel(fname)))
+ // addu $28,$28,$25
+ // addiu $28,$28,%lo(%neg(%gp_rel(fname)))
+ const GlobalValue *FName = MF.getFunction();
+ BuildMI(MBB, MBBI, dl, TII.get(LUi), GP)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
+ BuildMI(MBB, MBBI, dl, TII.get(ADDu), GP).addReg(GP).addReg(T9);
+ BuildMI(MBB, MBBI, dl, TII.get(ADDiu), GP).addReg(GP)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO);
+ }
+
BuildMI(MBB, MBBI, dl, TII.get(Mips::NOMACRO));
// No need to allocate space on the stack.
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 50aa78f..b595f03 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -24,6 +24,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "InstPrinter/MipsInstPrinter.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -395,7 +396,8 @@ static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG,
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && SelectMadd(N, &DAG))
+ if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
+ SelectMadd(N, &DAG))
return SDValue(N, 0);
return SDValue();
@@ -407,7 +409,8 @@ static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG,
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && SelectMsub(N, &DAG))
+ if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
+ SelectMsub(N, &DAG))
return SDValue(N, 0);
return SDValue();
@@ -794,60 +797,108 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
assert(false && "Unexpected instr type to insert");
return NULL;
case Mips::ATOMIC_LOAD_ADD_I8:
+ case Mips::ATOMIC_LOAD_ADD_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I16:
+ case Mips::ATOMIC_LOAD_ADD_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I32:
+ case Mips::ATOMIC_LOAD_ADD_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::ADDu);
+ case Mips::ATOMIC_LOAD_ADD_I64:
+ case Mips::ATOMIC_LOAD_ADD_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::DADDu);
case Mips::ATOMIC_LOAD_AND_I8:
+ case Mips::ATOMIC_LOAD_AND_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I16:
+ case Mips::ATOMIC_LOAD_AND_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I32:
+ case Mips::ATOMIC_LOAD_AND_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::AND);
+ case Mips::ATOMIC_LOAD_AND_I64:
+ case Mips::ATOMIC_LOAD_AND_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::AND64);
case Mips::ATOMIC_LOAD_OR_I8:
+ case Mips::ATOMIC_LOAD_OR_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I16:
+ case Mips::ATOMIC_LOAD_OR_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I32:
+ case Mips::ATOMIC_LOAD_OR_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::OR);
+ case Mips::ATOMIC_LOAD_OR_I64:
+ case Mips::ATOMIC_LOAD_OR_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::OR64);
case Mips::ATOMIC_LOAD_XOR_I8:
+ case Mips::ATOMIC_LOAD_XOR_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I16:
+ case Mips::ATOMIC_LOAD_XOR_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I32:
+ case Mips::ATOMIC_LOAD_XOR_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::XOR);
+ case Mips::ATOMIC_LOAD_XOR_I64:
+ case Mips::ATOMIC_LOAD_XOR_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::XOR64);
case Mips::ATOMIC_LOAD_NAND_I8:
+ case Mips::ATOMIC_LOAD_NAND_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, 0, true);
case Mips::ATOMIC_LOAD_NAND_I16:
+ case Mips::ATOMIC_LOAD_NAND_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, 0, true);
case Mips::ATOMIC_LOAD_NAND_I32:
+ case Mips::ATOMIC_LOAD_NAND_I32_P8:
return EmitAtomicBinary(MI, BB, 4, 0, true);
+ case Mips::ATOMIC_LOAD_NAND_I64:
+ case Mips::ATOMIC_LOAD_NAND_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, 0, true);
case Mips::ATOMIC_LOAD_SUB_I8:
+ case Mips::ATOMIC_LOAD_SUB_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I16:
+ case Mips::ATOMIC_LOAD_SUB_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I32:
+ case Mips::ATOMIC_LOAD_SUB_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::SUBu);
+ case Mips::ATOMIC_LOAD_SUB_I64:
+ case Mips::ATOMIC_LOAD_SUB_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::DSUBu);
case Mips::ATOMIC_SWAP_I8:
+ case Mips::ATOMIC_SWAP_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, 0);
case Mips::ATOMIC_SWAP_I16:
+ case Mips::ATOMIC_SWAP_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, 0);
case Mips::ATOMIC_SWAP_I32:
+ case Mips::ATOMIC_SWAP_I32_P8:
return EmitAtomicBinary(MI, BB, 4, 0);
+ case Mips::ATOMIC_SWAP_I64:
+ case Mips::ATOMIC_SWAP_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, 0);
case Mips::ATOMIC_CMP_SWAP_I8:
+ case Mips::ATOMIC_CMP_SWAP_I8_P8:
return EmitAtomicCmpSwapPartword(MI, BB, 1);
case Mips::ATOMIC_CMP_SWAP_I16:
+ case Mips::ATOMIC_CMP_SWAP_I16_P8:
return EmitAtomicCmpSwapPartword(MI, BB, 2);
case Mips::ATOMIC_CMP_SWAP_I32:
+ case Mips::ATOMIC_CMP_SWAP_I32_P8:
return EmitAtomicCmpSwap(MI, BB, 4);
+ case Mips::ATOMIC_CMP_SWAP_I64:
+ case Mips::ATOMIC_CMP_SWAP_I64_P8:
+ return EmitAtomicCmpSwap(MI, BB, 8);
}
}
@@ -857,13 +908,31 @@ MachineBasicBlock *
MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Size, unsigned BinOpcode,
bool Nand) const {
- assert(Size == 4 && "Unsupported size for EmitAtomicBinary.");
+ assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicBinary.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL, SC, AND, NOR, ZERO, BEQ;
+
+ if (Size == 4) {
+ LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ AND = Mips::AND;
+ NOR = Mips::NOR;
+ ZERO = Mips::ZERO;
+ BEQ = Mips::BEQ;
+ }
+ else {
+ LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
+ SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ AND = Mips::AND64;
+ NOR = Mips::NOR64;
+ ZERO = Mips::ZERO_64;
+ BEQ = Mips::BEQ64;
+ }
unsigned OldVal = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -901,23 +970,20 @@ MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// sc success, storeval, 0(ptr)
// beq success, $0, loopMBB
BB = loopMBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
if (Nand) {
// and andres, oldval, incr
// nor storeval, $0, andres
- BuildMI(BB, dl, TII->get(Mips::AND), AndRes).addReg(OldVal).addReg(Incr);
- BuildMI(BB, dl, TII->get(Mips::NOR), StoreVal)
- .addReg(Mips::ZERO).addReg(AndRes);
+ BuildMI(BB, dl, TII->get(AND), AndRes).addReg(OldVal).addReg(Incr);
+ BuildMI(BB, dl, TII->get(NOR), StoreVal).addReg(ZERO).addReg(AndRes);
} else if (BinOpcode) {
// <binop> storeval, oldval, incr
BuildMI(BB, dl, TII->get(BinOpcode), StoreVal).addReg(OldVal).addReg(Incr);
} else {
StoreVal = Incr;
}
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
- .addReg(StoreVal).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BEQ))
- .addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
+ BuildMI(BB, dl, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
MI->eraseFromParent(); // The instruction is gone now.
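For reference, the ll/sc retry loop built above has the same semantics as the following host-side model, a minimal sketch using std::atomic in which compare_exchange_weak stands in for the sc/beq pair; the function and parameter names are illustrative, not LLVM API:

#include <atomic>
#include <cstdint>
#include <cstdio>

// Host-side model of the loop emitted by EmitAtomicBinary: the initial load
// plays the role of ll, the body computes storeval, and compare_exchange_weak
// plays the role of sc + beq (it fails and retries if another thread wrote
// the location in between).
template <typename BinOp>
uint64_t atomicRMWModel(std::atomic<uint64_t> &Mem, uint64_t Incr, BinOp Op,
                        bool Nand) {
  uint64_t OldVal = Mem.load();                          // ll oldval, 0(ptr)
  uint64_t StoreVal;
  do {
    StoreVal = Nand ? ~(OldVal & Incr)                   // and + nor (nand case)
                    : Op(OldVal, Incr);                  // <binop> storeval, oldval, incr
  } while (!Mem.compare_exchange_weak(OldVal, StoreVal)); // sc + beq back to loop
  return OldVal;                                         // pseudo returns the old value
}

int main() {
  std::atomic<uint64_t> Mem{7};
  uint64_t Old = atomicRMWModel(Mem, 5,
                                [](uint64_t A, uint64_t B) { return A & B; },
                                /*Nand=*/false);         // models ATOMIC_LOAD_AND
  std::printf("old=%llu new=%llu\n", (unsigned long long)Old,
              (unsigned long long)Mem.load());
}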
@@ -937,6 +1003,8 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1028,7 +1096,7 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
// beq success,$0,loopMBB
BB = loopMBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@@ -1051,7 +1119,7 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
@@ -1082,13 +1150,29 @@ MachineBasicBlock *
MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned Size) const {
- assert(Size == 4 && "Unsupported size for EmitAtomicCmpSwap.");
+ assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL, SC, ZERO, BNE, BEQ;
+
+ if (Size == 4) {
+ LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ ZERO = Mips::ZERO;
+ BNE = Mips::BNE;
+ BEQ = Mips::BEQ;
+ }
+ else {
+ LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
+ SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ ZERO = Mips::ZERO_64;
+ BNE = Mips::BNE64;
+ BEQ = Mips::BEQ64;
+ }
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1127,18 +1211,18 @@ MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
// ll dest, 0(ptr)
// bne dest, oldval, exitMBB
BB = loop1MBB;
- BuildMI(BB, dl, TII->get(Mips::LL), Dest).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BNE))
+ BuildMI(BB, dl, TII->get(LL), Dest).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(BNE))
.addReg(Dest).addReg(OldVal).addMBB(exitMBB);
// loop2MBB:
// sc success, newval, 0(ptr)
// beq success, $0, loop1MBB
BB = loop2MBB;
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(NewVal).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BEQ))
- .addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
+ BuildMI(BB, dl, TII->get(BEQ))
+ .addReg(Success).addReg(ZERO).addMBB(loop1MBB);
MI->eraseFromParent(); // The instruction is gone now.
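The opcode choices above follow one small table: 32-bit operations use ll/sc, 64-bit operations use lld/scd, and under N64 each gets its _P8 variant because the pointer operand lives in a 64-bit register. A sketch of that selection, with strings standing in for the Mips:: opcode enumerators:

#include <cstdio>

// Sketch of the ll/sc opcode choice shared by EmitAtomicBinary and
// EmitAtomicCmpSwap. The string names are illustrative stand-ins.
static const char *pickLL(unsigned Size, bool IsN64) {
  if (Size == 4) return IsN64 ? "LL_P8" : "LL";    // 32-bit load-linked
  return IsN64 ? "LLD_P8" : "LLD";                 // 64-bit load-linked
}
static const char *pickSC(unsigned Size, bool IsN64) {
  if (Size == 4) return IsN64 ? "SC_P8" : "SC";    // 32-bit store-conditional
  return IsN64 ? "SCD_P8" : "SCD";                 // 64-bit store-conditional
}

int main() {
  for (unsigned Size : {4u, 8u})
    for (bool IsN64 : {false, true})
      std::printf("Size=%u N64=%d -> %s/%s\n", Size, IsN64,
                  pickLL(Size, IsN64), pickSC(Size, IsN64));
}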
@@ -1157,6 +1241,8 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1247,7 +1333,7 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, dl, TII->get(Mips::BNE))
@@ -1263,7 +1349,7 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
@@ -1295,6 +1381,7 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
{
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+ unsigned SP = IsN64 ? Mips::SP_64 : Mips::SP;
assert(getTargetMachine().getFrameLowering()->getStackAlignment() >=
cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue() &&
@@ -1306,20 +1393,19 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc();
// Get a reference from Mips stack pointer
- SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, MVT::i32);
+ SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SP, getPointerTy());
// Subtract the dynamic size from the actual stack size to
// obtain the new stack size.
- SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, getPointerTy(), StackPointer, Size);
// The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
- Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub,
- SDValue());
+ Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, SP, Sub, SDValue());
// This node always has two return values: a new stack pointer
// value and a chain
- SDVTList VTLs = DAG.getVTList(MVT::i32, MVT::Other);
+ SDVTList VTLs = DAG.getVTList(getPointerTy(), MVT::Other);
SDValue Ptr = DAG.getFrameIndex(MipsFI->getDynAllocFI(), getPointerTy());
SDValue Ops[] = { Chain, Ptr, Chain.getValue(1) };
@@ -1658,7 +1744,8 @@ LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
- SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Mips::FP, VT);
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ IsN64 ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
}
@@ -1685,8 +1772,6 @@ SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op,
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
-#include "MipsGenCallingConv.inc"
-
//===----------------------------------------------------------------------===//
// TODO: Implement a generic logic using tblgen that can support this.
// Mips O32 ABI rules:
@@ -1793,6 +1878,70 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
return false; // CC must always match
}
+static const unsigned Mips64IntRegs[8] =
+ {Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
+ Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
+static const unsigned Mips64DPRegs[8] =
+ {Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
+ Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64};
+
+static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ unsigned Align = std::max(ArgFlags.getByValAlign(), (unsigned)8);
+ unsigned Size = (ArgFlags.getByValSize() + 7) / 8 * 8;
+ unsigned FirstIdx = State.getFirstUnallocated(Mips64IntRegs, 8);
+
+ assert(Align <= 16 && "Cannot handle alignments larger than 16.");
+
+ // If byval is 16-byte aligned, the first arg register must be even.
+ if ((Align == 16) && (FirstIdx % 2)) {
+ State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]);
+ ++FirstIdx;
+ }
+
+ // Mark the registers allocated.
+ for (unsigned I = FirstIdx; Size && (I < 8); Size -= 8, ++I)
+ State.AllocateReg(Mips64IntRegs[I], Mips64DPRegs[I]);
+
+ // Allocate space on caller's stack.
+ unsigned Offset = State.AllocateStack(Size, Align);
+
+ if (FirstIdx < 8)
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx],
+ LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+
+ return true;
+}
+
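As a worked example of the accounting in CC_Mips64Byval above: a hypothetical 20-byte, 16-byte-aligned byval arriving when three argument registers are already taken skips one register to reach an even index, rounds its size up to 24, and then occupies three of the remaining registers. A small sketch of that arithmetic:

#include <algorithm>
#include <cassert>
#include <cstdio>

// Worked model of the register/stack accounting in CC_Mips64Byval.
// ByValSize, ByValAlign and FirstIdx are hypothetical inputs for illustration.
int main() {
  unsigned ByValAlign = 16, ByValSize = 20, FirstIdx = 3;

  unsigned Align = std::max(ByValAlign, 8u);
  unsigned Size = (ByValSize + 7) / 8 * 8;        // round up to doublewords
  assert(Align <= 16 && "Cannot handle alignments larger than 16.");

  // A 16-byte aligned byval must start in an even register.
  if (Align == 16 && (FirstIdx % 2))
    ++FirstIdx;                                   // burn one register

  unsigned RegsUsed = 0;
  for (unsigned I = FirstIdx, Rem = Size; Rem && I < 8; Rem -= 8, ++I)
    ++RegsUsed;                                   // $a/$t registers consumed

  std::printf("Align=%u Size=%u FirstIdx=%u RegsUsed=%u\n",
              Align, Size, FirstIdx, RegsUsed);   // 16, 24, 4, 3
}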
+#include "MipsGenCallingConv.inc"
+
+static void
+AnalyzeMips64CallOperands(CCState &CCInfo,
+ const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ unsigned NumOps = Outs.size();
+ for (unsigned i = 0; i != NumOps; ++i) {
+ MVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ bool R;
+
+ if (Outs[i].IsFixed)
+ R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+ else
+ R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+
+ if (R) {
+#ifndef NDEBUG
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << EVT(ArgVT).getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -1901,6 +2050,90 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
MachinePointerInfo(0), MachinePointerInfo(0));
}
+// Copy Mips64 byVal arg to registers and stack.
+static void
+PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
+ SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass,
+ SmallVector<SDValue, 8>& MemOpChains, int& LastFI,
+ MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
+ const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ EVT PtrTy, bool isLittle) {
+ unsigned ByValSize = Flags.getByValSize();
+ unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8);
+ bool IsRegLoc = VA.isRegLoc();
+ unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
+ unsigned LocMemOffset = 0;
+
+ if (!IsRegLoc)
+ LocMemOffset = VA.getLocMemOffset();
+ else {
+ const unsigned *Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8,
+ VA.getLocReg());
+ const unsigned *RegEnd = Mips64IntRegs + 8;
+
+ // Copy double words to registers.
+ for (; (Reg != RegEnd) && (ByValSize >= Offset + 8); ++Reg, Offset += 8) {
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal = DAG.getLoad(MVT::i64, dl, Chain, LoadPtr,
+ MachinePointerInfo(), false, false, false,
+ Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+ RegsToPass.push_back(std::make_pair(*Reg, LoadVal));
+ }
+
+ // If there is an argument register available, copy the remainder of the
+ // byval argument with sub-doubleword loads and shifts.
+ if ((Reg != RegEnd) && (ByValSize != Offset)) {
+      assert((ByValSize < Offset + 8) &&
+             "Size of the remainder should be smaller than 8 bytes.");
+ SDValue Val;
+ for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
+ unsigned RemSize = ByValSize - Offset;
+
+ if (RemSize < LoadSize)
+ continue;
+
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal =
+ DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr,
+ MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
+ false, false, Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+
+ // Offset in number of bits from double word boundary.
+ unsigned OffsetDW = (Offset % 8) * 8;
+ unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
+ SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal,
+ DAG.getConstant(Shamt, MVT::i32));
+
+ Val = Val.getNode() ? DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) :
+ Shift;
+ Offset += LoadSize;
+ Alignment = std::min(Alignment, LoadSize);
+ }
+
+ RegsToPass.push_back(std::make_pair(*Reg, Val));
+ return;
+ }
+ }
+
+ unsigned MemCpySize = ByValSize - Offset;
+ if (MemCpySize) {
+ // Create a fixed object on stack at offset LocMemOffset and copy
+ // remainder of byval arg to it with memcpy.
+ SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ LastFI = MFI->CreateFixedObject(MemCpySize, LocMemOffset, true);
+ SDValue Dst = DAG.getFrameIndex(LastFI, PtrTy);
+ ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src,
+ DAG.getConstant(MemCpySize, PtrTy), Alignment,
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(0), MachinePointerInfo(0));
+ }
+}
+
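PassByValArg64's tail loop packs the final, sub-doubleword piece of the struct into one register with progressively smaller zero-extending loads and shifts. A host-side sketch of that packing on a byte buffer; the buffer contents, sizes and offsets are hypothetical, and the memcpy-based loads assume a little-endian host:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Host model of the sub-doubleword packing loop in PassByValArg64.
// Buf stands in for the byval argument in memory; ByValSize and Offset are
// chosen so that 5 trailing bytes remain to be packed.
static uint64_t loadZExt(const uint8_t *P, unsigned Bytes) {
  uint64_t V = 0;
  std::memcpy(&V, P, Bytes);            // zero-extending load (little-endian host)
  return V;
}

int main() {
  uint8_t Buf[16];
  for (unsigned i = 0; i < 16; ++i) Buf[i] = uint8_t(i + 1);
  unsigned ByValSize = 13, Offset = 8;  // 5 bytes left after whole doublewords
  bool isLittle = true;

  uint64_t Val = 0;
  for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
    unsigned RemSize = ByValSize - Offset;
    if (RemSize < LoadSize)
      continue;                         // try the next smaller load size
    uint64_t LoadVal = loadZExt(Buf + Offset, LoadSize);
    unsigned OffsetDW = (Offset % 8) * 8;   // bit offset inside the doubleword
    unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
    Val |= LoadVal << Shamt;            // or the shifted piece into the register
    Offset += LoadSize;
  }
  std::printf("packed remainder = 0x%016llx\n", (unsigned long long)Val);
}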
/// LowerCall - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: isTailCall.
@@ -1929,6 +2162,8 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
if (IsO32)
CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
+ else if (HasMips64)
+ AnalyzeMips64CallOperands(CCInfo, Outs);
else
CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
@@ -1987,6 +2222,22 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
SDValue Arg = OutVals[i];
CCValAssign &VA = ArgLocs[i];
MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ // ByVal Arg.
+ if (Flags.isByVal()) {
+ assert(Flags.getByValSize() &&
+ "ByVal args of size 0 should have been ignored by front-end.");
+ if (IsO32)
+ WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ MFI, DAG, Arg, VA, Flags, getPointerTy(),
+ Subtarget->isLittle());
+ else
+ PassByValArg64(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ MFI, DAG, Arg, VA, Flags, getPointerTy(),
+ Subtarget->isLittle());
+ continue;
+ }
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -2032,18 +2283,6 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// Register can't get to this point...
assert(VA.isMemLoc());
- // ByVal Arg.
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- if (Flags.isByVal()) {
- assert(IsO32 &&
- "No support for ByVal args by ABIs other than O32 yet.");
- assert(Flags.getByValSize() &&
- "ByVal args of size 0 should have been ignored by front-end.");
- WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI, MFI,
- DAG, Arg, VA, Flags, getPointerTy(), Subtarget->isLittle());
- continue;
- }
-
// Create the frame index object for this incoming parameter
LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), true);
@@ -2232,6 +2471,45 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
}
}
+// Create frame object on stack and copy registers used for byval passing to it.
+static unsigned
+CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
+ std::vector<SDValue>& OutChains, SelectionDAG &DAG,
+ const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ MachineFrameInfo *MFI, bool IsRegLoc,
+ SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI,
+ EVT PtrTy) {
+ const unsigned *Reg = Mips64IntRegs + 8;
+ int FOOffset; // Frame object offset from virtual frame pointer.
+
+ if (IsRegLoc) {
+ Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, VA.getLocReg());
+ FOOffset = (Reg - Mips64IntRegs) * 8 - 8 * 8;
+ }
+ else
+ FOOffset = VA.getLocMemOffset();
+
+ // Create frame object.
+ unsigned NumRegs = (Flags.getByValSize() + 7) / 8;
+ unsigned LastFI = MFI->CreateFixedObject(NumRegs * 8, FOOffset, true);
+ SDValue FIN = DAG.getFrameIndex(LastFI, PtrTy);
+ InVals.push_back(FIN);
+
+ // Copy arg registers.
+ for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs);
+ ++Reg, ++I) {
+ unsigned VReg = AddLiveIn(MF, *Reg, Mips::CPU64RegsRegisterClass);
+ SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN,
+ DAG.getConstant(I * 8, PtrTy));
+ SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64),
+ StorePtr, MachinePointerInfo(), false,
+ false, 0);
+ OutChains.push_back(Store);
+ }
+
+ return LastFI;
+}
+
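The FOOffset computation in CopyMips64ByValRegs places the fixed object so that it overlays the callee's 64-byte argument-register save area: a byval that starts in the k-th argument register gets a frame object at k*8 - 64 from the virtual frame pointer. A small sketch of that arithmetic (the register index is illustrative):

#include <cstdio>

// Sketch of the frame-object offset computed in CopyMips64ByValRegs.
// RegIdx is the index of the first arg register holding the byval
// (0 = $a0_64); the N32/64 register save area covers 8 * 8 = 64 bytes.
int main() {
  for (unsigned RegIdx = 0; RegIdx < 8; ++RegIdx) {
    int FOOffset = int(RegIdx) * 8 - 8 * 8;  // offset from virtual frame pointer
    std::printf("byval starting in arg reg %u -> fixed object at %d\n",
                RegIdx, FOOffset);
  }
}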
/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments places on the stack.
SDValue
@@ -2267,9 +2545,28 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
EVT ValVT = VA.getValVT();
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ bool IsRegLoc = VA.isRegLoc();
+
+ if (Flags.isByVal()) {
+ assert(Flags.getByValSize() &&
+ "ByVal args of size 0 should have been ignored by front-end.");
+ if (IsO32) {
+ unsigned NumWords = (Flags.getByValSize() + 3) / 4;
+ LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
+ true);
+ SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
+ InVals.push_back(FIN);
+ ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags);
+ } else // N32/64
+ LastFI = CopyMips64ByValRegs(MF, Chain, dl, OutChains, DAG, VA, Flags,
+ MFI, IsRegLoc, InVals, MipsFI,
+ getPointerTy());
+ continue;
+ }
// Arguments stored on registers
- if (VA.isRegLoc()) {
+ if (IsRegLoc) {
EVT RegVT = VA.getLocVT();
unsigned ArgReg = VA.getLocReg();
TargetRegisterClass *RC = 0;
@@ -2325,23 +2622,6 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// sanity check
assert(VA.isMemLoc());
- ISD::ArgFlagsTy Flags = Ins[i].Flags;
-
- if (Flags.isByVal()) {
- assert(IsO32 &&
- "No support for ByVal args by ABIs other than O32 yet.");
- assert(Flags.getByValSize() &&
- "ByVal args of size 0 should have been ignored by front-end.");
- unsigned NumWords = (Flags.getByValSize() + 3) / 4;
- LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
- true);
- SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
- InVals.push_back(FIN);
- ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags);
-
- continue;
- }
-
// The stack pointer offset is relative to the caller stack frame.
LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), true);
@@ -2367,24 +2647,40 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
- if (isVarArg && IsO32) {
+ if (isVarArg) {
+ unsigned NumOfRegs = IsO32 ? 4 : 8;
+ const unsigned *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
+ unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
+ int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot.
+ TargetRegisterClass *RC
+ = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass;
+ unsigned RegSize = RC->getSize();
+ int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;
+
+ // Offset of the first variable argument from stack pointer.
+ int FirstVaArgOffset;
+
+ if (IsO32 || (Idx == NumOfRegs)) {
+ FirstVaArgOffset =
+ (CCInfo.getNextStackOffset() + RegSize - 1) / RegSize * RegSize;
+ } else
+ FirstVaArgOffset = RegSlotOffset;
+
// Record the frame index of the first variable argument
// which is a value necessary to VASTART.
- unsigned NextStackOffset = CCInfo.getNextStackOffset();
- assert(NextStackOffset % 4 == 0 &&
- "NextStackOffset must be aligned to 4-byte boundaries.");
- LastFI = MFI->CreateFixedObject(4, NextStackOffset, true);
+ LastFI = MFI->CreateFixedObject(RegSize, FirstVaArgOffset, true);
MipsFI->setVarArgsFrameIndex(LastFI);
- // If NextStackOffset is smaller than o32's 16-byte reserved argument area,
- // copy the integer registers that have not been used for argument passing
- // to the caller's stack frame.
- for (; NextStackOffset < 16; NextStackOffset += 4) {
- TargetRegisterClass *RC = Mips::CPURegsRegisterClass;
- unsigned Idx = NextStackOffset / 4;
- unsigned Reg = AddLiveIn(DAG.getMachineFunction(), O32IntRegs[Idx], RC);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, MVT::i32);
- LastFI = MFI->CreateFixedObject(4, NextStackOffset, true);
+ // Copy the integer registers that have not been used for argument passing
+ // to the argument register save area. For O32, the save area is allocated
+ // in the caller's stack frame, while for N32/64, it is allocated in the
+ // callee's stack frame.
+ for (int StackOffset = RegSlotOffset;
+ Idx < NumOfRegs; ++Idx, StackOffset += RegSize) {
+ unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegs[Idx], RC);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
+ MVT::getIntegerVT(RegSize * 8));
+ LastFI = MFI->CreateFixedObject(RegSize, StackOffset, true);
SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
MachinePointerInfo(),
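The vararg path above records where the first variable argument lives: O32 rounds the next stack offset up to the register size, while N32/64 points at the first unused slot of the register save area at offsets -64..0 unless every register has been used. A sketch of that offset computation with hypothetical inputs:

#include <cstdio>

// Sketch of the first-vararg offset logic in LowerFormalArguments.
// Idx is the first unallocated argument register and NextStackOffset the
// bytes of incoming args already assigned; both values are hypothetical.
static int firstVaArgOffset(bool IsO32, unsigned Idx, unsigned NextStackOffset) {
  unsigned NumOfRegs = IsO32 ? 4 : 8;
  unsigned RegSize = IsO32 ? 4 : 8;
  int FirstRegSlotOffset = IsO32 ? 0 : -64;      // offset of $a0's slot
  int RegSlotOffset = FirstRegSlotOffset + int(Idx) * int(RegSize);

  if (IsO32 || Idx == NumOfRegs)                 // no registers left: spill area
    return int((NextStackOffset + RegSize - 1) / RegSize * RegSize);
  return RegSlotOffset;                          // N32/64: first unused reg slot
}

int main() {
  std::printf("O32, Idx=2, NSO=10 -> %d\n", firstVaArgOffset(true, 2, 10));   // 12
  std::printf("N64, Idx=3, NSO=0  -> %d\n", firstVaArgOffset(false, 3, 0));   // -40
  std::printf("N64, Idx=8, NSO=72 -> %d\n", firstVaArgOffset(false, 8, 72));  // 72
}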
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index 271d248..8fa3052 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -30,86 +30,6 @@ namespace Mips {
unsigned GetOppositeBranchOpc(unsigned Opc);
}
-/// MipsII - This namespace holds all of the target specific flags that
-/// instruction info tracks.
-///
-namespace MipsII {
- /// Target Operand Flag enum.
- enum TOF {
- //===------------------------------------------------------------------===//
- // Mips Specific MachineOperand flags.
-
- MO_NO_FLAG,
-
- /// MO_GOT - Represents the offset into the global offset table at which
- /// the address the relocation entry symbol resides during execution.
- MO_GOT,
-
- /// MO_GOT_CALL - Represents the offset into the global offset table at
- /// which the address of a call site relocation entry symbol resides
- /// during execution. This is different from the above since this flag
- /// can only be present in call instructions.
- MO_GOT_CALL,
-
- /// MO_GPREL - Represents the offset from the current gp value to be used
- /// for the relocatable object file being produced.
- MO_GPREL,
-
- /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
- /// address.
- MO_ABS_HI,
- MO_ABS_LO,
-
- /// MO_TLSGD - Represents the offset into the global offset table at which
- // the module ID and TSL block offset reside during execution (General
- // Dynamic TLS).
- MO_TLSGD,
-
- /// MO_GOTTPREL - Represents the offset from the thread pointer (Initial
- // Exec TLS).
- MO_GOTTPREL,
-
- /// MO_TPREL_HI/LO - Represents the hi and low part of the offset from
- // the thread pointer (Local Exec TLS).
- MO_TPREL_HI,
- MO_TPREL_LO,
-
- // N32/64 Flags.
- MO_GPOFF_HI,
- MO_GPOFF_LO,
- MO_GOT_DISP,
- MO_GOT_PAGE,
- MO_GOT_OFST
- };
-
- enum {
- //===------------------------------------------------------------------===//
- // Instruction encodings. These are the standard/most common forms for
- // Mips instructions.
- //
-
- // Pseudo - This represents an instruction that is a pseudo instruction
- // or one that has not been implemented yet. It is illegal to code generate
- // it, but tolerated for intermediate implementation stages.
- Pseudo = 0,
-
- /// FrmR - This form is for instructions of the format R.
- FrmR = 1,
- /// FrmI - This form is for instructions of the format I.
- FrmI = 2,
- /// FrmJ - This form is for instructions of the format J.
- FrmJ = 3,
- /// FrmFR - This form is for instructions of the format FR.
- FrmFR = 4,
- /// FrmFI - This form is for instructions of the format FI.
- FrmFI = 5,
- /// FrmOther - This form is for instructions that have no specific format.
- FrmOther = 6,
-
- FormMask = 15
- };
-}
-
class MipsInstrInfo : public MipsGenInstrInfo {
MipsTargetMachine &TM;
bool IsN64;
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index 1cc3841..5dca9b6 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -39,8 +39,8 @@ def SDT_MipsDivRem : SDTypeProfile<0, 2,
def SDT_MipsThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
-def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
- SDTCisVT<1, iPTR>]>;
+def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>,
+ SDTCisSameAs<0, 1>]>;
def SDT_Sync : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def SDT_Ext : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
@@ -138,8 +138,15 @@ def NotN64 : Predicate<"!Subtarget.isABI_N64()">;
//===----------------------------------------------------------------------===//
// Instruction operand types
-def brtarget : Operand<OtherVT>;
+def jmptarget : Operand<OtherVT> {
+ let EncoderMethod = "getJumpTargetOpValue";
+}
+def brtarget : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValue";
+ let OperandType = "OPERAND_PCREL";
+}
def calltarget : Operand<i32>;
+def calltarget64: Operand<i64>;
def simm16 : Operand<i32>;
def simm16_64 : Operand<i64>;
def shamt : Operand<i32>;
@@ -167,6 +174,12 @@ def mem_ea : Operand<i32> {
let EncoderMethod = "getMemEncoding";
}
+def mem_ea_64 : Operand<i64> {
+ let PrintMethod = "printMemOperandEA";
+ let MIOperandInfo = (ops CPU64Regs, simm16_64);
+ let EncoderMethod = "getMemEncoding";
+}
+
// size operand of ext instruction
def size_ext : Operand<i32> {
let EncoderMethod = "getSizeExtEncoding";
@@ -442,7 +455,7 @@ class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op, Operand Od,
// Unconditional branch
let isBranch=1, isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
class JumpFJ<bits<6> op, string instr_asm>:
- FJ<op, (outs), (ins brtarget:$target),
+ FJ<op, (outs), (ins jmptarget:$target),
!strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch>;
let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1 in
@@ -525,9 +538,9 @@ class MoveToLOHI<bits<6> func, string instr_asm, RegisterClass RC,
let Defs = DefRegs;
}
-class EffectiveAddress<string instr_asm> :
- FMem<0x09, (outs CPURegs:$rt), (ins mem_ea:$addr),
- instr_asm, [(set CPURegs:$rt, addr:$addr)], IIAlu>;
+class EffectiveAddress<string instr_asm, RegisterClass RC, Operand Mem> :
+ FMem<0x09, (outs RC:$rt), (ins Mem:$addr),
+ instr_asm, [(set RC:$rt, addr:$addr)], IIAlu>;
// Count Leading Ones/Zeros in Word
class CountLeading0<bits<6> func, string instr_asm, RegisterClass RC>:
@@ -587,20 +600,41 @@ class ExtIns<bits<6> _funct, string instr_asm, dag outs, dag ins,
}
// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
-class Atomic2Ops<PatFrag Op, string Opstr> :
- MipsPseudo<(outs CPURegs:$dst), (ins CPURegs:$ptr, CPURegs:$incr),
+class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC,
+ RegisterClass PRC> :
+ MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
!strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"),
- [(set CPURegs:$dst,
- (Op CPURegs:$ptr, CPURegs:$incr))]>;
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
+
+multiclass Atomic2Ops32<PatFrag Op, string Opstr> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]>;
+}
// Atomic Compare & Swap.
-class AtomicCmpSwap<PatFrag Op, string Width> :
- MipsPseudo<(outs CPURegs:$dst),
- (ins CPURegs:$ptr, CPURegs:$cmp, CPURegs:$swap),
- !strconcat("atomic_cmp_swap_", Width,
- "\t$dst, $ptr, $cmp, $swap"),
- [(set CPURegs:$dst,
- (Op CPURegs:$ptr, CPURegs:$cmp, CPURegs:$swap))]>;
+class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC,
+ RegisterClass PRC> :
+ MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
+ !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"),
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
+
+multiclass AtomicCmpSwap32<PatFrag Op, string Width> {
+ def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]>;
+}
+
+class LLBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
+ FMem<Opc, (outs RC:$rt), (ins Mem:$addr),
+ !strconcat(opstring, "\t$rt, $addr"), [], IILoad> {
+ let mayLoad = 1;
+}
+
+class SCBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
+ FMem<Opc, (outs RC:$dst), (ins RC:$rt, Mem:$addr),
+ !strconcat(opstring, "\t$rt, $addr"), [], IIStore> {
+ let mayStore = 1;
+ let Constraints = "$rt = $dst";
+}
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -636,32 +670,32 @@ def CPLOAD : MipsPseudo<(outs), (ins CPURegs:$picreg), ".cpload\t$picreg", []>;
def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc), ".cprestore\t$loc", []>;
let usesCustomInserter = 1 in {
- def ATOMIC_LOAD_ADD_I8 : Atomic2Ops<atomic_load_add_8, "load_add_8">;
- def ATOMIC_LOAD_ADD_I16 : Atomic2Ops<atomic_load_add_16, "load_add_16">;
- def ATOMIC_LOAD_ADD_I32 : Atomic2Ops<atomic_load_add_32, "load_add_32">;
- def ATOMIC_LOAD_SUB_I8 : Atomic2Ops<atomic_load_sub_8, "load_sub_8">;
- def ATOMIC_LOAD_SUB_I16 : Atomic2Ops<atomic_load_sub_16, "load_sub_16">;
- def ATOMIC_LOAD_SUB_I32 : Atomic2Ops<atomic_load_sub_32, "load_sub_32">;
- def ATOMIC_LOAD_AND_I8 : Atomic2Ops<atomic_load_and_8, "load_and_8">;
- def ATOMIC_LOAD_AND_I16 : Atomic2Ops<atomic_load_and_16, "load_and_16">;
- def ATOMIC_LOAD_AND_I32 : Atomic2Ops<atomic_load_and_32, "load_and_32">;
- def ATOMIC_LOAD_OR_I8 : Atomic2Ops<atomic_load_or_8, "load_or_8">;
- def ATOMIC_LOAD_OR_I16 : Atomic2Ops<atomic_load_or_16, "load_or_16">;
- def ATOMIC_LOAD_OR_I32 : Atomic2Ops<atomic_load_or_32, "load_or_32">;
- def ATOMIC_LOAD_XOR_I8 : Atomic2Ops<atomic_load_xor_8, "load_xor_8">;
- def ATOMIC_LOAD_XOR_I16 : Atomic2Ops<atomic_load_xor_16, "load_xor_16">;
- def ATOMIC_LOAD_XOR_I32 : Atomic2Ops<atomic_load_xor_32, "load_xor_32">;
- def ATOMIC_LOAD_NAND_I8 : Atomic2Ops<atomic_load_nand_8, "load_nand_8">;
- def ATOMIC_LOAD_NAND_I16 : Atomic2Ops<atomic_load_nand_16, "load_nand_16">;
- def ATOMIC_LOAD_NAND_I32 : Atomic2Ops<atomic_load_nand_32, "load_nand_32">;
-
- def ATOMIC_SWAP_I8 : Atomic2Ops<atomic_swap_8, "swap_8">;
- def ATOMIC_SWAP_I16 : Atomic2Ops<atomic_swap_16, "swap_16">;
- def ATOMIC_SWAP_I32 : Atomic2Ops<atomic_swap_32, "swap_32">;
-
- def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<atomic_cmp_swap_8, "8">;
- def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<atomic_cmp_swap_16, "16">;
- def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, "32">;
+ defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8, "load_add_8">;
+ defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16, "load_add_16">;
+ defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32, "load_add_32">;
+ defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8, "load_sub_8">;
+ defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16, "load_sub_16">;
+ defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32, "load_sub_32">;
+ defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8, "load_and_8">;
+ defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16, "load_and_16">;
+ defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32, "load_and_32">;
+ defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8, "load_or_8">;
+ defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16, "load_or_16">;
+ defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32, "load_or_32">;
+ defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8, "load_xor_8">;
+ defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16, "load_xor_16">;
+ defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32, "load_xor_32">;
+ defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8, "load_nand_8">;
+ defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16, "load_nand_16">;
+ defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32, "load_nand_32">;
+
+ defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8, "swap_8">;
+ defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16, "swap_16">;
+ defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32, "swap_32">;
+
+ defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8, "8">;
+ defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16, "16">;
+ defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32, "32">;
}
//===----------------------------------------------------------------------===//
@@ -738,12 +772,10 @@ def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
}
/// Load-linked, Store-conditional
-let mayLoad = 1 in
- def LL : FMem<0x30, (outs CPURegs:$rt), (ins mem:$addr),
- "ll\t$rt, $addr", [], IILoad>;
-let mayStore = 1, Constraints = "$rt = $dst" in
- def SC : FMem<0x38, (outs CPURegs:$dst), (ins CPURegs:$rt, mem:$addr),
- "sc\t$rt, $addr", [], IIStore>;
+def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>;
+def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]>;
+def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>;
+def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]>;
/// Jump and Branch Instructions
def J : JumpFJ<0x02, "j">;
@@ -798,13 +830,13 @@ let addr=0 in
// instructions. The same not happens for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
-def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr">;
+def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
// DynAlloc node points to dynamically allocated stack space.
// $sp is added to the list of implicitly used registers to prevent dead code
// elimination from removing instructions that modify $sp.
let Uses = [SP] in
-def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr">;
+def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
// MADD*/MSUB*
def MADD : MArithR<0, "madd", MipsMAdd, 1>;
diff --git a/lib/Target/Mips/MipsMCInstLower.cpp b/lib/Target/Mips/MipsMCInstLower.cpp
index 6c0e4f6..1fab52c 100644
--- a/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/lib/Target/Mips/MipsMCInstLower.cpp
@@ -15,6 +15,7 @@
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
#include "MipsMCInstLower.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
diff --git a/lib/Target/Mips/MipsMachineFunction.h b/lib/Target/Mips/MipsMachineFunction.h
index be27606..bc30b6b 100644
--- a/lib/Target/Mips/MipsMachineFunction.h
+++ b/lib/Target/Mips/MipsMachineFunction.h
@@ -51,16 +51,12 @@ private:
mutable int DynAllocFI; // Frame index of dynamically allocated stack area.
unsigned MaxCallFrameSize;
- // Size of area on callee's stack frame which is used to save va_arg or
- // byval arguments passed in registers.
- unsigned RegSaveAreaSize;
-
public:
MipsFunctionInfo(MachineFunction& MF)
: MF(MF), SRetReturnReg(0), GlobalBaseReg(0),
VarArgsFrameIndex(0), InArgFIRange(std::make_pair(-1, 0)),
OutArgFIRange(std::make_pair(-1, 0)), GPFI(0), DynAllocFI(0),
- MaxCallFrameSize(0), RegSaveAreaSize(0)
+ MaxCallFrameSize(0)
{}
bool isInArgFI(int FI) const {
@@ -104,11 +100,6 @@ public:
unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
-
- unsigned getRegSaveAreaSize() const { return RegSaveAreaSize; }
- void setRegSaveAreaSize(unsigned S) {
- if (RegSaveAreaSize < S) RegSaveAreaSize = S;
- }
};
} // end of namespace llvm
diff --git a/lib/Target/Mips/TargetInfo/LLVMBuild.txt b/lib/Target/Mips/TargetInfo/LLVMBuild.txt
index e8035af..90ae260 100644
--- a/lib/Target/Mips/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/Mips/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = MipsInfo
parent = Mips
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = Mips
diff --git a/lib/Target/PTX/CMakeLists.txt b/lib/Target/PTX/CMakeLists.txt
index 46a458c..6709c1b 100644
--- a/lib/Target/PTX/CMakeLists.txt
+++ b/lib/Target/PTX/CMakeLists.txt
@@ -36,6 +36,7 @@ add_llvm_library_dependencies(LLVMPTXCodeGen
LLVMSelectionDAG
LLVMSupport
LLVMTarget
+ LLVMTransformUtils
)
add_subdirectory(TargetInfo)
diff --git a/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp b/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
index aabb404..2f6c92d 100644
--- a/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
+++ b/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
@@ -96,9 +96,23 @@ void PTXInstPrinter::printCall(const MCInst *MI, raw_ostream &O) {
O << "), ";
}
- O << *(MI->getOperand(Index++).getExpr()) << ", (";
-
+ const MCExpr* Expr = MI->getOperand(Index++).getExpr();
unsigned NumArgs = MI->getOperand(Index++).getImm();
+
+ // if the function call is to printf or puts, change to vprintf
+ if (const MCSymbolRefExpr *SymRefExpr = dyn_cast<MCSymbolRefExpr>(Expr)) {
+ const MCSymbol &Sym = SymRefExpr->getSymbol();
+ if (Sym.getName() == "printf" || Sym.getName() == "puts") {
+ O << "vprintf";
+ } else {
+ O << Sym.getName();
+ }
+ } else {
+ O << *Expr;
+ }
+
+ O << ", (";
+
if (NumArgs > 0) {
printOperand(MI, Index++, O);
for (unsigned i = 1; i < NumArgs; ++i) {
diff --git a/lib/Target/PTX/LLVMBuild.txt b/lib/Target/PTX/LLVMBuild.txt
index 27807e6..22c70de 100644
--- a/lib/Target/PTX/LLVMBuild.txt
+++ b/lib/Target/PTX/LLVMBuild.txt
@@ -25,6 +25,6 @@ has_asmprinter = 1
type = Library
name = PTXCodeGen
parent = PTX
-required_libraries = Analysis AsmPrinter CodeGen Core MC PTXDesc PTXInfo Scalar SelectionDAG Support Target TransformUtils
+required_libraries = Analysis AsmPrinter CodeGen Core MC PTXDesc PTXInfo SelectionDAG Support Target TransformUtils
add_to_library_groups = PTX
diff --git a/lib/Target/PTX/MCTargetDesc/CMakeLists.txt b/lib/Target/PTX/MCTargetDesc/CMakeLists.txt
index 811ef4b..94dbcee 100644
--- a/lib/Target/PTX/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/PTX/MCTargetDesc/CMakeLists.txt
@@ -5,8 +5,8 @@ add_llvm_library(LLVMPTXDesc
add_llvm_library_dependencies(LLVMPTXDesc
LLVMMC
- LLVMPTXInfo
LLVMPTXAsmPrinter
+ LLVMPTXInfo
LLVMSupport
)
diff --git a/lib/Target/PTX/PTXAsmPrinter.cpp b/lib/Target/PTX/PTXAsmPrinter.cpp
index 45a6afc..bdf238b 100644
--- a/lib/Target/PTX/PTXAsmPrinter.cpp
+++ b/lib/Target/PTX/PTXAsmPrinter.cpp
@@ -165,6 +165,11 @@ void PTXAsmPrinter::EmitStartOfAsmFile(Module &M)
OutStreamer.AddBlankLine();
+ // declare external functions
+ for (Module::const_iterator i = M.begin(), e = M.end();
+ i != e; ++i)
+ EmitFunctionDeclaration(i);
+
// declare global variables
for (Module::const_global_iterator i = M.global_begin(), e = M.global_end();
i != e; ++i)
@@ -454,6 +459,31 @@ void PTXAsmPrinter::EmitFunctionEntryLabel() {
OutStreamer.EmitRawText(os.str());
}
+void PTXAsmPrinter::EmitFunctionDeclaration(const Function* func)
+{
+ const PTXSubtarget& ST = TM.getSubtarget<PTXSubtarget>();
+
+ std::string decl = "";
+
+ // hard-coded emission of extern vprintf function
+
+ if (func->getName() == "printf" || func->getName() == "puts") {
+ decl += ".extern .func (.param .b32 __param_1) vprintf (.param .b";
+ if (ST.is64Bit())
+ decl += "64";
+ else
+ decl += "32";
+ decl += " __param_2, .param .b";
+ if (ST.is64Bit())
+ decl += "64";
+ else
+ decl += "32";
+ decl += " __param_3)\n";
+ }
+
+ OutStreamer.EmitRawText(Twine(decl));
+}
+
unsigned PTXAsmPrinter::GetOrCreateSourceID(StringRef FileName,
StringRef DirName) {
// If FE did not provide a file name, then assume stdin.
diff --git a/lib/Target/PTX/PTXAsmPrinter.h b/lib/Target/PTX/PTXAsmPrinter.h
index 538c080..d5ea4db 100644
--- a/lib/Target/PTX/PTXAsmPrinter.h
+++ b/lib/Target/PTX/PTXAsmPrinter.h
@@ -47,7 +47,7 @@ public:
private:
void EmitVariableDeclaration(const GlobalVariable *gv);
- void EmitFunctionDeclaration();
+ void EmitFunctionDeclaration(const Function* func);
StringMap<unsigned> SourceIdMap;
}; // class PTXAsmPrinter
diff --git a/lib/Target/PTX/PTXISelLowering.cpp b/lib/Target/PTX/PTXISelLowering.cpp
index 3307d91..17191fb 100644
--- a/lib/Target/PTX/PTXISelLowering.cpp
+++ b/lib/Target/PTX/PTXISelLowering.cpp
@@ -20,6 +20,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
@@ -46,6 +47,11 @@ PTXTargetLowering::PTXTargetLowering(TargetMachine &TM)
setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
setMinFunctionAlignment(2);
+ // Let LLVM use loads/stores for all mem* operations
+ maxStoresPerMemcpy = 4096;
+ maxStoresPerMemmove = 4096;
+ maxStoresPerMemset = 4096;
+
////////////////////////////////////
/////////// Expansion //////////////
////////////////////////////////////
@@ -352,40 +358,101 @@ PTXTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction& MF = DAG.getMachineFunction();
- PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
- PTXParamManager &PM = MFI->getParamManager();
-
+ PTXMachineFunctionInfo *PTXMFI = MF.getInfo<PTXMachineFunctionInfo>();
+ PTXParamManager &PM = PTXMFI->getParamManager();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
assert(getTargetMachine().getSubtarget<PTXSubtarget>().callsAreHandled() &&
"Calls are not handled for the target device");
+ // Identify the callee function
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
+ const Function *function = cast<Function>(GV);
+
+ // allow non-device calls only for printf
+ bool isPrintf = function->getName() == "printf" || function->getName() == "puts";
+
+ assert((isPrintf || function->getCallingConv() == CallingConv::PTX_Device) &&
+ "PTX function calls must be to PTX device functions");
+
+ unsigned outSize = isPrintf ? 2 : Outs.size();
+
std::vector<SDValue> Ops;
// The layout of the ops will be [Chain, #Ins, Ins, Callee, #Outs, Outs]
- Ops.resize(Outs.size() + Ins.size() + 4);
+ Ops.resize(outSize + Ins.size() + 4);
Ops[0] = Chain;
// Identify the callee function
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
- assert(cast<Function>(GV)->getCallingConv() == CallingConv::PTX_Device &&
- "PTX function calls must be to PTX device functions");
Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
Ops[Ins.size()+2] = Callee;
- // Generate STORE_PARAM nodes for each function argument. In PTX, function
- // arguments are explicitly stored into .param variables and passed as
- // arguments. There is no register/stack-based calling convention in PTX.
- Ops[Ins.size()+3] = DAG.getTargetConstant(OutVals.size(), MVT::i32);
- for (unsigned i = 0; i != OutVals.size(); ++i) {
- unsigned Size = OutVals[i].getValueType().getSizeInBits();
- unsigned Param = PM.addLocalParam(Size);
- const std::string &ParamName = PM.getParamName(Param);
- SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
- MVT::Other);
+ // #Outs
+ Ops[Ins.size()+3] = DAG.getTargetConstant(outSize, MVT::i32);
+
+ if (isPrintf) {
+ // first argument is the address of the global string variable in memory
+ unsigned Param0 = PM.addLocalParam(getPointerTy().getSizeInBits());
+ SDValue ParamValue0 = DAG.getTargetExternalSymbol(PM.getParamName(Param0).c_str(),
+ MVT::Other);
Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
- ParamValue, OutVals[i]);
- Ops[i+Ins.size()+4] = ParamValue;
- }
+ ParamValue0, OutVals[0]);
+ Ops[Ins.size()+4] = ParamValue0;
+
+ // alignment is the maximum size of all the arguments
+ unsigned alignment = 0;
+ for (unsigned i = 1; i < OutVals.size(); ++i) {
+ alignment = std::max(alignment,
+ OutVals[i].getValueType().getSizeInBits());
+ }
+
+ // size is the alignment multiplied by the number of arguments
+ unsigned size = alignment * (OutVals.size() - 1);
+
+ // second argument is the address of the stack object (unless no arguments)
+ unsigned Param1 = PM.addLocalParam(getPointerTy().getSizeInBits());
+ SDValue ParamValue1 = DAG.getTargetExternalSymbol(PM.getParamName(Param1).c_str(),
+ MVT::Other);
+ Ops[Ins.size()+5] = ParamValue1;
+
+ if (size > 0)
+ {
+ // create a local stack object to store the arguments
+ unsigned StackObject = MFI->CreateStackObject(size / 8, alignment / 8, false);
+ SDValue FrameIndex = DAG.getFrameIndex(StackObject, getPointerTy());
+
+ // store each of the arguments to the stack in turn
+ for (unsigned int i = 1; i != OutVals.size(); i++) {
+ SDValue FrameAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FrameIndex, DAG.getTargetConstant((i - 1) * 8, getPointerTy()));
+ Chain = DAG.getStore(Chain, dl, OutVals[i], FrameAddr,
+ MachinePointerInfo(),
+ false, false, 0);
+ }
+ // copy the address of the local frame index to get the address in non-local space
+ SDValue genericAddr = DAG.getNode(PTXISD::COPY_ADDRESS, dl, getPointerTy(), FrameIndex);
+
+ // store this address in the second argument
+ Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain, ParamValue1, genericAddr);
+ }
+ }
+ else
+ {
+ // Generate STORE_PARAM nodes for each function argument. In PTX, function
+ // arguments are explicitly stored into .param variables and passed as
+ // arguments. There is no register/stack-based calling convention in PTX.
+ for (unsigned i = 0; i != OutVals.size(); ++i) {
+ unsigned Size = OutVals[i].getValueType().getSizeInBits();
+ unsigned Param = PM.addLocalParam(Size);
+ const std::string &ParamName = PM.getParamName(Param);
+ SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
+ MVT::Other);
+ Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
+ ParamValue, OutVals[i]);
+ Ops[i+Ins.size()+4] = ParamValue;
+ }
+ }
+
std::vector<SDValue> InParams;
// Generate list of .param variables to hold the return value(s).
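In the printf path above, vprintf receives two parameters: the format string and the address of a stack buffer into which the remaining arguments are stored at 8-byte strides, with the buffer sized from the widest argument. A sketch of that sizing; the argument bit-widths are hypothetical:

#include <algorithm>
#include <cstdio>
#include <vector>

// Sketch of the vprintf argument-buffer sizing in the printf call path.
// ArgBits lists the bit widths of the varargs after the format string;
// the values are hypothetical.
int main() {
  std::vector<unsigned> ArgBits = {32, 64, 32};

  unsigned alignment = 0;
  for (unsigned Bits : ArgBits)
    alignment = std::max(alignment, Bits);       // widest argument, in bits
  unsigned size = alignment * ArgBits.size();    // buffer size, in bits

  std::printf("stack object: %u bytes, %u-byte aligned\n",
              size / 8, alignment / 8);
  for (unsigned i = 0; i < ArgBits.size(); ++i)  // each arg stored 8 bytes apart
    std::printf("  arg %u stored at offset %u\n", i + 1, i * 8);
}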
diff --git a/lib/Target/PTX/PTXInstrInfo.td b/lib/Target/PTX/PTXInstrInfo.td
index fbddac5..bcd5bcf 100644
--- a/lib/Target/PTX/PTXInstrInfo.td
+++ b/lib/Target/PTX/PTXInstrInfo.td
@@ -680,6 +680,12 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOVaddr64
: InstPTX<(outs RegI64:$d), (ins i64imm:$a), "mov.u64\t$d, $a",
[(set RegI64:$d, (PTXcopyaddress tglobaladdr:$a))]>;
+ def MOVframe32
+ : InstPTX<(outs RegI32:$d), (ins i32imm:$a), "cvta.local.u32\t$d, $a",
+ [(set RegI32:$d, (PTXcopyaddress frameindex:$a))]>;
+ def MOVframe64
+ : InstPTX<(outs RegI64:$d), (ins i64imm:$a), "cvta.local.u64\t$d, $a",
+ [(set RegI64:$d, (PTXcopyaddress frameindex:$a))]>;
}
// PTX cvt instructions
diff --git a/lib/Target/PTX/TargetInfo/LLVMBuild.txt b/lib/Target/PTX/TargetInfo/LLVMBuild.txt
index f35c237..8e5285a 100644
--- a/lib/Target/PTX/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/PTX/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = PTXInfo
parent = PTX
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = PTX
diff --git a/lib/Target/PowerPC/TargetInfo/LLVMBuild.txt b/lib/Target/PowerPC/TargetInfo/LLVMBuild.txt
index 1f5d3e7..f51b417 100644
--- a/lib/Target/PowerPC/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/PowerPC/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = PowerPCInfo
parent = PowerPC
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = PowerPC
diff --git a/lib/Target/Sparc/TargetInfo/LLVMBuild.txt b/lib/Target/Sparc/TargetInfo/LLVMBuild.txt
index 22f4e1f..81c9032 100644
--- a/lib/Target/Sparc/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/Sparc/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = SparcInfo
parent = Sparc
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = Sparc
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index b590199..4542d4b 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -62,6 +62,8 @@ add_llvm_library_dependencies(LLVMX86CodeGen
LLVMTarget
LLVMX86AsmPrinter
LLVMX86Desc
+ LLVMX86Info
+ LLVMX86Utils
)
add_subdirectory(AsmParser)
diff --git a/lib/Target/X86/MCTargetDesc/CMakeLists.txt b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
index 8721912..264e791 100644
--- a/lib/Target/X86/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
@@ -10,7 +10,6 @@ add_llvm_library_dependencies(LLVMX86Desc
LLVMMC
LLVMSupport
LLVMX86AsmPrinter
- LLVMX86AsmPrinter
LLVMX86Info
)
diff --git a/lib/Target/X86/TargetInfo/LLVMBuild.txt b/lib/Target/X86/TargetInfo/LLVMBuild.txt
index 6b2635b..ee015bd 100644
--- a/lib/Target/X86/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/X86/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = X86Info
parent = X86
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = X86
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 93f7de8..4e11131 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -924,10 +924,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
- // Can turn SHL into an integer multiply.
- setOperationAction(ISD::SHL, MVT::v4i32, Custom);
- setOperationAction(ISD::SHL, MVT::v16i8, Custom);
-
setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
@@ -948,25 +944,41 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
+  // FIXME: these should be Legal but that's only for the case where
+  // the index is constant. For now custom expand to deal with that.
if (Subtarget->is64Bit()) {
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
}
}
if (Subtarget->hasXMMInt()) {
- setOperationAction(ISD::SRL, MVT::v2i64, Custom);
- setOperationAction(ISD::SRL, MVT::v4i32, Custom);
- setOperationAction(ISD::SRL, MVT::v16i8, Custom);
setOperationAction(ISD::SRL, MVT::v8i16, Custom);
+ setOperationAction(ISD::SRL, MVT::v16i8, Custom);
- setOperationAction(ISD::SHL, MVT::v2i64, Custom);
- setOperationAction(ISD::SHL, MVT::v4i32, Custom);
setOperationAction(ISD::SHL, MVT::v8i16, Custom);
+ setOperationAction(ISD::SHL, MVT::v16i8, Custom);
- setOperationAction(ISD::SRA, MVT::v4i32, Custom);
setOperationAction(ISD::SRA, MVT::v8i16, Custom);
setOperationAction(ISD::SRA, MVT::v16i8, Custom);
+
+ if (Subtarget->hasAVX2()) {
+ setOperationAction(ISD::SRL, MVT::v2i64, Legal);
+ setOperationAction(ISD::SRL, MVT::v4i32, Legal);
+
+ setOperationAction(ISD::SHL, MVT::v2i64, Legal);
+ setOperationAction(ISD::SHL, MVT::v4i32, Legal);
+
+ setOperationAction(ISD::SRA, MVT::v4i32, Legal);
+ } else {
+ setOperationAction(ISD::SRL, MVT::v2i64, Custom);
+ setOperationAction(ISD::SRL, MVT::v4i32, Custom);
+
+ setOperationAction(ISD::SHL, MVT::v2i64, Custom);
+ setOperationAction(ISD::SHL, MVT::v4i32, Custom);
+
+ setOperationAction(ISD::SRA, MVT::v4i32, Custom);
+ }
}
if (Subtarget->hasSSE42() || Subtarget->hasAVX())
@@ -1009,18 +1021,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i8, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i16, Custom);
- setOperationAction(ISD::SRL, MVT::v4i64, Custom);
- setOperationAction(ISD::SRL, MVT::v8i32, Custom);
setOperationAction(ISD::SRL, MVT::v16i16, Custom);
setOperationAction(ISD::SRL, MVT::v32i8, Custom);
- setOperationAction(ISD::SHL, MVT::v4i64, Custom);
- setOperationAction(ISD::SHL, MVT::v8i32, Custom);
setOperationAction(ISD::SHL, MVT::v16i16, Custom);
setOperationAction(ISD::SHL, MVT::v32i8, Custom);
- setOperationAction(ISD::SRA, MVT::v8i32, Custom);
setOperationAction(ISD::SRA, MVT::v16i16, Custom);
+ setOperationAction(ISD::SRA, MVT::v32i8, Custom);
setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
@@ -1050,21 +1058,17 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::MUL, MVT::v4i64, Custom);
setOperationAction(ISD::MUL, MVT::v8i32, Legal);
setOperationAction(ISD::MUL, MVT::v16i16, Legal);
+ // Don't lower v32i8 because there is no 128-bit byte mul
setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
- setOperationAction(ISD::SHL, MVT::v4i32, Legal);
- setOperationAction(ISD::SHL, MVT::v2i64, Legal);
- setOperationAction(ISD::SRL, MVT::v4i32, Legal);
- setOperationAction(ISD::SRL, MVT::v2i64, Legal);
- setOperationAction(ISD::SRA, MVT::v4i32, Legal);
-
- setOperationAction(ISD::SHL, MVT::v8i32, Legal);
- setOperationAction(ISD::SHL, MVT::v4i64, Legal);
- setOperationAction(ISD::SRL, MVT::v8i32, Legal);
- setOperationAction(ISD::SRL, MVT::v4i64, Legal);
- setOperationAction(ISD::SRA, MVT::v8i32, Legal);
- // Don't lower v32i8 because there is no 128-bit byte mul
+ setOperationAction(ISD::SRL, MVT::v4i64, Legal);
+ setOperationAction(ISD::SRL, MVT::v8i32, Legal);
+
+ setOperationAction(ISD::SHL, MVT::v4i64, Legal);
+ setOperationAction(ISD::SHL, MVT::v8i32, Legal);
+
+ setOperationAction(ISD::SRA, MVT::v8i32, Legal);
} else {
setOperationAction(ISD::ADD, MVT::v4i64, Custom);
setOperationAction(ISD::ADD, MVT::v8i32, Custom);
@@ -1080,6 +1084,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::MUL, MVT::v8i32, Custom);
setOperationAction(ISD::MUL, MVT::v16i16, Custom);
// Don't lower v32i8 because there is no 128-bit byte mul
+
+ setOperationAction(ISD::SRL, MVT::v4i64, Custom);
+ setOperationAction(ISD::SRL, MVT::v8i32, Custom);
+
+ setOperationAction(ISD::SHL, MVT::v4i64, Custom);
+ setOperationAction(ISD::SHL, MVT::v8i32, Custom);
+
+ setOperationAction(ISD::SRA, MVT::v8i32, Custom);
}
// Custom lower several nodes for 256-bit types.
@@ -6613,7 +6625,6 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
unsigned NumElems = VT.getVectorNumElements();
- bool isMMX = VT.getSizeInBits() == 64;
bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
bool V1IsSplat = false;
@@ -6622,9 +6633,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
- // Shuffle operations on MMX not supported.
- if (isMMX)
- return Op;
+ assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
// Vector shuffle lowering takes 3 steps:
//
@@ -6636,7 +6645,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
// so the shuffle can be broken into other shuffles and the legalizer can
// try the lowering again.
//
- // The general ideia is that no vector_shuffle operation should be left to
+ // The general idea is that no vector_shuffle operation should be left to
// be matched during isel, all of them must be converted to a target specific
// node here.
@@ -6956,8 +6965,8 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
Op.getOperand(0)),
Op.getOperand(1));
return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
- } else if (VT == MVT::i32) {
- // ExtractPS works with constant index.
+ } else if (VT == MVT::i32 || VT == MVT::i64) {
+ // ExtractPS/pextrq works with constant index.
if (isa<ConstantSDNode>(Op.getOperand(1)))
return Op;
}
@@ -7096,7 +7105,8 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
// Create this as a scalar to vector..
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
- } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
+ } else if ((EltVT == MVT::i32 || EltVT == MVT::i64) &&
+ isa<ConstantSDNode>(N2)) {
// PINSR* works with constant index.
return Op;
}
@@ -9522,6 +9532,14 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
// Fix vector shift instructions where the last operand is a non-immediate
// i32 value.
+ case Intrinsic::x86_avx2_pslli_w:
+ case Intrinsic::x86_avx2_pslli_d:
+ case Intrinsic::x86_avx2_pslli_q:
+ case Intrinsic::x86_avx2_psrli_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ case Intrinsic::x86_avx2_psrai_w:
+ case Intrinsic::x86_avx2_psrai_d:
case Intrinsic::x86_sse2_pslli_w:
case Intrinsic::x86_sse2_pslli_d:
case Intrinsic::x86_sse2_pslli_q:
@@ -9569,6 +9587,30 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::x86_sse2_psrai_d:
NewIntNo = Intrinsic::x86_sse2_psra_d;
break;
+ case Intrinsic::x86_avx2_pslli_w:
+ NewIntNo = Intrinsic::x86_avx2_psll_w;
+ break;
+ case Intrinsic::x86_avx2_pslli_d:
+ NewIntNo = Intrinsic::x86_avx2_psll_d;
+ break;
+ case Intrinsic::x86_avx2_pslli_q:
+ NewIntNo = Intrinsic::x86_avx2_psll_q;
+ break;
+ case Intrinsic::x86_avx2_psrli_w:
+ NewIntNo = Intrinsic::x86_avx2_psrl_w;
+ break;
+ case Intrinsic::x86_avx2_psrli_d:
+ NewIntNo = Intrinsic::x86_avx2_psrl_d;
+ break;
+ case Intrinsic::x86_avx2_psrli_q:
+ NewIntNo = Intrinsic::x86_avx2_psrl_q;
+ break;
+ case Intrinsic::x86_avx2_psrai_w:
+ NewIntNo = Intrinsic::x86_avx2_psra_w;
+ break;
+ case Intrinsic::x86_avx2_psrai_d:
+ NewIntNo = Intrinsic::x86_avx2_psra_d;
+ break;
default: {
ShAmtVT = MVT::v2i32;
switch (IntNo) {
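The AVX2 cases added here share the existing fix-up with their SSE2 counterparts: when the "immediate" shift amount of a *slli/*srli/*srai intrinsic turns out to be a plain i32 value, the call is rewritten to the register-form intrinsic chosen just above and the amount is widened into a vector operand. A condensed, slightly simplified sketch of that shared tail, reusing local names (ShAmt, ShAmtVT, NewIntNo, VT, dl) that this hunk does not show:

    SDValue ShOps[4];
    ShOps[0] = ShAmt;                         // the non-immediate i32 amount
    ShOps[1] = DAG.getConstant(0, MVT::i32);  // upper bits of the count read as zero
    ShOps[2] = DAG.getUNDEF(MVT::i32);
    ShOps[3] = DAG.getUNDEF(MVT::i32);
    ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(NewIntNo, MVT::i32),
                       Op.getOperand(1), ShAmt);
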
@@ -10130,47 +10172,6 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
if (!Subtarget->hasXMMInt())
return SDValue();
- // Decompose 256-bit shifts into smaller 128-bit shifts.
- if (VT.getSizeInBits() == 256) {
- int NumElems = VT.getVectorNumElements();
- MVT EltVT = VT.getVectorElementType().getSimpleVT();
- EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
-
- // Extract the two vectors
- SDValue V1 = Extract128BitVector(R, DAG.getConstant(0, MVT::i32), DAG, dl);
- SDValue V2 = Extract128BitVector(R, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
-
- // Recreate the shift amount vectors
- SDValue Amt1, Amt2;
- if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
- // Constant shift amount
- SmallVector<SDValue, 4> Amt1Csts;
- SmallVector<SDValue, 4> Amt2Csts;
- for (int i = 0; i < NumElems/2; ++i)
- Amt1Csts.push_back(Amt->getOperand(i));
- for (int i = NumElems/2; i < NumElems; ++i)
- Amt2Csts.push_back(Amt->getOperand(i));
-
- Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
- &Amt1Csts[0], NumElems/2);
- Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
- &Amt2Csts[0], NumElems/2);
- } else {
- // Variable shift amount
- Amt1 = Extract128BitVector(Amt, DAG.getConstant(0, MVT::i32), DAG, dl);
- Amt2 = Extract128BitVector(Amt, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
- }
-
- // Issue new vector shifts for the smaller types
- V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
- V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
-
- // Concatenate the result back
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
- }
-
// Optimize shl/srl/sra with constant shift amount.
if (isSplatVector(Amt.getNode())) {
SDValue SclrAmt = Amt->getOperand(0);
@@ -10259,6 +10260,48 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
}
+
+ if (Subtarget->hasAVX2()) {
+ if (VT == MVT::v4i64 && Op.getOpcode() == ISD::SHL)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_pslli_q, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+
+ if (VT == MVT::v8i32 && Op.getOpcode() == ISD::SHL)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_pslli_d, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+
+ if (VT == MVT::v16i16 && Op.getOpcode() == ISD::SHL)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_pslli_w, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+
+ if (VT == MVT::v4i64 && Op.getOpcode() == ISD::SRL)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrli_q, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+
+ if (VT == MVT::v8i32 && Op.getOpcode() == ISD::SRL)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrli_d, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+
+ if (VT == MVT::v16i16 && Op.getOpcode() == ISD::SRL)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrli_w, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+
+ if (VT == MVT::v8i32 && Op.getOpcode() == ISD::SRA)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrai_d, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+
+ if (VT == MVT::v16i16 && Op.getOpcode() == ISD::SRA)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrai_w, MVT::i32),
+ R, DAG.getConstant(ShiftAmt, MVT::i32));
+ }
}
}
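Each of the eight early returns above builds the same node with a different intrinsic ID. Purely as an illustration (the helper name is invented; this refactoring is not part of the patch), the common shape is:

    static SDValue EmitVShiftByImm(unsigned IntrinsicID, DebugLoc dl, EVT VT,
                                   SDValue SrcOp, uint64_t ShiftAmt,
                                   SelectionDAG &DAG) {
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                         DAG.getConstant(IntrinsicID, MVT::i32),
                         SrcOp, DAG.getConstant(ShiftAmt, MVT::i32));
    }

    // e.g. the v4i64 SHL case would read:
    //   return EmitVShiftByImm(Intrinsic::x86_avx2_pslli_q, dl, VT, R,
    //                          ShiftAmt, DAG);
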
@@ -10328,6 +10371,48 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
R, DAG.getNode(ISD::ADD, dl, VT, R, R));
return R;
}
+
+ // Decompose 256-bit shifts into smaller 128-bit shifts.
+ if (VT.getSizeInBits() == 256) {
+ int NumElems = VT.getVectorNumElements();
+ MVT EltVT = VT.getVectorElementType().getSimpleVT();
+ EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
+
+ // Extract the two vectors
+ SDValue V1 = Extract128BitVector(R, DAG.getConstant(0, MVT::i32), DAG, dl);
+ SDValue V2 = Extract128BitVector(R, DAG.getConstant(NumElems/2, MVT::i32),
+ DAG, dl);
+
+ // Recreate the shift amount vectors
+ SDValue Amt1, Amt2;
+ if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
+ // Constant shift amount
+ SmallVector<SDValue, 4> Amt1Csts;
+ SmallVector<SDValue, 4> Amt2Csts;
+ for (int i = 0; i < NumElems/2; ++i)
+ Amt1Csts.push_back(Amt->getOperand(i));
+ for (int i = NumElems/2; i < NumElems; ++i)
+ Amt2Csts.push_back(Amt->getOperand(i));
+
+ Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
+ &Amt1Csts[0], NumElems/2);
+ Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
+ &Amt2Csts[0], NumElems/2);
+ } else {
+ // Variable shift amount
+ Amt1 = Extract128BitVector(Amt, DAG.getConstant(0, MVT::i32), DAG, dl);
+ Amt2 = Extract128BitVector(Amt, DAG.getConstant(NumElems/2, MVT::i32),
+ DAG, dl);
+ }
+
+ // Issue new vector shifts for the smaller types
+ V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
+ V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
+
+ // Concatenate the result back
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
+ }
+
return SDValue();
}
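Note the effect of moving this block: the 256-bit split is now the fallback of last resort, tried only after the constant-splat paths (including the new AVX2 intrinsics above) have failed, so on AVX2 targets a splat-count 256-bit shift stays a single instruction instead of being decomposed into two 128-bit halves.
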
@@ -10951,12 +11036,13 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PSIGNB: return "X86ISD::PSIGNB";
case X86ISD::PSIGNW: return "X86ISD::PSIGNW";
case X86ISD::PSIGND: return "X86ISD::PSIGND";
+ case X86ISD::BLENDV: return "X86ISD::BLENDV";
+ case X86ISD::FHADD: return "X86ISD::FHADD";
+ case X86ISD::FHSUB: return "X86ISD::FHSUB";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
- case X86ISD::FHADD: return "X86ISD::FHADD";
- case X86ISD::FHSUB: return "X86ISD::FHSUB";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
@@ -10996,6 +11082,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::XOR: return "X86ISD::XOR";
case X86ISD::AND: return "X86ISD::AND";
case X86ISD::ANDN: return "X86ISD::ANDN";
+ case X86ISD::BLSI: return "X86ISD::BLSI";
+ case X86ISD::BLSMSK: return "X86ISD::BLSMSK";
+ case X86ISD::BLSR: return "X86ISD::BLSR";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
@@ -13387,7 +13476,9 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
if (!Subtarget->hasXMMInt())
return SDValue();
- if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
+ if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
+ (!Subtarget->hasAVX2() ||
+ (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
return SDValue();
SDValue ShAmtOp = N->getOperand(1);
@@ -13460,6 +13551,18 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
ValOp, BaseShAmt);
+ if (VT == MVT::v4i64)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_pslli_q, MVT::i32),
+ ValOp, BaseShAmt);
+ if (VT == MVT::v8i32)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_pslli_d, MVT::i32),
+ ValOp, BaseShAmt);
+ if (VT == MVT::v16i16)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_pslli_w, MVT::i32),
+ ValOp, BaseShAmt);
break;
case ISD::SRA:
if (VT == MVT::v4i32)
@@ -13470,6 +13573,14 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
ValOp, BaseShAmt);
+ if (VT == MVT::v8i32)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrai_d, MVT::i32),
+ ValOp, BaseShAmt);
+ if (VT == MVT::v16i16)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrai_w, MVT::i32),
+ ValOp, BaseShAmt);
break;
case ISD::SRL:
if (VT == MVT::v2i64)
@@ -13484,6 +13595,18 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
ValOp, BaseShAmt);
+ if (VT == MVT::v4i64)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrli_q, MVT::i32),
+ ValOp, BaseShAmt);
+ if (VT == MVT::v8i32)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrli_d, MVT::i32),
+ ValOp, BaseShAmt);
+ if (VT == MVT::v16i16)
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::x86_avx2_psrli_w, MVT::i32),
+ ValOp, BaseShAmt);
break;
}
return SDValue();
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 102911f..9428fff 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -456,6 +456,9 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MOVZX64rr16, X86::MOVZX64rm16, 0 },
{ X86::MOVZX64rr32, X86::MOVZX64rm32, 0 },
{ X86::MOVZX64rr8, X86::MOVZX64rm8, 0 },
+ { X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 },
+ { X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 },
+ { X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 },
{ X86::PSHUFDri, X86::PSHUFDmi, TB_ALIGN_16 },
{ X86::PSHUFHWri, X86::PSHUFHWmi, TB_ALIGN_16 },
{ X86::PSHUFLWri, X86::PSHUFLWmi, TB_ALIGN_16 },
@@ -508,6 +511,9 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVZDI2PDIrr, X86::VMOVZDI2PDIrm, 0 },
{ X86::VMOVZQI2PQIrr, X86::VMOVZQI2PQIrm, 0 },
{ X86::VMOVZPQILo2PQIrr,X86::VMOVZPQILo2PQIrm, TB_ALIGN_16 },
+ { X86::VPABSBrr128, X86::VPABSBrm128, TB_ALIGN_16 },
+ { X86::VPABSDrr128, X86::VPABSDrm128, TB_ALIGN_16 },
+ { X86::VPABSWrr128, X86::VPABSWrm128, TB_ALIGN_16 },
{ X86::VPSHUFDri, X86::VPSHUFDmi, TB_ALIGN_16 },
{ X86::VPSHUFHWri, X86::VPSHUFHWmi, TB_ALIGN_16 },
{ X86::VPSHUFLWri, X86::VPSHUFLWmi, TB_ALIGN_16 },
@@ -526,7 +532,14 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVAPSYrr, X86::VMOVAPSYrm, TB_ALIGN_32 },
{ X86::VMOVDQAYrr, X86::VMOVDQAYrm, TB_ALIGN_16 },
{ X86::VMOVUPDYrr, X86::VMOVUPDYrm, 0 },
- { X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 }
+ { X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 },
+ // AVX2 foldable instructions
+ { X86::VPABSBrr256, X86::VPABSBrm256, TB_ALIGN_16 },
+ { X86::VPABSDrr256, X86::VPABSDrm256, TB_ALIGN_16 },
+ { X86::VPABSWrr256, X86::VPABSWrm256, TB_ALIGN_16 },
+ { X86::VPSHUFDYri, X86::VPSHUFDYmi, TB_ALIGN_16 },
+ { X86::VPSHUFHWYri, X86::VPSHUFHWYmi, TB_ALIGN_16 },
+ { X86::VPSHUFLWYri, X86::VPSHUFLWYmi, TB_ALIGN_16 }
};
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
@@ -652,6 +665,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
{ X86::MINSSrr, X86::MINSSrm, 0 },
{ X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
+ { X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 },
{ X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 },
{ X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 },
{ X86::MULSDrr, X86::MULSDrm, 0 },
@@ -664,30 +678,44 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::ORPSrr, X86::ORPSrm, TB_ALIGN_16 },
{ X86::PACKSSDWrr, X86::PACKSSDWrm, TB_ALIGN_16 },
{ X86::PACKSSWBrr, X86::PACKSSWBrm, TB_ALIGN_16 },
+ { X86::PACKUSDWrr, X86::PACKUSDWrm, TB_ALIGN_16 },
{ X86::PACKUSWBrr, X86::PACKUSWBrm, TB_ALIGN_16 },
{ X86::PADDBrr, X86::PADDBrm, TB_ALIGN_16 },
{ X86::PADDDrr, X86::PADDDrm, TB_ALIGN_16 },
{ X86::PADDQrr, X86::PADDQrm, TB_ALIGN_16 },
{ X86::PADDSBrr, X86::PADDSBrm, TB_ALIGN_16 },
{ X86::PADDSWrr, X86::PADDSWrm, TB_ALIGN_16 },
+ { X86::PADDUSBrr, X86::PADDUSBrm, TB_ALIGN_16 },
+ { X86::PADDUSWrr, X86::PADDUSWrm, TB_ALIGN_16 },
{ X86::PADDWrr, X86::PADDWrm, TB_ALIGN_16 },
+ { X86::PALIGNR128rr, X86::PALIGNR128rm, TB_ALIGN_16 },
{ X86::PANDNrr, X86::PANDNrm, TB_ALIGN_16 },
{ X86::PANDrr, X86::PANDrm, TB_ALIGN_16 },
{ X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 },
{ X86::PAVGWrr, X86::PAVGWrm, TB_ALIGN_16 },
{ X86::PCMPEQBrr, X86::PCMPEQBrm, TB_ALIGN_16 },
{ X86::PCMPEQDrr, X86::PCMPEQDrm, TB_ALIGN_16 },
+ { X86::PCMPEQQrr, X86::PCMPEQQrm, TB_ALIGN_16 },
{ X86::PCMPEQWrr, X86::PCMPEQWrm, TB_ALIGN_16 },
{ X86::PCMPGTBrr, X86::PCMPGTBrm, TB_ALIGN_16 },
{ X86::PCMPGTDrr, X86::PCMPGTDrm, TB_ALIGN_16 },
+ { X86::PCMPGTQrr, X86::PCMPGTQrm, TB_ALIGN_16 },
{ X86::PCMPGTWrr, X86::PCMPGTWrm, TB_ALIGN_16 },
+ { X86::PHADDDrr128, X86::PHADDDrm128, TB_ALIGN_16 },
+ { X86::PHADDWrr128, X86::PHADDWrm128, TB_ALIGN_16 },
+ { X86::PHADDSWrr128, X86::PHADDSWrm128, TB_ALIGN_16 },
+ { X86::PHSUBDrr128, X86::PHSUBDrm128, TB_ALIGN_16 },
+ { X86::PHSUBSWrr128, X86::PHSUBSWrm128, TB_ALIGN_16 },
+ { X86::PHSUBWrr128, X86::PHSUBWrm128, TB_ALIGN_16 },
{ X86::PINSRWrri, X86::PINSRWrmi, TB_ALIGN_16 },
+ { X86::PMADDUBSWrr128, X86::PMADDUBSWrm128, TB_ALIGN_16 },
{ X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16 },
{ X86::PMAXSWrr, X86::PMAXSWrm, TB_ALIGN_16 },
{ X86::PMAXUBrr, X86::PMAXUBrm, TB_ALIGN_16 },
{ X86::PMINSWrr, X86::PMINSWrm, TB_ALIGN_16 },
{ X86::PMINUBrr, X86::PMINUBrm, TB_ALIGN_16 },
{ X86::PMULDQrr, X86::PMULDQrm, TB_ALIGN_16 },
+ { X86::PMULHRSWrr128, X86::PMULHRSWrm128, TB_ALIGN_16 },
{ X86::PMULHUWrr, X86::PMULHUWrm, TB_ALIGN_16 },
{ X86::PMULHWrr, X86::PMULHWrm, TB_ALIGN_16 },
{ X86::PMULLDrr, X86::PMULLDrm, TB_ALIGN_16 },
@@ -695,6 +723,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::PMULUDQrr, X86::PMULUDQrm, TB_ALIGN_16 },
{ X86::PORrr, X86::PORrm, TB_ALIGN_16 },
{ X86::PSADBWrr, X86::PSADBWrm, TB_ALIGN_16 },
+ { X86::PSHUFBrr128, X86::PSHUFBrm128, TB_ALIGN_16 },
+ { X86::PSIGNBrr128, X86::PSIGNBrm128, TB_ALIGN_16 },
+ { X86::PSIGNWrr128, X86::PSIGNWrm128, TB_ALIGN_16 },
+ { X86::PSIGNDrr128, X86::PSIGNDrm128, TB_ALIGN_16 },
{ X86::PSLLDrr, X86::PSLLDrm, TB_ALIGN_16 },
{ X86::PSLLQrr, X86::PSLLQrm, TB_ALIGN_16 },
{ X86::PSLLWrr, X86::PSLLWrm, TB_ALIGN_16 },
@@ -816,6 +848,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 },
{ X86::VMINSSrr, X86::VMINSSrm, 0 },
{ X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 },
+ { X86::VMPSADBWrri, X86::VMPSADBWrmi, TB_ALIGN_16 },
{ X86::VMULPDrr, X86::VMULPDrm, TB_ALIGN_16 },
{ X86::VMULPSrr, X86::VMULPSrm, TB_ALIGN_16 },
{ X86::VMULSDrr, X86::VMULSDrm, 0 },
@@ -824,28 +857,44 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VORPSrr, X86::VORPSrm, TB_ALIGN_16 },
{ X86::VPACKSSDWrr, X86::VPACKSSDWrm, TB_ALIGN_16 },
{ X86::VPACKSSWBrr, X86::VPACKSSWBrm, TB_ALIGN_16 },
+ { X86::VPACKUSDWrr, X86::VPACKUSDWrm, TB_ALIGN_16 },
{ X86::VPACKUSWBrr, X86::VPACKUSWBrm, TB_ALIGN_16 },
{ X86::VPADDBrr, X86::VPADDBrm, TB_ALIGN_16 },
{ X86::VPADDDrr, X86::VPADDDrm, TB_ALIGN_16 },
{ X86::VPADDQrr, X86::VPADDQrm, TB_ALIGN_16 },
{ X86::VPADDSBrr, X86::VPADDSBrm, TB_ALIGN_16 },
{ X86::VPADDSWrr, X86::VPADDSWrm, TB_ALIGN_16 },
+ { X86::VPADDUSBrr, X86::VPADDUSBrm, TB_ALIGN_16 },
+ { X86::VPADDUSWrr, X86::VPADDUSWrm, TB_ALIGN_16 },
{ X86::VPADDWrr, X86::VPADDWrm, TB_ALIGN_16 },
+ { X86::VPALIGNR128rr, X86::VPALIGNR128rm, TB_ALIGN_16 },
{ X86::VPANDNrr, X86::VPANDNrm, TB_ALIGN_16 },
{ X86::VPANDrr, X86::VPANDrm, TB_ALIGN_16 },
+ { X86::VPAVGBrr, X86::VPAVGBrm, TB_ALIGN_16 },
+ { X86::VPAVGWrr, X86::VPAVGWrm, TB_ALIGN_16 },
{ X86::VPCMPEQBrr, X86::VPCMPEQBrm, TB_ALIGN_16 },
{ X86::VPCMPEQDrr, X86::VPCMPEQDrm, TB_ALIGN_16 },
+ { X86::VPCMPEQQrr, X86::VPCMPEQQrm, TB_ALIGN_16 },
{ X86::VPCMPEQWrr, X86::VPCMPEQWrm, TB_ALIGN_16 },
{ X86::VPCMPGTBrr, X86::VPCMPGTBrm, TB_ALIGN_16 },
{ X86::VPCMPGTDrr, X86::VPCMPGTDrm, TB_ALIGN_16 },
+ { X86::VPCMPGTQrr, X86::VPCMPGTQrm, TB_ALIGN_16 },
{ X86::VPCMPGTWrr, X86::VPCMPGTWrm, TB_ALIGN_16 },
+ { X86::VPHADDDrr128, X86::VPHADDDrm128, TB_ALIGN_16 },
+ { X86::VPHADDSWrr128, X86::VPHADDSWrm128, TB_ALIGN_16 },
+ { X86::VPHADDWrr128, X86::VPHADDWrm128, TB_ALIGN_16 },
+ { X86::VPHSUBDrr128, X86::VPHSUBDrm128, TB_ALIGN_16 },
+ { X86::VPHSUBSWrr128, X86::VPHSUBSWrm128, TB_ALIGN_16 },
+ { X86::VPHSUBWrr128, X86::VPHSUBWrm128, TB_ALIGN_16 },
{ X86::VPINSRWrri, X86::VPINSRWrmi, TB_ALIGN_16 },
+ { X86::VPMADDUBSWrr128, X86::VPMADDUBSWrm128, TB_ALIGN_16 },
{ X86::VPMADDWDrr, X86::VPMADDWDrm, TB_ALIGN_16 },
{ X86::VPMAXSWrr, X86::VPMAXSWrm, TB_ALIGN_16 },
{ X86::VPMAXUBrr, X86::VPMAXUBrm, TB_ALIGN_16 },
{ X86::VPMINSWrr, X86::VPMINSWrm, TB_ALIGN_16 },
{ X86::VPMINUBrr, X86::VPMINUBrm, TB_ALIGN_16 },
{ X86::VPMULDQrr, X86::VPMULDQrm, TB_ALIGN_16 },
+ { X86::VPMULHRSWrr128, X86::VPMULHRSWrm128, TB_ALIGN_16 },
{ X86::VPMULHUWrr, X86::VPMULHUWrm, TB_ALIGN_16 },
{ X86::VPMULHWrr, X86::VPMULHWrm, TB_ALIGN_16 },
{ X86::VPMULLDrr, X86::VPMULLDrm, TB_ALIGN_16 },
@@ -853,6 +902,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VPMULUDQrr, X86::VPMULUDQrm, TB_ALIGN_16 },
{ X86::VPORrr, X86::VPORrm, TB_ALIGN_16 },
{ X86::VPSADBWrr, X86::VPSADBWrm, TB_ALIGN_16 },
+ { X86::VPSHUFBrr128, X86::VPSHUFBrm128, TB_ALIGN_16 },
+ { X86::VPSIGNBrr128, X86::VPSIGNBrm128, TB_ALIGN_16 },
+ { X86::VPSIGNWrr128, X86::VPSIGNWrm128, TB_ALIGN_16 },
+ { X86::VPSIGNDrr128, X86::VPSIGNDrm128, TB_ALIGN_16 },
{ X86::VPSLLDrr, X86::VPSLLDrm, TB_ALIGN_16 },
{ X86::VPSLLQrr, X86::VPSLLQrm, TB_ALIGN_16 },
{ X86::VPSLLWrr, X86::VPSLLWrm, TB_ALIGN_16 },
@@ -886,7 +939,91 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VUNPCKLPDrr, X86::VUNPCKLPDrm, TB_ALIGN_16 },
{ X86::VUNPCKLPSrr, X86::VUNPCKLPSrm, TB_ALIGN_16 },
{ X86::VXORPDrr, X86::VXORPDrm, TB_ALIGN_16 },
- { X86::VXORPSrr, X86::VXORPSrm, TB_ALIGN_16 }
+ { X86::VXORPSrr, X86::VXORPSrm, TB_ALIGN_16 },
+ // AVX2 foldable instructions
+ { X86::VPACKSSDWYrr, X86::VPACKSSDWYrm, TB_ALIGN_16 },
+ { X86::VPACKSSWBYrr, X86::VPACKSSWBYrm, TB_ALIGN_16 },
+ { X86::VPACKUSDWYrr, X86::VPACKUSDWYrm, TB_ALIGN_16 },
+ { X86::VPACKUSWBYrr, X86::VPACKUSWBYrm, TB_ALIGN_16 },
+ { X86::VPADDBYrr, X86::VPADDBYrm, TB_ALIGN_16 },
+ { X86::VPADDDYrr, X86::VPADDDYrm, TB_ALIGN_16 },
+ { X86::VPADDQYrr, X86::VPADDQYrm, TB_ALIGN_16 },
+ { X86::VPADDSBYrr, X86::VPADDSBYrm, TB_ALIGN_16 },
+ { X86::VPADDSWYrr, X86::VPADDSWYrm, TB_ALIGN_16 },
+ { X86::VPADDUSBYrr, X86::VPADDUSBYrm, TB_ALIGN_16 },
+ { X86::VPADDUSWYrr, X86::VPADDUSWYrm, TB_ALIGN_16 },
+ { X86::VPADDWYrr, X86::VPADDWYrm, TB_ALIGN_16 },
+ { X86::VPALIGNR256rr, X86::VPALIGNR256rm, TB_ALIGN_16 },
+ { X86::VPANDNYrr, X86::VPANDNYrm, TB_ALIGN_16 },
+ { X86::VPANDYrr, X86::VPANDYrm, TB_ALIGN_16 },
+ { X86::VPAVGBYrr, X86::VPAVGBYrm, TB_ALIGN_16 },
+ { X86::VPAVGWYrr, X86::VPAVGWYrm, TB_ALIGN_16 },
+ { X86::VPCMPEQBYrr, X86::VPCMPEQBYrm, TB_ALIGN_16 },
+ { X86::VPCMPEQDYrr, X86::VPCMPEQDYrm, TB_ALIGN_16 },
+ { X86::VPCMPEQQYrr, X86::VPCMPEQQYrm, TB_ALIGN_16 },
+ { X86::VPCMPEQWYrr, X86::VPCMPEQWYrm, TB_ALIGN_16 },
+ { X86::VPCMPGTBYrr, X86::VPCMPGTBYrm, TB_ALIGN_16 },
+ { X86::VPCMPGTDYrr, X86::VPCMPGTDYrm, TB_ALIGN_16 },
+ { X86::VPCMPGTQYrr, X86::VPCMPGTQYrm, TB_ALIGN_16 },
+ { X86::VPCMPGTWYrr, X86::VPCMPGTWYrm, TB_ALIGN_16 },
+ { X86::VPHADDDrr256, X86::VPHADDDrm256, TB_ALIGN_16 },
+ { X86::VPHADDSWrr256, X86::VPHADDSWrm256, TB_ALIGN_16 },
+ { X86::VPHADDWrr256, X86::VPHADDWrm256, TB_ALIGN_16 },
+ { X86::VPHSUBDrr256, X86::VPHSUBDrm256, TB_ALIGN_16 },
+ { X86::VPHSUBSWrr256, X86::VPHSUBSWrm256, TB_ALIGN_16 },
+ { X86::VPHSUBWrr256, X86::VPHSUBWrm256, TB_ALIGN_16 },
+ { X86::VPMADDUBSWrr256, X86::VPMADDUBSWrm256, TB_ALIGN_16 },
+ { X86::VPMADDWDYrr, X86::VPMADDWDYrm, TB_ALIGN_16 },
+ { X86::VPMAXSWYrr, X86::VPMAXSWYrm, TB_ALIGN_16 },
+ { X86::VPMAXUBYrr, X86::VPMAXUBYrm, TB_ALIGN_16 },
+ { X86::VPMINSWYrr, X86::VPMINSWYrm, TB_ALIGN_16 },
+ { X86::VPMINUBYrr, X86::VPMINUBYrm, TB_ALIGN_16 },
+ { X86::VMPSADBWYrri, X86::VMPSADBWYrmi, TB_ALIGN_16 },
+ { X86::VPMULDQYrr, X86::VPMULDQYrm, TB_ALIGN_16 },
+ { X86::VPMULHRSWrr256, X86::VPMULHRSWrm256, TB_ALIGN_16 },
+ { X86::VPMULHUWYrr, X86::VPMULHUWYrm, TB_ALIGN_16 },
+ { X86::VPMULHWYrr, X86::VPMULHWYrm, TB_ALIGN_16 },
+ { X86::VPMULLDYrr, X86::VPMULLDYrm, TB_ALIGN_16 },
+ { X86::VPMULLWYrr, X86::VPMULLWYrm, TB_ALIGN_16 },
+ { X86::VPMULUDQYrr, X86::VPMULUDQYrm, TB_ALIGN_16 },
+ { X86::VPORYrr, X86::VPORYrm, TB_ALIGN_16 },
+ { X86::VPSADBWYrr, X86::VPSADBWYrm, TB_ALIGN_16 },
+ { X86::VPSHUFBrr256, X86::VPSHUFBrm256, TB_ALIGN_16 },
+ { X86::VPSIGNBrr256, X86::VPSIGNBrm256, TB_ALIGN_16 },
+ { X86::VPSIGNWrr256, X86::VPSIGNWrm256, TB_ALIGN_16 },
+ { X86::VPSIGNDrr256, X86::VPSIGNDrm256, TB_ALIGN_16 },
+ { X86::VPSLLDYrr, X86::VPSLLDYrm, TB_ALIGN_16 },
+ { X86::VPSLLQYrr, X86::VPSLLQYrm, TB_ALIGN_16 },
+ { X86::VPSLLWYrr, X86::VPSLLWYrm, TB_ALIGN_16 },
+ { X86::VPSLLVDrr, X86::VPSLLVDrm, TB_ALIGN_16 },
+ { X86::VPSLLVDYrr, X86::VPSLLVDYrm, TB_ALIGN_16 },
+ { X86::VPSLLVQrr, X86::VPSLLVQrm, TB_ALIGN_16 },
+ { X86::VPSLLVQYrr, X86::VPSLLVQYrm, TB_ALIGN_16 },
+ { X86::VPSRADYrr, X86::VPSRADYrm, TB_ALIGN_16 },
+ { X86::VPSRAWYrr, X86::VPSRAWYrm, TB_ALIGN_16 },
+ { X86::VPSRAVDrr, X86::VPSRAVDrm, TB_ALIGN_16 },
+ { X86::VPSRAVDYrr, X86::VPSRAVDYrm, TB_ALIGN_16 },
+ { X86::VPSRLDYrr, X86::VPSRLDYrm, TB_ALIGN_16 },
+ { X86::VPSRLQYrr, X86::VPSRLQYrm, TB_ALIGN_16 },
+ { X86::VPSRLWYrr, X86::VPSRLWYrm, TB_ALIGN_16 },
+ { X86::VPSRLVDrr, X86::VPSRLVDrm, TB_ALIGN_16 },
+ { X86::VPSRLVDYrr, X86::VPSRLVDYrm, TB_ALIGN_16 },
+ { X86::VPSRLVQrr, X86::VPSRLVQrm, TB_ALIGN_16 },
+ { X86::VPSRLVQYrr, X86::VPSRLVQYrm, TB_ALIGN_16 },
+ { X86::VPSUBBYrr, X86::VPSUBBYrm, TB_ALIGN_16 },
+ { X86::VPSUBDYrr, X86::VPSUBDYrm, TB_ALIGN_16 },
+ { X86::VPSUBSBYrr, X86::VPSUBSBYrm, TB_ALIGN_16 },
+ { X86::VPSUBSWYrr, X86::VPSUBSWYrm, TB_ALIGN_16 },
+ { X86::VPSUBWYrr, X86::VPSUBWYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKHBWYrr, X86::VPUNPCKHBWYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKHDQYrr, X86::VPUNPCKHDQYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKHQDQYrr, X86::VPUNPCKHQDQYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKHWDYrr, X86::VPUNPCKHWDYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKLBWYrr, X86::VPUNPCKLBWYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKLDQYrr, X86::VPUNPCKLDQYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKLQDQYrr, X86::VPUNPCKLQDQYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, TB_ALIGN_16 },
+ { X86::VPXORYrr, X86::VPXORYrm, TB_ALIGN_16 },
// FIXME: add AVX 256-bit foldable instructions
};
@@ -2624,6 +2761,10 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
///
static bool hasPartialRegUpdate(unsigned Opcode) {
switch (Opcode) {
+ case X86::CVTSI2SSrr:
+ case X86::CVTSI2SS64rr:
+ case X86::CVTSI2SDrr:
+ case X86::CVTSI2SD64rr:
case X86::CVTSD2SSrr:
case X86::Int_CVTSD2SSrr:
case X86::CVTSS2SDrr:
@@ -2652,6 +2793,54 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
return false;
}
+/// getPartialRegUpdateClearance - Inform the ExeDepsFix pass how many idle
+/// instructions we would like before a partial register update.
+unsigned X86InstrInfo::
+getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode()))
+ return 0;
+
+ // If MI is marked as reading Reg, the partial register update is wanted.
+ const MachineOperand &MO = MI->getOperand(0);
+ unsigned Reg = MO.getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (MO.readsReg() || MI->readsVirtualRegister(Reg))
+ return 0;
+ } else {
+ if (MI->readsRegister(Reg, TRI))
+ return 0;
+ }
+
+ // If any of the preceding 16 instructions are reading Reg, insert a
+ // dependency breaking instruction. The magic number is based on a few
+ // Nehalem experiments.
+ return 16;
+}
+
+void X86InstrInfo::
+breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ if (X86::VR128RegClass.contains(Reg)) {
+ // These instructions are all floating point domain, so xorps is the best
+ // choice.
+ bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+ unsigned Opc = HasAVX ? X86::VXORPSrr : X86::XORPSrr;
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
+ .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
+ } else if (X86::VR256RegClass.contains(Reg)) {
+ // Use vxorps to clear the full ymm register.
+ // It wants to read and write the xmm sub-register.
+ unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg)
+ .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef)
+ .addReg(Reg, RegState::ImplicitDefine);
+ } else
+ return;
+ MI->addRegisterKilled(Reg, TRI, true);
+}
+
MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
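Taken together, the two hooks added above let an execution-dependency pass cope with instructions like cvtsi2ss that write only part of an XMM register and therefore pick up a false dependency on its old contents. A rough sketch of the intended calling pattern; apart from the two hook names, everything here (the wrapper and its parameters) is assumed for illustration:

    static void maybeBreakPartialDep(MachineInstr *MI, unsigned OpNum,
                                     unsigned IdleInstrsSinceLastWrite,
                                     const X86InstrInfo *TII,
                                     const TargetRegisterInfo *TRI) {
      // Ask the target how many idle instructions it wants before this def.
      unsigned Pref = TII->getPartialRegUpdateClearance(MI, OpNum, TRI);
      // Too few idle instructions since the register was last written:
      // insert an xorps/vxorps so the partial write no longer depends on
      // the stale value.
      if (Pref > IdleInstrsSinceLastWrite)
        TII->breakPartialRegDependency(MI, OpNum, TRI);
    }
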
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index 97009db..ee488d8 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -345,6 +345,11 @@ public:
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
+ unsigned getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const;
+ void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const;
+
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
unsigned OpNum,
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 91c84dd..6deee4f 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -80,8 +80,9 @@ multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
string OpcodeStr, X86MemOperand x86memop,
list<dag> pat_rr, list<dag> pat_rm,
- bit Is2Addr = 1> {
- let isCommutable = 1 in
+ bit Is2Addr = 1,
+ bit rr_hasSideEffects = 0> {
+ let isCommutable = 1, neverHasSideEffects = rr_hasSideEffects in
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
@@ -519,6 +520,8 @@ let Predicates = [HasSSE2] in {
// is during lowering, where it's not possible to recognize the fold cause
// it has two uses through a bitcast. One use disappears at isel time and the
// fold opportunity reappears.
+ def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2f64 VR128:$src2),sub_sd))>;
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
(MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
@@ -646,6 +649,9 @@ let Predicates = [HasAVX] in {
// is during lowering, where it's not possible to recognize the fold cause
// it has two uses through a bitcast. One use disappears at isel time and the
// fold opportunity reappears.
+ def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
+ (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2f64 VR128:$src2),
+ sub_sd))>;
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
(VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
sub_sd))>;
@@ -2629,7 +2635,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, [],
[(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
+ (memopv2i64 addr:$src2)))], 0, 1>, TB, VEX_4V;
defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem,
@@ -2926,12 +2932,15 @@ multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
+ let neverHasSideEffects = 1 in {
def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ let mayLoad = 1 in
def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ }
def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, sdmem:$src2),
!strconcat(OpcodeStr,
@@ -3799,14 +3808,15 @@ let ExeDomain = SSEPackedInt in {
(outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
"psrldq\t{$src2, $dst|$dst, $src2}", []>;
// PSRADQri doesn't exist in SSE[1-3].
- }
- def PANDNrr : PDI<0xDF, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}", []>;
+ def PANDNrr : PDI<0xDF, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "pandn\t{$src2, $dst|$dst, $src2}", []>;
- def PANDNrm : PDI<0xDF, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}", []>;
+ let mayLoad = 1 in
+ def PANDNrm : PDI<0xDF, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ "pandn\t{$src2, $dst|$dst, $src2}", []>;
+ }
}
} // Constraints = "$src1 = $dst"
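Background on the neverHasSideEffects/mayLoad/mayStore annotations added throughout this file: instruction definitions that carry a selection pattern get those properties inferred from the pattern, but pattern-less definitions like the ones touched here are otherwise treated as having unknown side effects, which pessimizes scheduling and blocks dead-code elimination. Spelling the flags out restores the precise behavior without adding patterns.
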
@@ -5348,6 +5358,7 @@ let Predicates = [HasAVX] in {
//===---------------------------------------------------------------------===//
multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
+ let neverHasSideEffects = 1 in {
def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
!if(Is2Addr,
@@ -5355,6 +5366,7 @@ multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[]>, OpSize;
+ let mayLoad = 1 in
def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
!if(Is2Addr,
@@ -5362,19 +5374,23 @@ multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[]>, OpSize;
+ }
}
multiclass ssse3_palign_y<string asm, bit Is2Addr = 1> {
+ let neverHasSideEffects = 1 in {
def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, OpSize;
+ let mayLoad = 1 in
def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2, i8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, OpSize;
+ }
}
let Predicates = [HasAVX] in
@@ -5721,6 +5737,7 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
OpSize;
+ let neverHasSideEffects = 1, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
@@ -5743,6 +5760,7 @@ defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
+ let neverHasSideEffects = 1, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
@@ -6720,19 +6738,21 @@ let Defs = [EFLAGS], usesCustomInserter = 1 in {
defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}
-let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
+let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1, Predicates = [HasAVX] in {
def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
+ let mayLoad = 1 in
def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}
-let Defs = [XMM0, EFLAGS] in {
+let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+ let mayLoad = 1 in
def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
@@ -6756,19 +6776,21 @@ let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
}
let Predicates = [HasAVX],
- Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
+ Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
"vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
+ let mayLoad = 1 in
def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
"vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}
-let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
+let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
"pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
+ let mayLoad = 1 in
def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
"pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
@@ -7071,12 +7093,14 @@ def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
//===----------------------------------------------------------------------===//
// Carry-less Multiplication instructions
+let neverHasSideEffects = 1 in {
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[]>;
+let mayLoad = 1 in
def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
@@ -7089,10 +7113,12 @@ def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>;
+let mayLoad = 1 in
def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>;
+}
multiclass pclmul_alias<string asm, int immop> {
@@ -7655,7 +7681,6 @@ defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
// Variable Bit Shifts
//
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr,
- PatFrag pf128, PatFrag pf256,
Intrinsic Int128, Intrinsic Int256> {
def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
@@ -7664,7 +7689,8 @@ multiclass avx2_var_shift<bits<8> opc, string OpcodeStr,
def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (Int128 VR128:$src1, (pf128 addr:$src2)))]>,
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))]>,
VEX_4V;
def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2),
@@ -7673,26 +7699,47 @@ multiclass avx2_var_shift<bits<8> opc, string OpcodeStr,
def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (Int256 VR256:$src1, (pf256 addr:$src2)))]>,
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, (bitconvert (memopv4i64 addr:$src2))))]>,
VEX_4V;
}
-defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", memopv4i32, memopv8i32,
- int_x86_avx2_psllv_d, int_x86_avx2_psllv_d_256>;
-defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", memopv2i64, memopv4i64,
- int_x86_avx2_psllv_q, int_x86_avx2_psllv_q_256>,
- VEX_W;
-defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", memopv4i32, memopv8i32,
- int_x86_avx2_psrlv_d, int_x86_avx2_psrlv_d_256>;
-defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", memopv2i64, memopv4i64,
- int_x86_avx2_psrlv_q, int_x86_avx2_psrlv_q_256>,
- VEX_W;
-defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", memopv4i32, memopv8i32,
- int_x86_avx2_psrav_d, int_x86_avx2_psrav_d_256>;
+multiclass avx2_var_shift_i64<bits<8> opc, string OpcodeStr,
+ Intrinsic Int128, Intrinsic Int256> {
+ def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (Int128 VR128:$src1, VR128:$src2))]>, VEX_4V;
+ def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, (memopv2i64 addr:$src2)))]>,
+ VEX_4V;
+ def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (Int256 VR256:$src1, VR256:$src2))]>, VEX_4V;
+ def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, (memopv4i64 addr:$src2)))]>,
+ VEX_4V;
+}
+defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", int_x86_avx2_psllv_d,
+ int_x86_avx2_psllv_d_256>;
+defm VPSLLVQ : avx2_var_shift_i64<0x47, "vpsllvq", int_x86_avx2_psllv_q,
+ int_x86_avx2_psllv_q_256>, VEX_W;
+defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", int_x86_avx2_psrlv_d,
+ int_x86_avx2_psrlv_d_256>;
+defm VPSRLVQ : avx2_var_shift_i64<0x45, "vpsrlvq", int_x86_avx2_psrlv_q,
+ int_x86_avx2_psrlv_q_256>, VEX_W;
+defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", int_x86_avx2_psrav_d,
+ int_x86_avx2_psrav_d_256>;
let Predicates = [HasAVX2] in {
-
def : Pat<(v4i32 (shl (v4i32 VR128:$src1), (v4i32 VR128:$src2))),
(VPSLLVDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (shl (v2i64 VR128:$src1), (v2i64 VR128:$src2))),
@@ -7714,29 +7761,30 @@ let Predicates = [HasAVX2] in {
def : Pat<(v8i32 (sra (v8i32 VR256:$src1), (v8i32 VR256:$src2))),
(VPSRAVDYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v4i32 (shl (v4i32 VR128:$src1),(loadv4i32 addr:$src2))),
- (VPSLLVDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (shl (v4i32 VR128:$src1),(loadv2i64 addr:$src2))),
+ def : Pat<(v4i32 (shl (v4i32 VR128:$src1),
+ (v4i32 (bitconvert (memopv2i64 addr:$src2))))),
(VPSLLVDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (shl (v2i64 VR128:$src1),(loadv2i64 addr:$src2))),
+ def : Pat<(v2i64 (shl (v2i64 VR128:$src1), (memopv2i64 addr:$src2))),
(VPSLLVQrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (srl (v4i32 VR128:$src1),(loadv4i32 addr:$src2))),
+ def : Pat<(v4i32 (srl (v4i32 VR128:$src1),
+ (v4i32 (bitconvert (memopv2i64 addr:$src2))))),
(VPSRLVDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (srl (v2i64 VR128:$src1),(loadv2i64 addr:$src2))),
+ def : Pat<(v2i64 (srl (v2i64 VR128:$src1), (memopv2i64 addr:$src2))),
(VPSRLVQrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (sra (v4i32 VR128:$src1),(loadv4i32 addr:$src2))),
+ def : Pat<(v4i32 (sra (v4i32 VR128:$src1),
+ (v4i32 (bitconvert (memopv2i64 addr:$src2))))),
(VPSRAVDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v8i32 (shl (v8i32 VR256:$src1),(loadv8i32 addr:$src2))),
+ def : Pat<(v8i32 (shl (v8i32 VR256:$src1),
+ (v8i32 (bitconvert (memopv4i64 addr:$src2))))),
(VPSLLVDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4i64 (shl (v4i64 VR256:$src1),(loadv4i64 addr:$src2))),
+ def : Pat<(v4i64 (shl (v4i64 VR256:$src1), (memopv4i64 addr:$src2))),
(VPSLLVQYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8i32 (srl (v8i32 VR256:$src1),(loadv8i32 addr:$src2))),
+ def : Pat<(v8i32 (srl (v8i32 VR256:$src1),
+ (v8i32 (bitconvert (memopv4i64 addr:$src2))))),
(VPSRLVDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4i64 (srl (v4i64 VR256:$src1),(loadv4i64 addr:$src2))),
+ def : Pat<(v4i64 (srl (v4i64 VR256:$src1), (memopv4i64 addr:$src2))),
(VPSRLVQYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8i32 (sra (v8i32 VR256:$src1),(loadv8i32 addr:$src2))),
+ def : Pat<(v8i32 (sra (v8i32 VR256:$src1),
+ (v8i32 (bitconvert (memopv4i64 addr:$src2))))),
(VPSRAVDYrm VR256:$src1, addr:$src2)>;
}
-
-
-
diff --git a/lib/Target/XCore/MCTargetDesc/LLVMBuild.txt b/lib/Target/XCore/MCTargetDesc/LLVMBuild.txt
index 7f4a433..628afb5 100644
--- a/lib/Target/XCore/MCTargetDesc/LLVMBuild.txt
+++ b/lib/Target/XCore/MCTargetDesc/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = XCoreDesc
parent = XCore
-required_libraries = MC Support XCoreInfo
+required_libraries = MC XCoreInfo
add_to_library_groups = XCore
diff --git a/lib/Target/XCore/TargetInfo/LLVMBuild.txt b/lib/Target/XCore/TargetInfo/LLVMBuild.txt
index 1d1b722..d0b8e54 100644
--- a/lib/Target/XCore/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/XCore/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = XCoreInfo
parent = XCore
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = XCore