author     Logan Chien <loganchien@google.com>  2011-11-15 14:23:13 +0800
committer  Logan Chien <loganchien@google.com>  2011-11-15 14:39:40 +0800
commit     f9c1b92c27bf4ac40a52e0f1ef6d006d7e74bed3 (patch)
tree       8c51041910d92818635c052f2b5e17f3abc21f98 /lib/Target/ARM
parent     e21ddb78968d087cf2b970df624abda64fca4e30 (diff)
parent     bfc9429c2b814469adf3930dda31539d1c3319d8 (diff)
Merge with LLVM upstream r144606 (Nov 15th 2011)
Conflicts:
    Makefile.rules
    configure
    docs/ReleaseNotes.html
    lib/Analysis/ScalarEvolution.cpp
    lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
    lib/CodeGen/ExecutionDepsFix.cpp
    lib/CodeGen/MachineBlockPlacement.cpp
    lib/CodeGen/MachineBranchProbabilityInfo.cpp
    lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    lib/ExecutionEngine/JIT/LLVMBuild.txt
    lib/MC/LLVMBuild.txt
    lib/MC/MCDisassembler/LLVMBuild.txt
    lib/MC/MCDwarf.cpp
    lib/Object/LLVMBuild.txt
    lib/Target/ARM/ARMExpandPseudoInsts.cpp
    lib/Target/ARM/ARMFastISel.cpp
    lib/Target/ARM/ARMInstrInfo.td
    lib/Target/ARM/AsmParser/ARMAsmParser.cpp
    lib/Target/ARM/Disassembler/ARMDisassembler.cpp
    lib/Target/ARM/Disassembler/LLVMBuild.txt
    lib/Target/ARM/TargetInfo/LLVMBuild.txt
    lib/Target/CBackend/TargetInfo/LLVMBuild.txt
    lib/Target/CellSPU/MCTargetDesc/LLVMBuild.txt
    lib/Target/CellSPU/TargetInfo/LLVMBuild.txt
    lib/Target/CppBackend/TargetInfo/LLVMBuild.txt
    lib/Target/LLVMBuild.txt
    lib/Target/MBlaze/Disassembler/LLVMBuild.txt
    lib/Target/MBlaze/TargetInfo/LLVMBuild.txt
    lib/Target/MSP430/MCTargetDesc/LLVMBuild.txt
    lib/Target/MSP430/TargetInfo/LLVMBuild.txt
    lib/Target/Mips/CMakeLists.txt
    lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
    lib/Target/Mips/Mips64InstrInfo.td
    lib/Target/Mips/MipsAsmPrinter.cpp
    lib/Target/Mips/MipsISelLowering.cpp
    lib/Target/Mips/MipsMCInstLower.cpp
    lib/Target/Mips/TargetInfo/LLVMBuild.txt
    lib/Target/PTX/LLVMBuild.txt
    lib/Target/PTX/PTXAsmPrinter.cpp
    lib/Target/PTX/TargetInfo/LLVMBuild.txt
    lib/Target/PowerPC/TargetInfo/LLVMBuild.txt
    lib/Target/Sparc/TargetInfo/LLVMBuild.txt
    lib/Target/X86/TargetInfo/LLVMBuild.txt
    lib/Target/X86/X86ISelLowering.cpp
    lib/Target/X86/X86InstrSSE.td
    lib/Target/XCore/MCTargetDesc/LLVMBuild.txt
    lib/Target/XCore/TargetInfo/LLVMBuild.txt
    lib/Transforms/IPO/LLVMBuild.txt
    lib/Transforms/Utils/LLVMBuild.txt
    test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
    test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
    test/CodeGen/ARM/fast-isel-cmp-imm.ll
    test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
    test/CodeGen/CellSPU/call_indirect.ll
    test/CodeGen/X86/avx2-logic.ll
    test/CodeGen/X86/block-placement.ll
    test/CodeGen/X86/sse-domains.ll
    test/CodeGen/X86/sse3.ll
    test/CodeGen/X86/vec_shuffle-39.ll
    test/MC/ARM/neon-vld-encoding.s
    test/MC/ARM/neon-vst-encoding.s
    tools/llvm-config-2/llvm-config.cpp
    utils/TableGen/LLVMBuild.txt

Change-Id: I70f454db6fc79d7799f56d0f6f2eb7b99561c504
Diffstat (limited to 'lib/Target/ARM')
-rw-r--r--  lib/Target/ARM/ARMExpandPseudoInsts.cpp  8
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp  416
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp  1
-rw-r--r--  lib/Target/ARM/ARMInstrFormats.td  60
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.td  35
-rw-r--r--  lib/Target/ARM/ARMInstrNEON.td  172
-rw-r--r--  lib/Target/ARM/ARMInstrThumb2.td  2
-rw-r--r--  lib/Target/ARM/ARMInstrVFP.td  17
-rw-r--r--  lib/Target/ARM/ARMLoadStoreOptimizer.cpp  22
-rw-r--r--  lib/Target/ARM/AsmParser/ARMAsmParser.cpp  170
-rw-r--r--  lib/Target/ARM/Disassembler/ARMDisassembler.cpp  6
-rw-r--r--  lib/Target/ARM/Disassembler/LLVMBuild.txt  2
-rw-r--r--  lib/Target/ARM/MCTargetDesc/CMakeLists.txt  2
-rw-r--r--  lib/Target/ARM/TargetInfo/LLVMBuild.txt  2
14 files changed, 701 insertions, 214 deletions
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 5f7b8b2..fb7d96a 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -272,16 +272,16 @@ static const NEONLdStTableEntry NEONLdStTable[] = {
{ ARM::VST1d64TPseudo, ARM::VST1d64T, false, false, false, SingleSpc, 3, 1 ,true},
{ ARM::VST1d64TPseudo_UPD, ARM::VST1d64T_UPD, false, true, true, SingleSpc, 3, 1 ,true},
-{ ARM::VST1q16Pseudo, ARM::VST1q16, false, false, false, SingleSpc, 2, 4 ,true},
+{ ARM::VST1q16Pseudo, ARM::VST1q16, false, false, false, SingleSpc, 2, 4 ,false},
{ ARM::VST1q16PseudoWB_fixed, ARM::VST1q16wb_fixed, false, true, false, SingleSpc, 2, 4 ,false},
{ ARM::VST1q16PseudoWB_register, ARM::VST1q16wb_register, false, true, true, SingleSpc, 2, 4 ,false},
-{ ARM::VST1q32Pseudo, ARM::VST1q32, false, false, false, SingleSpc, 2, 2 ,true},
+{ ARM::VST1q32Pseudo, ARM::VST1q32, false, false, false, SingleSpc, 2, 2 ,false},
{ ARM::VST1q32PseudoWB_fixed, ARM::VST1q32wb_fixed, false, true, false, SingleSpc, 2, 2 ,false},
{ ARM::VST1q32PseudoWB_register, ARM::VST1q32wb_register, false, true, true, SingleSpc, 2, 2 ,false},
-{ ARM::VST1q64Pseudo, ARM::VST1q64, false, false, false, SingleSpc, 2, 1 ,true},
+{ ARM::VST1q64Pseudo, ARM::VST1q64, false, false, false, SingleSpc, 2, 1 ,false},
{ ARM::VST1q64PseudoWB_fixed, ARM::VST1q64wb_fixed, false, true, false, SingleSpc, 2, 1 ,false},
{ ARM::VST1q64PseudoWB_register, ARM::VST1q64wb_register, false, true, true, SingleSpc, 2, 1 ,false},
-{ ARM::VST1q8Pseudo, ARM::VST1q8, false, false, false, SingleSpc, 2, 8 ,true},
+{ ARM::VST1q8Pseudo, ARM::VST1q8, false, false, false, SingleSpc, 2, 8 ,false},
{ ARM::VST1q8PseudoWB_fixed, ARM::VST1q8wb_fixed, false, true, false, SingleSpc, 2, 8 ,false},
{ ARM::VST1q8PseudoWB_register, ARM::VST1q8wb_register, false, true, true, SingleSpc, 2, 8 ,false},
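The hunk above clears the trailing flag on the non-writeback VST1q* pseudo entries, bringing them in line with their _fixed/_register writeback variants. The expander finds entries by pseudo opcode; a table kept sorted like this one can be searched with std::lower_bound. A sketch with a hypothetical two-field entry (the real NEONLdStTableEntry has more fields):

    #include <algorithm>
    #include <cstddef>

    struct Entry { unsigned PseudoOpc; unsigned RealOpc; };

    // Binary search keyed on the pseudo opcode; returns null when the
    // pseudo-instruction has no expansion entry.
    const Entry *lookup(const Entry *Table, size_t N, unsigned PseudoOpc) {
      const Entry *E = std::lower_bound(
          Table, Table + N, PseudoOpc,
          [](const Entry &LHS, unsigned Opc) { return LHS.PseudoOpc < Opc; });
      return (E != Table + N && E->PseudoOpc == PseudoOpc) ? E : nullptr;
    }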
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 030fab1..4df084f 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -148,6 +148,8 @@ class ARMFastISel : public FastISel {
virtual bool TargetSelectInstruction(const Instruction *I);
virtual unsigned TargetMaterializeConstant(const Constant *C);
virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
+ virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI);
#include "ARMGenFastISel.inc"
@@ -164,7 +166,8 @@ class ARMFastISel : public FastISel {
bool SelectFPToSI(const Instruction *I);
bool SelectSDiv(const Instruction *I);
bool SelectSRem(const Instruction *I);
- bool SelectCall(const Instruction *I);
+ bool SelectCall(const Instruction *I, const char *IntrMemName);
+ bool SelectIntrinsicCall(const IntrinsicInst &I);
bool SelectSelect(const Instruction *I);
bool SelectRet(const Instruction *I);
bool SelectTrunc(const Instruction *I);
@@ -176,10 +179,14 @@ class ARMFastISel : public FastISel {
bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
bool isZExt);
- bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
+ bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, bool isZExt,
+ bool allocReg);
+
bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
bool ARMComputeAddress(const Value *Obj, Address &Addr);
- void ARMSimplifyAddress(Address &Addr, EVT VT);
+ void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
+ bool ARMIsMemCpySmall(uint64_t Len);
+ bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
unsigned ARMMaterializeInt(const Constant *C, EVT VT);
@@ -212,7 +219,7 @@ class ARMFastISel : public FastISel {
const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
void AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags);
+ unsigned Flags, bool useAM3);
};
} // end anonymous namespace
@@ -563,9 +570,9 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
// Use MVN to emit negative constants.
if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
unsigned Imm = (unsigned)~(CI->getSExtValue());
- bool EncodeImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
(ARM_AM::getSOImmVal(Imm) != -1);
- if (EncodeImm) {
+ if (UseImm) {
unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -723,7 +730,7 @@ bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
// If this is a type that can be sign or zero-extended to a basic operation
// go ahead and accept it now.
- if (VT == MVT::i8 || VT == MVT::i16)
+ if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
return true;
return false;
@@ -852,7 +859,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
return Addr.Base.Reg != 0;
}
-void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
+void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
@@ -860,21 +867,22 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
switch (VT.getSimpleVT().SimpleTy) {
default:
assert(false && "Unhandled load/store type!");
- case MVT::i16:
- if (isThumb2)
- // Integer loads/stores handle 12-bit offsets.
- needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
- else
- // ARM i16 integer loads/stores handle +/-imm8 offsets.
- // FIXME: Negative offsets require special handling.
- if (Addr.Offset > 255 || Addr.Offset < 0)
- needsLowering = true;
break;
case MVT::i1:
case MVT::i8:
+ case MVT::i16:
case MVT::i32:
- // Integer loads/stores handle 12-bit offsets.
- needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
+ if (!useAM3) {
+ // Integer loads/stores handle 12-bit offsets.
+ needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
+ // Handle negative offsets.
+ if (needsLowering && isThumb2)
+ needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
+ Addr.Offset > -256);
+ } else {
+ // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
+ needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
+ }
break;
case MVT::f32:
case MVT::f64:
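The rewritten ARMSimplifyAddress folds i16 into the common integer case and keys the legal-offset check on the addressing mode rather than the value type: AM3 forms (ARM halfword load/store and signed byte load) only encode +/-imm8, the i12 forms take a 12-bit positive offset, and Thumb2's *i8 variants pick up small negative offsets. The predicate as a standalone sketch (names ours):

    #include <cstdint>

    bool offsetNeedsLowering(int32_t Offset, bool UseAM3, bool IsThumb2,
                             bool HasV6T2) {
      if (UseAM3)
        return Offset > 255 || Offset < -255;       // +/-imm8 only
      // i12 forms encode 12-bit positive offsets directly.
      bool NeedsLowering = (Offset & 0xfff) != Offset;
      // Thumb2 *i8 forms cover offsets in (-256, 0).
      if (NeedsLowering && IsThumb2 && HasV6T2 && Offset < 0 && Offset > -256)
        NeedsLowering = false;
      return NeedsLowering;
    }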
@@ -910,7 +918,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags) {
+ unsigned Flags, bool useAM3) {
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
@@ -930,41 +938,78 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
// Now add the rest of the operands.
MIB.addFrameIndex(FI);
- // ARM halfword load/stores need an additional operand.
- if (!isThumb2 && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ // ARM halfword load/stores and signed byte loads need an additional operand.
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
MIB.addMemOperand(MMO);
} else {
// Now add the rest of the operands.
MIB.addReg(Addr.Base.Reg);
- // ARM halfword load/stores need an additional operand.
- if (!isThumb2 && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ // ARM halfword load/stores and signed byte loads need an additional operand.
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
}
AddOptionalDefs(MIB);
}
-bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
-
+bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+ bool isZExt = true, bool allocReg = true) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
unsigned Opc;
- TargetRegisterClass *RC;
+ bool useAM3 = false;
+ TargetRegisterClass *RC;
switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
+ case MVT::i1:
case MVT::i8:
- Opc = isThumb2 ? ARM::t2LDRBi12 : ARM::LDRBi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
+ else
+ Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
+ } else {
+ if (isZExt) {
+ Opc = ARM::LDRBi12;
+ } else {
+ Opc = ARM::LDRSB;
+ useAM3 = true;
+ }
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::i16:
- Opc = isThumb2 ? ARM::t2LDRHi12 : ARM::LDRH;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
+ else
+ Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
+ } else {
+ Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
+ useAM3 = true;
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::i32:
- Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = ARM::t2LDRi8;
+ else
+ Opc = ARM::t2LDRi12;
+ } else {
+ Opc = ARM::LDRi12;
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::f32:
@@ -977,13 +1022,15 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
break;
}
// Simplify this down to something we can handle.
- ARMSimplifyAddress(Addr, VT);
+ ARMSimplifyAddress(Addr, VT, useAM3);
// Create the base instruction, then add the operands.
- ResultReg = createResultReg(RC);
+ if (allocReg)
+ ResultReg = createResultReg(RC);
+ assert (ResultReg > 255 && "Expected an allocated virtual register.");
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg);
- AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
return true;
}
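ARMEmitLoad decides useAM3 and picks an opcode; AddLoadStoreOperands then packs the offset in addrmode3's immediate format, magnitude in the low 8 bits and the add/sub choice in bit 8, which is what the (0x100 | -Addr.Offset) expression above produces. A round-trip sketch:

    #include <cassert>
    #include <cstdint>

    // addrmode3 immediate: low 8 bits = magnitude, bit 8 = subtract.
    unsigned encodeAM3Offset(int32_t Offset) {
      assert(Offset > -256 && Offset < 256 && "AM3 offsets are +/-imm8");
      return Offset < 0 ? (0x100u | unsigned(-Offset)) : unsigned(Offset);
    }

    int32_t decodeAM3Offset(unsigned Imm) {
      int32_t Mag = Imm & 0xff;
      return (Imm & 0x100) ? -Mag : Mag;
    }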
@@ -1009,6 +1056,7 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
unsigned StrOpc;
+ bool useAM3 = false;
switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
@@ -1022,13 +1070,35 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
SrcReg = Res;
} // Fallthrough here.
case MVT::i8:
- StrOpc = isThumb2 ? ARM::t2STRBi12 : ARM::STRBi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRBi8;
+ else
+ StrOpc = ARM::t2STRBi12;
+ } else {
+ StrOpc = ARM::STRBi12;
+ }
break;
case MVT::i16:
- StrOpc = isThumb2 ? ARM::t2STRHi12 : ARM::STRH;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRHi8;
+ else
+ StrOpc = ARM::t2STRHi12;
+ } else {
+ StrOpc = ARM::STRH;
+ useAM3 = true;
+ }
break;
case MVT::i32:
- StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRi8;
+ else
+ StrOpc = ARM::t2STRi12;
+ } else {
+ StrOpc = ARM::STRi12;
+ }
break;
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
@@ -1040,13 +1110,13 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
break;
}
// Simplify this down to something we can handle.
- ARMSimplifyAddress(Addr, VT);
+ ARMSimplifyAddress(Addr, VT, useAM3);
// Create the base instruction, then add the operands.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg, getKillRegState(true));
- AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
return true;
}
@@ -1231,25 +1301,25 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// Check to see if the 2nd operand is a constant that we can encode directly
// in the compare.
- int EncodedImm = 0;
- bool EncodeImm = false;
+ int Imm = 0;
+ bool UseImm = false;
bool isNegativeImm = false;
if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
SrcVT == MVT::i1) {
const APInt &CIVal = ConstInt->getValue();
- EncodedImm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
- if (EncodedImm < 0) {
+ Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
+ if (Imm < 0) {
isNegativeImm = true;
- EncodedImm = -EncodedImm;
+ Imm = -Imm;
}
- EncodeImm = isThumb2 ? (ARM_AM::getT2SOImmVal(EncodedImm) != -1) :
- (ARM_AM::getSOImmVal(EncodedImm) != -1);
+ UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ (ARM_AM::getSOImmVal(Imm) != -1);
}
} else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
if (ConstFP->isZero() && !ConstFP->isNegative())
- EncodeImm = true;
+ UseImm = true;
}
unsigned CmpOpc;
@@ -1260,11 +1330,11 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// TODO: Verify compares.
case MVT::f32:
isICmp = false;
- CmpOpc = EncodeImm ? ARM::VCMPEZS : ARM::VCMPES;
+ CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
break;
case MVT::f64:
isICmp = false;
- CmpOpc = EncodeImm ? ARM::VCMPEZD : ARM::VCMPED;
+ CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
break;
case MVT::i1:
case MVT::i8:
@@ -1273,12 +1343,12 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// Intentional fall-through.
case MVT::i32:
if (isThumb2) {
- if (!EncodeImm)
+ if (!UseImm)
CmpOpc = ARM::t2CMPrr;
else
CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
} else {
- if (!EncodeImm)
+ if (!UseImm)
CmpOpc = ARM::CMPrr;
else
CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
@@ -1290,7 +1360,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
if (SrcReg1 == 0) return false;
unsigned SrcReg2;
- if (!EncodeImm) {
+ if (!UseImm) {
SrcReg2 = getRegForValue(Src2Value);
if (SrcReg2 == 0) return false;
}
@@ -1301,14 +1371,14 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
if (ResultReg == 0) return false;
SrcReg1 = ResultReg;
- if (!EncodeImm) {
+ if (!UseImm) {
ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
if (ResultReg == 0) return false;
SrcReg2 = ResultReg;
}
}
- if (!EncodeImm) {
+ if (!UseImm) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(CmpOpc))
.addReg(SrcReg1).addReg(SrcReg2));
@@ -1319,7 +1389,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
if (isICmp)
- MIB.addImm(EncodedImm);
+ MIB.addImm(Imm);
AddOptionalDefs(MIB);
}
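Every UseImm decision above funnels through ARM_AM::getSOImmVal or getT2SOImmVal. In ARM mode an operand immediate must be an 8-bit value rotated right by an even amount; a self-contained version of that encodability test:

    #include <cstdint>

    // True if V is expressible as ror(imm8, 2*n), the ARM so_imm form.
    bool isARMSOImm(uint32_t V) {
      for (unsigned Rot = 0; Rot < 32; Rot += 2) {
        // Rotating V left by Rot undoes a rotate-right-by-Rot encoding.
        uint32_t R = Rot ? ((V << Rot) | (V >> (32 - Rot))) : V;
        if ((R & ~0xffu) == 0)
          return true;
      }
      return false;
    }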
@@ -1490,17 +1560,49 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
if (CondReg == 0) return false;
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- unsigned Op2Reg = getRegForValue(I->getOperand(2));
- if (Op2Reg == 0) return false;
- unsigned CmpOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
+ // Check to see if we can use an immediate in the conditional move.
+ int Imm = 0;
+ bool UseImm = false;
+ bool isNegativeImm = false;
+ if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
+ assert (VT == MVT::i32 && "Expecting an i32.");
+ Imm = (int)ConstInt->getValue().getZExtValue();
+ if (Imm < 0) {
+ isNegativeImm = true;
+ Imm = ~Imm;
+ }
+ UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ (ARM_AM::getSOImmVal(Imm) != -1);
+ }
+
+ unsigned Op2Reg;
+ if (!UseImm) {
+ Op2Reg = getRegForValue(I->getOperand(2));
+ if (Op2Reg == 0) return false;
+ }
+
+ unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(CondReg).addImm(1));
+ .addReg(CondReg).addImm(0));
+
+ unsigned MovCCOpc;
+ if (!UseImm) {
+ MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
+ } else {
+ if (!isNegativeImm) {
+ MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
+ } else {
+ MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
+ }
+ }
unsigned ResultReg = createResultReg(RC);
- unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
- .addReg(Op1Reg).addReg(Op2Reg)
- .addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ if (!UseImm)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
+ else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
UpdateValueMap(I, ResultReg);
return true;
}
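The select lowering now compares the condition against 0 (CMP, rather than the old TST against 1) and can fold an encodable constant straight into the conditional move. Constants that only encode in inverted form go through MVNCC, which is why the code above stores ~Imm rather than -Imm. The opcode choice in isolation (sketch):

    enum class SelOp { MOVCCr, MOVCCi, MVNCCi };

    // Register operand: MOVCC with the operands swapped, predicated on NE.
    // Immediate operand: MOVCCi under EQ, or MVNCCi if ~Imm was stored.
    SelOp chooseSelectOpcode(bool UseImm, bool IsInvertedImm) {
      if (!UseImm)
        return SelOp::MOVCCr;
      return IsInvertedImm ? SelOp::MVNCCi : SelOp::MOVCCi;
    }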
@@ -1964,12 +2066,13 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
return true;
}
-bool ARMFastISel::SelectCall(const Instruction *I) {
+bool ARMFastISel::SelectCall(const Instruction *I,
+ const char *IntrMemName = 0) {
const CallInst *CI = cast<CallInst>(I);
const Value *Callee = CI->getCalledValue();
- // Can't handle inline asm or worry about intrinsics yet.
- if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;
+ // Can't handle inline asm.
+ if (isa<InlineAsm>(Callee)) return false;
// Only handle global variable Callees.
const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
@@ -2011,8 +2114,12 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
ArgFlags.reserve(CS.arg_size());
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
- unsigned Arg = getRegForValue(*i);
+ // If we're lowering a memory intrinsic instead of a regular call, skip the
+ // last two arguments, which shouldn't be passed to the underlying function.
+ if (IntrMemName && e-i <= 2)
+ break;
+ unsigned Arg = getRegForValue(*i);
if (Arg == 0)
return false;
ISD::ArgFlagsTy Flags;
@@ -2054,17 +2161,26 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
MachineInstrBuilder MIB;
unsigned CallOpc = ARMSelectCallOp(GV);
// Explicitly adding the predicate here.
- if(isThumb2)
- // Explicitly adding the predicate here.
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc)))
- .addGlobalAddress(GV, 0, 0);
- else
+ if(isThumb2) {
// Explicitly adding the predicate here.
MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc))
- .addGlobalAddress(GV, 0, 0));
-
+ TII.get(CallOpc)));
+ if (!IntrMemName)
+ MIB.addGlobalAddress(GV, 0, 0);
+ else
+ MIB.addExternalSymbol(IntrMemName, 0);
+ } else {
+ if (!IntrMemName)
+ // Explicitly adding the predicate here.
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, 0));
+ else
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addExternalSymbol(IntrMemName, 0));
+ }
+
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
@@ -2079,6 +2195,98 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
return true;
}
+bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
+ return Len <= 16;
+}
+
+bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len) {
+ // Make sure we don't bloat code by inlining very large memcpy's.
+ if (!ARMIsMemCpySmall(Len))
+ return false;
+
+ // We don't care about alignment here since we just emit integer accesses.
+ while (Len) {
+ MVT VT;
+ if (Len >= 4)
+ VT = MVT::i32;
+ else if (Len >= 2)
+ VT = MVT::i16;
+ else {
+ assert(Len == 1);
+ VT = MVT::i8;
+ }
+
+ bool RV;
+ unsigned ResultReg;
+ RV = ARMEmitLoad(VT, ResultReg, Src);
+ assert (RV == true && "Should be able to handle this load.");
+ RV = ARMEmitStore(VT, ResultReg, Dest);
+ assert (RV == true && "Should be able to handle this store.");
+
+ unsigned Size = VT.getSizeInBits()/8;
+ Len -= Size;
+ Dest.Offset += Size;
+ Src.Offset += Size;
+ }
+
+ return true;
+}
+
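ARMTryEmitSmallMemCpy consumes the length greedily as 4-, 2-, then 1-byte accesses, so a 7-byte copy becomes one word, one halfword, and one byte, with both addresses' offsets advancing as it goes. The same walk with plain memory operations standing in for ARMEmitLoad/ARMEmitStore:

    #include <cstdint>
    #include <cstring>

    // Model of the width selection above; Len <= 16 per ARMIsMemCpySmall.
    void smallMemCpy(uint8_t *Dst, const uint8_t *Src, uint64_t Len) {
      while (Len) {
        unsigned Size = Len >= 4 ? 4 : (Len >= 2 ? 2 : 1);
        std::memcpy(Dst, Src, Size);  // one load + one store in the real code
        Dst += Size;
        Src += Size;
        Len -= Size;
      }
    }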
+bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
+ // FIXME: Handle more intrinsics.
+ switch (I.getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ const MemTransferInst &MTI = cast<MemTransferInst>(I);
+ // Don't handle volatile.
+ if (MTI.isVolatile())
+ return false;
+
+ // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
+ // we would emit dead code because we don't currently handle memmoves.
+ bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
+ if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
+ // Small memcpy's are common enough that we want to do them without a call
+ // if possible.
+ uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
+ if (ARMIsMemCpySmall(Len)) {
+ Address Dest, Src;
+ if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
+ !ARMComputeAddress(MTI.getRawSource(), Src))
+ return false;
+ if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
+ return true;
+ }
+ }
+
+ if (!MTI.getLength()->getType()->isIntegerTy(32))
+ return false;
+
+ if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
+ return false;
+
+ const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
+ return SelectCall(&I, IntrMemName);
+ }
+ case Intrinsic::memset: {
+ const MemSetInst &MSI = cast<MemSetInst>(I);
+ // Don't handle volatile.
+ if (MSI.isVolatile())
+ return false;
+
+ if (!MSI.getLength()->getType()->isIntegerTy(32))
+ return false;
+
+ if (MSI.getDestAddressSpace() > 255)
+ return false;
+
+ return SelectCall(&I, "memset");
+ }
+ }
+ return false;
+}
+
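SelectIntrinsicCall reuses SelectCall by handing it the libcall name: an llvm.memcpy/memmove/memset call site carries trailing alignment and is-volatile arguments that the C library functions do not take, hence the e-i <= 2 skip in the argument loop earlier, and addExternalSymbol(IntrMemName) replaces the global-address callee. The argument rule on its own:

    #include <cstddef>

    // For a memory intrinsic lowered to a libcall, the last two call-site
    // arguments (alignment, is-volatile) never reach the callee.
    size_t numLibcallArgs(size_t NumCallSiteArgs, bool IsMemIntrinsic) {
      return IsMemIntrinsic ? NumCallSiteArgs - 2 : NumCallSiteArgs;
    }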
bool ARMFastISel::SelectTrunc(const Instruction *I) {
// The high bits for a type smaller than the register size are assumed to be
// undefined.
@@ -2150,8 +2358,6 @@ unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
bool ARMFastISel::SelectIntExt(const Instruction *I) {
// On ARM, in general, integer casts don't involve legal types; this code
// handles promotable integers.
- // FIXME: We could save an instruction in many cases by special-casing
- // load instructions.
Type *DestTy = I->getType();
Value *Src = I->getOperand(0);
Type *SrcTy = Src->getType();
@@ -2202,6 +2408,8 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
case Instruction::SRem:
return SelectSRem(I);
case Instruction::Call:
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
+ return SelectIntrinsicCall(*II);
return SelectCall(I);
case Instruction::Select:
return SelectSelect(I);
@@ -2217,6 +2425,52 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
return false;
}
+/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+/// vreg is being provided by the specified load instruction. If possible,
+/// try to fold the load as an operand to the instruction, returning true if
+/// successful.
+bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) {
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(LI->getType(), VT))
+ return false;
+
+ // Combine load followed by zero- or sign-extend.
+ // ldrb r1, [r0] ldrb r1, [r0]
+ // uxtb r2, r1 =>
+ // mov r3, r2 mov r3, r1
+ bool isZExt = true;
+ switch(MI->getOpcode()) {
+ default: return false;
+ case ARM::SXTH:
+ case ARM::t2SXTH:
+ isZExt = false;
+ case ARM::UXTH:
+ case ARM::t2UXTH:
+ if (VT != MVT::i16)
+ return false;
+ break;
+ case ARM::SXTB:
+ case ARM::t2SXTB:
+ isZExt = false;
+ case ARM::UXTB:
+ case ARM::t2UXTB:
+ if (VT != MVT::i8)
+ return false;
+ break;
+ }
+ // See if we can handle this address.
+ Address Addr;
+ if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
+
+ unsigned ResultReg = MI->getOperand(0).getReg();
+ if (!ARMEmitLoad(VT, ResultReg, Addr, isZExt, false))
+ return false;
+ MI->eraseFromParent();
+ return true;
+}
+
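TryToFoldLoad re-emits the load with an extending opcode directly into the extend's destination register (the new allocReg=false path through ARMEmitLoad) and then erases the extend. The switch intentionally falls through from each SXT case into its UXT sibling so both share the width check; its shape in isolation:

    enum class ExtOp { SXTB, UXTB, SXTH, UXTH };

    // Which extend instructions can fold with which load width, and whether
    // the replacement load should zero- or sign-extend.
    bool classifyExtend(ExtOp Op, unsigned LoadBits, bool &IsZExt) {
      switch (Op) {
      case ExtOp::SXTB: IsZExt = false; return LoadBits == 8;
      case ExtOp::UXTB: IsZExt = true;  return LoadBits == 8;
      case ExtOp::SXTH: IsZExt = false; return LoadBits == 16;
      case ExtOp::UXTH: IsZExt = true;  return LoadBits == 16;
      }
      return false;
    }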
namespace llvm {
llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
// Completely untested on non-darwin.
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 6f2b3b8..b55ef70 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -127,6 +127,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, VT.getSimpleVT(), Expand);
if (VT.isInteger()) {
setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
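Marking SIGN_EXTEND_INREG as Expand for the NEON vector types makes the legalizer rewrite such nodes as a shift-left/arithmetic-shift-right pair instead of failing (see the 2011-11-07-PromoteVectorLoadStore.ll test named in the merge conflicts). The scalar analogue of that expansion for an i8 payload in an i32:

    #include <cstdint>

    int32_t signExtendInReg8(int32_t X) {
      // shl 24 then ashr 24; the legalizer emits the vector equivalent.
      return int32_t(uint32_t(X) << 24) >> 24;
    }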
diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td
index c5bf607..06ee2c8 100644
--- a/lib/Target/ARM/ARMInstrFormats.td
+++ b/lib/Target/ARM/ARMInstrFormats.td
@@ -174,7 +174,7 @@ def s_cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 CPSR))> {
// ARM special operands for disassembly only.
//
-def SetEndAsmOperand : AsmOperandClass {
+def SetEndAsmOperand : ImmAsmOperand {
let Name = "SetEndImm";
let ParserMethod = "parseSetEndImm";
}
@@ -820,7 +820,7 @@ class AMiscA1I<bits<8> opcod, bits<4> opc7_4, dag oops, dag iops,
}
// PKH instructions
-def PKHLSLAsmOperand : AsmOperandClass {
+def PKHLSLAsmOperand : ImmAsmOperand {
let Name = "PKHLSLImm";
let ParserMethod = "parsePKHLSLImm";
}
@@ -1991,3 +1991,59 @@ class NVDupLane<bits<4> op19_16, bit op6, dag oops, dag iops,
class NEONFPPat<dag pattern, dag result> : Pat<pattern, result> {
list<Predicate> Predicates = [HasNEON,UseNEONForFP];
}
+
+// VFP/NEON Instruction aliases for type suffices.
+class VFPDataTypeInstAlias<string opc, string dt, string asm, dag Result> :
+ InstAlias<!strconcat(opc, dt, asm), Result>;
+multiclass VFPDT8ReqInstAlias<string opc, string asm, dag Result> {
+ def I8 : VFPDataTypeInstAlias<opc, ".i8", asm, Result>;
+ def S8 : VFPDataTypeInstAlias<opc, ".s8", asm, Result>;
+ def U8 : VFPDataTypeInstAlias<opc, ".u8", asm, Result>;
+ def F8 : VFPDataTypeInstAlias<opc, ".p8", asm, Result>;
+}
+// VFPDT8ReqInstAlias plus plain ".8"
+multiclass VFPDT8InstAlias<string opc, string asm, dag Result> {
+ def _8 : VFPDataTypeInstAlias<opc, ".8", asm, Result>;
+ defm : VFPDT8ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDT16ReqInstAlias<string opc, string asm, dag Result> {
+ def I16 : VFPDataTypeInstAlias<opc, ".i16", asm, Result>;
+ def S16 : VFPDataTypeInstAlias<opc, ".s16", asm, Result>;
+ def U16 : VFPDataTypeInstAlias<opc, ".u16", asm, Result>;
+ def F16 : VFPDataTypeInstAlias<opc, ".p16", asm, Result>;
+}
+// VFPDT16ReqInstAlias plus plain ".16"
+multiclass VFPDT16InstAlias<string opc, string asm, dag Result> {
+ def _16 : VFPDataTypeInstAlias<opc, ".16", asm, Result>;
+ defm : VFPDT16ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDT32ReqInstAlias<string opc, string asm, dag Result> {
+ def I32 : VFPDataTypeInstAlias<opc, ".i32", asm, Result>;
+ def S32 : VFPDataTypeInstAlias<opc, ".s32", asm, Result>;
+ def U32 : VFPDataTypeInstAlias<opc, ".u32", asm, Result>;
+ def F32 : VFPDataTypeInstAlias<opc, ".f32", asm, Result>;
+ def F : VFPDataTypeInstAlias<opc, ".f", asm, Result>;
+}
+// VFPDT32ReqInstAlias plus plain ".32"
+multiclass VFPDT32InstAlias<string opc, string asm, dag Result> {
+ def _32 : VFPDataTypeInstAlias<opc, ".32", asm, Result>;
+ defm : VFPDT32ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDT64ReqInstAlias<string opc, string asm, dag Result> {
+ def I64 : VFPDataTypeInstAlias<opc, ".i64", asm, Result>;
+ def S64 : VFPDataTypeInstAlias<opc, ".s64", asm, Result>;
+ def U64 : VFPDataTypeInstAlias<opc, ".u64", asm, Result>;
+ def F64 : VFPDataTypeInstAlias<opc, ".f64", asm, Result>;
+ def D : VFPDataTypeInstAlias<opc, ".d", asm, Result>;
+}
+// VFPDT64ReqInstAlias plus plain ".64"
+multiclass VFPDT64InstAlias<string opc, string asm, dag Result> {
+ def _64 : VFPDataTypeInstAlias<opc, ".64", asm, Result>;
+ defm : VFPDT64ReqInstAlias<opc, asm, Result>;
+}
+multiclass VFPDTAnyInstAlias<string opc, string asm, dag Result> {
+ defm : VFPDT8InstAlias<opc, asm, Result>;
+ defm : VFPDT16InstAlias<opc, asm, Result>;
+ defm : VFPDT32InstAlias<opc, asm, Result>;
+ defm : VFPDT64InstAlias<opc, asm, Result>;
+}
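Each multiclass above is a small cross product: one InstAlias per accepted suffix string, all mapping to the same Result instruction, so vld1.8, vld1.i8, vld1.s8, vld1.u8, and vld1.p8 all parse to one encoding. (Note the defs named F8/F16 actually bind ".p8"/".p16".) The expansion modeled in ordinary code:

    #include <cstdio>

    int main() {
      const char *Suffixes[] = {".8", ".i8", ".s8", ".u8", ".p8"};
      for (const char *S : Suffixes)   // every spelling aliases one VLD1d8
        std::printf("vld1%s {d0}, [r0]\n", S);
    }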
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index af1f490..770703c 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -284,14 +284,6 @@ def lo16AllZero : PatLeaf<(i32 imm), [{
return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
}], hi16>;
-/// imm0_65535 - An immediate is in the range [0,65535].
-def Imm0_65535AsmOperand: AsmOperandClass { let Name = "Imm0_65535"; }
-def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
- return Imm >= 0 && Imm < 65536;
-}]> {
- let ParserMatchClass = Imm0_65535AsmOperand;
-}
-
class BinOpWithFlagFrag<dag res> :
PatFrag<(ops node:$LHS, node:$RHS, node:$FLAG), res>;
class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
@@ -326,6 +318,9 @@ def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fsub node:$lhs, node:$rhs),[{
// Operand Definitions.
//
+// Immediate operands with a shared generic asm render method.
+class ImmAsmOperand : AsmOperandClass { let RenderMethod = "addImmOperands"; }
+
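This two-line class is what lets the parser changes further down delete a dozen near-identical addImmXXXOperands methods: range checking belongs to operand matching, while rendering an already-matched immediate is the same everywhere, so every immediate operand class can share one RenderMethod. A toy model of that split:

    #include <vector>

    struct ImmOperand {
      long Imm;
      // Matching is range-specific...
      bool matchImm0_255() const { return Imm >= 0 && Imm <= 255; }
      bool matchImm0_65535() const { return Imm >= 0 && Imm <= 65535; }
      // ...but rendering never was: just append the value to the MCInst.
      void addImmOperands(std::vector<long> &Inst) const { Inst.push_back(Imm); }
    };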
// Branch target.
// FIXME: rename brtarget to t2_brtarget
def brtarget : Operand<OtherVT> {
@@ -496,7 +491,7 @@ def shift_so_reg_imm : Operand<i32>, // reg reg imm
// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits.
-def SOImmAsmOperand: AsmOperandClass { let Name = "ARMSOImm"; }
+def SOImmAsmOperand: ImmAsmOperand { let Name = "ARMSOImm"; }
def so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getSOImmVal(Imm) != -1;
}]> {
@@ -521,7 +516,7 @@ def arm_i32imm : PatLeaf<(imm), [{
}]>;
/// imm0_7 predicate - Immediate in the range [0,7].
-def Imm0_7AsmOperand: AsmOperandClass { let Name = "Imm0_7"; }
+def Imm0_7AsmOperand: ImmAsmOperand { let Name = "Imm0_7"; }
def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 8;
}]> {
@@ -529,7 +524,7 @@ def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_15 predicate - Immediate in the range [0,15].
-def Imm0_15AsmOperand: AsmOperandClass { let Name = "Imm0_15"; }
+def Imm0_15AsmOperand: ImmAsmOperand { let Name = "Imm0_15"; }
def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 16;
}]> {
@@ -537,7 +532,7 @@ def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
-def Imm0_31AsmOperand: AsmOperandClass { let Name = "Imm0_31"; }
+def Imm0_31AsmOperand: ImmAsmOperand { let Name = "Imm0_31"; }
def imm0_31 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
@@ -545,7 +540,7 @@ def imm0_31 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_32 predicate - True if the 32-bit immediate is in the range [0,32].
-def Imm0_32AsmOperand: AsmOperandClass { let Name = "Imm0_32"; }
+def Imm0_32AsmOperand: ImmAsmOperand { let Name = "Imm0_32"; }
def imm0_32 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
@@ -553,25 +548,33 @@ def imm0_32 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_255 predicate - Immediate in the range [0,255].
-def Imm0_255AsmOperand : AsmOperandClass { let Name = "Imm0_255"; }
+def Imm0_255AsmOperand : ImmAsmOperand { let Name = "Imm0_255"; }
def imm0_255 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 256; }]> {
let ParserMatchClass = Imm0_255AsmOperand;
}
+/// imm0_65535 - An immediate is in the range [0,65535].
+def Imm0_65535AsmOperand: ImmAsmOperand { let Name = "Imm0_65535"; }
+def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm >= 0 && Imm < 65536;
+}]> {
+ let ParserMatchClass = Imm0_65535AsmOperand;
+}
+
// imm0_65535_expr - For movt/movw - 16-bit immediate that can also reference
// a relocatable expression.
//
// FIXME: This really needs a Thumb version separate from the ARM version.
// While the range is the same, and can thus use the same match class,
// the encoding is different so it should have a different encoder method.
-def Imm0_65535ExprAsmOperand: AsmOperandClass { let Name = "Imm0_65535Expr"; }
+def Imm0_65535ExprAsmOperand: ImmAsmOperand { let Name = "Imm0_65535Expr"; }
def imm0_65535_expr : Operand<i32> {
let EncoderMethod = "getHiLo16ImmOpValue";
let ParserMatchClass = Imm0_65535ExprAsmOperand;
}
/// imm24b - True if the 32-bit immediate is encodable in 24 bits.
-def Imm24bitAsmOperand: AsmOperandClass { let Name = "Imm24bit"; }
+def Imm24bitAsmOperand: ImmAsmOperand { let Name = "Imm24bit"; }
def imm24b : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm <= 0xffffff;
}]> {
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index d3c4486b..49cc254 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -1238,9 +1238,8 @@ class VST1D<bits<4> op7_4, string Dt>
let DecoderMethod = "DecodeVSTInstruction";
}
class VST1Q<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b1010,op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2), IIC_VST1x2,
- "vst1", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
+ : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListTwoD:$Vd),
+ IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVSTInstruction";
@@ -5180,3 +5179,170 @@ def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
+
+
+//===----------------------------------------------------------------------===//
+// Assembler aliases
+//
+
+// VAND/VEOR/VORR accept but do not require a type suffix.
+defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
+ (VANDd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
+ (VANDq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
+ (VEORd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
+ (VEORq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
+ (VORRd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
+ (VORRq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+
+// VLD1 requires a size suffix, but also accepts type specific variants.
+// Load one D register.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d8 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d16 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d32 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d64 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d8wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d16wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d32wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d64wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+
+// Load two D registers.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q8 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q16 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q32 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1q64 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q8wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q16wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q32wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1q64wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
+
+// Load three D registers.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d8T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d16T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d32T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d64T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d8Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d16Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d32Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d64Twb_fixed VecListThreeD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+
+
+// Load four D registers.
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d8Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d16Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d32Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
+ (VLD1d64Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d8Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d16Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d32Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
+ (VLD1d64Qwb_fixed VecListFourD:$Vd, zero_reg,
+ addrmode6:$Rn, pred:$p)>;
+
+// VST1 requires a size suffix, but also accepts type specific variants.
+// Store one D register.
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d8 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d16 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d32 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1d64 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d8wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d16wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d32wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1d64wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
+
+// Store two D registers.
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q8 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q16 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q32 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+ (VST1q64 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+// with writeback, fixed stride
+defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q8wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q16wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q32wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
+ (VST1q64wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
+
+// FIXME: The three and four register VST1 instructions haven't been moved
+// to the VecList* encoding yet, so we can't do assembly parsing support
+// for them. Uncomment these when that happens.
+// Load three D registers.
+//defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d8T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+//defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d16T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+//defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d32T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+//defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d64T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
+
+// Load four D registers.
+//defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d8Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
+//defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d16Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
+//defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d32Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
+//defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
+// (VST1d64Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index 0a28226..03077c0 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -65,7 +65,7 @@ def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
// t2_so_imm - Match a 32-bit immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit
// immediate splatted into multiple bytes of the word.
-def t2_so_imm_asmoperand : AsmOperandClass { let Name = "T2SOImm"; }
+def t2_so_imm_asmoperand : ImmAsmOperand { let Name = "T2SOImm"; }
def t2_so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getT2SOImmVal(Imm) != -1;
}]> {
diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td
index e746cf2..488c508 100644
--- a/lib/Target/ARM/ARMInstrVFP.td
+++ b/lib/Target/ARM/ARMInstrVFP.td
@@ -69,11 +69,11 @@ def vfp_f64imm : Operand<f64>,
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
- IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
+ IIC_fpLoad64, "vldr", "\t$Dd, $addr",
[(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;
def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
- IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
+ IIC_fpLoad32, "vldr", "\t$Sd, $addr",
[(set SPR:$Sd, (load addrmode5:$addr))]> {
// Some single precision VFP instructions may be executed on both NEON and VFP
// pipelines.
@@ -83,11 +83,11 @@ def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
- IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
+ IIC_fpStore64, "vstr", "\t$Dd, $addr",
[(store (f64 DPR:$Dd), addrmode5:$addr)]>;
def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
- IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
+ IIC_fpStore32, "vstr", "\t$Sd, $addr",
[(store SPR:$Sd, addrmode5:$addr)]> {
// Some single precision VFP instructions may be executed on both NEON and VFP
// pipelines.
@@ -1163,3 +1163,12 @@ def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
+// VLDR/VSTR accept an optional type suffix.
+defm : VFPDT32InstAlias<"vldr${p}", "$Sd, $addr",
+ (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
+defm : VFPDT32InstAlias<"vstr${p}", "$Sd, $addr",
+ (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
+defm : VFPDT64InstAlias<"vldr${p}", "$Dd, $addr",
+ (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
+defm : VFPDT64InstAlias<"vstr${p}", "$Dd, $addr",
+ (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 4c3be89..c8728f4 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -32,6 +32,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Debug.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -1504,6 +1505,23 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
return AddedRegPressure.size() <= MemRegs.size() * 2;
}
+
+/// Copy Op0 and Op1 operands into a new array assigned to MI.
+static void concatenateMemOperands(MachineInstr *MI, MachineInstr *Op0,
+ MachineInstr *Op1) {
+ assert(MI->memoperands_empty() && "expected a new machineinstr");
+ size_t numMemRefs = (Op0->memoperands_end() - Op0->memoperands_begin())
+ + (Op1->memoperands_end() - Op1->memoperands_begin());
+
+ MachineFunction *MF = MI->getParent()->getParent();
+ MachineSDNode::mmo_iterator MemBegin = MF->allocateMemRefsArray(numMemRefs);
+ MachineSDNode::mmo_iterator MemEnd =
+ std::copy(Op0->memoperands_begin(), Op0->memoperands_end(), MemBegin);
+ MemEnd =
+ std::copy(Op1->memoperands_begin(), Op1->memoperands_end(), MemEnd);
+ MI->setMemRefs(MemBegin, MemEnd);
+}
+
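When RescheduleOps fuses two loads or stores into one LDRD/STRD, the new instruction starts with no memory operands; concatenateMemOperands hands it both originals' memoperands so later passes keep accurate alias and volatility information. The same concatenation with a vector standing in for the MachineFunction-allocated array:

    #include <vector>

    template <typename MMO>
    std::vector<MMO> concatMemRefs(const std::vector<MMO> &A,
                                   const std::vector<MMO> &B) {
      std::vector<MMO> Out;
      Out.reserve(A.size() + B.size());
      Out.insert(Out.end(), A.begin(), A.end());  // Op0's memoperands
      Out.insert(Out.end(), B.begin(), B.end());  // then Op1's
      return Out;
    }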
bool
ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
DebugLoc &dl,
@@ -1698,6 +1716,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
+ concatenateMemOperands(MIB, Op0, Op1);
+ DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumLDRDFormed;
} else {
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
@@ -1710,6 +1730,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
+ concatenateMemOperands(MIB, Op0, Op1);
+ DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumSTRDFormed;
}
MBB->erase(Op0);
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index e782975..1d66d12 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -714,7 +714,7 @@ public:
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
bool isPostIdxReg() const {
- return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
+ return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false) const {
if (!isMemory())
@@ -1101,7 +1101,8 @@ public:
void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
- assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
+ assert(isRegShiftedReg() &&
+ "addRegShiftedRegOperands() on non RegShiftedReg!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
Inst.addOperand(MCOperand::CreateImm(
@@ -1110,7 +1111,8 @@ public:
void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
- assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!");
+ assert(isRegShiftedImm() &&
+ "addRegShiftedImmOperands() on non RegShiftedImm!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
Inst.addOperand(MCOperand::CreateImm(
ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
@@ -1189,26 +1191,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
}
- void addImm0_255Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_7Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_15Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_31Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addImm1_16Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate-1, and we store in the instruction
@@ -1225,26 +1207,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
}
- void addImm0_32Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm24bitOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate, except for 32, which encodes as
@@ -1254,11 +1216,6 @@ public:
Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
}
- void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// An ASR value of 32 encodes as 0, so that's how we want to add it to
@@ -1268,16 +1225,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
}
- void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The operand is actually a t2_so_imm, but we have its bitwise
@@ -1294,11 +1241,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
}
- void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
@@ -1486,8 +1428,9 @@ public:
void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
- unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
- Memory.ShiftImm, Memory.ShiftType);
+ unsigned Val =
+ ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
+ Memory.ShiftImm, Memory.ShiftType);
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
Inst.addOperand(MCOperand::CreateImm(Val));
@@ -2410,6 +2353,29 @@ static unsigned getNextRegister(unsigned Reg) {
}
}
+// Return the low-subreg of a given Q register.
+static unsigned getDRegFromQReg(unsigned QReg) {
+ switch (QReg) {
+ default: llvm_unreachable("expected a Q register!");
+ case ARM::Q0: return ARM::D0;
+ case ARM::Q1: return ARM::D2;
+ case ARM::Q2: return ARM::D4;
+ case ARM::Q3: return ARM::D6;
+ case ARM::Q4: return ARM::D8;
+ case ARM::Q5: return ARM::D10;
+ case ARM::Q6: return ARM::D12;
+ case ARM::Q7: return ARM::D14;
+ case ARM::Q8: return ARM::D16;
+ case ARM::Q9: return ARM::D18;
+ case ARM::Q10: return ARM::D20;
+ case ARM::Q11: return ARM::D22;
+ case ARM::Q12: return ARM::D24;
+ case ARM::Q13: return ARM::D26;
+ case ARM::Q14: return ARM::D28;
+ case ARM::Q15: return ARM::D30;
+ }
+}
+
/// Parse a register list.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -2425,6 +2391,16 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (Reg == -1)
return Error(RegLoc, "register expected");
+ // The reglist instructions have at most 16 registers, so reserve
+ // space for that many.
+ SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
+
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ Reg = getDRegFromQReg(Reg);
+ Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ ++Reg;
+ }
const MCRegisterClass *RC;
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
@@ -2435,10 +2411,7 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
else
return Error(RegLoc, "invalid register in register list");
- // The reglist instructions have at most 16 registers, so reserve
- // space for that many.
- SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
- // Store the first register.
+ // Store the register.
Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
// This starts immediately after the first register token in the list,
@@ -2452,6 +2425,9 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
int EndReg = tryParseRegister();
if (EndReg == -1)
return Error(EndLoc, "register expected");
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
+ EndReg = getDRegFromQReg(EndReg) + 1;
// If the register is the same as the start reg, there's nothing
// more to do.
if (Reg == EndReg)
@@ -2476,6 +2452,12 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Reg = tryParseRegister();
if (Reg == -1)
return Error(RegLoc, "register expected");
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ bool isQReg = false;
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ Reg = getDRegFromQReg(Reg);
+ isQReg = true;
+ }
// The register must be in the same register class as the first.
if (!RC->contains(Reg))
return Error(RegLoc, "invalid register in register list");
@@ -2489,6 +2471,8 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Reg != OldReg + 1)
return Error(RegLoc, "non-contiguous register range");
Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ if (isQReg)
+ Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
}
SMLoc E = Parser.getTok().getLoc();
@@ -2500,29 +2484,6 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return false;
}
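The net effect of the changes above: a Q register anywhere in a register list is expanded into its two D sub-registers, so vpush {q4, q5} assembles identically to vpush {d8, d9, d10, d11}. A standalone model of the expansion using plain indices (illustrative only, no LLVM types):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<unsigned> Registers;   // D-register indices in list order
      unsigned QIdx = 4;                 // the user wrote "q4" in the list
      Registers.push_back(2 * QIdx);     // low sub-register: d8
      Registers.push_back(2 * QIdx + 1); // high sub-register: d9
      assert(Registers.size() == 2);     // vpush {q4} == vpush {d8, d9}
      return 0;
    }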
-// Return the low-subreg of a given Q register.
-static unsigned getDRegFromQReg(unsigned QReg) {
- switch (QReg) {
- default: llvm_unreachable("expected a Q register!");
- case ARM::Q0: return ARM::D0;
- case ARM::Q1: return ARM::D2;
- case ARM::Q2: return ARM::D4;
- case ARM::Q3: return ARM::D6;
- case ARM::Q4: return ARM::D8;
- case ARM::Q5: return ARM::D10;
- case ARM::Q6: return ARM::D12;
- case ARM::Q7: return ARM::D14;
- case ARM::Q8: return ARM::D16;
- case ARM::Q9: return ARM::D18;
- case ARM::Q10: return ARM::D20;
- case ARM::Q11: return ARM::D22;
- case ARM::Q12: return ARM::D24;
- case ARM::Q13: return ARM::D26;
- case ARM::Q14: return ARM::D28;
- case ARM::Q15: return ARM::D30;
- }
-}
-
// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -4161,6 +4122,22 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
return false;
}
+static bool isDataTypeToken(StringRef Tok) {
+ return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
+ Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
+ Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
+ Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
+ Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
+ Tok == ".f" || Tok == ".d";
+}
+
+// FIXME: This bit should probably be handled via an explicit match class
+// in the .td files that matches the suffix instead of having it be
+// a literal string token the way it is now.
+static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
+ return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
+}
+
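Taken together, the two predicates let the parser drop a datatype suffix where it carries no information: for example, vldmia.f64 r0!, {d0-d3} is parsed exactly as vldmia r0!, {d0-d3}. A standalone approximation using std::string so the example compiles without LLVM headers:

    #include <cassert>
    #include <string>

    // Approximates doesIgnoreDataTypeSuffix: only vldm*/vstm* mnemonics
    // discard their datatype suffix in this snapshot.
    static bool ignoresSuffix(const std::string &Mnemonic) {
      return Mnemonic.rfind("vldm", 0) == 0 || Mnemonic.rfind("vstm", 0) == 0;
    }

    int main() {
      assert(ignoresSuffix("vldmia") && !ignoresSuffix("vadd"));
      return 0;
    }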
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -4265,9 +4242,12 @@ bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
Next = Name.find('.', Start + 1);
StringRef ExtraToken = Name.slice(Start, Next);
- // For now, we're only parsing Thumb1 (for the most part), so
- // just ignore ".n" qualifiers. We'll use them to restrict
- // matching when we do Thumb2.
+ // Some NEON instructions have an optional datatype suffix that is
+ // completely ignored. Check for that.
+ if (isDataTypeToken(ExtraToken) &&
+ doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
+ continue;
+
if (ExtraToken != ".n") {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 6927d2d..0b9b5d0 100644
--- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -10,13 +10,13 @@
#define DEBUG_TYPE "arm-disassembler"
#include "ARM.h"
-#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/MC/EDInstInfo.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
@@ -2267,10 +2267,6 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
// Second input register
switch (Inst.getOpcode()) {
- case ARM::VST1q8:
- case ARM::VST1q16:
- case ARM::VST1q32:
- case ARM::VST1q64:
case ARM::VST1d8T:
case ARM::VST1d16T:
case ARM::VST1d32T:
diff --git a/lib/Target/ARM/Disassembler/LLVMBuild.txt b/lib/Target/ARM/Disassembler/LLVMBuild.txt
index dff57b4..baa9bc3 100644
--- a/lib/Target/ARM/Disassembler/LLVMBuild.txt
+++ b/lib/Target/ARM/Disassembler/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = ARMDisassembler
parent = ARM
-required_libraries = ARMDesc ARMInfo MC Support
+required_libraries = ARMCodeGen ARMDesc ARMInfo MC Support
add_to_library_groups = ARM
diff --git a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
index adc37cb..f529314 100644
--- a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt
@@ -12,8 +12,8 @@ add_dependencies(LLVMARMDesc ARMCommonTableGen)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/..)
add_llvm_library_dependencies(LLVMARMDesc
- LLVMARMInfo
LLVMARMAsmPrinter
+ LLVMARMInfo
LLVMMC
LLVMSupport
)
diff --git a/lib/Target/ARM/TargetInfo/LLVMBuild.txt b/lib/Target/ARM/TargetInfo/LLVMBuild.txt
index 7d7504f..046c1fc 100644
--- a/lib/Target/ARM/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/ARM/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = ARMInfo
parent = ARM
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = ARM