Diffstat (limited to 'lib/Target')
 lib/Target/ARM/ARMBaseInstrInfo.cpp           |  3
 lib/Target/ARM/ARMBaseRegisterInfo.cpp        |  5
 lib/Target/ARM/ARMFrameLowering.cpp           |  2
 lib/Target/ARM/ARMISelLowering.cpp            |  5
 lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp |  2
 lib/Target/NVPTX/NVPTXAsmPrinter.cpp          |  3
 lib/Target/NVPTX/NVPTXISelLowering.cpp        |  2
 lib/Target/PowerPC/PPCFrameLowering.cpp       |  5
 lib/Target/PowerPC/PPCISelLowering.cpp        |  3
 lib/Target/PowerPC/PPCRegisterInfo.cpp        |  2
 lib/Target/X86/X86FrameLowering.cpp           |  2
 lib/Target/X86/X86ISelDAGToDAG.cpp            |  3
 lib/Target/X86/X86ISelLowering.cpp            | 19
 lib/Target/X86/X86InstrInfo.cpp               |  6
 lib/Target/X86/X86RegisterInfo.cpp            |  5
 lib/Target/XCore/XCoreFrameLowering.cpp       |  2
 16 files changed, 42 insertions(+), 27 deletions(-)
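
Every hunk below applies the same mechanical substitution: the per-attribute predicate methods (hasOptimizeForSizeAttr(), hasNakedAttr(), hasNoImplicitFloatAttr(), and friends) are replaced by a single hasAttribute() query that takes an Attributes enumerator. A minimal sketch of the new query form is shown here for orientation; the header paths and helper name are assumptions for illustration, not part of this diff.

    // Illustrative sketch only (LLVM 3.2-era, pre llvm/IR header move assumed).
    #include "llvm/Attributes.h"
    #include "llvm/Function.h"
    #include "llvm/CodeGen/MachineFunction.h"

    using namespace llvm;

    // Old form, removed throughout this patch:
    //   MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr()
    // New form, added throughout this patch:
    static bool optimizesForSize(const MachineFunction &MF) {
      return MF.getFunction()->getFnAttributes()
               .hasAttribute(Attributes::OptimizeForSize);
    }

The same pattern covers parameter attributes via getParamAttributes(Idx).hasAttribute(...), as in the Hexagon, NVPTX, X86 trampoline, and XCore hunks.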
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 8c744d1..22e7674 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -3302,7 +3302,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// instructions).
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI->getParent()->getParent();
- if (MF->getFunction()->getFnAttributes().hasOptimizeForSizeAttr())
+ if (MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize))
--Latency;
}
return Latency;
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index f0c2d7e..ed17eb4 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -561,8 +561,9 @@ needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
- bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
- F->getFnAttributes().hasStackAlignmentAttr());
+ bool requiresRealignment =
+ ((MFI->getMaxAlignment() > StackAlign) ||
+ F->getFnAttributes().hasAttribute(Attributes::StackAlignment));
return requiresRealignment && canRealignStack(MF);
}
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index e77cf87..3e70d57 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1151,7 +1151,7 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
return;
// Naked functions don't spill callee-saved registers.
- if (MF.getFunction()->getFnAttributes().hasNakedAttr())
+ if (MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked))
return;
// We are planning to use NEON instructions vst1 / vld1.
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 8363836..d4d29f5 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -6326,7 +6326,8 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
UnitSize = 2;
} else {
// Check whether we can use NEON instructions.
- if (!MF->getFunction()->getFnAttributes().hasNoImplicitFloatAttr() &&
+ if (!MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if ((Align % 16 == 0) && SizeVal >= 16) {
ldrOpc = ARM::VLD1q32wb_fixed;
@@ -9060,7 +9061,7 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
// See if we can use NEON instructions for this...
if (IsZeroVal &&
- !F->getFnAttributes().hasNoImplicitFloatAttr() &&
+ !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) {
return MVT::v4i32;
diff --git a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
index 4937c56..4d93dd1 100644
--- a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
+++ b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
@@ -50,7 +50,7 @@ bool HexagonRemoveExtendArgs::runOnFunction(Function &F) {
unsigned Idx = 1;
for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end(); AI != AE;
++AI, ++Idx) {
- if (F.getParamAttributes(Idx).hasSExtAttr()) {
+ if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt)) {
Argument* Arg = AI;
if (!isa<PointerType>(Arg->getType())) {
for (Instruction::use_iterator UI = Arg->use_begin();
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index d3342f6..d3dfb35 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1456,7 +1456,8 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F,
continue;
}
- if (PAL.getParamAttributes(paramIndex+1).hasByValAttr() == false) {
+ if (PAL.getParamAttributes(paramIndex+1).
+ hasAttribute(Attributes::ByVal) == false) {
// Just a scalar
const PointerType *PTy = dyn_cast<PointerType>(Ty);
if (isKernelFunc) {
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 76ad673..be771e3 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -965,7 +965,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
// to newly created nodes. The SDNOdes for params have to
// appear in the same order as their order of appearance
// in the original function. "idx+1" holds that order.
- if (PAL.getParamAttributes(i+1).hasByValAttr() == false) {
+ if (PAL.getParamAttributes(i+1).hasAttribute(Attributes::ByVal) == false) {
// A plain scalar.
if (isABI || isKernel) {
// If ABI, load from the param symbol
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index d8851a0..b1c02e5 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -193,7 +193,8 @@ void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const {
// to adjust the stack pointer (we fit in the Red Zone). For 64-bit
// SVR4, we also require a stack frame if we need to spill the CR,
// since this spill area is addressed relative to the stack pointer.
- bool DisableRedZone = MF.getFunction()->getFnAttributes().hasNoRedZoneAttr();
+ bool DisableRedZone = MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::NoRedZone);
// FIXME SVR4 The 32-bit SVR4 ABI has no red zone. However, it can
// still generate stackless code if all local vars are reg-allocated.
// Try: (FrameSize <= 224
@@ -255,7 +256,7 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
// Naked functions have no stack frame pushed, so we don't have a frame
// pointer.
- if (MF.getFunction()->getFnAttributes().hasNakedAttr())
+ if (MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked))
return false;
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index bdda8ea..64bbcdf 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6286,7 +6286,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
MFI->hasVarSizedObjects()) &&
MFI->getStackSize() &&
- !MF.getFunction()->getFnAttributes().hasNakedAttr();
+ !MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::Naked);
unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
(is31 ? PPC::R31 : PPC::R1);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 1665d73..459c358 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -596,7 +596,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// to Offset to get the correct offset.
// Naked functions have stack size 0, although getStackSize may not reflect that
// because we didn't call all the pieces that compute it for naked functions.
- if (!MF.getFunction()->getFnAttributes().hasNakedAttr())
+ if (!MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked))
Offset += MFI->getStackSize();
// If we can, encode the offset directly into the instruction. If this is a
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index c923f3c..8acef9d 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -674,7 +674,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// function, and use up to 128 bytes of stack space, don't have a frame
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone).
- if (Is64Bit && !Fn->getFnAttributes().hasNoRedZoneAttr() &&
+ if (Is64Bit && !Fn->getFnAttributes().hasAttribute(Attributes::NoRedZone) &&
!RegInfo->needsStackRealignment(MF) &&
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index b79dd99..5a19f8a 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -434,7 +434,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching.
- OptForSize = MF->getFunction()->getFnAttributes().hasOptimizeForSizeAttr();
+ OptForSize = MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize);
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 58c4e4e..5e0b9c0 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1342,7 +1342,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
// cases like PR2962. This should be removed when PR2962 is fixed.
const Function *F = MF.getFunction();
if (IsZeroVal &&
- !F->getFnAttributes().hasNoImplicitFloatAttr()) {
+ !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat)) {
if (Size >= 16 &&
(Subtarget->isUnalignedMemAccessFast() ||
((DstAlign == 0 || DstAlign >= 16) &&
@@ -2010,7 +2010,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
TotalNumIntRegs);
- bool NoImplicitFloatOps = Fn->getFnAttributes().hasNoImplicitFloatAttr();
+ bool NoImplicitFloatOps = Fn->getFnAttributes().
+ hasAttribute(Attributes::NoImplicitFloat);
assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
@@ -2486,7 +2487,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
OpFlags = X86II::MO_DARWIN_STUB;
} else if (Subtarget->isPICStyleRIPRel() &&
isa<Function>(GV) &&
- cast<Function>(GV)->getFnAttributes().hasNonLazyBindAttr()) {
+ cast<Function>(GV)->getFnAttributes().
+ hasAttribute(Attributes::NonLazyBind)) {
// If the function is marked as non-lazy, generate an indirect call
// which loads from the GOT directly. This avoids runtime overhead
// at the cost of eager binding (and one extra byte of encoding).
@@ -6629,7 +6631,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
bool HasAVX = Subtarget->hasAVX();
bool HasAVX2 = Subtarget->hasAVX2();
MachineFunction &MF = DAG.getMachineFunction();
- bool OptForSize = MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr();
+ bool OptForSize = MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize);
assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
@@ -9669,7 +9672,8 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!getTargetMachine().Options.UseSoftFloat &&
!(DAG.getMachineFunction()
- .getFunction()->getFnAttributes().hasNoImplicitFloatAttr()) &&
+ .getFunction()->getFnAttributes()
+ .hasAttribute(Attributes::NoImplicitFloat)) &&
Subtarget->hasSSE1());
}
@@ -10495,7 +10499,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
for (FunctionType::param_iterator I = FTy->param_begin(),
E = FTy->param_end(); I != E; ++I, ++Idx)
- if (Attrs.getParamAttributes(Idx).hasInRegAttr())
+ if (Attrs.getParamAttributes(Idx).hasAttribute(Attributes::InReg))
// FIXME: should only count parameters that are lowered to integers.
InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
@@ -15439,7 +15443,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->getFnAttributes().hasNoImplicitFloatAttr();
+ bool NoImplicitFloatOps = F->getFnAttributes().
+ hasAttribute(Attributes::NoImplicitFloat);
bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
&& Subtarget->hasSSE2();
if ((VT.isVector() ||
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index bf9a1bf..c65d016 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -3828,7 +3828,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr() &&
+ if (!MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return 0;
@@ -3869,7 +3870,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr() &&
+ if (!MF.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return 0;
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 7f8c250..c840ea2 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -382,8 +382,9 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
- bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
- F->getFnAttributes().hasStackAlignmentAttr());
+ bool requiresRealignment =
+ ((MFI->getMaxAlignment() > StackAlign) ||
+ F->getFnAttributes().hasAttribute(Attributes::StackAlignment));
// If we've requested that we force align the stack do so now.
if (ForceStackAlign)
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 6d98b44..e18d973 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -101,7 +101,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
const AttrListPtr &PAL = MF.getFunction()->getAttributes();
for (unsigned I = 0, E = PAL.getNumAttrs(); I != E; ++I)
- if (PAL.getAttributesAtIndex(I).hasNestAttr()) {
+ if (PAL.getAttributesAtIndex(I).hasAttribute(Attributes::Nest)) {
loadFromStack(MBB, MBBI, XCore::R11, 0, dl, TII);
break;
}