author    Pirama Arumuga Nainar <pirama@google.com>    2015-04-08 08:55:49 -0700
committer Pirama Arumuga Nainar <pirama@google.com>    2015-04-09 15:04:38 -0700
commit    4c5e43da7792f75567b693105cc53e3f1992ad98 (patch)
tree      1b2c9792582e12f5af0b1512e3094425f0dc0df9 /include/llvm/Target
parent    c75239e6119d0f9a74c57099d91cbc9bde56bf33 (diff)
Update aosp/master llvm for rebase to r233350
Change-Id: I07d935f8793ee8ec6b7da003f6483046594bca49
Diffstat (limited to 'include/llvm/Target')
-rw-r--r--include/llvm/Target/Target.td3
-rw-r--r--include/llvm/Target/TargetInstrInfo.h34
-rw-r--r--include/llvm/Target/TargetLowering.h101
-rw-r--r--include/llvm/Target/TargetLoweringObjectFile.h23
-rw-r--r--include/llvm/Target/TargetMachine.h40
-rw-r--r--include/llvm/Target/TargetRegisterInfo.h35
-rw-r--r--include/llvm/Target/TargetSelectionDAG.td24
-rw-r--r--include/llvm/Target/TargetSubtargetInfo.h16
8 files changed, 195 insertions, 81 deletions
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index 3e65a5d..6c970d0 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -397,8 +397,7 @@ class Instruction {
// captured by any operands of the instruction or other flags.
//
bit hasSideEffects = ?;
- bit hasTwoExplicitDefs = 0; // Does this instruction have 2 explicit
- // destinations?
+
// Is this instruction a "real" instruction (with a distinct machine
// encoding), or is it a pseudo instruction used for codegen modeling
// purposes.
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 247f9d8..110976a 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -207,7 +207,7 @@ public:
/// this, particularly to support spilled vector registers.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
unsigned &Size, unsigned &Offset,
- const TargetMachine *TM) const;
+ const MachineFunction &MF) const;
/// isAsCheapAsAMove - Return true if the instruction is as cheap as a move
/// instruction.
@@ -672,16 +672,15 @@ public:
/// operand folded, otherwise NULL is returned.
/// The new instruction is inserted before MI, and the client is responsible
/// for removing the old instruction.
- MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
+ MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops, int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
- MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const;
+ MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops,
+ MachineInstr *LoadMI) const;
/// hasPattern - return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
@@ -723,20 +722,20 @@ protected:
/// foldMemoryOperandImpl - Target-dependent implementation for
/// foldMemoryOperand. Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction.
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
+ virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ ArrayRef<unsigned> Ops,
+ int FrameIndex) const {
return nullptr;
}
/// foldMemoryOperandImpl - Target-dependent implementation for
/// foldMemoryOperand. Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction.
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
+ virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ ArrayRef<unsigned> Ops,
+ MachineInstr *LoadMI) const {
return nullptr;
}
@@ -786,9 +785,8 @@ protected:
public:
/// canFoldMemoryOperand - Returns true for the specified load / store if
/// folding is possible.
- virtual
- bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
+ virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+ ArrayRef<unsigned> Ops) const;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
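
For illustration (not part of the patch): the move from SmallVectorImpl<unsigned>& to ArrayRef<unsigned> lets call sites pass the operand indices as a braced list. A minimal hedged sketch, assuming TII, a MachineBasicBlock::iterator MI, and a frame index FI are already in scope:

    // ArrayRef accepts a braced initializer list directly; the old
    // SmallVectorImpl& signature forced callers to build a SmallVector first.
    if (MachineInstr *FoldMI = TII->foldMemoryOperand(MI, {0}, FI)) {
      // The new instruction is inserted before MI; the caller is still
      // responsible for removing the old one.
      MI->eraseFromParent();
    }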
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 4118917..29ecedc 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -123,6 +123,18 @@ public:
// mask (ex: x86 blends).
};
+ /// Enum that specifies what an AtomicRMWInst is expanded to, if at all. Exists
+ /// because different targets have different levels of support for these atomic
+ /// RMW instructions and different options for what they should expand to.
+ enum class AtomicRMWExpansionKind {
+ None, // Don't expand the instruction.
+ LLSC, // Expand the instruction into loadlinked/storeconditional; used
+ // by ARM/AArch64. Implies `hasLoadLinkedStoreConditional`
+ // returns true.
+ CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+ };
+
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
case UndefinedBooleanContent:
@@ -148,7 +160,7 @@ protected:
public:
const TargetMachine &getTargetMachine() const { return TM; }
- const DataLayout *getDataLayout() const { return DL; }
+ const DataLayout *getDataLayout() const { return TM.getDataLayout(); }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
@@ -964,6 +976,15 @@ public:
return false;
}
+ /// Return true if the pointer arguments to CI should be aligned by aligning
+ /// the object whose address is being passed. If so, then MinSize is set to the
+ /// minimum size the object must be to be aligned and PrefAlign is set to the
+ /// preferred alignment.
+ virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
+ unsigned & /*PrefAlign*/) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
/// \name Helpers for TargetTransformInfo implementations
/// @{
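
For illustration, a hedged sketch of how a backend might implement the new shouldAlignPointerArgs hook; MyTargetLowering and the size/alignment thresholds are hypothetical (MemCpyInst comes from llvm/IR/IntrinsicInst.h):

    bool MyTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                                  unsigned &PrefAlign) const {
      // Request 4-byte alignment of memcpy arguments whenever the copied
      // object is at least 8 bytes, so lowering can use word-sized accesses.
      if (isa<MemCpyInst>(CI)) {
        MinSize = 8;
        PrefAlign = 4;
        return true;
      }
      return false;
    }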
@@ -1059,15 +1080,21 @@ public:
return false;
}
+ /// Returns true if arguments should be sign-extended in lib calls.
+ virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+ return IsSigned;
+ }
+
/// Returns true if the given (atomic) load should be expanded by the
/// IR-level AtomicExpand pass into a load-linked instruction
/// (through emitLoadLinked()).
virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }
- /// Returns true if the given AtomicRMW should be expanded by the
- /// IR-level AtomicExpand pass into a loop using LoadLinked/StoreConditional.
- virtual bool shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
- return false;
+ /// Returns how the IR-level AtomicExpand pass should expand the given
+ /// AtomicRMW, if at all. Default is to never expand.
+ virtual AtomicRMWExpansionKind
+ shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
+ return AtomicRMWExpansionKind::None;
}
/// On some platforms, an AtomicRMW that never actually modifies the value
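
For illustration, a hedged sketch of a target override returning the new enum; MyTargetLowering and the 32-bit cutoff are hypothetical:

    TargetLowering::AtomicRMWExpansionKind
    MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
      // Use an LL/SC loop for natively supported widths (LLSC implies that
      // hasLoadLinkedStoreConditional() returns true for this target) and
      // fall back to a cmpxchg loop otherwise.
      unsigned Size = RMWI->getType()->getPrimitiveSizeInBits();
      return Size <= 32 ? AtomicRMWExpansionKind::LLSC
                        : AtomicRMWExpansionKind::CmpXChg;
    }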
@@ -1084,6 +1111,25 @@ public:
virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
return nullptr;
}
+
+ /// Returns true if we should normalize
+ /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
+ /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
+ /// that it saves us from materializing N0 and N1 in an integer register.
+ /// Targets that are able to perform and/or on flags should return false here.
+ virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
+ EVT VT) const {
+ // If a target has multiple condition registers, then it likely has logical
+ // operations on those registers.
+ if (hasMultipleConditionRegisters())
+ return false;
+ // Only do the transform if the value won't be split into multiple
+ // registers.
+ LegalizeTypeAction Action = getTypeAction(Context, VT);
+ return Action != TypeExpandInteger && Action != TypeExpandFloat &&
+ Action != TypeSplitVector;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
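
The two normalization identities above can be sanity-checked outside LLVM with plain bools standing in for the i1 conditions (illustrative only):

    #include <cassert>
    // select(c, x, y) is just c ? x : y.
    static int sel(bool c, int x, int y) { return c ? x : y; }
    int main() {
      for (int m = 0; m < 4; ++m) {
        bool n0 = m & 1, n1 = m & 2;
        // select(n0 & n1, x, y) == select(n0, select(n1, x, y), y)
        assert(sel(n0 && n1, 1, 2) == sel(n0, sel(n1, 1, 2), 2));
        // select(n0 | n1, x, y) == select(n0, x, select(n1, x, y))
        assert(sel(n0 || n1, 1, 2) == sel(n0, 1, sel(n1, 1, 2)));
      }
    }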
@@ -1452,6 +1498,33 @@ public:
virtual bool isProfitableToHoist(Instruction *I) const { return true; }
+ /// Return true if the extension represented by \p I is free.
+ /// Unlike the is[Z|FP]ExtFree family, which is based on types,
+ /// this method can use the context provided by \p I to decide
+ /// whether or not \p I is free.
+ /// This method extends the behavior of the is[Z|FP]ExtFree family.
+ /// In other words, if is[Z|FP]ExtFree returns true, then this method
+ /// returns true as well. The converse is not true.
+ /// The target can perform the adequate checks by overriding isExtFreeImpl.
+ /// \pre \p I must be a sign, zero, or fp extension.
+ bool isExtFree(const Instruction *I) const {
+ switch (I->getOpcode()) {
+ case Instruction::FPExt:
+ if (isFPExtFree(EVT::getEVT(I->getType())))
+ return true;
+ break;
+ case Instruction::ZExt:
+ if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
+ return true;
+ break;
+ case Instruction::SExt:
+ break;
+ default:
+ llvm_unreachable("Instruction is not an extension");
+ }
+ return isExtFreeImpl(I);
+ }
+
/// Return true if any actual instruction that defines a value of type Ty1
/// implicitly zero-extends the value to Ty2 in the result register.
///
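
For illustration, a hedged sketch of the isExtFreeImpl override the comment refers to; the target and the heuristic (a zext folded into an addressing mode) are hypothetical:

    bool MyTargetLowering::isExtFreeImpl(const Instruction *I) const {
      // Pretend a zero-extension whose users are all loads is folded into
      // the addressing mode on this target and therefore costs nothing.
      if (I->getOpcode() != Instruction::ZExt)
        return false;
      for (const User *U : I->users())
        if (!isa<LoadInst>(U))
          return false;
      return !I->use_empty();
    }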
@@ -1607,7 +1680,6 @@ public:
private:
const TargetMachine &TM;
- const DataLayout *DL;
/// True if this is a little endian target.
bool IsLittleEndian;
@@ -1816,6 +1888,11 @@ private:
CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
protected:
+ /// Return true if the extension represented by \p I is free.
+ /// \pre \p I is a sign, zero, or fp extension and
+ /// is[Z|FP]ExtFree of the related types is not true.
+ virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
+
/// \brief Specify maximum number of store instructions per memset call.
///
/// When lowering \@llvm.memset this field specifies the maximum number of
@@ -1953,6 +2030,7 @@ public:
ISD::CondCode &CCCode, SDLoc DL) const;
/// Returns a pair of (return value, chain).
+ /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
EVT RetVT, const SDValue *Ops,
unsigned NumOps, bool isSigned,
@@ -2562,6 +2640,15 @@ public:
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
const std::string &Constraint, MVT VT) const;
+ virtual unsigned
+ getInlineAsmMemConstraint(const std::string &ConstraintCode) const {
+ if (ConstraintCode == "i")
+ return InlineAsm::Constraint_i;
+ else if (ConstraintCode == "m")
+ return InlineAsm::Constraint_m;
+ return InlineAsm::Constraint_Unknown;
+ }
+
/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand. This returns null if there is no replacement to make.
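
For illustration, a hedged sketch of a target override of getInlineAsmMemConstraint; the "Q" constraint letter and its InlineAsm::Constraint_Q code are assumptions modeled on targets with extra memory constraints:

    unsigned
    MyTargetLowering::getInlineAsmMemConstraint(const std::string &Code) const {
      if (Code == "Q")                    // assumed target-specific letter
        return InlineAsm::Constraint_Q;   // assumed constraint code
      return TargetLowering::getInlineAsmMemConstraint(Code);
    }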
@@ -2664,6 +2751,8 @@ public:
/// is created but not inserted into any basic blocks, and this method is
/// called to expand it into a sequence of instructions, potentially also
/// creating new basic blocks and control flow.
+ /// As long as the returned basic block is different (i.e., we created a new
+ /// one), the custom inserter is free to modify the rest of \p MBB.
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h
index 57c2606..62ae237 100644
--- a/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/include/llvm/Target/TargetLoweringObjectFile.h
@@ -29,26 +29,29 @@ namespace llvm {
class MCSymbol;
class MCSymbolRefExpr;
class MCStreamer;
+ class MCValue;
class ConstantExpr;
class GlobalValue;
class TargetMachine;
class TargetLoweringObjectFile : public MCObjectFileInfo {
MCContext *Ctx;
- const DataLayout *DL;
TargetLoweringObjectFile(
const TargetLoweringObjectFile&) = delete;
void operator=(const TargetLoweringObjectFile&) = delete;
protected:
+ const DataLayout *DL;
bool SupportIndirectSymViaGOTPCRel;
+ bool SupportGOTPCRelWithOffset;
public:
MCContext &getContext() const { return *Ctx; }
TargetLoweringObjectFile() : MCObjectFileInfo(), Ctx(nullptr), DL(nullptr),
- SupportIndirectSymViaGOTPCRel(false) {}
+ SupportIndirectSymViaGOTPCRel(false),
+ SupportGOTPCRelWithOffset(true) {}
virtual ~TargetLoweringObjectFile();
@@ -98,6 +101,11 @@ public:
return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM);
}
+ virtual void getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const GlobalValue *GV,
+ bool CannotUsePrivateLabel, Mangler &Mang,
+ const TargetMachine &TM) const;
+
virtual const MCSection *
getSectionForJumpTable(const Function &F, Mangler &Mang,
const TargetMachine &TM) const;
@@ -168,9 +176,18 @@ public:
return SupportIndirectSymViaGOTPCRel;
}
+ /// \brief Return true if the target's GOT "PC"-relative relocation supports
+ /// encoding an additional binary expression with an offset.
+ bool supportGOTPCRelWithOffset() const {
+ return SupportGOTPCRelWithOffset;
+ }
+
/// \brief Get the target specific PC relative GOT entry relocation
virtual const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
- int64_t Offset) const {
+ const MCValue &MV,
+ int64_t Offset,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
return nullptr;
}
diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h
index cdf643d..87aba9f 100644
--- a/include/llvm/Target/TargetMachine.h
+++ b/include/llvm/Target/TargetMachine.h
@@ -15,6 +15,7 @@
#define LLVM_TARGET_TARGETMACHINE_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
@@ -29,6 +30,9 @@ class Mangler;
class MCAsmInfo;
class MCCodeGenInfo;
class MCContext;
+class MCInstrInfo;
+class MCRegisterInfo;
+class MCSubtargetInfo;
class MCSymbol;
class Target;
class DataLayout;
@@ -62,12 +66,16 @@ class TargetMachine {
TargetMachine(const TargetMachine &) = delete;
void operator=(const TargetMachine &) = delete;
protected: // Can only create subclasses.
- TargetMachine(const Target &T, StringRef TargetTriple,
- StringRef CPU, StringRef FS, const TargetOptions &Options);
+ TargetMachine(const Target &T, StringRef DataLayoutString,
+ StringRef TargetTriple, StringRef CPU, StringRef FS,
+ const TargetOptions &Options);
/// TheTarget - The Target that this machine was created for.
const Target &TheTarget;
+ /// DataLayout - For ABI type size and alignment.
+ const DataLayout DL;
+
/// TargetTriple, TargetCPU, TargetFS - Triple string, CPU name, and target
/// feature strings the TargetMachine instance is created with.
std::string TargetTriple;
@@ -81,6 +89,9 @@ protected: // Can only create subclasses.
/// AsmInfo - Contains target specific asm information.
///
const MCAsmInfo *AsmInfo;
+ const MCRegisterInfo *MRI;
+ const MCInstrInfo *MII;
+ const MCSubtargetInfo *STI;
unsigned RequireStructuredCFG : 1;
@@ -97,11 +108,8 @@ public:
/// getSubtargetImpl - virtual method implemented by subclasses that returns
/// a reference to that target's TargetSubtargetInfo-derived member variable.
- virtual const TargetSubtargetInfo *getSubtargetImpl() const {
- return nullptr;
- }
virtual const TargetSubtargetInfo *getSubtargetImpl(const Function &) const {
- return getSubtargetImpl();
+ return nullptr;
}
virtual TargetLoweringObjectFile *getObjFileLowering() const {
return nullptr;
@@ -110,18 +118,13 @@ public:
/// getSubtarget - This method returns a reference to the specified type of
/// TargetSubtargetInfo. In debug builds, it verifies that the object being
/// returned is of the correct type.
- template<typename STC> const STC &getSubtarget() const {
- return *static_cast<const STC*>(getSubtargetImpl());
- }
- template <typename STC> const STC &getSubtarget(const Function &) const {
- return *static_cast<const STC*>(getSubtargetImpl());
+ template <typename STC> const STC &getSubtarget(const Function &F) const {
+ return *static_cast<const STC*>(getSubtargetImpl(F));
}
/// getDataLayout - This method returns a pointer to the DataLayout for
/// the target. It should be unchanging for every subtarget.
- virtual const DataLayout *getDataLayout() const {
- return nullptr;
- }
+ const DataLayout *getDataLayout() const { return &DL; }
/// \brief Reset the target options based on the function's attributes.
// FIXME: Remove TargetOptions that affect per-function code generation
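
For illustration: with the parameterless getSubtargetImpl() gone, subtarget queries must pass the Function so per-function attributes can select the right subtarget. A minimal sketch with a hypothetical MySubtarget:

    void configureFor(const TargetMachine &TM, const Function &F) {
      // The Function argument is now mandatory; there is no function-free
      // getSubtarget<STC>() overload anymore.
      const MySubtarget &ST = TM.getSubtarget<MySubtarget>(F);
      (void)ST; // query feature predicates on ST here (hypothetical)
    }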
@@ -131,6 +134,9 @@ public:
/// getMCAsmInfo - Return target specific asm information.
///
const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
+ const MCRegisterInfo *getMCRegisterInfo() const { return MRI; }
+ const MCInstrInfo *getMCInstrInfo() const { return MII; }
+ const MCSubtargetInfo *getMCSubtargetInfo() const { return STI; }
/// getIntrinsicInfo - If intrinsic information is available, return it. If
/// not, return null.
@@ -236,9 +242,9 @@ public:
///
class LLVMTargetMachine : public TargetMachine {
protected: // Can only create subclasses.
- LLVMTargetMachine(const Target &T, StringRef TargetTriple,
- StringRef CPU, StringRef FS, TargetOptions Options,
- Reloc::Model RM, CodeModel::Model CM,
+ LLVMTargetMachine(const Target &T, StringRef DataLayoutString,
+ StringRef TargetTriple, StringRef CPU, StringRef FS,
+ TargetOptions Options, Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
void initAsmInfo();
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index fc94a84..4184052 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -46,6 +46,8 @@ public:
const uint32_t *SubClassMask;
const uint16_t *SuperRegIndices;
const unsigned LaneMask;
+ /// Whether the class supports two (or more) disjunct subregister indices.
+ const bool HasDisjunctSubRegs;
const sc_iterator SuperClasses;
ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
@@ -357,13 +359,13 @@ public:
///
/// then:
///
- /// getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B) != 0
+ /// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
///
/// The converse is not necessarily true. If two lane masks have a common
/// bit, the corresponding sub-registers may not overlap, but it can be
/// assumed that they usually will.
+ /// SubIdx == 0 is allowed; it has the lane mask ~0u.
unsigned getSubRegIndexLaneMask(unsigned SubIdx) const {
- // SubIdx == 0 is allowed, it has the lane mask ~0u.
assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
return SubRegIndexLaneMasks[SubIdx];
}
@@ -425,10 +427,10 @@ public:
/// closest to the incoming stack pointer if stack grows down, and vice versa.
///
virtual const MCPhysReg*
- getCalleeSavedRegs(const MachineFunction *MF = nullptr) const = 0;
+ getCalleeSavedRegs(const MachineFunction *MF) const = 0;
/// getCallPreservedMask - Return a mask of call-preserved registers for the
- /// given calling convention on the current sub-target. The mask should
+ /// given calling convention on the current function. The mask should
/// include all call-preserved aliases. This is used by the register
/// allocator to determine which registers can be live across a call.
///
@@ -445,7 +447,8 @@ public:
/// instructions should use implicit-def operands to indicate call clobbered
/// registers.
///
- virtual const uint32_t *getCallPreservedMask(CallingConv::ID) const {
+ virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID) const {
// The default mask clobbers everything. All targets should override.
return nullptr;
}
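
For illustration, a hedged sketch of an override using the new MachineFunction parameter; MyRegisterInfo and the TableGen-generated CSR_MyTarget_RegMask symbol are hypothetical:

    const uint32_t *
    MyRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                         CallingConv::ID CC) const {
      // MF lets the mask depend on per-function facts, e.g. a soft-float
      // attribute selecting a mask without floating-point CSRs.
      return CSR_MyTarget_RegMask;
    }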
@@ -622,8 +625,9 @@ public:
/// legal to use in the current sub-target and has the same spill size.
/// The returned register class can be used to create virtual registers which
/// means that all its registers can be copied and spilled.
- virtual const TargetRegisterClass*
- getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
+ virtual const TargetRegisterClass *
+ getLargestLegalSuperClass(const TargetRegisterClass *RC,
+ const MachineFunction &) const {
/// The default implementation is very conservative and doesn't allow the
/// register allocator to inflate register classes.
return RC;
@@ -655,7 +659,8 @@ public:
/// Get the register unit pressure limit for this dimension.
/// This limit must be adjusted dynamically for reserved registers.
- virtual unsigned getRegPressureSetLimit(unsigned Idx) const = 0;
+ virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
+ unsigned Idx) const = 0;
/// Get the dimensions of register pressure impacted by this register class.
/// Returns a -1 terminated array of pressure set IDs.
@@ -686,14 +691,6 @@ public:
const MachineFunction &MF,
const VirtRegMap *VRM = nullptr) const;
- /// avoidWriteAfterWrite - Return true if the register allocator should avoid
- /// writing a register from RC in two consecutive instructions.
- /// This can avoid pipeline stalls on certain architectures.
- /// It does cause increased register pressure, though.
- virtual bool avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
- return false;
- }
-
/// updateRegAllocHint - A callback to allow target a chance to update
/// register allocation hints when a register is "changed" (e.g. coalesced)
/// to another register. e.g. On ARM, some virtual registers should target
@@ -802,9 +799,9 @@ public:
llvm_unreachable("resolveFrameIndex does not exist on this target");
}
- /// isFrameOffsetLegal - Determine whether a given offset immediate is
- /// encodable to resolve a frame index.
- virtual bool isFrameOffsetLegal(const MachineInstr *MI,
+ /// isFrameOffsetLegal - Determine whether a given base register plus offset
+ /// immediate is encodable to resolve a frame index.
+ virtual bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
int64_t Offset) const {
llvm_unreachable("isFrameOffsetLegal does not exist on this target");
}
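
For illustration, a hedged sketch of the extended isFrameOffsetLegal hook on an imaginary RISC-like target with 12-bit signed load/store offsets (isInt is from llvm/Support/MathExtras.h):

    bool MyRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                            unsigned BaseReg,
                                            int64_t Offset) const {
      // The new BaseReg parameter would also allow range checks that depend
      // on the base register's class; here only the immediate is checked.
      return isInt<12>(Offset);
    }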
diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td
index d297162..5388962 100644
--- a/include/llvm/Target/TargetSelectionDAG.td
+++ b/include/llvm/Target/TargetSelectionDAG.td
@@ -68,6 +68,18 @@ class SDTCisSubVecOfVec<int ThisOp, int OtherOp>
int OtherOpNum = OtherOp;
}
+// SDTCVecEltisVT - The specified operand is a vector type whose element
+// type is VT.
+class SDTCVecEltisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> {
+ ValueType VT = vt;
+}
+
+// SDTCisSameNumEltsAs - The two specified operands have the same number
+// of elements.
+class SDTCisSameNumEltsAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+ int OtherOperandNum = OtherOp;
+}
+
//===----------------------------------------------------------------------===//
// Selection DAG Type Profile definitions.
//
@@ -196,14 +208,6 @@ def SDTMaskedLoad: SDTypeProfile<1, 3, [ // masked load
SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>
]>;
-def SDTMaskedGather: SDTypeProfile<1, 3, [ // masked gather
- SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVec<2>
-]>;
-
-def SDTMaskedScatter: SDTypeProfile<1, 3, [ // masked scatter
- SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>
-]>;
-
def SDTVecShuffle : SDTypeProfile<1, 2, [
SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
]>;
@@ -476,10 +480,6 @@ def masked_store : SDNode<"ISD::MSTORE", SDTMaskedStore,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def masked_load : SDNode<"ISD::MLOAD", SDTMaskedLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-def masked_scatter : SDNode<"ISD::MSCATTER", SDTMaskedScatter,
- [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
-def masked_gather : SDNode<"ISD::MGATHER", SDTMaskedGather,
- [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).
diff --git a/include/llvm/Target/TargetSubtargetInfo.h b/include/llvm/Target/TargetSubtargetInfo.h
index 83ab4ec..bb5409b6 100644
--- a/include/llvm/Target/TargetSubtargetInfo.h
+++ b/include/llvm/Target/TargetSubtargetInfo.h
@@ -94,16 +94,24 @@ public:
return 0;
}
- /// \brief Temporary API to test migration to MI scheduler.
- bool useMachineScheduler() const;
-
/// \brief True if the subtarget should run MachineScheduler after aggressive
/// coalescing.
///
/// This currently replaces the SelectionDAG scheduler with the "source" order
- /// scheduler. It does not yet disable the postRA scheduler.
+ /// scheduler (though see below for an option to turn this off and use the
+ /// TargetLowering preference). It does not yet disable the postRA scheduler.
virtual bool enableMachineScheduler() const;
+ /// \brief True if the machine scheduler should disable the TLI preference
+ /// for preRA scheduling with the source level scheduler.
+ virtual bool enableMachineSchedDefaultSched() const { return true; }
+
+ /// \brief True if the subtarget should enable joining global copies.
+ ///
+ /// By default this is enabled if the machine scheduler is enabled, but
+ /// can be overridden.
+ virtual bool enableJoinGlobalCopies() const;
+
/// \brief True if the subtarget should run PostMachineScheduler.
///
/// This only takes effect if the target has configured the