Diffstat (limited to 'include/llvm/Target/TargetLowering.h')
-rw-r--r--  include/llvm/Target/TargetLowering.h  350
1 file changed, 223 insertions, 127 deletions
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index ef63422..eee25c9 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -22,13 +22,14 @@
#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
-#include "llvm/CallingConv.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/Attributes.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
@@ -50,7 +51,7 @@ namespace llvm {
class MCContext;
class MCExpr;
template<typename T> class SmallVectorImpl;
- class TargetData;
+ class DataLayout;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetLoweringObjectFile;
@@ -67,7 +68,6 @@ namespace llvm {
};
}
-
//===----------------------------------------------------------------------===//
/// TargetLowering - This class defines information used to lower LLVM code to
/// legal SelectionDAG operators that the target instruction selector can accept
@@ -77,8 +77,8 @@ namespace llvm {
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
- TargetLowering(const TargetLowering&); // DO NOT IMPLEMENT
- void operator=(const TargetLowering&); // DO NOT IMPLEMENT
+ TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
public:
/// LegalizeAction - This enum indicates whether operations are valid for a
/// target, and if not, what action should be used to make them valid.
@@ -102,6 +102,10 @@ public:
TypeWidenVector // This vector should be widened into a larger vector.
};
+ /// LegalizeKind holds the legalization kind that needs to happen to EVT
+ /// in order to type-legalize it.
+ typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
+
enum BooleanContent { // How the target represents true/false values.
UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
ZeroOrOneBooleanContent, // All bits zero except for bit 0.
@@ -137,12 +141,15 @@ public:
virtual ~TargetLowering();
const TargetMachine &getTargetMachine() const { return TM; }
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
- MVT getPointerTy() const { return PointerTy; }
+ // Return the pointer type for the given address space, defaulting to
+ // the pointer type from the data layout.
+ // FIXME: The default needs to be removed once all the code is updated.
+ virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; }
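// A minimal sketch (not part of this patch) of how a target with
// differently-sized address spaces might override the new hook; the target
// and the choice of address space 3 as a 16-bit "local" space are purely
// hypothetical.
virtual MVT getPointerTy(uint32_t AS = 0) const {
  return AS == 3 ? MVT::i16 : MVT::i32;   // small local pointers, i32 otherwise
}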
virtual MVT getShiftAmountTy(EVT LHSTy) const;
/// isSelectExpensive - Return true if the select operation is expensive for
@@ -151,18 +158,23 @@ public:
virtual bool isSelectSupported(SelectSupportKind kind) const { return true; }
+ /// shouldSplitVectorElementType - Return true if a vector of the given type
+ /// should be split (TypeSplitVector) instead of promoted
+ /// (TypePromoteInteger) during type legalization.
+ virtual bool shouldSplitVectorElementType(EVT VT) const { return false; }
+
/// isIntDivCheap() - Return true if integer divide is usually cheaper than
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
/// isSlowDivBypassed - Returns true if target has indicated at least one
/// type should be bypassed.
- bool isSlowDivBypassed() const { return !BypassSlowDivTypes.empty(); }
+ bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
/// getBypassSlowDivWidths - Returns map of slow operand bit widths for
/// division or remainder with corresponding fast bit widths
- const DenseMap<Type *, Type *> &getBypassSlowDivTypes() const {
- return BypassSlowDivTypes;
+ const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
+ return BypassSlowDivWidths;
}
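// Sketch (not part of the patch) of the new width-keyed API: a target would
// call addBypassSlowDiv(32, 8) in its constructor, and a client could query
// the map as below. The helper function name is an assumption.
static bool bypasses32BitDiv(const TargetLowering &TLI) {
  if (!TLI.isSlowDivBypassed())
    return false;
  const DenseMap<unsigned int, unsigned int> &M = TLI.getBypassSlowDivWidths();
  return M.count(32) && M.lookup(32) == 8;   // 32-bit div/rem -> try 8-bit
}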
/// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
@@ -219,9 +231,8 @@ public:
/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
- virtual const TargetRegisterClass *getRegClassFor(EVT VT) const {
- assert(VT.isSimple() && "getRegClassFor called on illegal type!");
- const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
+ virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
+ const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
assert(RC && "This value type is not natively supported!");
return RC;
}
@@ -231,17 +242,15 @@ public:
/// legal super-reg register class for the register class of the value type.
/// For example, on i386 the rep register class for i8, i16, and i32 are GR32;
/// while the rep register class is GR64 on x86_64.
- virtual const TargetRegisterClass *getRepRegClassFor(EVT VT) const {
- assert(VT.isSimple() && "getRepRegClassFor called on illegal type!");
- const TargetRegisterClass *RC = RepRegClassForVT[VT.getSimpleVT().SimpleTy];
+ virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
+ const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
return RC;
}
/// getRepRegClassCostFor - Return the cost of the 'representative' register
/// class for the specified value type.
- virtual uint8_t getRepRegClassCostFor(EVT VT) const {
- assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!");
- return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy];
+ virtual uint8_t getRepRegClassCostFor(MVT VT) const {
+ return RepRegClassCostForVT[VT.SimpleTy];
}
/// isTypeLegal - Return true if the target has native support for the
@@ -267,8 +276,8 @@ public:
return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
}
- void setTypeAction(EVT VT, LegalizeTypeAction Action) {
- unsigned I = VT.getSimpleVT().SimpleTy;
+ void setTypeAction(MVT VT, LegalizeTypeAction Action) {
+ unsigned I = VT.SimpleTy;
ValueTypeActions[I] = Action;
}
};
@@ -329,7 +338,7 @@ public:
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
EVT &IntermediateVT,
unsigned &NumIntermediates,
- EVT &RegisterVT) const;
+ MVT &RegisterVT) const;
/// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the
/// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
@@ -403,6 +412,22 @@ public:
getOperationAction(Op, VT) == Custom);
}
+ /// isOperationLegalOrPromote - Return true if the specified operation is
+ /// legal on this target or can be made legal using promotion. This
+ /// is used to help guide high-level lowering decisions.
+ bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
+ return (VT == MVT::Other || isTypeLegal(VT)) &&
+ (getOperationAction(Op, VT) == Legal ||
+ getOperationAction(Op, VT) == Promote);
+ }
+
+ /// isOperationExpand - Return true if the specified operation is illegal on
+ /// this target or unlikely to be made legal with custom lowering. This is
+ /// used to help guide high-level lowering decisions.
+ bool isOperationExpand(unsigned Op, EVT VT) const {
+ return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
+ }
+
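// Sketch of a lowering decision using the two new predicates; ISD::MUL and
// MVT::v4i32 are illustrative choices, not taken from the patch.
static bool mulIsCheapEnough(const TargetLowering &TLI) {
  if (TLI.isOperationLegalOrPromote(ISD::MUL, MVT::v4i32))
    return true;                                        // legal, or promotable
  return !TLI.isOperationExpand(ISD::MUL, MVT::v4i32);  // custom also counts
}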
/// isOperationLegal - Return true if the specified operation is legal on this
/// target.
bool isOperationLegal(unsigned Op, EVT VT) const {
@@ -414,36 +439,35 @@ public:
/// either it is legal, needs to be promoted to a larger size, needs to be
/// expanded to some other code sequence, or the target has a custom expander
/// for it.
- LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
- assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
- VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
+ LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
+ return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
}
/// isLoadExtLegal - Return true if the specified load with extension is legal
/// on this target.
bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
- return VT.isSimple() && getLoadExtAction(ExtType, VT) == Legal;
+ return VT.isSimple() &&
+ getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
}
/// getTruncStoreAction - Return how this store with truncation should be
/// treated: either it is legal, needs to be promoted to a larger size, needs
/// to be expanded to some other code sequence, or the target has a custom
/// expander for it.
- LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
- assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
- MemVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
+ LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
+ assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
- [MemVT.getSimpleVT().SimpleTy];
+ return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy]
+ [MemVT.SimpleTy];
}
/// isTruncStoreLegal - Return true if the specified store with truncation is
/// legal on this target.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
return isTypeLegal(ValVT) && MemVT.isSimple() &&
- getTruncStoreAction(ValVT, MemVT) == Legal;
+ getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
}
/// getIndexedLoadAction - Return how the indexed load should be treated:
@@ -451,11 +475,10 @@ public:
/// expanded to some other code sequence, or the target has a custom expander
/// for it.
LegalizeAction
- getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
- assert(IdxMode < ISD::LAST_INDEXED_MODE &&
- VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
+ getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
+ assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
+ unsigned Ty = (unsigned)VT.SimpleTy;
return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
}
@@ -463,8 +486,8 @@ public:
/// on this target.
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
return VT.isSimple() &&
- (getIndexedLoadAction(IdxMode, VT) == Legal ||
- getIndexedLoadAction(IdxMode, VT) == Custom);
+ (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
+ getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
}
/// getIndexedStoreAction - Return how the indexed store should be treated:
@@ -472,11 +495,10 @@ public:
/// expanded to some other code sequence, or the target has a custom expander
/// for it.
LegalizeAction
- getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
- assert(IdxMode < ISD::LAST_INDEXED_MODE &&
- VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
+ getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
+ assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
+ unsigned Ty = (unsigned)VT.SimpleTy;
return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
}
@@ -484,50 +506,54 @@ public:
/// on this target.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
return VT.isSimple() &&
- (getIndexedStoreAction(IdxMode, VT) == Legal ||
- getIndexedStoreAction(IdxMode, VT) == Custom);
+ (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
+ getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
}
/// getCondCodeAction - Return how the condition code should be treated:
/// either it is legal, needs to be expanded to some other code sequence,
/// or the target has a custom expander for it.
LegalizeAction
- getCondCodeAction(ISD::CondCode CC, EVT VT) const {
+ getCondCodeAction(ISD::CondCode CC, MVT VT) const {
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
- (unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 &&
+ (unsigned)VT.SimpleTy < sizeof(CondCodeActions[0])*4 &&
"Table isn't big enough!");
+ /// The lower 5 bits of SimpleTy select the Nth 2-bit slot within a 64-bit
+ /// value, and the upper 27 bits index into the second dimension of the
+ /// array to select which 64-bit value to use.
LegalizeAction Action = (LegalizeAction)
- ((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3);
+ ((CondCodeActions[CC][VT.SimpleTy >> 5] >> (2*(VT.SimpleTy & 0x1F))) & 3);
assert(Action != Promote && "Can't promote condition code!");
return Action;
}
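// Worked example (not from the patch) of the 2-bits-per-type packing used by
// CondCodeActions above: the low 5 bits of SimpleTy pick the slot inside one
// 64-bit word, the remaining bits pick the word.
static unsigned condCodeSlot(unsigned SimpleTy, unsigned &Word) {
  Word = SimpleTy >> 5;             // which uint64_t in the second dimension
  return 2 * (SimpleTy & 0x1F);     // bit offset of the 2-bit action
}
// e.g. SimpleTy 3 -> Word 0, bit offset 6;  SimpleTy 37 -> Word 1, offset 10.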
/// isCondCodeLegal - Return true if the specified condition code is legal
/// on this target.
- bool isCondCodeLegal(ISD::CondCode CC, EVT VT) const {
- return getCondCodeAction(CC, VT) == Legal ||
- getCondCodeAction(CC, VT) == Custom;
+ bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
+ return
+ getCondCodeAction(CC, VT) == Legal ||
+ getCondCodeAction(CC, VT) == Custom;
}
/// getTypeToPromoteTo - If the action for this operation is to promote, this
/// method returns the ValueType to promote to.
- EVT getTypeToPromoteTo(unsigned Op, EVT VT) const {
+ MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
assert(getOperationAction(Op, VT) == Promote &&
"This operation isn't promoted!");
// See if this has an explicit type specified.
std::map<std::pair<unsigned, MVT::SimpleValueType>,
MVT::SimpleValueType>::const_iterator PTTI =
- PromoteToType.find(std::make_pair(Op, VT.getSimpleVT().SimpleTy));
+ PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
if (PTTI != PromoteToType.end()) return PTTI->second;
assert((VT.isInteger() || VT.isFloatingPoint()) &&
"Cannot autopromote this type, add it with AddPromotedToType.");
- EVT NVT = VT;
+ MVT NVT = VT;
do {
- NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
+ NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
"Didn't find type to promote to!");
} while (!isTypeLegal(NVT) ||
@@ -555,6 +581,11 @@ public:
return EVT::getEVT(Ty, AllowUnknown);
}
+ /// Return the MVT corresponding to this LLVM type. See getValueType.
+ MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
+ return getValueType(Ty, AllowUnknown).getSimpleVT();
+ }
+
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
@@ -562,21 +593,22 @@ public:
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
- EVT getRegisterType(MVT VT) const {
+ MVT getRegisterType(MVT VT) const {
assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.SimpleTy];
}
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
- EVT getRegisterType(LLVMContext &Context, EVT VT) const {
+ MVT getRegisterType(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
- EVT VT1, RegisterVT;
+ EVT VT1;
+ MVT RegisterVT;
unsigned NumIntermediates;
(void)getVectorTypeBreakdown(Context, VT, VT1,
NumIntermediates, RegisterVT);
@@ -601,7 +633,8 @@ public:
return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
- EVT VT1, VT2;
+ EVT VT1;
+ MVT VT2;
unsigned NumIntermediates;
return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
}
@@ -653,12 +686,14 @@ public:
}
/// This function returns true if the target allows unaligned memory accesses
- /// of the specified type. This is used, for example, in situations where an
- /// array copy/move/set is converted to a sequence of store operations. It's
- /// use helps to ensure that such replacements don't generate code that causes
- /// an alignment error (trap) on the target machine.
+ /// of the specified type. If true, it also returns whether the unaligned
+ /// memory access is "fast" in the second argument by reference. This is used,
+ /// for example, in situations where an array copy/move/set is converted to a
+ /// sequence of store operations. Its use helps to ensure that such
+ /// replacements don't generate code that causes an alignment error (trap) on
+ /// the target machine.
/// @brief Determine if the target supports unaligned memory accesses.
- virtual bool allowsUnalignedMemoryAccesses(EVT) const {
+ virtual bool allowsUnalignedMemoryAccesses(EVT, bool *Fast = 0) const {
return false;
}
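// Sketch (hypothetical subclass, not from the patch) of the updated hook:
// report support and, via the optional out-parameter, whether the unaligned
// access is also fast on this machine.
virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const {
  if (VT == MVT::i32 || VT == MVT::i64) {
    if (Fast)
      *Fast = true;                 // a single load/store on this machine
    return true;
  }
  return false;                     // other unaligned accesses would trap
}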
@@ -674,21 +709,31 @@ public:
/// lowering. If DstAlign is zero that means it's safe to destination
/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
/// means there isn't a need to check it against alignment requirement,
- /// probably because the source does not need to be loaded. If
- /// 'IsZeroVal' is true, that means it's safe to return a
- /// non-scalar-integer type, e.g. empty string source, constant, or loaded
- /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
- /// constant so it does not need to be loaded.
+ /// probably because the source does not need to be loaded. If 'IsMemset' is
+ /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
+ /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
+ /// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
- bool /*IsZeroVal*/,
+ bool /*IsMemset*/,
+ bool /*ZeroMemset*/,
bool /*MemcpyStrSrc*/,
MachineFunction &/*MF*/) const {
return MVT::Other;
}
+ /// isSafeMemOpType - Returns true if it's safe to use load / store of the
+ /// specified type to expand memcpy / memset inline. This is mostly true
+ /// for all types except for some special cases. For example, on X86
+ /// targets without SSE2 f64 load / store are done with fldl / fstpl which
+ /// also does type conversion. Note the specified type doesn't have to be
+ /// legal as the hook is used before type legalization.
+ virtual bool isSafeMemOpType(MVT VT) const {
+ return true;
+ }
+
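// Sketch of how a hypothetical target might implement the reshaped memop
// hooks: wide zero stores for aligned zero-memsets, and a veto of f64 for
// inline expansion (echoing the X86-without-SSE2 caveat above). The values
// are illustrative only.
virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                unsigned /*SrcAlign*/, bool IsMemset,
                                bool ZeroMemset, bool /*MemcpyStrSrc*/,
                                MachineFunction &/*MF*/) const {
  if (IsMemset && ZeroMemset && Size >= 16 && DstAlign >= 16)
    return MVT::v4i32;              // 16-byte zero stores
  return MVT::Other;                // fall back to generic logic
}
virtual bool isSafeMemOpType(MVT VT) const { return VT != MVT::f64; }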
/// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
/// to implement llvm.setjmp.
bool usesUnderscoreSetJmp() const {
@@ -707,6 +752,12 @@ public:
return SupportJumpTables;
}
+ /// getMinimumJumpTableEntries - return the minimum number of case blocks
+ /// for which jump tables are used rather than an if sequence.
+ int getMinimumJumpTableEntries() const {
+ return MinimumJumpTableEntries;
+ }
+
/// getStackPointerRegisterToSaveRestore - If a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -843,6 +894,18 @@ public:
}
//===--------------------------------------------------------------------===//
+ /// \name Helpers for TargetTransformInfo implementations
+ /// @{
+
+ /// Get the ISD node that corresponds to the Instruction class opcode.
+ int InstructionOpcodeToISD(unsigned Opcode) const;
+
+ /// Estimate the cost of type-legalization and the legalized type.
+ std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;
+
+ /// @}
+
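// Sketch of how a TargetTransformInfo implementation might combine the two
// helpers above to price an IR instruction; the scaling is illustrative, not
// a real cost model.
static unsigned roughCost(const TargetLowering &TLI, unsigned Opcode,
                          Type *Ty) {
  int ISDOpc = TLI.InstructionOpcodeToISD(Opcode);
  std::pair<unsigned, MVT> LT = TLI.getTypeLegalizationCost(Ty);
  if (ISDOpc && TLI.isOperationLegal(ISDOpc, LT.second))
    return LT.first;        // one legal node per legalized piece
  return LT.first * 4;      // crude penalty for non-legal operations
}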
+ //===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
@@ -912,18 +975,20 @@ public:
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
- bool BeforeLegalize;
- bool BeforeLegalizeOps;
+ CombineLevel Level;
bool CalledByLegalizer;
public:
SelectionDAG &DAG;
- DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
- : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
- CalledByLegalizer(cl), DAG(dag) {}
+ DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
+ : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
- bool isBeforeLegalize() const { return BeforeLegalize; }
- bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
+ bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
+ bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
+ bool isAfterLegalizeVectorOps() const {
+ return Level == AfterLegalizeDAG;
+ }
+ CombineLevel getDAGCombineLevel() { return Level; }
bool isCalledByLegalizer() const { return CalledByLegalizer; }
void AddToWorklist(SDNode *N);
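// Sketch of a target combine hook (implemented outside this header)
// consulting the new level-based queries instead of the removed booleans;
// MyTargetLowering and the elided transform are hypothetical.
SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (DCI.isBeforeLegalize())            // Level == BeforeLegalizeTypes
    return SDValue();                    // too early for this combine
  if (!DCI.isAfterLegalizeVectorOps())   // wait for a fully legal DAG
    return SDValue();
  // ... rewrite N here ...
  return SDValue();
}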
@@ -1027,6 +1092,12 @@ protected:
SupportJumpTables = Val;
}
+ /// setMinimumJumpTableEntries - Indicate the minimum number of case blocks
+ /// for which jump tables should be generated rather than an if sequence.
+ void setMinimumJumpTableEntries(int Val) {
+ MinimumJumpTableEntries = Val;
+ }
+
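// Sketch: the protected setters above are meant to be called from a target's
// TargetLowering constructor (hypothetical target, base-class setup elided).
void MyTargetLowering::configureJumpTables() {
  setSupportJumpTables(true);        // jump tables are lowerable here
  setMinimumJumpTableEntries(8);     // below 8 cases, emit an if sequence
}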
/// setStackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1066,9 +1137,9 @@ protected:
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
- /// addBypassSlowDivType - Tells the code generator which types to bypass.
- void addBypassSlowDivType(Type *slow_type, Type *fast_type) {
- BypassSlowDivTypes[slow_type] = fast_type;
+ /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
+ void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
+ BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
}
/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
@@ -1079,16 +1150,16 @@ protected:
/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
- void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
- assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
+ void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
+ assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
AvailableRegClasses.push_back(std::make_pair(VT, RC));
- RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
+ RegClassForVT[VT.SimpleTy] = RC;
}
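// Sketch of the MVT-keyed registration in a hypothetical target's
// constructor; the register class names are assumptions, not real targets.
void MyTargetLowering::initRegisterClasses() {
  addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  addRegisterClass(MVT::f32, &MyTarget::FPR32RegClass);
  computeRegisterProperties();       // derive per-VT register info afterwards
}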
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
virtual std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(EVT VT) const;
+ findRepresentativeClass(MVT VT) const;
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
@@ -1153,8 +1224,13 @@ protected:
assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
- CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
- CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2;
+ /// The lower 5 bits of SimpleTy select the Nth 2-bit slot within a 64-bit
+ /// value, and the upper 27 bits index into the second dimension of the
+ /// array to select which 64-bit value to use.
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
}
/// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
@@ -1227,7 +1303,7 @@ protected:
public:
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
- // the SelectionDAGLowering code knows how to lower these.
+ // the SelectionDAGBuilder code knows how to lower these.
//
/// LowerFormalArguments - This hook must be implemented to lower the
@@ -1340,7 +1416,7 @@ public:
}
/// HandleByVal - Target-specific cleanup for formal ByVal parameters.
- virtual void HandleByVal(CCState *, unsigned &) const {}
+ virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
/// CanLowerReturn - This hook should be implemented to check whether the
/// return values described by the Outs array can fit into the return
@@ -1392,9 +1468,9 @@ public:
/// but this is not true all the time, e.g. i1 on x86-64. It is also not
/// necessary for non-C calling conventions. The frontend should handle this
/// and include all of the necessary information.
- virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
+ virtual MVT getTypeForExtArgOrReturn(MVT VT,
ISD::NodeType /*ExtendKind*/) const {
- EVT MinVT = getRegisterType(Context, MVT::i32);
+ MVT MinVT = getRegisterType(MVT::i32);
return VT.bitsLT(MinVT) ? MinVT : VT;
}
@@ -1501,7 +1577,7 @@ public:
Value *CallOperandVal;
/// ConstraintVT - The ValueType for the operand value.
- EVT ConstraintVT;
+ MVT ConstraintVT;
/// isMatchingInputConstraint - Return true of this is an input operand that
/// is a matching constraint like "4".
@@ -1610,6 +1686,17 @@ public:
// Addressing mode description hooks (used by LSR etc).
//
+ /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
+ /// same BB as Load/Store instructions reading the address. This allows as
+ /// much computation as possible to be done in the address mode for that
+ /// operand. This hook lets targets also pass back when this should be done
+ /// on intrinsics which load/store.
+ virtual bool GetAddrModeArguments(IntrinsicInst *I,
+ SmallVectorImpl<Value*> &Ops,
+ Type *&AccessTy) const {
+ return false;
+ }
+
/// AddrMode - This represents an addressing mode of:
/// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
/// If BaseGV is null, there is no BaseGV.
@@ -1626,17 +1713,6 @@ public:
AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
- /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
- /// same BB as Load/Store instructions reading the address. This allows as
- /// much computation as possible to be done in the address mode for that
- /// operand. This hook lets targets also pass back when this should be done
- /// on intrinsics which load/store.
- virtual bool GetAddrModeArguments(IntrinsicInst *I,
- SmallVectorImpl<Value*> &Ops,
- Type *&AccessTy) const {
- return false;
- }
-
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
@@ -1687,6 +1763,13 @@ public:
return false;
}
+ /// isZExtFree - Return true if zero-extending the specific node Val to type
+ /// VT2 is free (either because it's implicitly zero-extended such as ARM
+ /// ldrb / ldrh or because it's folded such as X86 zero-extending loads).
+ virtual bool isZExtFree(SDValue Val, EVT VT2) const {
+ return isZExtFree(Val.getValueType(), VT2);
+ }
+
/// isFNegFree - Return true if an fneg operation is free to the point where
/// it is never worthwhile to replace it with a bitwise operation.
virtual bool isFNegFree(EVT) const {
@@ -1720,9 +1803,9 @@ public:
SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
SelectionDAG &DAG) const;
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*>* Created) const;
+ std::vector<SDNode*> *Created) const;
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*>* Created) const;
+ std::vector<SDNode*> *Created) const;
//===--------------------------------------------------------------------===//
@@ -1767,10 +1850,11 @@ public:
private:
const TargetMachine &TM;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLoweringObjectFile &TLOF;
- /// PointerTy - The type to use for pointers, usually i32 or i64.
+ /// PointerTy - The type to use for pointers for the default address space,
+ /// usually i32 or i64.
///
MVT PointerTy;
@@ -1788,11 +1872,11 @@ private:
/// set to true unconditionally.
bool IntDivIsCheap;
- /// BypassSlowDivTypes - Tells the code generator to bypass slow divide or
- /// remainder instructions. For example, SlowDivBypass[i32,u8] tells the code
- /// generator to bypass 32-bit signed integer div/rem with an 8-bit unsigned
+ /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
+ /// remainder instructions. For example, BypassSlowDivWidths[32,8] tells the
+ /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned
/// integer div/rem when the operands are positive and less than 256.
- DenseMap <Type *, Type *> BypassSlowDivTypes;
+ DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
@@ -1816,6 +1900,9 @@ private:
/// If it's not true, then each jumptable must be lowered into if-then-else's.
bool SupportJumpTables;
+ /// MinimumJumpTableEntries - Number of blocks threshold to use jump tables.
+ int MinimumJumpTableEntries;
+
/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
@@ -1884,7 +1971,7 @@ private:
/// each ValueType the target supports natively.
const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
- EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
+ MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
/// RepRegClassForVT - This indicates the "representative" register class to
/// use for each ValueType the target supports natively. This information is
@@ -1904,7 +1991,7 @@ private:
/// contains one step of the expand (e.g. i64 -> i32), even if there are
/// multiple steps required (e.g. i64 -> i16). For types natively supported
/// by the system, this holds the same type (e.g. i32 -> i32).
- EVT TransformToType[MVT::LAST_VALUETYPE];
+ MVT TransformToType[MVT::LAST_VALUETYPE];
/// OpActions - For each operation and each value type, keep a LegalizeAction
/// that indicates how instruction selection should deal with the operation.
@@ -1933,26 +2020,34 @@ private:
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
/// deal with the condition code.
- uint64_t CondCodeActions[ISD::SETCC_INVALID];
+ /// Because each CC action takes up 2 bits, we need to have the array size
+ /// be large enough to fit all of the value types. This can be done by
+ /// dividing the MVT::LAST_VALUETYPE by 32 and adding one.
+ uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
ValueTypeActionImpl ValueTypeActions;
- typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
-
+public:
LegalizeKind
getTypeConversion(LLVMContext &Context, EVT VT) const {
// If this is a simple type, use the ComputeRegisterProp mechanism.
if (VT.isSimple()) {
- assert((unsigned)VT.getSimpleVT().SimpleTy <
- array_lengthof(TransformToType));
- EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
- LegalizeTypeAction LA = ValueTypeActions.getTypeAction(VT.getSimpleVT());
+ MVT SVT = VT.getSimpleVT();
+ assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
+ MVT NVT = TransformToType[SVT.SimpleTy];
+ LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
assert(
- (!(NVT.isSimple() && LA != TypeLegal) ||
- ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
+ (LA == TypeLegal ||
+ ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
&& "Promote may not follow Expand or Promote");
+ if (LA == TypeSplitVector)
+ return LegalizeKind(LA, EVT::getVectorVT(Context,
+ SVT.getVectorElementType(),
+ SVT.getVectorNumElements()/2));
+ if (LA == TypeScalarizeVector)
+ return LegalizeKind(LA, SVT.getVectorElementType());
return LegalizeKind(LA, NVT);
}
@@ -2055,7 +2150,8 @@ private:
return LegalizeKind(TypeSplitVector, NVT);
}
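// Sketch of a caller of the now-public getTypeConversion; for TypeSplitVector
// the returned EVT is the half-width vector, for TypeScalarizeVector it is
// the element type, otherwise the promoted, expanded, or already-legal type.
// The helper name is an assumption.
static EVT legalizedForm(const TargetLowering &TLI, LLVMContext &Ctx, EVT VT) {
  TargetLowering::LegalizeKind LK = TLI.getTypeConversion(Ctx, VT);
  return LK.second;
}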
- std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
+private:
+ std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
/// TargetDAGCombineArray - Targets can specify ISD nodes that they would
/// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
@@ -2148,7 +2244,7 @@ private:
/// GetReturnInfo - Given an LLVM IR type and return type attributes,
/// compute the return value EVTs and flags, and optionally also
/// the offsets, if the return value is being lowered to memory.
-void GetReturnInfo(Type* ReturnType, Attributes attr,
+void GetReturnInfo(Type* ReturnType, AttributeSet attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI);