Diffstat (limited to 'include/llvm/CodeGen')
-rw-r--r--  include/llvm/CodeGen/AsmPrinter.h             4
-rw-r--r--  include/llvm/CodeGen/ISDOpcodes.h             4
-rw-r--r--  include/llvm/CodeGen/LiveInterval.h           9
-rw-r--r--  include/llvm/CodeGen/LiveIntervalAnalysis.h   2
-rw-r--r--  include/llvm/CodeGen/MachineFrameInfo.h      24
-rw-r--r--  include/llvm/CodeGen/MachineFunction.h        8
-rw-r--r--  include/llvm/CodeGen/MachineInstr.h          52
-rw-r--r--  include/llvm/CodeGen/MachineMemOperand.h      9
-rw-r--r--  include/llvm/CodeGen/MachineOperand.h        42
-rw-r--r--  include/llvm/CodeGen/PBQP/HeuristicBase.h     5
-rw-r--r--  include/llvm/CodeGen/Passes.h                 4
-rw-r--r--  include/llvm/CodeGen/ScheduleDAG.h           19
-rw-r--r--  include/llvm/CodeGen/ScheduleDAGInstrs.h     25
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h     13
14 files changed, 168 insertions, 52 deletions
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index 170a528..2920675 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -17,6 +17,7 @@
#define LLVM_CODEGEN_ASMPRINTER_H
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
@@ -460,7 +461,8 @@ namespace llvm {
mutable unsigned SetCounter;
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
- void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0) const;
+ void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0,
+ InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;
/// EmitInlineAsm - This method formats and emits the specified machine
/// instruction that is an inline asm.
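Note: the extra AsmDialect parameter lets the same emission path print inline-asm blobs in either AT&T or Intel syntax. A minimal sketch of a call from inside an AsmPrinter-derived class, assuming the asm text is already available as a StringRef; the operands shown are illustrative only:

    // Emit a blob in Intel syntax; LocMDNode stays null as in the default case.
    EmitInlineAsm(StringRef("mov eax, 1"), /*LocMDNode=*/0, InlineAsm::AD_Intel);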
diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h
index f387bd5..5d0a3b4 100644
--- a/include/llvm/CodeGen/ISDOpcodes.h
+++ b/include/llvm/CodeGen/ISDOpcodes.h
@@ -637,6 +637,10 @@ namespace ISD {
ATOMIC_LOAD_UMIN,
ATOMIC_LOAD_UMAX,
+ /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
+ /// is the chain and the second operand is the alloca pointer.
+ LIFETIME_START, LIFETIME_END,
+
/// BUILTIN_OP_END - This must be the last enum value in this list.
/// The target-specific pre-isel opcode values start here.
BUILTIN_OP_END
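Note: LIFETIME_START/LIFETIME_END carry the llvm.lifetime.* markers through instruction selection so later passes (notably stack coloring) can see when a stack slot becomes live or dead. A hedged sketch of building such a node, assuming a SelectionDAG-building context where DAG, dl, Chain, the frame index FI, and the target lowering object TLI are in scope:

    // Chain first, then the pointer to the marked object, per the comment above.
    SDValue Ops[] = { Chain, DAG.getFrameIndex(FI, TLI.getPointerTy()) };
    Chain = DAG.getNode(ISD::LIFETIME_START, dl, MVT::Other, Ops, 2);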
diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h
index a3ce47c..5aeb1a8 100644
--- a/include/llvm/CodeGen/LiveInterval.h
+++ b/include/llvm/CodeGen/LiveInterval.h
@@ -29,6 +29,7 @@
#include <climits>
namespace llvm {
+ class CoalescerPair;
class LiveIntervals;
class MachineInstr;
class MachineRegisterInfo;
@@ -366,6 +367,14 @@ namespace llvm {
return overlapsFrom(other, other.begin());
}
+ /// overlaps - Return true if the two intervals have overlapping segments
+ /// that are not coalescable according to CP.
+ ///
+ /// Overlapping segments where one interval is defined by a coalescable
+ /// copy are allowed.
+ bool overlaps(const LiveInterval &Other, const CoalescerPair &CP,
+ const SlotIndexes&) const;
+
/// overlaps - Return true if the live interval overlaps a range specified
/// by [Start, End).
bool overlaps(SlotIndex Start, SlotIndex End) const;
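Note: the CoalescerPair overload asks a weaker question than plain overlaps(): segments whose value is defined by a copy that CP itself would merge do not count as interference. A hedged usage sketch from a coalescer-like context, assuming a CoalescerPair CP and a LiveIntervals *LIS are available:

    const LiveInterval &DstLI = LIS->getInterval(CP.getDstReg());
    const LiveInterval &SrcLI = LIS->getInterval(CP.getSrcReg());
    if (DstLI.overlaps(SrcLI, CP, *LIS->getSlotIndexes()))
      return false;   // genuine interference; the two registers cannot be joined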
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index da521db..bf74690 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -252,7 +252,7 @@ namespace llvm {
/// addKillFlags - Add kill flags to any instruction that kills a virtual
/// register.
- void addKillFlags();
+ void addKillFlags(const VirtRegMap*);
/// handleMove - call this method to notify LiveIntervals that
/// instruction 'mi' has been moved within a basic block. This will update
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
index 8b958e4..3c07ceb 100644
--- a/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -28,6 +28,7 @@ class MachineFunction;
class MachineBasicBlock;
class TargetFrameLowering;
class BitVector;
+class Value;
/// The CalleeSavedInfo class tracks the information need to locate where a
/// callee saved register is in the current frame.
@@ -103,14 +104,18 @@ class MachineFrameInfo {
// protector.
bool MayNeedSP;
+ /// Alloca - If this stack object originates from an Alloca instruction,
+ /// this value saves the original IR allocation. Can be NULL.
+ const Value *Alloca;
+
// PreAllocated - If true, the object was mapped into the local frame
// block and doesn't need additional handling for allocation beyond that.
bool PreAllocated;
StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
- bool isSS, bool NSP)
+ bool isSS, bool NSP, const Value *Val)
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
- isSpillSlot(isSS), MayNeedSP(NSP), PreAllocated(false) {}
+ isSpillSlot(isSS), MayNeedSP(NSP), Alloca(Val), PreAllocated(false) {}
};
/// Objects - The list of stack objects allocated...
@@ -362,6 +367,14 @@ public:
ensureMaxAlignment(Align);
}
+ /// getObjectAllocation - Return the underlying Alloca of the specified
+ /// stack object if it exists. Returns 0 if none exists.
+ const Value* getObjectAllocation(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Alloca;
+ }
+
/// NeedsStackProtector - Returns true if the object may need stack
/// protectors.
bool MayNeedStackProtector(int ObjectIdx) const {
@@ -482,9 +495,10 @@ public:
/// a nonnegative identifier to represent it.
///
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
- bool MayNeedSP = false) {
+ bool MayNeedSP = false, const Value *Alloca = 0) {
assert(Size != 0 && "Cannot allocate zero size stack objects!");
- Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP));
+ Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP,
+ Alloca));
int Index = (int)Objects.size() - NumFixedObjects - 1;
assert(Index >= 0 && "Bad frame index!");
ensureMaxAlignment(Alignment);
@@ -516,7 +530,7 @@ public:
///
int CreateVariableSizedObject(unsigned Alignment) {
HasVarSizedObjects = true;
- Objects.push_back(StackObject(0, Alignment, 0, false, false, true));
+ Objects.push_back(StackObject(0, Alignment, 0, false, false, true, 0));
ensureMaxAlignment(Alignment);
return (int)Objects.size()-NumFixedObjects-1;
}
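Note: recording the originating alloca lets a frame index be mapped back to IR, which is what the new stack coloring pass keys on. A hedged sketch, assuming MFI is a MachineFrameInfo reference and AI points to the const AllocaInst being lowered (an AllocaInst is-a Value, so it can be passed directly):

    int FI = MFI.CreateStackObject(Size, Align, /*isSS=*/false,
                                   /*MayNeedSP=*/false, AI);
    // Later, e.g. during stack coloring:
    if (const Value *V = MFI.getObjectAllocation(FI)) {
      // V is the original alloca; slots whose lifetime markers never overlap
      // can be merged into one stack object.
    }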
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index 062c750..0eb9d0e 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -138,15 +138,19 @@ public:
MachineModuleInfo &getMMI() const { return MMI; }
GCModuleInfo *getGMI() const { return GMI; }
MCContext &getContext() const { return Ctx; }
-
+
/// getFunction - Return the LLVM function that this machine code represents
///
const Function *getFunction() const { return Fn; }
+ /// getName - Return the name of the corresponding LLVM function.
+ ///
+ StringRef getName() const;
+
/// getFunctionNumber - Return a unique ID for the current function.
///
unsigned getFunctionNumber() const { return FunctionNumber; }
-
+
/// getTarget - Return the target machine this machine code is compiled with
///
const TargetMachine &getTarget() const { return Target; }
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index 27756ab..4e1533a 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Support/DebugLoc.h"
#include <vector>
@@ -610,6 +611,7 @@ public:
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
bool isStackAligningInlineAsm() const;
+ InlineAsm::AsmDialect getInlineAsmDialect() const;
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
@@ -782,16 +784,43 @@ public:
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
+ /// tieOperands - Add a tie between the register operands at DefIdx and
+ /// UseIdx. The tie will cause the register allocator to ensure that the two
+ /// operands are assigned the same physical register.
+ ///
+ /// Tied operands are managed automatically for explicit operands in the
+ /// MCInstrDesc. This method is for exceptional cases like inline asm.
+ void tieOperands(unsigned DefIdx, unsigned UseIdx);
+
+ /// findTiedOperandIdx - Given the index of a tied register operand, find the
+ /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
+ /// index of the tied operand which must exist.
+ unsigned findTiedOperandIdx(unsigned OpIdx) const;
+
/// isRegTiedToUseOperand - Given the index of a register def operand,
/// check if the register def is tied to a source operand, due to either
/// two-address elimination or inline assembly constraints. Returns the
/// first tied use operand index by reference if UseOpIdx is not null.
- bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const;
+ bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const {
+ const MachineOperand &MO = getOperand(DefOpIdx);
+ if (!MO.isReg() || !MO.isDef() || !MO.isTied())
+ return false;
+ if (UseOpIdx)
+ *UseOpIdx = findTiedOperandIdx(DefOpIdx);
+ return true;
+ }
/// isRegTiedToDefOperand - Return true if the use operand of the specified
/// index is tied to a def operand. It also returns the def operand index by
/// reference if DefOpIdx is not null.
- bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const;
+ bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const {
+ const MachineOperand &MO = getOperand(UseOpIdx);
+ if (!MO.isReg() || !MO.isUse() || !MO.isTied())
+ return false;
+ if (DefOpIdx)
+ *DefOpIdx = findTiedOperandIdx(UseOpIdx);
+ return true;
+ }
/// clearKillInfo - Clears kill flags on all operands.
///
@@ -852,11 +881,11 @@ public:
bool isSafeToReMat(const TargetInstrInfo *TII, AliasAnalysis *AA,
unsigned DstReg) const;
- /// hasVolatileMemoryRef - Return true if this instruction may have a
- /// volatile memory reference, or if the information describing the
- /// memory reference is not available. Return false if it is known to
- /// have no volatile memory references.
- bool hasVolatileMemoryRef() const;
+ /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
+ /// or volatile memory reference, or if the information describing the memory
+ /// reference is not available. Return false if it is known to have no
+ /// ordered or volatile memory references.
+ bool hasOrderedMemoryRef() const;
/// isInvariantLoad - Return true if this instruction is loading from a
/// location whose value is invariant across the function. For example,
@@ -935,6 +964,15 @@ private:
/// return null.
MachineRegisterInfo *getRegInfo();
+ /// untieRegOperand - Break any tie involving OpIdx.
+ void untieRegOperand(unsigned OpIdx) {
+ MachineOperand &MO = getOperand(OpIdx);
+ if (MO.isReg() && MO.isTied()) {
+ getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
+ MO.TiedTo = 0;
+ }
+ }
+
/// addImplicitDefUseOperands - Add all implicit def and use operands to
/// this instruction.
void addImplicitDefUseOperands();
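Note: tieOperands/findTiedOperandIdx move the tied-operand bookkeeping onto the operands themselves, so the isRegTiedTo* queries become cheap wrappers around MachineOperand::isTied(). A hedged sketch of tying an inline-asm def to its matching use and querying the tie afterwards; the operand indices are purely illustrative:

    MI->tieOperands(/*DefIdx=*/1, /*UseIdx=*/3);

    unsigned UseIdx;
    if (MI->isRegTiedToUseOperand(1, &UseIdx))
      assert(UseIdx == 3 && "def 1 should report its tied use");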
diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h
index 1ac9080..ddb1271 100644
--- a/include/llvm/CodeGen/MachineMemOperand.h
+++ b/include/llvm/CodeGen/MachineMemOperand.h
@@ -151,6 +151,15 @@ public:
bool isNonTemporal() const { return Flags & MONonTemporal; }
bool isInvariant() const { return Flags & MOInvariant; }
+ /// isUnordered - Returns true if this memory operation doesn't have any
+ /// ordering constraints other than normal aliasing. Volatile and atomic
+ /// memory operations can't be reordered.
+ ///
+ /// Currently, we don't model the difference between volatile and atomic
+ /// operations. They should retain their ordering relative to all memory
+ /// operations.
+ bool isUnordered() const { return !isVolatile(); }
+
/// refineAlignment - Update this MachineMemOperand to reflect the alignment
/// of MMO, if it has a greater alignment. This must only be used when the
/// new alignment applies to all users of this MachineMemOperand.
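Note: isUnordered is the MachineMemOperand-level counterpart of the renamed MachineInstr::hasOrderedMemoryRef above: an instruction is reorderable only if its memory operands are known and every one of them is unordered. A hedged sketch of the typical client-side check, assuming MI is a MachineInstr*:

    // hasOrderedMemoryRef() is conservative: it returns true when MI has no
    // memoperands at all, or when any memoperand fails isUnordered().
    if (!MI->hasOrderedMemoryRef()) {
      // MI may be moved across other unordered memory operations.
    }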
diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h
index 37d42b3..0b9d67f 100644
--- a/include/llvm/CodeGen/MachineOperand.h
+++ b/include/llvm/CodeGen/MachineOperand.h
@@ -14,7 +14,6 @@
#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
#define LLVM_CODEGEN_MACHINEOPERAND_H
-#include "llvm/ADT/Hashing.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@@ -30,6 +29,7 @@ class MachineRegisterInfo;
class MDNode;
class TargetMachine;
class TargetRegisterInfo;
+class hash_code;
class raw_ostream;
class MCSymbol;
@@ -60,12 +60,20 @@ private:
/// union.
unsigned char OpKind; // MachineOperandType
- /// SubReg - Subregister number, only valid for MO_Register. A value of 0
- /// indicates the MO_Register has no subReg.
- unsigned char SubReg;
+ // This union is discriminated by OpKind.
+ union {
+ /// SubReg - Subregister number, only valid for MO_Register. A value of 0
+ /// indicates the MO_Register has no subReg.
+ unsigned char SubReg;
+
+ /// TargetFlags - This is a set of target-specific operand flags.
+ unsigned char TargetFlags;
+ };
- /// TargetFlags - This is a set of target-specific operand flags.
- unsigned char TargetFlags;
+ /// TiedTo - Non-zero when this register operand is tied to another register
+ /// operand. The encoding of this field is described in the block comment
+ /// before MachineInstr::tieOperands().
+ unsigned char TiedTo : 4;
/// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register
/// operands.
@@ -176,9 +184,17 @@ public:
///
MachineOperandType getType() const { return (MachineOperandType)OpKind; }
- unsigned char getTargetFlags() const { return TargetFlags; }
- void setTargetFlags(unsigned char F) { TargetFlags = F; }
- void addTargetFlag(unsigned char F) { TargetFlags |= F; }
+ unsigned char getTargetFlags() const {
+ return isReg() ? 0 : TargetFlags;
+ }
+ void setTargetFlags(unsigned char F) {
+ assert(!isReg() && "Register operands can't have target flags");
+ TargetFlags = F;
+ }
+ void addTargetFlag(unsigned char F) {
+ assert(!isReg() && "Register operands can't have target flags");
+ TargetFlags |= F;
+ }
/// getParent - Return the instruction that this operand belongs to.
@@ -288,6 +304,11 @@ public:
return IsEarlyClobber;
}
+ bool isTied() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return TiedTo;
+ }
+
bool isDebug() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsDebug;
@@ -421,7 +442,7 @@ public:
int64_t getOffset() const {
assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
isBlockAddress()) && "Wrong MachineOperand accessor");
- return (int64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
+ return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
SmallContents.OffsetLo;
}
@@ -548,6 +569,7 @@ public:
Op.IsUndef = isUndef;
Op.IsInternalRead = isInternalRead;
Op.IsEarlyClobber = isEarlyClobber;
+ Op.TiedTo = 0;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
Op.Contents.Reg.Prev = 0;
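Note: the getOffset change in the hunk above avoids shifting a signed value: when OffsetHi is negative, int64_t(OffsetHi) << 32 is undefined behavior, so the shift is now done on a uint64_t and only the final result is reinterpreted as signed. A small worked example with illustrative values:

    int      OffsetHi = -1;     // upper 32 bits of the offset
    unsigned OffsetLo = 5;      // lower 32 bits
    int64_t Off = int64_t(uint64_t(OffsetHi) << 32) | OffsetLo;
    // Off == 0xFFFFFFFF00000005 == -4294967291; the old form left-shifted a
    // negative signed value, which the C++ standard leaves undefined.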
diff --git a/include/llvm/CodeGen/PBQP/HeuristicBase.h b/include/llvm/CodeGen/PBQP/HeuristicBase.h
index 3fee18c..0c1fcb7 100644
--- a/include/llvm/CodeGen/PBQP/HeuristicBase.h
+++ b/include/llvm/CodeGen/PBQP/HeuristicBase.h
@@ -113,7 +113,7 @@ namespace PBQP {
}
/// \brief Add the given node to the list of nodes to be optimally reduced.
- /// @return nItr Node iterator to be added.
+ /// @param nItr Node iterator to be added.
///
/// You probably don't want to over-ride this, except perhaps to record
/// statistics before calling this implementation. HeuristicBase relies on
@@ -193,8 +193,9 @@ namespace PBQP {
/// reduce list.
/// @return True if a reduction takes place, false if the heuristic reduce
/// list is empty.
- void heuristicReduce() {
+ bool heuristicReduce() {
llvm_unreachable("Must be implemented in derived class.");
+ return false;
}
/// \brief Prepare a change in the costs on the given edge.
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
index 07b3b45..7bd5764 100644
--- a/include/llvm/CodeGen/Passes.h
+++ b/include/llvm/CodeGen/Passes.h
@@ -404,6 +404,10 @@ namespace llvm {
/// inserting cmov instructions.
extern char &EarlyIfConverterID;
+ /// StackColoring - This pass performs stack coloring and merging.
+ /// It merges disjoint allocas to reduce the stack size.
+ extern char &StackColoringID;
+
/// IfConverter - This pass performs machine code if conversion.
extern char &IfConverterID;
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
index 85ab47b..2567a65 100644
--- a/include/llvm/CodeGen/ScheduleDAG.h
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -85,6 +85,8 @@ namespace llvm {
/// the value of the Latency field of the predecessor, however advanced
/// models may provide additional information about specific edges.
unsigned Latency;
+ /// Record MinLatency separately from "expected" Latency.
+ unsigned MinLatency;
public:
/// SDep - Construct a null SDep. This is only for use by container
@@ -96,7 +98,7 @@ namespace llvm {
SDep(SUnit *S, Kind kind, unsigned latency = 1, unsigned Reg = 0,
bool isNormalMemory = false, bool isMustAlias = false,
bool isArtificial = false)
- : Dep(S, kind), Contents(), Latency(latency) {
+ : Dep(S, kind), Contents(), Latency(latency), MinLatency(latency) {
switch (kind) {
case Anti:
case Output:
@@ -135,7 +137,8 @@ namespace llvm {
}
bool operator==(const SDep &Other) const {
- return overlaps(Other) && Latency == Other.Latency;
+ return overlaps(Other)
+ && Latency == Other.Latency && MinLatency == Other.MinLatency;
}
bool operator!=(const SDep &Other) const {
@@ -155,6 +158,18 @@ namespace llvm {
Latency = Lat;
}
+ /// getMinLatency - Return the minimum latency for this edge. Minimum
+ /// latency is used for scheduling groups, while normal (expected) latency
+ /// is for instruction cost and critical path.
+ unsigned getMinLatency() const {
+ return MinLatency;
+ }
+
+ /// setMinLatency - Set the minimum latency for this edge.
+ void setMinLatency(unsigned Lat) {
+ MinLatency = Lat;
+ }
+
//// getSUnit - Return the SUnit to which this edge points.
SUnit *getSUnit() const {
return Dep.getPointer();
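Note: with computeOperandLatency's FindMin flag gone (see the ScheduleDAGInstrs.h hunk below), both latency figures now live on the edge itself. A hedged sketch of a DAG builder annotating a data edge; the latency values are illustrative:

    SDep Dep(DefSU, SDep::Data, /*latency=*/4, Reg);
    Dep.setMinLatency(1);          // back-to-back issue allowed within a group
    UseSU->addPred(Dep);
    // Schedulers read getLatency() for cost/critical path and getMinLatency()
    // when forming scheduling groups.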
diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 1bde942..8b52b5a 100644
--- a/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -108,6 +108,15 @@ namespace llvm {
}
};
+ /// Record a physical register access.
+ /// For non data-dependent uses, OpIdx == -1.
+ struct PhysRegSUOper {
+ SUnit *SU;
+ int OpIdx;
+
+ PhysRegSUOper(SUnit *su, int op): SU(su), OpIdx(op) {}
+ };
+
/// Combine a SparseSet with a 1x1 vector to track physical registers.
/// The SparseSet allows iterating over the (few) live registers for quickly
/// comparing against a regmask or clearing the set.
@@ -116,7 +125,7 @@ namespace llvm {
/// cleared between scheduling regions without freeing unused entries.
class Reg2SUnitsMap {
SparseSet<unsigned> PhysRegSet;
- std::vector<std::vector<SUnit*> > SUnits;
+ std::vector<std::vector<PhysRegSUOper> > SUnits;
public:
typedef SparseSet<unsigned>::const_iterator const_iterator;
@@ -140,7 +149,7 @@ namespace llvm {
/// If this register is mapped, return its existing SUnits vector.
/// Otherwise map the register and return an empty SUnits vector.
- std::vector<SUnit *> &operator[](unsigned Reg) {
+ std::vector<PhysRegSUOper> &operator[](unsigned Reg) {
bool New = PhysRegSet.insert(Reg).second;
assert((!New || SUnits[Reg].empty()) && "stale SUnits vector");
(void)New;
@@ -288,16 +297,6 @@ namespace llvm {
///
virtual void computeLatency(SUnit *SU);
- /// computeOperandLatency - Return dependence edge latency using
- /// operand use/def information
- ///
- /// FindMin may be set to get the minimum vs. expected latency. Minimum
- /// latency is used for scheduling groups, while expected latency is for
- /// instruction cost and critical path.
- virtual unsigned computeOperandLatency(SUnit *Def, SUnit *Use,
- const SDep& dep,
- bool FindMin = false) const;
-
/// schedule - Order nodes according to selected style, filling
/// in the Sequence member.
///
@@ -319,7 +318,7 @@ namespace llvm {
protected:
void initSUnits();
- void addPhysRegDataDeps(SUnit *SU, const MachineOperand &MO);
+ void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx);
void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
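Note: storing a PhysRegSUOper instead of a bare SUnit* keeps the operand index alongside the unit, so addPhysRegDataDeps(SU, OperIdx) can be driven directly from the map. A hedged sketch from inside a ScheduleDAGInstrs-like builder, assuming Defs is a Reg2SUnitsMap:

    // Remember that operand OperIdx of SU touches physical register Reg.
    Defs[Reg].push_back(PhysRegSUOper(SU, OperIdx));

    // Later, walk every recorded access to Reg; per the comment above,
    // OpIdx == -1 marks accesses that carry no data dependence.
    std::vector<PhysRegSUOper> &List = Defs[Reg];
    for (unsigned i = 0, e = List.size(); i != e; ++i)
      if (List[i].OpIdx >= 0)
        /* add a data dependence on List[i].SU at operand List[i].OpIdx */;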
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index db361ee..3bea2de 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1011,11 +1011,6 @@ class AtomicSDNode : public MemSDNode {
SubclassData |= SynchScope << 12;
assert(getOrdering() == Ordering && "Ordering encoding error!");
assert(getSynchScope() == SynchScope && "Synch-scope encoding error!");
-
- assert((readMem() || getOrdering() <= Monotonic) &&
- "Acquire/Release MachineMemOperand must be a load!");
- assert((writeMem() || getOrdering() <= Monotonic) &&
- "Acquire/Release MachineMemOperand must be a store!");
}
public:
@@ -1750,10 +1745,10 @@ public:
class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
SDNode, ptrdiff_t> {
- SDNode *Node;
+ const SDNode *Node;
unsigned Operand;
- SDNodeIterator(SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
+ SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
public:
bool operator==(const SDNodeIterator& x) const {
return Operand == x.Operand;
@@ -1784,8 +1779,8 @@ public:
return Operand - Other.Operand;
}
- static SDNodeIterator begin(SDNode *N) { return SDNodeIterator(N, 0); }
- static SDNodeIterator end (SDNode *N) {
+ static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
+ static SDNodeIterator end (const SDNode *N) {
return SDNodeIterator(N, N->getNumOperands());
}