-rw-r--r--  include/llvm/CodeGen/MachineBasicBlock.h  |  2
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h      | 10
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.cpp            |  8
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.h              | 10
-rw-r--r--  lib/Target/Alpha/AlphaInstrInfo.cpp        |  4
-rw-r--r--  lib/Target/Alpha/AlphaInstrInfo.h          |  6
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.cpp        |  2
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.h          |  4
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.cpp          |  4
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.h            |  6
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.cpp        |  8
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.h          | 10
-rw-r--r--  lib/Target/Sparc/SparcInstrInfo.cpp        |  2
-rw-r--r--  lib/Target/Sparc/SparcInstrInfo.h          |  4
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp            | 20
-rw-r--r--  lib/Target/X86/X86InstrInfo.h              | 11
16 files changed, 57 insertions(+), 54 deletions(-)
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 09cca93..46a8288 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -102,6 +102,8 @@ public:
MachineInstr& front() { return Insts.front(); }
MachineInstr& back() { return Insts.back(); }
+ const MachineInstr& front() const { return Insts.front(); }
+ const MachineInstr& back() const { return Insts.back(); }
iterator begin() { return Insts.begin(); }
const_iterator begin() const { return Insts.begin(); }
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 510da1a..a357574 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -288,7 +288,7 @@ public:
/// stream.
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
return 0;
}
@@ -298,7 +298,7 @@ public:
/// stack slot.
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
@@ -306,8 +306,8 @@ public:
/// canFoldMemoryOperand - Returns true if the specified load / store is
/// folding is possible.
virtual
- bool canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const{
+ bool canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
return false;
}
@@ -338,7 +338,7 @@ public:
/// fall-through into its successor block. This is primarily used when a
/// branch is unanalyzable. It is useful for things like unconditional
/// indirect branches (jump tables).
- virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
+ virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
return false;
}
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index e1f44bd..e03977b 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -663,7 +663,7 @@ bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FI) const {
if (Ops.size() != 1) return NULL;
@@ -747,8 +747,8 @@ MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
return NewMI;
}
-bool ARMInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const {
+bool ARMInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
unsigned OpNum = Ops[0];
@@ -780,7 +780,7 @@ bool ARMInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
return false;
}
-bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
+bool ARMInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
switch (MBB.back().getOpcode()) {
diff --git a/lib/Target/ARM/ARMInstrInfo.h b/lib/Target/ARM/ARMInstrInfo.h
index cd19ae1..c59b03c 100644
--- a/lib/Target/ARM/ARMInstrInfo.h
+++ b/lib/Target/ARM/ARMInstrInfo.h
@@ -198,20 +198,20 @@ public:
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
- virtual bool canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const;
+ virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const;
- virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
+ virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
diff --git a/lib/Target/Alpha/AlphaInstrInfo.cpp b/lib/Target/Alpha/AlphaInstrInfo.cpp
index 7f3b32f..b365aba 100644
--- a/lib/Target/Alpha/AlphaInstrInfo.cpp
+++ b/lib/Target/Alpha/AlphaInstrInfo.cpp
@@ -255,7 +255,7 @@ void AlphaInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr *AlphaInstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
if (Ops.size() != 1) return NULL;
@@ -408,7 +408,7 @@ void AlphaInstrInfo::insertNoop(MachineBasicBlock &MBB,
.addReg(Alpha::R31);
}
-bool AlphaInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
+bool AlphaInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
switch (MBB.back().getOpcode()) {
diff --git a/lib/Target/Alpha/AlphaInstrInfo.h b/lib/Target/Alpha/AlphaInstrInfo.h
index 9aa5ecd..abee722 100644
--- a/lib/Target/Alpha/AlphaInstrInfo.h
+++ b/lib/Target/Alpha/AlphaInstrInfo.h
@@ -69,12 +69,12 @@ public:
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
@@ -85,7 +85,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const;
void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
- bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
+ bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
};
diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp
index cc562eb..29facb9 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -399,7 +399,7 @@ void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr *
SPUInstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const
{
#if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
diff --git a/lib/Target/CellSPU/SPUInstrInfo.h b/lib/Target/CellSPU/SPUInstrInfo.h
index 5f3aaaa..9c8bcaf 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.h
+++ b/lib/Target/CellSPU/SPUInstrInfo.h
@@ -79,13 +79,13 @@ namespace llvm {
//! Fold spills into load/store instructions
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
//! Fold any load/store to an operand
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index 3e6ce53..146c5ca 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -281,7 +281,7 @@ void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr *MipsInstrInfo::
foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops, int FI) const
+ const SmallVectorImpl<unsigned> &Ops, int FI) const
{
if (Ops.size() != 1) return NULL;
@@ -602,7 +602,7 @@ RemoveBranch(MachineBasicBlock &MBB) const
/// BlockHasNoFallThrough - Analyze if MachineBasicBlock does not
/// fall-through into its successor block.
bool MipsInstrInfo::
-BlockHasNoFallThrough(MachineBasicBlock &MBB) const
+BlockHasNoFallThrough(const MachineBasicBlock &MBB) const
{
if (MBB.empty()) return false;
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index 7615c71..31e9cfa 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -196,17 +196,17 @@ public:
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
- virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
+ virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index ca65feb..c275668 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -657,7 +657,7 @@ void PPCInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
/// copy instructions, turning them into load/store instructions.
MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
if (Ops.size() != 1) return NULL;
@@ -730,8 +730,8 @@ MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineFunction &MF,
return NewMI;
}
-bool PPCInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const {
+bool PPCInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
// Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
@@ -751,7 +751,7 @@ bool PPCInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
}
-bool PPCInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
+bool PPCInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
switch (MBB.back().getOpcode()) {
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index cb5a0e6..2e950a3 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -142,20 +142,20 @@ public:
/// copy instructions, turning them into load/store instructions.
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
- virtual bool canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const;
+ virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const;
- virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
+ virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp
index 0d7370b..47c0672 100644
--- a/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -227,7 +227,7 @@ void SparcInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr *SparcInstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FI) const {
if (Ops.size() != 1) return NULL;
diff --git a/lib/Target/Sparc/SparcInstrInfo.h b/lib/Target/Sparc/SparcInstrInfo.h
index aadbefd..7c63316 100644
--- a/lib/Target/Sparc/SparcInstrInfo.h
+++ b/lib/Target/Sparc/SparcInstrInfo.h
@@ -96,12 +96,12 @@ public:
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index d2e2cf1..e105b0f 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1543,7 +1543,7 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
}
static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
- MachineOperand &MO) {
+ const MachineOperand &MO) {
if (MO.isReg())
MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
MO.isKill(), MO.isDead(), MO.getSubReg());
@@ -1872,7 +1872,7 @@ bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
}
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
- SmallVector<MachineOperand,4> &MOs,
+ const SmallVector<MachineOperand,4> &MOs,
MachineInstr *MI, const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), true);
@@ -1898,7 +1898,7 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
static MachineInstr *FuseInst(MachineFunction &MF,
unsigned Opcode, unsigned OpNo,
- SmallVector<MachineOperand,4> &MOs,
+ const SmallVector<MachineOperand,4> &MOs,
MachineInstr *MI, const TargetInstrInfo &TII) {
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), true);
MachineInstrBuilder MIB(NewMI);
@@ -1920,7 +1920,7 @@ static MachineInstr *FuseInst(MachineFunction &MF,
}
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
- SmallVector<MachineOperand,4> &MOs,
+ const SmallVector<MachineOperand,4> &MOs,
MachineInstr *MI) {
MachineFunction &MF = *MI->getParent()->getParent();
MachineInstrBuilder MIB = BuildMI(MF, TII.get(Opcode));
@@ -1936,7 +1936,7 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
MachineInstr*
X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI, unsigned i,
- SmallVector<MachineOperand,4> &MOs) const {
+ const SmallVector<MachineOperand,4> &MOs) const{
const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
bool isTwoAddrFold = false;
unsigned NumOps = MI->getDesc().getNumOperands();
@@ -1995,7 +1995,7 @@ X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
// Check switch flag
if (NoFusing) return NULL;
@@ -2042,7 +2042,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const {
// Check switch flag
if (NoFusing) return NULL;
@@ -2093,8 +2093,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
}
-bool X86InstrInfo::canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const {
+bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
// Check switch flag
if (NoFusing) return 0;
@@ -2350,7 +2350,7 @@ unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
return I->second.first;
}
-bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
+bool X86InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
switch (MBB.back().getOpcode()) {
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index 1413310..5edc19d 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -359,7 +359,7 @@ public:
/// references has been changed.
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
@@ -367,12 +367,13 @@ public:
/// stack slot.
virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
/// canFoldMemoryOperand - Returns true if the specified load / store is
/// folding is possible.
- virtual bool canFoldMemoryOperand(MachineInstr*, SmallVectorImpl<unsigned> &) const;
+ virtual bool canFoldMemoryOperand(const MachineInstr*,
+ const SmallVectorImpl<unsigned> &) const;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instruction. If this is
@@ -391,7 +392,7 @@ public:
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore) const;
- virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
+ virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
@@ -431,7 +432,7 @@ private:
MachineInstr* foldMemoryOperand(MachineFunction &MF,
MachineInstr* MI,
unsigned OpNum,
- SmallVector<MachineOperand,4> &MOs) const;
+ const SmallVector<MachineOperand,4> &MOs) const;
};
} // End llvm namespace
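
For call-site context (not part of this patch), a hedged sketch of how a pass might invoke the constified foldMemoryOperand hook; TII, MF, MI, OpNum, and FI stand in for values such a pass would already have in hand:

SmallVector<unsigned, 2> Ops;
Ops.push_back(OpNum);  // index of the operand to fold into a memory access
// SmallVector<unsigned, N> converts to const SmallVectorImpl<unsigned>&, so
// existing callers are unchanged while implementations can no longer mutate Ops.
MachineInstr *Folded = TII->foldMemoryOperand(MF, MI, Ops, FI);
if (!Folded) {
  // Folding was declined; most targets above refuse unless Ops.size() == 1.
}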