Diffstat (limited to 'lib/Target/X86')
-rw-r--r--  lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp     8
-rw-r--r--  lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp   4
-rw-r--r--  lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.h     2
-rw-r--r--  lib/Target/X86/X86CodeEmitter.cpp                 60
-rw-r--r--  lib/Target/X86/X86FloatingPoint.cpp                8
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp                 4
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp                30
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp                  134
-rw-r--r--  lib/Target/X86/X86InstrInfo.h                     16
-rw-r--r--  lib/Target/X86/X86MachineFunctionInfo.h            5
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp                 6
11 files changed, 139 insertions, 138 deletions
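The hunks below are a mechanical rename of the MachineOperand predicate
accessors to their short forms. As a quick reference, here is a minimal
sketch (not part of the commit; the helper name describeOperand and the
header path are assumptions) pairing each new spelling with the name it
replaces:

    // Header location assumed; it may differ at this revision of the tree.
    #include "llvm/CodeGen/MachineOperand.h"

    // describeOperand - hypothetical helper illustrating the renamed
    // predicates used throughout this patch.
    static const char *describeOperand(const llvm::MachineOperand &MO) {
      if (MO.isReg())    return "register";        // was isRegister()
      if (MO.isImm())    return "immediate";       // was isImmediate()
      if (MO.isMBB())    return "basic block";     // was isMachineBasicBlock()
      if (MO.isFI())     return "frame index";     // was isFrameIndex()
      if (MO.isCPI())    return "constant pool";   // was isConstantPoolIndex()
      if (MO.isJTI())    return "jump table";      // was isJumpTableIndex()
      if (MO.isGlobal()) return "global address";  // was isGlobalAddress()
      if (MO.isSymbol()) return "external symbol"; // was isExternalSymbol()
      return "other";
    }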
diff --git a/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp b/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
index 9415871..e66edca 100644
--- a/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
+++ b/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
@@ -546,9 +546,9 @@ void X86ATTAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
const MachineOperand &DispSpec = MI->getOperand(Op+3);
bool NotRIPRel = IndexReg.getReg() || BaseReg.getReg();
- if (DispSpec.isGlobalAddress() ||
- DispSpec.isConstantPoolIndex() ||
- DispSpec.isJumpTableIndex()) {
+ if (DispSpec.isGlobal() ||
+ DispSpec.isCPI() ||
+ DispSpec.isJTI()) {
printOperand(MI, Op+3, "mem", NotRIPRel);
} else {
int DispVal = DispSpec.getImm();
@@ -675,7 +675,7 @@ bool X86ATTAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
case 'w': // Print HImode register
case 'k': // Print SImode register
case 'q': // Print DImode register
- if (MI->getOperand(OpNo).isRegister())
+ if (MI->getOperand(OpNo).isReg())
return printAsmMRegister(MI->getOperand(OpNo), ExtraCode[0]);
printOperand(MI, OpNo);
return false;
diff --git a/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp b/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
index fdec3bc..a9141d3 100644
--- a/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
+++ b/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
@@ -298,8 +298,8 @@ void X86IntelAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
NeedPlus = true;
}
- if (DispSpec.isGlobalAddress() || DispSpec.isConstantPoolIndex() ||
- DispSpec.isJumpTableIndex()) {
+ if (DispSpec.isGlobal() || DispSpec.isCPI() ||
+ DispSpec.isJTI()) {
if (NeedPlus)
O << " + ";
printOp(DispSpec, "mem");
diff --git a/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.h b/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.h
index ff31617..9762e93 100644
--- a/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.h
+++ b/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.h
@@ -44,7 +44,7 @@ struct VISIBILITY_HIDDEN X86IntelAsmPrinter : public AsmPrinter {
void printOperand(const MachineInstr *MI, unsigned OpNo,
const char *Modifier = 0) {
const MachineOperand &MO = MI->getOperand(OpNo);
- if (MO.isRegister()) {
+ if (MO.isReg()) {
assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
"Not physreg??");
O << TM.getRegisterInfo()->get(MO.getReg()).Name; // Capitalized names
diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp
index 1c81b4b..37c1e99 100644
--- a/lib/Target/X86/X86CodeEmitter.cpp
+++ b/lib/Target/X86/X86CodeEmitter.cpp
@@ -268,7 +268,7 @@ void Emitter::emitDisplacementField(const MachineOperand *RelocOp,
// Otherwise, this is something that requires a relocation. Emit it as such
// now.
- if (RelocOp->isGlobalAddress()) {
+ if (RelocOp->isGlobal()) {
// In 64-bit static small code model, we could potentially emit absolute.
// But it's probably not beneficial.
// 89 05 00 00 00 00 mov %eax,0(%rip) # PC-relative
@@ -279,11 +279,11 @@ void Emitter::emitDisplacementField(const MachineOperand *RelocOp,
bool isLazy = gvNeedsLazyPtr(RelocOp->getGlobal());
emitGlobalAddress(RelocOp->getGlobal(), rt, RelocOp->getOffset(),
PCAdj, NeedStub, isLazy);
- } else if (RelocOp->isConstantPoolIndex()) {
+ } else if (RelocOp->isCPI()) {
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word : X86::reloc_picrel_word;
emitConstPoolAddress(RelocOp->getIndex(), rt,
RelocOp->getOffset(), PCAdj);
- } else if (RelocOp->isJumpTableIndex()) {
+ } else if (RelocOp->isJTI()) {
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word : X86::reloc_picrel_word;
emitJumpTableAddress(RelocOp->getIndex(), rt, PCAdj);
} else {
@@ -299,16 +299,16 @@ void Emitter::emitMemModRMByte(const MachineInstr &MI,
const MachineOperand *DispForReloc = 0;
// Figure out what sort of displacement we have to handle here.
- if (Op3.isGlobalAddress()) {
+ if (Op3.isGlobal()) {
DispForReloc = &Op3;
- } else if (Op3.isConstantPoolIndex()) {
+ } else if (Op3.isCPI()) {
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
DispVal += MCE.getConstantPoolEntryAddress(Op3.getIndex());
DispVal += Op3.getOffset();
}
- } else if (Op3.isJumpTableIndex()) {
+ } else if (Op3.isJTI()) {
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
@@ -522,14 +522,14 @@ void Emitter::emitInstruction(const MachineInstr &MI,
const MachineOperand &MO = MI.getOperand(CurOp++);
DOUT << "RawFrm CurOp " << CurOp << "\n";
- DOUT << "isMachineBasicBlock " << MO.isMachineBasicBlock() << "\n";
- DOUT << "isGlobalAddress " << MO.isGlobalAddress() << "\n";
- DOUT << "isExternalSymbol " << MO.isExternalSymbol() << "\n";
- DOUT << "isImmediate " << MO.isImmediate() << "\n";
+ DOUT << "isMBB " << MO.isMBB() << "\n";
+ DOUT << "isGlobal " << MO.isGlobal() << "\n";
+ DOUT << "isSymbol " << MO.isSymbol() << "\n";
+ DOUT << "isImm " << MO.isImm() << "\n";
- if (MO.isMachineBasicBlock()) {
+ if (MO.isMBB()) {
emitPCRelativeBlockAddress(MO.getMBB());
- } else if (MO.isGlobalAddress()) {
+ } else if (MO.isGlobal()) {
// Assume undefined functions may be outside the Small codespace.
bool NeedStub =
(Is64BitMode &&
@@ -538,9 +538,9 @@ void Emitter::emitInstruction(const MachineInstr &MI,
Opcode == X86::TAILJMPd;
emitGlobalAddress(MO.getGlobal(), X86::reloc_pcrel_word,
0, 0, NeedStub);
- } else if (MO.isExternalSymbol()) {
+ } else if (MO.isSymbol()) {
emitExternalSymbolAddress(MO.getSymbolName(), X86::reloc_pcrel_word);
- } else if (MO.isImmediate()) {
+ } else if (MO.isImm()) {
emitConstant(MO.getImm(), X86InstrInfo::sizeOfImm(Desc));
} else {
assert(0 && "Unknown RawFrm operand!");
@@ -554,7 +554,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO1.isImmediate())
+ if (MO1.isImm())
emitConstant(MO1.getImm(), Size);
else {
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
@@ -562,16 +562,16 @@ void Emitter::emitInstruction(const MachineInstr &MI,
// This should not occur on Darwin for relocatable objects.
if (Opcode == X86::MOV64ri)
rt = X86::reloc_absolute_dword; // FIXME: add X86II flag?
- if (MO1.isGlobalAddress()) {
+ if (MO1.isGlobal()) {
bool NeedStub = isa<Function>(MO1.getGlobal());
bool isLazy = gvNeedsLazyPtr(MO1.getGlobal());
emitGlobalAddress(MO1.getGlobal(), rt, MO1.getOffset(), 0,
NeedStub, isLazy);
- } else if (MO1.isExternalSymbol())
+ } else if (MO1.isSymbol())
emitExternalSymbolAddress(MO1.getSymbolName(), rt);
- else if (MO1.isConstantPoolIndex())
+ else if (MO1.isCPI())
emitConstPoolAddress(MO1.getIndex(), rt);
- else if (MO1.isJumpTableIndex())
+ else if (MO1.isJTI())
emitJumpTableAddress(MO1.getIndex(), rt);
}
}
@@ -627,23 +627,23 @@ void Emitter::emitInstruction(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO1.isImmediate())
+ if (MO1.isImm())
emitConstant(MO1.getImm(), Size);
else {
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
: (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
if (Opcode == X86::MOV64ri32)
rt = X86::reloc_absolute_word; // FIXME: add X86II flag?
- if (MO1.isGlobalAddress()) {
+ if (MO1.isGlobal()) {
bool NeedStub = isa<Function>(MO1.getGlobal());
bool isLazy = gvNeedsLazyPtr(MO1.getGlobal());
emitGlobalAddress(MO1.getGlobal(), rt, MO1.getOffset(), 0,
NeedStub, isLazy);
- } else if (MO1.isExternalSymbol())
+ } else if (MO1.isSymbol())
emitExternalSymbolAddress(MO1.getSymbolName(), rt);
- else if (MO1.isConstantPoolIndex())
+ else if (MO1.isCPI())
emitConstPoolAddress(MO1.getIndex(), rt);
- else if (MO1.isJumpTableIndex())
+ else if (MO1.isJTI())
emitJumpTableAddress(MO1.getIndex(), rt);
}
}
@@ -654,7 +654,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m: {
intptr_t PCAdj = (CurOp+4 != NumOps) ?
- (MI.getOperand(CurOp+4).isImmediate() ? X86InstrInfo::sizeOfImm(Desc) : 4) : 0;
+ (MI.getOperand(CurOp+4).isImm() ? X86InstrInfo::sizeOfImm(Desc) : 4) : 0;
MCE.emitByte(BaseOpcode);
emitMemModRMByte(MI, CurOp, (Desc->TSFlags & X86II::FormMask)-X86II::MRM0m,
@@ -664,23 +664,23 @@ void Emitter::emitInstruction(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO.isImmediate())
+ if (MO.isImm())
emitConstant(MO.getImm(), Size);
else {
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
: (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
if (Opcode == X86::MOV64mi32)
rt = X86::reloc_absolute_word; // FIXME: add X86II flag?
- if (MO.isGlobalAddress()) {
+ if (MO.isGlobal()) {
bool NeedStub = isa<Function>(MO.getGlobal());
bool isLazy = gvNeedsLazyPtr(MO.getGlobal());
emitGlobalAddress(MO.getGlobal(), rt, MO.getOffset(), 0,
NeedStub, isLazy);
- } else if (MO.isExternalSymbol())
+ } else if (MO.isSymbol())
emitExternalSymbolAddress(MO.getSymbolName(), rt);
- else if (MO.isConstantPoolIndex())
+ else if (MO.isCPI())
emitConstPoolAddress(MO.getIndex(), rt);
- else if (MO.isJumpTableIndex())
+ else if (MO.isJTI())
emitJumpTableAddress(MO.getIndex(), rt);
}
}
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index ccb344d..3908228 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -168,7 +168,7 @@ FunctionPass *llvm::createX86FloatingPointStackifierPass() { return new FPS(); }
/// getFPReg - Return the X86::FPx register number for the specified operand.
/// For example, this returns 3 for X86::FP3.
static unsigned getFPReg(const MachineOperand &MO) {
- assert(MO.isRegister() && "Expected an FP register!");
+ assert(MO.isReg() && "Expected an FP register!");
unsigned Reg = MO.getReg();
assert(Reg >= X86::FP0 && Reg <= X86::FP6 && "Expected FP register!");
return Reg - X86::FP0;
@@ -240,7 +240,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
SmallVector<unsigned, 8> DeadRegs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- if (MO.isRegister() && MO.isDead())
+ if (MO.isReg() && MO.isDead())
DeadRegs.push_back(MO.getReg());
}
@@ -1021,7 +1021,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
unsigned NumKills = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
- if (!Op.isRegister() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
+ if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
assert(Op.isUse() && "Only handle inline asm uses right now");
@@ -1061,7 +1061,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
- if (!Op.isRegister() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
+ if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
// FP Register uses must be kills unless there are two uses of the same
// register, in which case only one will be a kill.
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index a60b13e..658d939 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -703,10 +703,10 @@ void X86DAGToDAGISel::InstructionSelectPostProcessing() {
bool ContainsFPCode = false;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
!ContainsFPCode && I != E; ++I) {
- if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
+ if (I->getNumOperands() != 0 && I->getOperand(0).isReg()) {
const TargetRegisterClass *clas;
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
- if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
+ if (I->getOperand(op).isReg() && I->getOperand(op).isDef() &&
TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
((clas = RegInfo->getRegClass(I->getOperand(0).getReg())) ==
X86::RFP32RegisterClass ||
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index f65167b..7f84d4c 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -6385,10 +6385,10 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
tt = t1;
unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
- assert((argOpers[valArgIndx]->isRegister() ||
- argOpers[valArgIndx]->isImmediate()) &&
+ assert((argOpers[valArgIndx]->isReg() ||
+ argOpers[valArgIndx]->isImm()) &&
"invalid operand");
- if (argOpers[valArgIndx]->isRegister())
+ if (argOpers[valArgIndx]->isReg())
MIB = BuildMI(newMBB, TII->get(regOpc), t2);
else
MIB = BuildMI(newMBB, TII->get(immOpc), t2);
@@ -6507,19 +6507,19 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
tt2 = t2;
}
- assert((argOpers[4]->isRegister() || argOpers[4]->isImmediate()) &&
+ assert((argOpers[4]->isReg() || argOpers[4]->isImm()) &&
"invalid operand");
unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
- if (argOpers[4]->isRegister())
+ if (argOpers[4]->isReg())
MIB = BuildMI(newMBB, TII->get(regOpcL), t5);
else
MIB = BuildMI(newMBB, TII->get(immOpcL), t5);
MIB.addReg(tt1);
(*MIB).addOperand(*argOpers[4]);
- assert(argOpers[5]->isRegister() == argOpers[4]->isRegister());
- assert(argOpers[5]->isImmediate() == argOpers[4]->isImmediate());
- if (argOpers[5]->isRegister())
+ assert(argOpers[5]->isReg() == argOpers[4]->isReg());
+ assert(argOpers[5]->isImm() == argOpers[4]->isImm());
+ if (argOpers[5]->isReg())
MIB = BuildMI(newMBB, TII->get(regOpcH), t6);
else
MIB = BuildMI(newMBB, TII->get(immOpcH), t6);
@@ -6613,12 +6613,12 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
(*MIB).addOperand(*argOpers[i]);
// We only support register and immediate values
- assert((argOpers[valArgIndx]->isRegister() ||
- argOpers[valArgIndx]->isImmediate()) &&
+ assert((argOpers[valArgIndx]->isReg() ||
+ argOpers[valArgIndx]->isImm()) &&
"invalid operand");
unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
- if (argOpers[valArgIndx]->isRegister())
+ if (argOpers[valArgIndx]->isReg())
MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t2);
else
MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t2);
@@ -6766,7 +6766,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
X86AddressMode AM;
MachineOperand &Op = MI->getOperand(0);
- if (Op.isRegister()) {
+ if (Op.isReg()) {
AM.BaseType = X86AddressMode::RegBase;
AM.Base.Reg = Op.getReg();
} else {
@@ -6774,13 +6774,13 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
AM.Base.FrameIndex = Op.getIndex();
}
Op = MI->getOperand(1);
- if (Op.isImmediate())
+ if (Op.isImm())
AM.Scale = Op.getImm();
Op = MI->getOperand(2);
- if (Op.isImmediate())
+ if (Op.isImm())
AM.IndexReg = Op.getImm();
Op = MI->getOperand(3);
- if (Op.isGlobalAddress()) {
+ if (Op.isGlobal()) {
AM.GV = Op.getGlobal();
} else {
AM.Disp = Op.getImm();
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index cac35b1..6a1d166 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -679,8 +679,8 @@ bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
case X86::MMX_MOVD64rr:
case X86::MMX_MOVQ64rr:
assert(MI.getNumOperands() >= 2 &&
- MI.getOperand(0).isRegister() &&
- MI.getOperand(1).isRegister() &&
+ MI.getOperand(0).isReg() &&
+ MI.getOperand(1).isReg() &&
"invalid register-register move instruction");
sourceReg = MI.getOperand(1).getReg();
destReg = MI.getOperand(0).getReg();
@@ -705,8 +705,8 @@ unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
case X86::MOVAPDrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
- if (MI->getOperand(1).isFrameIndex() && MI->getOperand(2).isImmediate() &&
- MI->getOperand(3).isRegister() && MI->getOperand(4).isImmediate() &&
+ if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
+ MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
MI->getOperand(2).getImm() == 1 &&
MI->getOperand(3).getReg() == 0 &&
MI->getOperand(4).getImm() == 0) {
@@ -736,8 +736,8 @@ unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
case X86::MMX_MOVNTQmr:
- if (MI->getOperand(0).isFrameIndex() && MI->getOperand(1).isImmediate() &&
- MI->getOperand(2).isRegister() && MI->getOperand(3).isImmediate() &&
+ if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
+ MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
MI->getOperand(1).getImm() == 1 &&
MI->getOperand(2).getReg() == 0 &&
MI->getOperand(3).getImm() == 0) {
@@ -789,17 +789,17 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm: {
// Loads from constant pools are trivially rematerializable.
- if (MI->getOperand(1).isRegister() &&
- MI->getOperand(2).isImmediate() &&
- MI->getOperand(3).isRegister() && MI->getOperand(3).getReg() == 0 &&
- (MI->getOperand(4).isConstantPoolIndex() ||
- (MI->getOperand(4).isGlobalAddress() &&
+ if (MI->getOperand(1).isReg() &&
+ MI->getOperand(2).isImm() &&
+ MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
+ (MI->getOperand(4).isCPI() ||
+ (MI->getOperand(4).isGlobal() &&
isGVStub(MI->getOperand(4).getGlobal(), TM)))) {
unsigned BaseReg = MI->getOperand(1).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of PIC load.
- if (!ReMatPICStubLoad && MI->getOperand(4).isGlobalAddress())
+ if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
return false;
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -819,11 +819,11 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
case X86::LEA32r:
case X86::LEA64r: {
- if (MI->getOperand(2).isImmediate() &&
- MI->getOperand(3).isRegister() && MI->getOperand(3).getReg() == 0 &&
- !MI->getOperand(4).isRegister()) {
+ if (MI->getOperand(2).isImm() &&
+ MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
+ !MI->getOperand(4).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
- if (!MI->getOperand(1).isRegister())
+ if (!MI->getOperand(1).isReg())
return true;
unsigned BaseReg = MI->getOperand(1).getReg();
if (BaseReg == 0)
@@ -857,7 +857,7 @@ static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
bool SeenDef = false;
for (unsigned j = 0, e = I->getNumOperands(); j != e; ++j) {
MachineOperand &MO = I->getOperand(j);
- if (!MO.isRegister())
+ if (!MO.isReg())
continue;
if (MO.getReg() == X86::EFLAGS) {
if (MO.isUse())
@@ -880,7 +880,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
const MachineInstr *Orig) const {
- unsigned SubIdx = Orig->getOperand(0).isRegister()
+ unsigned SubIdx = Orig->getOperand(0).isReg()
? Orig->getOperand(0).getSubReg() : 0;
bool ChangeSubIdx = SubIdx != 0;
if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
@@ -942,14 +942,14 @@ bool X86InstrInfo::isInvariantLoad(MachineInstr *MI) const {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
// Loads from constant pools are trivially invariant.
- if (MO.isConstantPoolIndex())
+ if (MO.isCPI())
return true;
- if (MO.isGlobalAddress())
+ if (MO.isGlobal())
return isGVStub(MO.getGlobal(), TM);
// If this is a load from an invariant stack slot, the load is a constant.
- if (MO.isFrameIndex()) {
+ if (MO.isFI()) {
const MachineFrameInfo &MFI =
*MI->getParent()->getParent()->getFrameInfo();
int Idx = MO.getIndex();
@@ -967,7 +967,7 @@ bool X86InstrInfo::isInvariantLoad(MachineInstr *MI) const {
static bool hasLiveCondCodeDef(MachineInstr *MI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (MO.isRegister() && MO.isDef() &&
+ if (MO.isReg() && MO.isDef() &&
MO.getReg() == X86::EFLAGS && !MO.isDead()) {
return true;
}
@@ -1162,7 +1162,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD64ri32:
case X86::ADD64ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- if (MI->getOperand(2).isImmediate())
+ if (MI->getOperand(2).isImm())
NewMI = addRegOffset(BuildMI(MF, get(X86::LEA64r))
.addReg(Dest, true, false, false, isDead),
Src, isKill, MI->getOperand(2).getImm());
@@ -1170,7 +1170,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32ri:
case X86::ADD32ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- if (MI->getOperand(2).isImmediate()) {
+ if (MI->getOperand(2).isImm()) {
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
NewMI = addRegOffset(BuildMI(MF, get(Opc))
.addReg(Dest, true, false, false, isDead),
@@ -1181,7 +1181,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD16ri8:
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- if (MI->getOperand(2).isImmediate())
+ if (MI->getOperand(2).isImm())
NewMI = addRegOffset(BuildMI(MF, get(X86::LEA16r))
.addReg(Dest, true, false, false, isDead),
Src, isKill, MI->getOperand(2).getImm());
@@ -1190,7 +1190,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16) return 0;
case X86::SHL32ri:
case X86::SHL64ri: {
- assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
+ assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImm() &&
"Unknown shl instruction!");
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
@@ -1544,20 +1544,20 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
MachineOperand &MO) {
- if (MO.isRegister())
+ if (MO.isReg())
MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
MO.isKill(), MO.isDead(), MO.getSubReg());
- else if (MO.isImmediate())
+ else if (MO.isImm())
MIB = MIB.addImm(MO.getImm());
- else if (MO.isFrameIndex())
+ else if (MO.isFI())
MIB = MIB.addFrameIndex(MO.getIndex());
- else if (MO.isGlobalAddress())
+ else if (MO.isGlobal())
MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
- else if (MO.isConstantPoolIndex())
+ else if (MO.isCPI())
MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
- else if (MO.isJumpTableIndex())
+ else if (MO.isJTI())
MIB = MIB.addJumpTableIndex(MO.getIndex());
- else if (MO.isExternalSymbol())
+ else if (MO.isSymbol())
MIB = MIB.addExternalSymbol(MO.getSymbolName());
else
assert(0 && "Unknown operand for X86InstrAddOperand!");
@@ -1916,7 +1916,7 @@ static MachineInstr *FuseInst(MachineFunction &MF,
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (i == OpNo) {
- assert(MO.isRegister() && "Expected to fold into reg operand!");
+ assert(MO.isReg() && "Expected to fold into reg operand!");
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB = X86InstrAddOperand(MIB, MOs[i]);
@@ -1958,8 +1958,8 @@ X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
if (isTwoAddr && NumOps >= 2 && i < 2 &&
- MI->getOperand(0).isRegister() &&
- MI->getOperand(1).isRegister() &&
+ MI->getOperand(0).isReg() &&
+ MI->getOperand(1).isReg() &&
MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
@@ -2190,7 +2190,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
MachineOperand &Op = MI->getOperand(i);
if (i >= Index && i < Index+4)
AddrOps.push_back(Op);
- else if (Op.isRegister() && Op.isImplicit())
+ else if (Op.isReg() && Op.isImplicit())
ImpOps.push_back(Op);
else if (i < Index)
BeforeOps.push_back(Op);
@@ -2205,7 +2205,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Address operands cannot be marked isKill.
for (unsigned i = 1; i != 5; ++i) {
MachineOperand &MO = NewMIs[0]->getOperand(i);
- if (MO.isRegister())
+ if (MO.isReg())
MO.setIsKill(false);
}
}
@@ -2411,7 +2411,7 @@ unsigned X86InstrInfo::sizeOfImm(const TargetInstrDesc *Desc) {
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended register?
/// e.g. r8, xmm8, etc.
bool X86InstrInfo::isX86_64ExtendedReg(const MachineOperand &MO) {
- if (!MO.isRegister()) return false;
+ if (!MO.isReg()) return false;
switch (MO.getReg()) {
default: break;
case X86::R8: case X86::R9: case X86::R10: case X86::R11:
@@ -2452,7 +2452,7 @@ unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
unsigned i = isTwoAddr ? 1 : 0;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
- if (MO.isRegister()) {
+ if (MO.isReg()) {
unsigned Reg = MO.getReg();
if (isX86_64NonExtLowByteReg(Reg))
REX |= 0x40;
@@ -2482,7 +2482,7 @@ unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MachineOperand& MO = MI.getOperand(i);
- if (MO.isRegister()) {
+ if (MO.isReg()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
@@ -2502,7 +2502,7 @@ unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
unsigned Bit = 0;
for (; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
- if (MO.isRegister()) {
+ if (MO.isReg()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
@@ -2581,11 +2581,11 @@ static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) {
}
// Otherwise, this is something that requires a relocation.
- if (RelocOp->isGlobalAddress()) {
+ if (RelocOp->isGlobal()) {
FinalSize += sizeGlobalAddress(false);
- } else if (RelocOp->isConstantPoolIndex()) {
+ } else if (RelocOp->isCPI()) {
FinalSize += sizeConstPoolAddress(false);
- } else if (RelocOp->isJumpTableIndex()) {
+ } else if (RelocOp->isJTI()) {
FinalSize += sizeJumpTableAddress(false);
} else {
assert(0 && "Unknown value to relocate!");
@@ -2601,15 +2601,15 @@ static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op,
unsigned FinalSize = 0;
// Figure out what sort of displacement we have to handle here.
- if (Op3.isGlobalAddress()) {
+ if (Op3.isGlobal()) {
DispForReloc = &Op3;
- } else if (Op3.isConstantPoolIndex()) {
+ } else if (Op3.isCPI()) {
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
DispVal = 1;
}
- } else if (Op3.isJumpTableIndex()) {
+ } else if (Op3.isJTI()) {
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
@@ -2774,13 +2774,13 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
- if (MO.isMachineBasicBlock()) {
+ if (MO.isMBB()) {
FinalSize += sizePCRelativeBlockAddress();
- } else if (MO.isGlobalAddress()) {
+ } else if (MO.isGlobal()) {
FinalSize += sizeGlobalAddress(false);
- } else if (MO.isExternalSymbol()) {
+ } else if (MO.isSymbol()) {
FinalSize += sizeExternalSymbolAddress(false);
- } else if (MO.isImmediate()) {
+ } else if (MO.isImm()) {
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
} else {
assert(0 && "Unknown RawFrm operand!");
@@ -2795,19 +2795,19 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO1.isImmediate())
+ if (MO1.isImm())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64ri)
dword = true;
- if (MO1.isGlobalAddress()) {
+ if (MO1.isGlobal()) {
FinalSize += sizeGlobalAddress(dword);
- } else if (MO1.isExternalSymbol())
+ } else if (MO1.isSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO1.isConstantPoolIndex())
+ else if (MO1.isCPI())
FinalSize += sizeConstPoolAddress(dword);
- else if (MO1.isJumpTableIndex())
+ else if (MO1.isJTI())
FinalSize += sizeJumpTableAddress(dword);
}
}
@@ -2867,19 +2867,19 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO1.isImmediate())
+ if (MO1.isImm())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64ri32)
dword = true;
- if (MO1.isGlobalAddress()) {
+ if (MO1.isGlobal()) {
FinalSize += sizeGlobalAddress(dword);
- } else if (MO1.isExternalSymbol())
+ } else if (MO1.isSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO1.isConstantPoolIndex())
+ else if (MO1.isCPI())
FinalSize += sizeConstPoolAddress(dword);
- else if (MO1.isJumpTableIndex())
+ else if (MO1.isJTI())
FinalSize += sizeJumpTableAddress(dword);
}
}
@@ -2897,19 +2897,19 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO.isImmediate())
+ if (MO.isImm())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64mi32)
dword = true;
- if (MO.isGlobalAddress()) {
+ if (MO.isGlobal()) {
FinalSize += sizeGlobalAddress(dword);
- } else if (MO.isExternalSymbol())
+ } else if (MO.isSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO.isConstantPoolIndex())
+ else if (MO.isCPI())
FinalSize += sizeConstPoolAddress(dword);
- else if (MO.isJumpTableIndex())
+ else if (MO.isJTI())
FinalSize += sizeJumpTableAddress(dword);
}
}
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index 20f0010..b2de310 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -228,20 +228,20 @@ namespace X86II {
}
inline static bool isScale(const MachineOperand &MO) {
- return MO.isImmediate() &&
+ return MO.isImm() &&
(MO.getImm() == 1 || MO.getImm() == 2 ||
MO.getImm() == 4 || MO.getImm() == 8);
}
inline static bool isMem(const MachineInstr *MI, unsigned Op) {
- if (MI->getOperand(Op).isFrameIndex()) return true;
+ if (MI->getOperand(Op).isFI()) return true;
return Op+4 <= MI->getNumOperands() &&
- MI->getOperand(Op ).isRegister() && isScale(MI->getOperand(Op+1)) &&
- MI->getOperand(Op+2).isRegister() &&
- (MI->getOperand(Op+3).isImmediate() ||
- MI->getOperand(Op+3).isGlobalAddress() ||
- MI->getOperand(Op+3).isConstantPoolIndex() ||
- MI->getOperand(Op+3).isJumpTableIndex());
+ MI->getOperand(Op ).isReg() && isScale(MI->getOperand(Op+1)) &&
+ MI->getOperand(Op+2).isReg() &&
+ (MI->getOperand(Op+3).isImm() ||
+ MI->getOperand(Op+3).isGlobal() ||
+ MI->getOperand(Op+3).isCPI() ||
+ MI->getOperand(Op+3).isJTI());
}
class X86InstrInfo : public TargetInstrInfoImpl {
diff --git a/lib/Target/X86/X86MachineFunctionInfo.h b/lib/Target/X86/X86MachineFunctionInfo.h
index edce2a9..df79520 100644
--- a/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/lib/Target/X86/X86MachineFunctionInfo.h
@@ -58,8 +58,9 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// holds the virtual register into which the sret argument is passed.
unsigned SRetReturnReg;
- /// GlobalBaseReg - keeps track of the virtual register mapped onto global
- /// base register.
+ /// GlobalBaseReg - keeps track of the virtual register initialized for
+ /// use as the global base register. This is used for PIC in some PIC
+ /// relocation models.
unsigned GlobalBaseReg;
public:
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 5b6fc07..97e1de7 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -416,7 +416,7 @@ void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned i = 0;
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
- while (!MI.getOperand(i).isFrameIndex()) {
+ while (!MI.getOperand(i).isFI()) {
++i;
assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
}
@@ -895,7 +895,7 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
MBBI = prior(MBB.end());
MachineOperand &DestAddr = MBBI->getOperand(0);
- assert(DestAddr.isRegister() && "Offset should be in register!");
+ assert(DestAddr.isReg() && "Offset should be in register!");
BuildMI(MBB, MBBI,
TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
StackPtr).addReg(DestAddr.getReg());
@@ -905,7 +905,7 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
MBBI = prior(MBB.end());
MachineOperand &JumpTarget = MBBI->getOperand(0);
MachineOperand &StackAdjust = MBBI->getOperand(1);
- assert( StackAdjust.isImmediate() && "Expecting immediate value.");
+ assert(StackAdjust.isImm() && "Expecting immediate value.");
// Adjust stack pointer.
int StackAdj = StackAdjust.getImm();