Diffstat (limited to 'lib/Target/ARM')
43 files changed, 1241 insertions, 835 deletions
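Most of the churn below is the mechanical rename of TargetInstrDesc/TID to MCInstrDesc/MCID as the instruction and register descriptions move into the MC layer. The following is a minimal, hypothetical sketch of the access pattern the patch converts to; the helper name and its body are illustrative only and are not part of the commit:

    // Hypothetical helper showing the post-patch MCInstrDesc usage
    // (previously: const TargetInstrDesc &TID = MI->getDesc();).
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/MC/MCInstrDesc.h"

    using namespace llvm;

    static bool isPredicableNonStore(const MachineInstr *MI) {
      const MCInstrDesc &MCID = MI->getDesc(); // MC-layer descriptor
      return MCID.isPredicable() && !MCID.mayStore();
    }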
diff --git a/lib/Target/ARM/ARMAsmBackend.cpp b/lib/Target/ARM/ARMAsmBackend.cpp index 79e9897..5e438a9 100644 --- a/lib/Target/ARM/ARMAsmBackend.cpp +++ b/lib/Target/ARM/ARMAsmBackend.cpp @@ -174,7 +174,8 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { Value >>= 16; // Fallthrough case ARM::fixup_t2_movw_lo16: - case ARM::fixup_t2_movt_hi16_pcrel: + case ARM::fixup_t2_movt_hi16_pcrel: //FIXME: Shouldn't this be shifted like + // the other hi16 fixup? case ARM::fixup_t2_movw_lo16_pcrel: { unsigned Hi4 = (Value & 0xF000) >> 12; unsigned i = (Value & 0x800) >> 11; @@ -184,8 +185,10 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { // inst{26} = i; // inst{14-12} = Mid3; // inst{7-0} = Lo8; - assert ((((int64_t)Value) >= -0x8000) && (((int64_t)Value) <= 0x7fff) && - "Out of range pc-relative fixup value!"); + // The value comes in as the whole thing, not just the portion required + // for this fixup, so we need to mask off the bits not handled by this + // portion (lo vs. hi). + Value &= 0xffff; Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8); uint64_t swapped = (Value & 0xFFFF0000) >> 16; swapped |= (Value & 0x0000FFFF) << 16; diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index eb73902..7240837 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -1010,19 +1010,16 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) { MI->dump(); assert(0 && "Unsupported opcode for unwinding information"); case ARM::MOVr: - case ARM::tMOVgpr2gpr: - case ARM::tMOVgpr2tgpr: Offset = 0; break; case ARM::ADDri: Offset = -MI->getOperand(2).getImm(); break; case ARM::SUBri: - case ARM::t2SUBrSPi: - Offset = MI->getOperand(2).getImm(); + Offset = MI->getOperand(2).getImm(); break; case ARM::tSUBspi: - Offset = MI->getOperand(2).getImm()*4; + Offset = MI->getOperand(2).getImm()*4; break; case ARM::tADDspi: case ARM::tADDrSPi: @@ -1097,13 +1094,22 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { OutStreamer.EmitInstruction(TmpInst); return; } - case ARM::t2ADDrSPi: - case ARM::t2ADDrSPi12: - case ARM::t2SUBrSPi: - case ARM::t2SUBrSPi12: - assert ((MI->getOperand(1).getReg() == ARM::SP) && - "Unexpected source register!"); - break; + case ARM::t2LDMIA_RET: { + // As above for LDMIA_RET. Map to the tPOP instruction. + MCInst TmpInst; + LowerARMMachineInstrToMCInst(MI, TmpInst, *this); + TmpInst.setOpcode(ARM::t2LDMIA_UPD); + OutStreamer.EmitInstruction(TmpInst); + return; + } + case ARM::tPOP_RET: { + // As above for LDMIA_RET. Map to the tPOP instruction. + MCInst TmpInst; + LowerARMMachineInstrToMCInst(MI, TmpInst, *this); + TmpInst.setOpcode(ARM::tPOP); + OutStreamer.EmitInstruction(TmpInst); + return; + } case ARM::t2MOVi32imm: assert(0 && "Should be lowered by thumb2it pass"); case ARM::DBG_VALUE: { @@ -1215,6 +1221,9 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { TmpInst.setOpcode(ARM::tMOVr); TmpInst.addOperand(MCOperand::CreateReg(ARM::LR)); TmpInst.addOperand(MCOperand::CreateReg(ARM::PC)); + // Add predicate operands. + TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL)); + TmpInst.addOperand(MCOperand::CreateReg(0)); OutStreamer.EmitInstruction(TmpInst); } { @@ -1445,7 +1454,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { case ARM::t2BR_JT: { // Lower and emit the instruction itself, then the jump table following it. 
MCInst TmpInst; - TmpInst.setOpcode(ARM::tMOVgpr2gpr); + TmpInst.setOpcode(ARM::tMOVr); TmpInst.addOperand(MCOperand::CreateReg(ARM::PC)); TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg())); // Add predicate operands. @@ -1494,7 +1503,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { // mov pc, target MCInst TmpInst; unsigned Opc = MI->getOpcode() == ARM::BR_JTr ? - ARM::MOVr : ARM::tMOVgpr2gpr; + ARM::MOVr : ARM::tMOVr; TmpInst.setOpcode(Opc); TmpInst.addOperand(MCOperand::CreateReg(ARM::PC)); TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg())); @@ -1507,7 +1516,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { OutStreamer.EmitInstruction(TmpInst); // Make sure the Thumb jump table is 4-byte aligned. - if (Opc == ARM::tMOVgpr2gpr) + if (Opc == ARM::tMOVr) EmitAlignment(2); // Output the data for the jump table itself @@ -1599,11 +1608,12 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { MCSymbol *Label = GetARMSJLJEHLabel(); { MCInst TmpInst; - TmpInst.setOpcode(ARM::tMOVgpr2tgpr); + TmpInst.setOpcode(ARM::tMOVr); TmpInst.addOperand(MCOperand::CreateReg(ValReg)); TmpInst.addOperand(MCOperand::CreateReg(ARM::PC)); - // 's' bit operand - TmpInst.addOperand(MCOperand::CreateReg(ARM::CPSR)); + // Predicate. + TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL)); + TmpInst.addOperand(MCOperand::CreateReg(0)); OutStreamer.AddComment("eh_setjmp begin"); OutStreamer.EmitInstruction(TmpInst); } @@ -1817,7 +1827,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { } { MCInst TmpInst; - TmpInst.setOpcode(ARM::tMOVtgpr2gpr); + TmpInst.setOpcode(ARM::tMOVr); TmpInst.addOperand(MCOperand::CreateReg(ARM::SP)); TmpInst.addOperand(MCOperand::CreateReg(ScratchReg)); // Predicate. diff --git a/lib/Target/ARM/ARMBaseInfo.h b/lib/Target/ARM/ARMBaseInfo.h index 36edbad..4c9ecdf 100644 --- a/lib/Target/ARM/ARMBaseInfo.h +++ b/lib/Target/ARM/ARMBaseInfo.h @@ -25,11 +25,13 @@ // Defines symbolic names for ARM registers. This defines a mapping from // register name to register number. // -#include "ARMGenRegisterNames.inc" +#define GET_REGINFO_ENUM +#include "ARMGenRegisterInfo.inc" // Defines symbolic names for the ARM instructions. 
// -#include "ARMGenInstrNames.inc" +#define GET_INSTRINFO_ENUM +#include "ARMGenInstrInfo.inc" namespace llvm { diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 44a3976..9f56637 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -18,7 +18,6 @@ #include "ARMHazardRecognizer.h" #include "ARMMachineFunctionInfo.h" #include "ARMRegisterInfo.h" -#include "ARMGenInstrInfo.inc" #include "llvm/Constants.h" #include "llvm/Function.h" #include "llvm/GlobalValue.h" @@ -35,6 +34,10 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/ADT/STLExtras.h" + +#define GET_INSTRINFO_MC_DESC +#include "ARMGenInstrInfo.inc" + using namespace llvm; static cl::opt<bool> @@ -74,7 +77,8 @@ static const ARM_MLxEntry ARM_MLxTable[] = { }; ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI) - : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)), + : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts), + ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP), Subtarget(STI) { for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) { if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second) @@ -136,9 +140,9 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineInstr *UpdateMI = NULL; MachineInstr *MemMI = NULL; unsigned AddrMode = (TSFlags & ARMII::AddrModeMask); - const TargetInstrDesc &TID = MI->getDesc(); - unsigned NumOps = TID.getNumOperands(); - bool isLoad = !TID.mayStore(); + const MCInstrDesc &MCID = MI->getDesc(); + unsigned NumOps = MCID.getNumOperands(); + bool isLoad = !MCID.mayStore(); const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0); const MachineOperand &Base = MI->getOperand(2); const MachineOperand &Offset = MI->getOperand(NumOps-3); @@ -475,8 +479,8 @@ SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI, std::vector<MachineOperand> &Pred) const { // FIXME: This confuses implicit_def with optional CPSR def. - const TargetInstrDesc &TID = MI->getDesc(); - if (!TID.getImplicitDefs() && !TID.hasOptionalDef()) + const MCInstrDesc &MCID = MI->getDesc(); + if (!MCID.getImplicitDefs() && !MCID.hasOptionalDef()) return false; bool Found = false; @@ -495,11 +499,11 @@ bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI, /// By default, this returns true for every instruction with a /// PredicateOperand. bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const { - const TargetInstrDesc &TID = MI->getDesc(); - if (!TID.isPredicable()) + const MCInstrDesc &MCID = MI->getDesc(); + if (!MCID.isPredicable()) return false; - if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) { + if ((MCID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) { ARMFunctionInfo *AFI = MI->getParent()->getParent()->getInfo<ARMFunctionInfo>(); return AFI->isThumb2Function(); @@ -525,8 +529,8 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo(); // Basic size info comes from the TSFlags field. 
- const TargetInstrDesc &TID = MI->getDesc(); - uint64_t TSFlags = TID.TSFlags; + const MCInstrDesc &MCID = MI->getDesc(); + uint64_t TSFlags = MCID.TSFlags; unsigned Opc = MI->getOpcode(); switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) { @@ -588,9 +592,9 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { // entry is one byte; TBH two byte each. unsigned EntrySize = (Opc == ARM::t2TBB_JT) ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4); - unsigned NumOps = TID.getNumOperands(); + unsigned NumOps = MCID.getNumOperands(); MachineOperand JTOP = - MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2)); + MI->getOperand(NumOps - (MCID.isPredicable() ? 3 : 2)); unsigned JTI = JTOP.getIndex(); const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); assert(MJTI != 0); @@ -788,7 +792,7 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI, break; case ARM::STRi12: case ARM::t2STRi12: - case ARM::tSpill: + case ARM::tSTRspi: case ARM::VSTRD: case ARM::VSTRS: if (MI->getOperand(1).isFI() && @@ -923,7 +927,7 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, break; case ARM::LDRi12: case ARM::t2LDRi12: - case ARM::tRestore: + case ARM::tLDRspi: case ARM::VLDRD: case ARM::VLDRS: if (MI->getOperand(1).isFI() && @@ -1363,7 +1367,7 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, int &Offset, const ARMBaseInstrInfo &TII) { unsigned Opcode = MI.getOpcode(); - const TargetInstrDesc &Desc = MI.getDesc(); + const MCInstrDesc &Desc = MI.getDesc(); unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); bool isSub = false; @@ -1803,7 +1807,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, if (!ItinData || ItinData->isEmpty()) return 1; - const TargetInstrDesc &Desc = MI->getDesc(); + const MCInstrDesc &Desc = MI->getDesc(); unsigned Class = Desc.getSchedClass(); unsigned UOps = ItinData->Itineraries[Class].NumMicroOps; if (UOps) @@ -1906,10 +1910,10 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefClass, unsigned DefIdx, unsigned DefAlign) const { - int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; + int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; if (RegNo <= 0) // Def is the address writeback. return ItinData->getOperandCycle(DefClass, DefIdx); @@ -1924,7 +1928,7 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, DefCycle = RegNo; bool isSLoad = false; - switch (DefTID.getOpcode()) { + switch (DefMCID.getOpcode()) { default: break; case ARM::VLDMSIA: case ARM::VLDMSIA_UPD: @@ -1947,10 +1951,10 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefClass, unsigned DefIdx, unsigned DefAlign) const { - int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; + int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; if (RegNo <= 0) // Def is the address writeback. 
return ItinData->getOperandCycle(DefClass, DefIdx); @@ -1982,10 +1986,10 @@ ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseClass, unsigned UseIdx, unsigned UseAlign) const { - int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; + int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; if (RegNo <= 0) return ItinData->getOperandCycle(UseClass, UseIdx); @@ -1999,7 +2003,7 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, UseCycle = RegNo; bool isSStore = false; - switch (UseTID.getOpcode()) { + switch (UseMCID.getOpcode()) { default: break; case ARM::VSTMSIA: case ARM::VSTMSIA_UPD: @@ -2022,10 +2026,10 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseClass, unsigned UseIdx, unsigned UseAlign) const { - int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; + int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; if (RegNo <= 0) return ItinData->getOperandCycle(UseClass, UseIdx); @@ -2051,14 +2055,14 @@ ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefIdx, unsigned DefAlign, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseIdx, unsigned UseAlign) const { - unsigned DefClass = DefTID.getSchedClass(); - unsigned UseClass = UseTID.getSchedClass(); + unsigned DefClass = DefMCID.getSchedClass(); + unsigned UseClass = UseMCID.getSchedClass(); - if (DefIdx < DefTID.getNumDefs() && UseIdx < UseTID.getNumOperands()) + if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands()) return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx); // This may be a def / use of a variable_ops instruction, the operand @@ -2066,7 +2070,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, // figure it out. 
int DefCycle = -1; bool LdmBypass = false; - switch (DefTID.getOpcode()) { + switch (DefMCID.getOpcode()) { default: DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); break; @@ -2077,7 +2081,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::VLDMSIA: case ARM::VLDMSIA_UPD: case ARM::VLDMSDB_UPD: - DefCycle = getVLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign); + DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); break; case ARM::LDMIA_RET: @@ -2098,7 +2102,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::t2LDMIA_UPD: case ARM::t2LDMDB_UPD: LdmBypass = 1; - DefCycle = getLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign); + DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); break; } @@ -2107,7 +2111,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, DefCycle = 2; int UseCycle = -1; - switch (UseTID.getOpcode()) { + switch (UseMCID.getOpcode()) { default: UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); break; @@ -2118,7 +2122,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::VSTMSIA: case ARM::VSTMSIA_UPD: case ARM::VSTMSDB_UPD: - UseCycle = getVSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign); + UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); break; case ARM::STMIA: @@ -2137,7 +2141,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::t2STMDB: case ARM::t2STMIA_UPD: case ARM::t2STMDB_UPD: - UseCycle = getSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign); + UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); break; } @@ -2150,7 +2154,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, if (LdmBypass) { // It's a variable_ops instruction so we can't use DefIdx here. Just use // first def operand. - if (ItinData->hasPipelineForwarding(DefClass, DefTID.getNumOperands()-1, + if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, UseClass, UseIdx)) --UseCycle; } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, @@ -2170,11 +2174,11 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, DefMI->isRegSequence() || DefMI->isImplicitDef()) return 1; - const TargetInstrDesc &DefTID = DefMI->getDesc(); + const MCInstrDesc &DefMCID = DefMI->getDesc(); if (!ItinData || ItinData->isEmpty()) - return DefTID.mayLoad() ? 3 : 1; + return DefMCID.mayLoad() ? 3 : 1; - const TargetInstrDesc &UseTID = UseMI->getDesc(); + const MCInstrDesc &UseMCID = UseMI->getDesc(); const MachineOperand &DefMO = DefMI->getOperand(DefIdx); if (DefMO.getReg() == ARM::CPSR) { if (DefMI->getOpcode() == ARM::FMSTAT) { @@ -2183,7 +2187,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, } // CPSR set and branch can be paired in the same cycle. - if (UseTID.isBranch()) + if (UseMCID.isBranch()) return 0; } @@ -2191,14 +2195,14 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, ? (*DefMI->memoperands_begin())->getAlignment() : 0; unsigned UseAlign = UseMI->hasOneMemOperand() ? 
(*UseMI->memoperands_begin())->getAlignment() : 0; - int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign, - UseTID, UseIdx, UseAlign); + int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, + UseMCID, UseIdx, UseAlign); if (Latency > 1 && (Subtarget.isCortexA8() || Subtarget.isCortexA9())) { // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] // variants are one cycle cheaper. - switch (DefTID.getOpcode()) { + switch (DefMCID.getOpcode()) { default: break; case ARM::LDRrs: case ARM::LDRBrs: { @@ -2223,7 +2227,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, } if (DefAlign < 8 && Subtarget.isCortexA9()) - switch (DefTID.getOpcode()) { + switch (DefMCID.getOpcode()) { default: break; case ARM::VLD1q8: case ARM::VLD1q16: @@ -2327,37 +2331,37 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, if (!DefNode->isMachineOpcode()) return 1; - const TargetInstrDesc &DefTID = get(DefNode->getMachineOpcode()); + const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode()); - if (isZeroCost(DefTID.Opcode)) + if (isZeroCost(DefMCID.Opcode)) return 0; if (!ItinData || ItinData->isEmpty()) - return DefTID.mayLoad() ? 3 : 1; + return DefMCID.mayLoad() ? 3 : 1; if (!UseNode->isMachineOpcode()) { - int Latency = ItinData->getOperandCycle(DefTID.getSchedClass(), DefIdx); + int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); if (Subtarget.isCortexA9()) return Latency <= 2 ? 1 : Latency - 1; else return Latency <= 3 ? 1 : Latency - 2; } - const TargetInstrDesc &UseTID = get(UseNode->getMachineOpcode()); + const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode()); const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode); unsigned DefAlign = !DefMN->memoperands_empty() ? (*DefMN->memoperands_begin())->getAlignment() : 0; const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode); unsigned UseAlign = !UseMN->memoperands_empty() ? (*UseMN->memoperands_begin())->getAlignment() : 0; - int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign, - UseTID, UseIdx, UseAlign); + int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, + UseMCID, UseIdx, UseAlign); if (Latency > 1 && (Subtarget.isCortexA8() || Subtarget.isCortexA9())) { // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] // variants are one cycle cheaper. - switch (DefTID.getOpcode()) { + switch (DefMCID.getOpcode()) { default: break; case ARM::LDRrs: case ARM::LDRBrs: { @@ -2384,7 +2388,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, } if (DefAlign < 8 && Subtarget.isCortexA9()) - switch (DefTID.getOpcode()) { + switch (DefMCID.getOpcode()) { default: break; case ARM::VLD1q8Pseudo: case ARM::VLD1q16Pseudo: @@ -2503,10 +2507,10 @@ int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, if (!ItinData || ItinData->isEmpty()) return 1; - const TargetInstrDesc &TID = MI->getDesc(); - unsigned Class = TID.getSchedClass(); + const MCInstrDesc &MCID = MI->getDesc(); + unsigned Class = MCID.getSchedClass(); unsigned UOps = ItinData->Itineraries[Class].NumMicroOps; - if (PredCost && TID.hasImplicitDefOfPhysReg(ARM::CPSR)) + if (PredCost && MCID.hasImplicitDefOfPhysReg(ARM::CPSR)) // When predicated, CPSR is an additional source operand for CPSR updating // instructions, this apparently increases their latencies. 
*PredCost = 1; diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 96f0e76..ab93cde 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -353,25 +353,25 @@ public: SDNode *UseNode, unsigned UseIdx) const; private: int getVLDMDefCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefClass, unsigned DefIdx, unsigned DefAlign) const; int getLDMDefCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefClass, unsigned DefIdx, unsigned DefAlign) const; int getVSTMUseCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseClass, unsigned UseIdx, unsigned UseAlign) const; int getSTMUseCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseClass, unsigned UseIdx, unsigned UseAlign) const; int getOperandLatency(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefIdx, unsigned DefAlign, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseIdx, unsigned UseAlign) const; int getInstrLatency(const InstrItineraryData *ItinData, diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 9dc51b8..e46082d 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -40,6 +40,10 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/Support/CommandLine.h" +#define GET_REGINFO_MC_DESC +#define GET_REGINFO_TARGET_DESC +#include "ARMGenRegisterInfo.inc" + using namespace llvm; static cl::opt<bool> @@ -54,8 +58,7 @@ EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true), ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &sti) - : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP), - TII(tii), STI(sti), + : ARMGenRegisterInfo(), TII(tii), STI(sti), FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? 
ARM::R7 : ARM::R11), BasePtr(ARM::R6) { } @@ -955,7 +958,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, int64_t ARMBaseRegisterInfo:: getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const { - const TargetInstrDesc &Desc = MI->getDesc(); + const MCInstrDesc &Desc = MI->getDesc(); unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); int64_t InstrOffs = 0;; int Scale = 1; @@ -1105,11 +1108,11 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB, if (Ins != MBB->end()) DL = Ins->getDebugLoc(); - const TargetInstrDesc &TID = TII.get(ADDriOpc); + const MCInstrDesc &MCID = TII.get(ADDriOpc); MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - MRI.constrainRegClass(BaseReg, TID.OpInfo[0].getRegClass(this)); + MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this)); - MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, TID, BaseReg) + MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg) .addFrameIndex(FrameIdx).addImm(Offset); if (!AFI->isThumb1OnlyFunction()) @@ -1145,7 +1148,7 @@ ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I, bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, int64_t Offset) const { - const TargetInstrDesc &Desc = MI->getDesc(); + const MCInstrDesc &Desc = MI->getDesc(); unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); unsigned i = 0; @@ -1281,11 +1284,5 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, } // Update the original instruction to use the scratch register. MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true); - if (MI.getOpcode() == ARM::t2ADDrSPi) - MI.setDesc(TII.get(ARM::t2ADDri)); - else if (MI.getOpcode() == ARM::t2SUBrSPi) - MI.setDesc(TII.get(ARM::t2SUBri)); } } - -#include "ARMGenRegisterInfo.inc" diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.h b/lib/Target/ARM/ARMBaseRegisterInfo.h index 70b6f01..b4b4059 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.h +++ b/lib/Target/ARM/ARMBaseRegisterInfo.h @@ -16,7 +16,9 @@ #include "ARM.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "ARMGenRegisterInfo.h.inc" + +#define GET_REGINFO_HEADER +#include "ARMGenRegisterInfo.inc" namespace llvm { class ARMSubtarget; diff --git a/lib/Target/ARM/ARMCodeEmitter.cpp b/lib/Target/ARM/ARMCodeEmitter.cpp index 97bac88..7ed07c2 100644 --- a/lib/Target/ARM/ARMCodeEmitter.cpp +++ b/lib/Target/ARM/ARMCodeEmitter.cpp @@ -98,13 +98,13 @@ namespace { void addPCLabel(unsigned LabelID); void emitPseudoInstruction(const MachineInstr &MI); unsigned getMachineSoRegOpValue(const MachineInstr &MI, - const TargetInstrDesc &TID, + const MCInstrDesc &MCID, const MachineOperand &MO, unsigned OpIdx); unsigned getMachineSoImmOpValue(unsigned SoImm); unsigned getAddrModeSBit(const MachineInstr &MI, - const TargetInstrDesc &TID) const; + const MCInstrDesc &MCID) const; void emitDataProcessingInstruction(const MachineInstr &MI, unsigned ImplicitRd = 0, @@ -461,9 +461,9 @@ unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI, else if (MO.isSymbol()) emitExternalSymbolAddress(MO.getSymbolName(), ARM::reloc_arm_branch); else if (MO.isCPI()) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // For VFP load, the immediate offset is multiplied by 4. - unsigned Reloc = ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm) + unsigned Reloc = ((MCID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm) ? 
ARM::reloc_arm_vfp_cp_entry : ARM::reloc_arm_cp_entry; emitConstPoolAddress(MO.getIndex(), Reloc); } else if (MO.isJTI()) @@ -830,7 +830,7 @@ void ARMCodeEmitter::emitLEApcrelInstruction(const MachineInstr &MI) { void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) { // It's basically add r, pc, (LJTI - $+8) - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Emit the 'add' instruction. unsigned Binary = 0x4 << 21; // add: Insts{24-21} = 0b0100 @@ -839,7 +839,7 @@ void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) { Binary |= II->getPredicate(&MI) << ARMII::CondShift; // Encode S bit if MI modifies CPSR. - Binary |= getAddrModeSBit(MI, TID); + Binary |= getAddrModeSBit(MI, MCID); // Encode Rd. Binary |= getMachineOpValue(MI, 0) << ARMII::RegRdShift; @@ -999,7 +999,7 @@ void ARMCodeEmitter::emitPseudoInstruction(const MachineInstr &MI) { } unsigned ARMCodeEmitter::getMachineSoRegOpValue(const MachineInstr &MI, - const TargetInstrDesc &TID, + const MCInstrDesc &MCID, const MachineOperand &MO, unsigned OpIdx) { unsigned Binary = getMachineOpValue(MI, MO); @@ -1069,8 +1069,8 @@ unsigned ARMCodeEmitter::getMachineSoImmOpValue(unsigned SoImm) { } unsigned ARMCodeEmitter::getAddrModeSBit(const MachineInstr &MI, - const TargetInstrDesc &TID) const { - for (unsigned i = MI.getNumOperands(), e = TID.getNumOperands(); i >= e; --i){ + const MCInstrDesc &MCID) const { + for (unsigned i = MI.getNumOperands(), e = MCID.getNumOperands(); i >= e; --i){ const MachineOperand &MO = MI.getOperand(i-1); if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) return 1 << ARMII::S_BitShift; @@ -1081,7 +1081,7 @@ unsigned ARMCodeEmitter::getAddrModeSBit(const MachineInstr &MI, void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI, unsigned ImplicitRd, unsigned ImplicitRn) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1095,10 +1095,10 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI, Binary |= II->getPredicate(&MI) << ARMII::CondShift; // Encode S bit if MI modifies CPSR. - Binary |= getAddrModeSBit(MI, TID); + Binary |= getAddrModeSBit(MI, MCID); // Encode register def if there is one. - unsigned NumDefs = TID.getNumDefs(); + unsigned NumDefs = MCID.getNumDefs(); unsigned OpIdx = 0; if (NumDefs) Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift; @@ -1106,7 +1106,23 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI, // Special handling for implicit use (e.g. PC). Binary |= (getARMRegisterNumbering(ImplicitRd) << ARMII::RegRdShift); - if ((TID.Opcode == ARM::BFC) || (TID.Opcode == ARM::BFI)) { + if (MCID.Opcode == ARM::MOVi16) { + // Get immediate from MI. + unsigned Lo16 = getMovi32Value(MI, MI.getOperand(OpIdx), + ARM::reloc_arm_movw); + // Encode imm which is the same as in emitMOVi32immInstruction(). 
+ Binary |= Lo16 & 0xFFF; + Binary |= ((Lo16 >> 12) & 0xF) << 16; + emitWordLE(Binary); + return; + } else if(MCID.Opcode == ARM::MOVTi16) { + unsigned Hi16 = (getMovi32Value(MI, MI.getOperand(OpIdx), + ARM::reloc_arm_movt) >> 16); + Binary |= Hi16 & 0xFFF; + Binary |= ((Hi16 >> 12) & 0xF) << 16; + emitWordLE(Binary); + return; + } else if ((MCID.Opcode == ARM::BFC) || (MCID.Opcode == ARM::BFI)) { uint32_t v = ~MI.getOperand(2).getImm(); int32_t lsb = CountTrailingZeros_32(v); int32_t msb = (32 - CountLeadingZeros_32(v)) - 1; @@ -1115,7 +1131,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI, Binary |= (lsb & 0x1F) << 7; emitWordLE(Binary); return; - } else if ((TID.Opcode == ARM::UBFX) || (TID.Opcode == ARM::SBFX)) { + } else if ((MCID.Opcode == ARM::UBFX) || (MCID.Opcode == ARM::SBFX)) { // Encode Rn in Instr{0-3} Binary |= getMachineOpValue(MI, OpIdx++); @@ -1130,11 +1146,11 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI, } // If this is a two-address operand, skip it. e.g. MOVCCr operand 1. - if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) ++OpIdx; // Encode first non-shifter register operand if there is one. - bool isUnary = TID.TSFlags & ARMII::UnaryDP; + bool isUnary = MCID.TSFlags & ARMII::UnaryDP; if (!isUnary) { if (ImplicitRn) // Special handling for implicit use (e.g. PC). @@ -1147,9 +1163,9 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI, // Encode shifter operand. const MachineOperand &MO = MI.getOperand(OpIdx); - if ((TID.TSFlags & ARMII::FormMask) == ARMII::DPSoRegFrm) { + if ((MCID.TSFlags & ARMII::FormMask) == ARMII::DPSoRegFrm) { // Encode SoReg. - emitWordLE(Binary | getMachineSoRegOpValue(MI, TID, MO, OpIdx)); + emitWordLE(Binary | getMachineSoRegOpValue(MI, MCID, MO, OpIdx)); return; } @@ -1168,9 +1184,9 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI, void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI, unsigned ImplicitRd, unsigned ImplicitRn) { - const TargetInstrDesc &TID = MI.getDesc(); - unsigned Form = TID.TSFlags & ARMII::FormMask; - bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0; + const MCInstrDesc &MCID = MI.getDesc(); + unsigned Form = MCID.TSFlags & ARMII::FormMask; + bool IsPrePost = (MCID.TSFlags & ARMII::IndexModeMask) != 0; // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1216,7 +1232,7 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI, Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift; // If this is a two-address operand, skip it. e.g. LDR_PRE. - if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) + if (!Skipped && MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) ++OpIdx; const MachineOperand &MO2 = MI.getOperand(OpIdx); @@ -1252,9 +1268,9 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI, void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI, unsigned ImplicitRn) { - const TargetInstrDesc &TID = MI.getDesc(); - unsigned Form = TID.TSFlags & ARMII::FormMask; - bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0; + const MCInstrDesc &MCID = MI.getDesc(); + unsigned Form = MCID.TSFlags & ARMII::FormMask; + bool IsPrePost = (MCID.TSFlags & ARMII::IndexModeMask) != 0; // Part of binary is determined by TableGn. 
unsigned Binary = getBinaryCodeForInstr(MI); @@ -1276,7 +1292,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI, Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift; // Skip LDRD and STRD's second operand. - if (TID.Opcode == ARM::LDRD || TID.Opcode == ARM::STRD) + if (MCID.Opcode == ARM::LDRD || MCID.Opcode == ARM::STRD) ++OpIdx; // Set second operand @@ -1287,7 +1303,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI, Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift; // If this is a two-address operand, skip it. e.g. LDRH_POST. - if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) + if (!Skipped && MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) ++OpIdx; const MachineOperand &MO2 = MI.getOperand(OpIdx); @@ -1337,8 +1353,8 @@ static unsigned getAddrModeUPBits(unsigned Mode) { } void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); - bool IsUpdating = (TID.TSFlags & ARMII::IndexModeMask) != 0; + const MCInstrDesc &MCID = MI.getDesc(); + bool IsUpdating = (MCID.TSFlags & ARMII::IndexModeMask) != 0; // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1382,7 +1398,7 @@ void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) { } void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1391,12 +1407,12 @@ void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) { Binary |= II->getPredicate(&MI) << ARMII::CondShift; // Encode S bit if MI modifies CPSR. - Binary |= getAddrModeSBit(MI, TID); + Binary |= getAddrModeSBit(MI, MCID); // 32x32->64bit operations have two destination registers. The number // of register definitions will tell us if that's what we're dealing with. unsigned OpIdx = 0; - if (TID.getNumDefs() == 2) + if (MCID.getNumDefs() == 2) Binary |= getMachineOpValue (MI, OpIdx++) << ARMII::RegRdLoShift; // Encode Rd @@ -1410,16 +1426,16 @@ void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) { // Many multiple instructions (e.g. MLA) have three src operands. Encode // it as Rn (for multiply, that's in the same offset as RdLo. - if (TID.getNumOperands() > OpIdx && - !TID.OpInfo[OpIdx].isPredicate() && - !TID.OpInfo[OpIdx].isOptionalDef()) + if (MCID.getNumOperands() > OpIdx && + !MCID.OpInfo[OpIdx].isPredicate() && + !MCID.OpInfo[OpIdx].isOptionalDef()) Binary |= getMachineOpValue(MI, OpIdx) << ARMII::RegRdLoShift; emitWordLE(Binary); } void ARMCodeEmitter::emitExtendInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1448,15 +1464,15 @@ void ARMCodeEmitter::emitExtendInstruction(const MachineInstr &MI) { // Encode rot imm (0, 8, 16, or 24) if it has a rotate immediate operand. 
if (MI.getOperand(OpIdx).isImm() && - !TID.OpInfo[OpIdx].isPredicate() && - !TID.OpInfo[OpIdx].isOptionalDef()) + !MCID.OpInfo[OpIdx].isPredicate() && + !MCID.OpInfo[OpIdx].isOptionalDef()) Binary |= (getMachineOpValue(MI, OpIdx) / 8) << ARMII::ExtRotImmShift; emitWordLE(Binary); } void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1465,7 +1481,7 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) { Binary |= II->getPredicate(&MI) << ARMII::CondShift; // PKH instructions are finished at this point - if (TID.Opcode == ARM::PKHBT || TID.Opcode == ARM::PKHTB) { + if (MCID.Opcode == ARM::PKHBT || MCID.Opcode == ARM::PKHTB) { emitWordLE(Binary); return; } @@ -1476,9 +1492,9 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) { Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift; const MachineOperand &MO = MI.getOperand(OpIdx++); - if (OpIdx == TID.getNumOperands() || - TID.OpInfo[OpIdx].isPredicate() || - TID.OpInfo[OpIdx].isOptionalDef()) { + if (OpIdx == MCID.getNumOperands() || + MCID.OpInfo[OpIdx].isPredicate() || + MCID.OpInfo[OpIdx].isOptionalDef()) { // Encode Rm and it's done. Binary |= getMachineOpValue(MI, MO); emitWordLE(Binary); @@ -1493,7 +1509,7 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) { // Encode shift_imm. unsigned ShiftAmt = MI.getOperand(OpIdx).getImm(); - if (TID.Opcode == ARM::PKHTB) { + if (MCID.Opcode == ARM::PKHTB) { assert(ShiftAmt != 0 && "PKHTB shift_imm is 0!"); if (ShiftAmt == 32) ShiftAmt = 0; @@ -1505,7 +1521,7 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) { } void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Part of binary is determined by TableGen. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1518,11 +1534,11 @@ void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) { // Encode saturate bit position. unsigned Pos = MI.getOperand(1).getImm(); - if (TID.Opcode == ARM::SSAT || TID.Opcode == ARM::SSAT16) + if (MCID.Opcode == ARM::SSAT || MCID.Opcode == ARM::SSAT16) Pos -= 1; assert((Pos < 16 || (Pos < 32 && - TID.Opcode != ARM::SSAT16 && - TID.Opcode != ARM::USAT16)) && + MCID.Opcode != ARM::SSAT16 && + MCID.Opcode != ARM::USAT16)) && "saturate bit position out of range"); Binary |= Pos << 16; @@ -1530,7 +1546,7 @@ void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) { Binary |= getMachineOpValue(MI, 2); // Encode shift_imm. 
- if (TID.getNumOperands() == 4) { + if (MCID.getNumOperands() == 4) { unsigned ShiftOp = MI.getOperand(3).getImm(); ARM_AM::ShiftOpc Opc = ARM_AM::getSORegShOp(ShiftOp); if (Opc == ARM_AM::asr) @@ -1546,9 +1562,9 @@ void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) { } void ARMCodeEmitter::emitBranchInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); - if (TID.Opcode == ARM::TPsoft) { + if (MCID.Opcode == ARM::TPsoft) { llvm_unreachable("ARM::TPsoft FIXME"); // FIXME } @@ -1589,20 +1605,20 @@ void ARMCodeEmitter::emitInlineJumpTable(unsigned JTIndex) { } void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Handle jump tables. - if (TID.Opcode == ARM::BR_JTr || TID.Opcode == ARM::BR_JTadd) { + if (MCID.Opcode == ARM::BR_JTr || MCID.Opcode == ARM::BR_JTadd) { // First emit a ldr pc, [] instruction. emitDataProcessingInstruction(MI, ARM::PC); // Then emit the inline jump table. unsigned JTIndex = - (TID.Opcode == ARM::BR_JTr) + (MCID.Opcode == ARM::BR_JTr) ? MI.getOperand(1).getIndex() : MI.getOperand(2).getIndex(); emitInlineJumpTable(JTIndex); return; - } else if (TID.Opcode == ARM::BR_JTm) { + } else if (MCID.Opcode == ARM::BR_JTm) { // First emit a ldr pc, [] instruction. emitLoadStoreInstruction(MI, ARM::PC); @@ -1617,7 +1633,7 @@ void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) { // Set the conditional execution predicate Binary |= II->getPredicate(&MI) << ARMII::CondShift; - if (TID.Opcode == ARM::BX_RET || TID.Opcode == ARM::MOVPCLR) + if (MCID.Opcode == ARM::BX_RET || MCID.Opcode == ARM::MOVPCLR) // The return register is LR. Binary |= getARMRegisterNumbering(ARM::LR); else @@ -1673,7 +1689,7 @@ static unsigned encodeVFPRm(const MachineInstr &MI, unsigned OpIdx) { } void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1687,16 +1703,16 @@ void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) { Binary |= encodeVFPRd(MI, OpIdx++); // If this is a two-address operand, skip it, e.g. FMACD. - if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) ++OpIdx; // Encode Dn / Sn. - if ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPBinaryFrm) + if ((MCID.TSFlags & ARMII::FormMask) == ARMII::VFPBinaryFrm) Binary |= encodeVFPRn(MI, OpIdx++); - if (OpIdx == TID.getNumOperands() || - TID.OpInfo[OpIdx].isPredicate() || - TID.OpInfo[OpIdx].isOptionalDef()) { + if (OpIdx == MCID.getNumOperands() || + MCID.OpInfo[OpIdx].isPredicate() || + MCID.OpInfo[OpIdx].isOptionalDef()) { // FCMPEZD etc. has only one operand. emitWordLE(Binary); return; @@ -1709,8 +1725,8 @@ void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) { } void ARMCodeEmitter::emitVFPConversionInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); - unsigned Form = TID.TSFlags & ARMII::FormMask; + const MCInstrDesc &MCID = MI.getDesc(); + unsigned Form = MCID.TSFlags & ARMII::FormMask; // Part of binary is determined by TableGn. 
unsigned Binary = getBinaryCodeForInstr(MI); @@ -1806,8 +1822,8 @@ void ARMCodeEmitter::emitVFPLoadStoreInstruction(const MachineInstr &MI) { void ARMCodeEmitter::emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); - bool IsUpdating = (TID.TSFlags & ARMII::IndexModeMask) != 0; + const MCInstrDesc &MCID = MI.getDesc(); + bool IsUpdating = (MCID.TSFlags & ARMII::IndexModeMask) != 0; // Part of binary is determined by TableGn. unsigned Binary = getBinaryCodeForInstr(MI); @@ -1912,8 +1928,8 @@ void ARMCodeEmitter::emitNEONLaneInstruction(const MachineInstr &MI) { unsigned Binary = getBinaryCodeForInstr(MI); unsigned RegTOpIdx, RegNOpIdx, LnOpIdx; - const TargetInstrDesc &TID = MI.getDesc(); - if ((TID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) { + const MCInstrDesc &MCID = MI.getDesc(); + if ((MCID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) { RegTOpIdx = 0; RegNOpIdx = 1; LnOpIdx = 2; @@ -1980,12 +1996,12 @@ void ARMCodeEmitter::emitNEON1RegModImmInstruction(const MachineInstr &MI) { } void ARMCodeEmitter::emitNEON2RegInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); unsigned Binary = getBinaryCodeForInstr(MI); // Destination register is encoded in Dd; source register in Dm. unsigned OpIdx = 0; Binary |= encodeNEONRd(MI, OpIdx++); - if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) ++OpIdx; Binary |= encodeNEONRm(MI, OpIdx); if (IsThumb) @@ -1995,15 +2011,15 @@ void ARMCodeEmitter::emitNEON2RegInstruction(const MachineInstr &MI) { } void ARMCodeEmitter::emitNEON3RegInstruction(const MachineInstr &MI) { - const TargetInstrDesc &TID = MI.getDesc(); + const MCInstrDesc &MCID = MI.getDesc(); unsigned Binary = getBinaryCodeForInstr(MI); // Destination register is encoded in Dd; source registers in Dn and Dm. unsigned OpIdx = 0; Binary |= encodeNEONRd(MI, OpIdx++); - if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) ++OpIdx; Binary |= encodeNEONRn(MI, OpIdx++); - if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) ++OpIdx; Binary |= encodeNEONRm(MI, OpIdx); if (IsThumb) diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index baf95a3..309caee 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -1692,9 +1692,9 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) { const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) { MachineInstr *MI = T2JumpTables[i]; - const TargetInstrDesc &TID = MI->getDesc(); - unsigned NumOps = TID.getNumOperands(); - unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2); + const MCInstrDesc &MCID = MI->getDesc(); + unsigned NumOps = MCID.getNumOperands(); + unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 
3 : 2); MachineOperand JTOP = MI->getOperand(JTOpIdx); unsigned JTI = JTOP.getIndex(); assert(JTI < JT.size()); @@ -1815,9 +1815,9 @@ bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) { const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) { MachineInstr *MI = T2JumpTables[i]; - const TargetInstrDesc &TID = MI->getDesc(); - unsigned NumOps = TID.getNumOperands(); - unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2); + const MCInstrDesc &MCID = MI->getDesc(); + unsigned NumOps = MCID.getNumOperands(); + unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2); MachineOperand JTOP = MI->getOperand(JTOpIdx); unsigned JTI = JTOP.getIndex(); assert(JTI < JT.size()); diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp index b6b3c75..53a5f7d 100644 --- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -68,7 +68,7 @@ namespace { void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI) { - const TargetInstrDesc &Desc = OldMI.getDesc(); + const MCInstrDesc &Desc = OldMI.getDesc(); for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e; ++i) { const MachineOperand &MO = OldMI.getOperand(i); @@ -856,10 +856,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, MI.eraseFromParent(); return true; } + case ARM::tTPsoft: case ARM::TPsoft: { MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(ARM::BL)) + TII->get(Opcode == ARM::tTPsoft ? ARM::tBL : ARM::BL)) .addExternalSymbol("__aeabi_read_tp", 0); MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 5cf73c4..f469d7e 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -219,8 +219,8 @@ class ARMFastISel : public FastISel { // we don't care about implicit defs here, just places we'll need to add a // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR. bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { - const TargetInstrDesc &TID = MI->getDesc(); - if (!TID.hasOptionalDef()) + const MCInstrDesc &MCID = MI->getDesc(); + if (!MCID.hasOptionalDef()) return false; // Look to see if our OptionalDef is defining CPSR or CCR. @@ -234,15 +234,15 @@ bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { } bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) { - const TargetInstrDesc &TID = MI->getDesc(); + const MCInstrDesc &MCID = MI->getDesc(); // If we're a thumb2 or not NEON function we were handled via isPredicable. 
- if ((TID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || + if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || AFI->isThumb2Function()) return false; - for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) - if (TID.OpInfo[i].isPredicate()) + for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) + if (MCID.OpInfo[i].isPredicate()) return true; return false; @@ -278,7 +278,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass* RC) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)); return ResultReg; @@ -288,7 +288,7 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -308,7 +308,7 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -331,7 +331,7 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode, unsigned Op1, bool Op1IsKill, unsigned Op2, bool Op2IsKill) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -355,7 +355,7 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode, unsigned Op0, bool Op0IsKill, uint64_t Imm) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -377,7 +377,7 @@ unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode, unsigned Op0, bool Op0IsKill, const ConstantFP *FPImm) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -400,7 +400,7 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode, unsigned Op1, bool Op1IsKill, uint64_t Imm) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -423,7 +423,7 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) 
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -442,7 +442,7 @@ unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm1, uint64_t Imm2) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) @@ -1549,7 +1549,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, NumBytes = CCInfo.getNextStackOffset(); // Issue CALLSEQ_START - unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode(); + unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown)) .addImm(NumBytes)); @@ -1647,7 +1647,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, const Instruction *I, CallingConv::ID CC, unsigned &NumBytes) { // Issue CALLSEQ_END - unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode(); + unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp)) .addImm(NumBytes).addImm(0)); diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index 4ef2666..9e943e4 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -268,14 +268,14 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const { // bic r4, r4, MaxAlign // mov sp, r4 // FIXME: It will be better just to find spare register here. - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4) - .addReg(ARM::SP, RegState::Kill); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4) + .addReg(ARM::SP, RegState::Kill)); AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2BICri), ARM::R4) .addReg(ARM::R4, RegState::Kill) .addImm(MaxAlign-1))); - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP) - .addReg(ARM::R4, RegState::Kill); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP) + .addReg(ARM::R4, RegState::Kill)); } AFI->setShouldRestoreSPFromFP(true); @@ -293,9 +293,9 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const { .addReg(ARM::SP) .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); else - BuildMI(MBB, MBBI, dl, - TII.get(ARM::tMOVgpr2gpr), RegInfo->getBaseRegister()) - .addReg(ARM::SP); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), + RegInfo->getBaseRegister()) + .addReg(ARM::SP)); } // If the frame has variable sized objects then the epilogue must restore @@ -364,8 +364,9 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, "No scratch register to restore SP from FP!"); emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes, ARMCC::AL, 0, TII); - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP) - .addReg(ARM::R4); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), + ARM::SP) + .addReg(ARM::R4)); } } else { // Thumb2 or ARM. 
@@ -373,8 +374,9 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP) .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); else - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP) - .addReg(FramePtr); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), + ARM::SP) + .addReg(FramePtr)); } } else if (NumBytes) emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes); diff --git a/lib/Target/ARM/ARMGlobalMerge.cpp b/lib/Target/ARM/ARMGlobalMerge.cpp index 3f02383..4bdd4f1 100644 --- a/lib/Target/ARM/ARMGlobalMerge.cpp +++ b/lib/Target/ARM/ARMGlobalMerge.cpp @@ -175,7 +175,9 @@ bool ARMGlobalMerge::doInitialization(Module &M) { continue; // Ignore fancy-aligned globals for now. - if (I->getAlignment() != 0) + unsigned Alignment = I->getAlignment(); + unsigned AllocSize = TD->getTypeAllocSize(I->getType()->getElementType()); + if (Alignment > AllocSize) continue; // Ignore all 'special' globals. @@ -183,7 +185,7 @@ bool ARMGlobalMerge::doInitialization(Module &M) { I->getName().startswith(".llvm.")) continue; - if (TD->getTypeAllocSize(I->getType()->getElementType()) < MaxOffset) { + if (AllocSize < MaxOffset) { const TargetLoweringObjectFile &TLOF = TLI->getObjFileLowering(); if (TLOF.getKindForGlobal(I, TLI->getTargetMachine()).isBSSLocal()) BSSGlobals.push_back(I); diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp index 517bba8..787f6a2 100644 --- a/lib/Target/ARM/ARMHazardRecognizer.cpp +++ b/lib/Target/ARM/ARMHazardRecognizer.cpp @@ -19,11 +19,11 @@ using namespace llvm; static bool hasRAWHazard(MachineInstr *DefMI, MachineInstr *MI, const TargetRegisterInfo &TRI) { // FIXME: Detect integer instructions properly. - const TargetInstrDesc &TID = MI->getDesc(); - unsigned Domain = TID.TSFlags & ARMII::DomainMask; - if (TID.mayStore()) + const MCInstrDesc &MCID = MI->getDesc(); + unsigned Domain = MCID.TSFlags & ARMII::DomainMask; + if (MCID.mayStore()) return false; - unsigned Opcode = TID.getOpcode(); + unsigned Opcode = MCID.getOpcode(); if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD) return false; if ((Domain & ARMII::DomainVFP) || (Domain & ARMII::DomainNEON)) @@ -43,15 +43,15 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) { // Look for special VMLA / VMLS hazards. A VMUL / VADD / VSUB following // a VMLA / VMLS will cause 4 cycle stall. - const TargetInstrDesc &TID = MI->getDesc(); - if (LastMI && (TID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) { + const MCInstrDesc &MCID = MI->getDesc(); + if (LastMI && (MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) { MachineInstr *DefMI = LastMI; - const TargetInstrDesc &LastTID = LastMI->getDesc(); + const MCInstrDesc &LastMCID = LastMI->getDesc(); // Skip over one non-VFP / NEON instruction. - if (!LastTID.isBarrier() && + if (!LastMCID.isBarrier() && // On A9, AGU and NEON/FPU are muxed. 
- !(STI.isCortexA9() && (LastTID.mayLoad() || LastTID.mayStore())) && - (LastTID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) { + !(STI.isCortexA9() && (LastMCID.mayLoad() || LastMCID.mayStore())) && + (LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) { MachineBasicBlock::iterator I = LastMI; if (I != LastMI->getParent()->begin()) { I = llvm::prior(I); diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 6f57a04..2c9481b 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -329,10 +329,10 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const { if (Use->getOpcode() == ISD::CopyToReg) return true; if (Use->isMachineOpcode()) { - const TargetInstrDesc &TID = TII->get(Use->getMachineOpcode()); - if (TID.mayStore()) + const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode()); + if (MCID.mayStore()) return true; - unsigned Opcode = TID.getOpcode(); + unsigned Opcode = MCID.getOpcode(); if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD) return true; // vmlx feeding into another vmlx. We actually want to unfold diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 4ae4af1..fb738cd 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -506,6 +506,9 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setTargetDAGCombine(ISD::VECTOR_SHUFFLE); setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); setTargetDAGCombine(ISD::STORE); + setTargetDAGCombine(ISD::FP_TO_SINT); + setTargetDAGCombine(ISD::FP_TO_UINT); + setTargetDAGCombine(ISD::FDIV); } computeRegisterProperties(); @@ -974,12 +977,12 @@ Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { // Load are scheduled for latency even if there instruction itinerary // is not available. const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); - const TargetInstrDesc &TID = TII->get(N->getMachineOpcode()); + const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); - if (TID.getNumDefs() == 0) + if (MCID.getNumDefs() == 0) return Sched::RegPressure; if (!Itins->isEmpty() && - Itins->getOperandCycle(TID.getSchedClass(), 0) > 2) + Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) return Sched::Latency; return Sched::RegPressure; @@ -5523,7 +5526,7 @@ SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, return SDValue(); } -// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction +// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction // (only after legalization). static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, @@ -5554,25 +5557,25 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, SDNode *V = Vec.getNode(); unsigned nextIndex = 0; - // For each operands to the ADD which are BUILD_VECTORs, + // For each operands to the ADD which are BUILD_VECTORs, // check to see if each of their operands are an EXTRACT_VECTOR with // the same vector and appropriate index. for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { - + SDValue ExtVec0 = N0->getOperand(i); SDValue ExtVec1 = N1->getOperand(i); - + // First operand is the vector, verify its the same. 
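// Illustrative sketch of the DAG shape this vpaddl combine is matching (not
// taken from the patch itself): with a single source vector v,
//   N0 = build_vector (extract_elt v, 0), (extract_elt v, 2), ...
//   N1 = build_vector (extract_elt v, 1), (extract_elt v, 3), ...
//   add N0, N1
// i.e. even lanes plus odd lanes, which is exactly a pairwise add and can be
// emitted as a single vpaddl.s*/vpaddl.u* of v.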
if (V != ExtVec0->getOperand(0).getNode() || V != ExtVec1->getOperand(0).getNode()) return SDValue(); - + // Second is the constant, verify its correct. ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); - + // For the constant, we want to see all the even or all the odd. if (!C0 || !C1 || C0->getZExtValue() != nextIndex || C1->getZExtValue() != nextIndex+1) @@ -5580,7 +5583,7 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, // Increment index. nextIndex+=2; - } else + } else return SDValue(); } @@ -5595,7 +5598,7 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, // Input is the vector. Ops.push_back(Vec); - + // Get widened type and narrowed type. MVT widenType; unsigned numElem = VT.getVectorNumElements(); @@ -5624,7 +5627,7 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); if (Result.getNode()) return Result; - + // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { SDValue Result = combineSelectAndUse(N, N0, N1, DCI); @@ -6479,7 +6482,105 @@ static SDValue PerformVDUPLANECombine(SDNode *N, return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); } -/// getVShiftImm - Check if this is a valid build_vector for the immediate +// isConstVecPow2 - Return true if each vector element is a power of 2, all +// elements are the same constant, C, and Log2(C) ranges from 1 to 32. +static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) +{ + integerPart cN; + integerPart c0 = 0; + for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); + I != E; I++) { + ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); + if (!C) + return false; + + bool isExact; + APFloat APF = C->getValueAPF(); + if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) + != APFloat::opOK || !isExact) + return false; + + c0 = (I == 0) ? cN : c0; + if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) + return false; + } + C = c0; + return true; +} + +/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) +/// can replace combinations of VMUL and VCVT (floating-point to integer) +/// when the VMUL has a constant operand that is a power of 2. +/// +/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): +/// vmul.f32 d16, d17, d16 +/// vcvt.s32.f32 d16, d16 +/// becomes: +/// vcvt.s32.f32 d16, d16, #3 +static SDValue PerformVCVTCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + const ARMSubtarget *Subtarget) { + SelectionDAG &DAG = DCI.DAG; + SDValue Op = N->getOperand(0); + + if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || + Op.getOpcode() != ISD::FMUL) + return SDValue(); + + uint64_t C; + SDValue N0 = Op->getOperand(0); + SDValue ConstVec = Op->getOperand(1); + bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; + + if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || + !isConstVecPow2(ConstVec, isSigned, C)) + return SDValue(); + + unsigned IntrinsicOpcode = isSigned ? 
Intrinsic::arm_neon_vcvtfp2fxs : + Intrinsic::arm_neon_vcvtfp2fxu; + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), + N->getValueType(0), + DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, + DAG.getConstant(Log2_64(C), MVT::i32)); +} + +/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) +/// can replace combinations of VCVT (integer to floating-point) and VDIV +/// when the VDIV has a constant operand that is a power of 2. +/// +/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): +/// vcvt.f32.s32 d16, d16 +/// vdiv.f32 d16, d17, d16 +/// becomes: +/// vcvt.f32.s32 d16, d16, #3 +static SDValue PerformVDIVCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + const ARMSubtarget *Subtarget) { + SelectionDAG &DAG = DCI.DAG; + SDValue Op = N->getOperand(0); + unsigned OpOpcode = Op.getNode()->getOpcode(); + + if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || + (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) + return SDValue(); + + uint64_t C; + SDValue ConstVec = N->getOperand(1); + bool isSigned = OpOpcode == ISD::SINT_TO_FP; + + if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || + !isConstVecPow2(ConstVec, isSigned, C)) + return SDValue(); + + unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : + Intrinsic::arm_neon_vcvtfxu2fp; + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), + Op.getValueType(), + DAG.getConstant(IntrinsicOpcode, MVT::i32), + Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); +} + +/// Getvshiftimm - Check if this is a valid build_vector for the immediate /// operand of a vector shift operation, where all the elements of the /// build_vector must have the same constant integer value. static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { @@ -6868,6 +6969,9 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); + case ISD::FP_TO_SINT: + case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); + case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); case ISD::SHL: case ISD::SRA: @@ -7378,6 +7482,10 @@ ARMTargetLowering::getConstraintType(const std::string &Constraint) const { default: break; case 'l': return C_RegisterClass; case 'w': return C_RegisterClass; + case 'h': return C_RegisterClass; + case 'x': return C_RegisterClass; + case 't': return C_RegisterClass; + case 'j': return C_Other; // Constant for movw. } } else if (Constraint.size() == 2) { switch (Constraint[0]) { @@ -7423,26 +7531,43 @@ ARMTargetLowering::getSingleConstraintMatchWeight( return weight; } -std::pair<unsigned, const TargetRegisterClass*> +typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; +RCPair ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const { if (Constraint.size() == 1) { // GCC ARM Constraint Letters switch (Constraint[0]) { - case 'l': + case 'l': // Low regs or general regs. if (Subtarget->isThumb()) - return std::make_pair(0U, ARM::tGPRRegisterClass); + return RCPair(0U, ARM::tGPRRegisterClass); else - return std::make_pair(0U, ARM::GPRRegisterClass); + return RCPair(0U, ARM::GPRRegisterClass); + case 'h': // High regs or no regs. 
+ if (Subtarget->isThumb()) + return RCPair(0U, ARM::hGPRRegisterClass); + break; case 'r': - return std::make_pair(0U, ARM::GPRRegisterClass); + return RCPair(0U, ARM::GPRRegisterClass); case 'w': if (VT == MVT::f32) - return std::make_pair(0U, ARM::SPRRegisterClass); + return RCPair(0U, ARM::SPRRegisterClass); if (VT.getSizeInBits() == 64) - return std::make_pair(0U, ARM::DPRRegisterClass); + return RCPair(0U, ARM::DPRRegisterClass); if (VT.getSizeInBits() == 128) - return std::make_pair(0U, ARM::QPRRegisterClass); + return RCPair(0U, ARM::QPRRegisterClass); + break; + case 'x': + if (VT == MVT::f32) + return RCPair(0U, ARM::SPR_8RegisterClass); + if (VT.getSizeInBits() == 64) + return RCPair(0U, ARM::DPR_8RegisterClass); + if (VT.getSizeInBits() == 128) + return RCPair(0U, ARM::QPR_8RegisterClass); + break; + case 't': + if (VT == MVT::f32) + return RCPair(0U, ARM::SPRRegisterClass); break; } } @@ -7452,47 +7577,6 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } -std::vector<unsigned> ARMTargetLowering:: -getRegClassForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { - if (Constraint.size() != 1) - return std::vector<unsigned>(); - - switch (Constraint[0]) { // GCC ARM Constraint Letters - default: break; - case 'l': - return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, - ARM::R4, ARM::R5, ARM::R6, ARM::R7, - 0); - case 'r': - return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, - ARM::R4, ARM::R5, ARM::R6, ARM::R7, - ARM::R8, ARM::R9, ARM::R10, ARM::R11, - ARM::R12, ARM::LR, 0); - case 'w': - if (VT == MVT::f32) - return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3, - ARM::S4, ARM::S5, ARM::S6, ARM::S7, - ARM::S8, ARM::S9, ARM::S10, ARM::S11, - ARM::S12,ARM::S13,ARM::S14,ARM::S15, - ARM::S16,ARM::S17,ARM::S18,ARM::S19, - ARM::S20,ARM::S21,ARM::S22,ARM::S23, - ARM::S24,ARM::S25,ARM::S26,ARM::S27, - ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); - if (VT.getSizeInBits() == 64) - return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, - ARM::D4, ARM::D5, ARM::D6, ARM::D7, - ARM::D8, ARM::D9, ARM::D10,ARM::D11, - ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0); - if (VT.getSizeInBits() == 128) - return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, - ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0); - break; - } - - return std::vector<unsigned>(); -} - /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, @@ -7507,6 +7591,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter = Constraint[0]; switch (ConstraintLetter) { default: break; + case 'j': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); @@ -7521,6 +7606,13 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, return; switch (ConstraintLetter) { + case 'j': + // Constant suitable for movw, must be between 0 and + // 65535. 
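// For reference, a hypothetical user-level example of the new 'j' constraint
// (a sketch, not part of this patch); on v6t2+ targets it feeds a 16-bit
// constant straight into a movw:
//
//   unsigned r;
//   asm("movw %0, %1" : "=r"(r) : "j"(0xFACE));   // emits e.g. movw r3, #64206
//
// The 'h', 'x', and 't' additions above are plain register-class constraints
// (Thumb high registers and the restricted / single-precision VFP classes).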
+ if (Subtarget->hasV6T2Ops()) + if (CVal >= 0 && CVal <= 65535) + break; + return; case 'I': if (Subtarget->isThumb1Only()) { // This must be a constant between 0 and 255, for ADD diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 21a9a3a..dd9df0e 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -306,9 +306,6 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const; - std::vector<unsigned> - getRegClassForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. If hasMemory is diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp index 6f48d96..adcbf18 100644 --- a/lib/Target/ARM/ARMInstrInfo.cpp +++ b/lib/Target/ARM/ARMInstrInfo.cpp @@ -14,7 +14,6 @@ #include "ARMInstrInfo.h" #include "ARM.h" #include "ARMAddressingModes.h" -#include "ARMGenInstrInfo.inc" #include "ARMMachineFunctionInfo.h" #include "llvm/ADT/STLExtras.h" #include "llvm/CodeGen/LiveVariables.h" diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index 5c013de..cdb1fe0 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -676,7 +676,7 @@ include "ARMInstrFormats.td" /// binop that produces a value. multiclass AsI1_bin_irs<bits<4> opcod, string opc, InstrItinClass iii, InstrItinClass iir, InstrItinClass iis, - PatFrag opnode, bit Commutable = 0> { + PatFrag opnode, string baseOpc, bit Commutable = 0> { // The register-immediate version is re-materializable. This is useful // in particular for taking the address of a local. let isReMaterializable = 1 in { @@ -716,6 +716,24 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc, let Inst{15-12} = Rd; let Inst{11-0} = shift; } + + // Assembly aliases for optional destination operand when it's the same + // as the source operand. + def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"), + (!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn, + so_imm:$imm, pred:$p, + cc_out:$s)>, + Requires<[IsARM]>; + def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"), + (!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn, + GPR:$Rm, pred:$p, + cc_out:$s)>, + Requires<[IsARM]>; + def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"), + (!cast<Instruction>(!strconcat(baseOpc, "rs")) GPR:$Rdn, GPR:$Rdn, + so_reg:$shift, pred:$p, + cc_out:$s)>, + Requires<[IsARM]>; } /// AI1_bin_s_irs - Similar to AsI1_bin_irs except it sets the 's' bit so the @@ -1988,6 +2006,8 @@ defm STM : arm_ldst_mult<"stm", 0, LdStMulFrm, IIC_iStore_m, IIC_iStore_mu>; } // neverHasSideEffects // Load / Store Multiple Mnemonic Aliases +def : MnemonicAlias<"ldmfd", "ldmia">; +def : MnemonicAlias<"stmfd", "stmdb">; def : MnemonicAlias<"ldm", "ldmia">; def : MnemonicAlias<"stm", "stmia">; @@ -2205,10 +2225,10 @@ def UBFX : I<(outs GPR:$Rd), defm ADD : AsI1_bin_irs<0b0100, "add", IIC_iALUi, IIC_iALUr, IIC_iALUsr, - BinOpFrag<(add node:$LHS, node:$RHS)>, 1>; + BinOpFrag<(add node:$LHS, node:$RHS)>, "ADD", 1>; defm SUB : AsI1_bin_irs<0b0010, "sub", IIC_iALUi, IIC_iALUr, IIC_iALUsr, - BinOpFrag<(sub node:$LHS, node:$RHS)>>; + BinOpFrag<(sub node:$LHS, node:$RHS)>, "SUB">; // ADD and SUB with 's' bit set. 
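// Sketch of what the two-operand aliases defined in AsI1_bin_irs above now
// accept in ARM mode (examples only, not part of this patch):
//   add  r0, #4            @ matches as: add  r0, r0, #4
//   subs r1, r2            @ matches as: subs r1, r1, r2
// The alias simply reuses $Rdn as both the destination and the first source.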
defm ADDS : AI1_bin_s_irs<0b0100, "adds", @@ -2531,16 +2551,16 @@ def : ARMV6Pat<(int_arm_usat GPR:$a, imm:$pos), (USAT imm:$pos, GPR:$a, 0)>; defm AND : AsI1_bin_irs<0b0000, "and", IIC_iBITi, IIC_iBITr, IIC_iBITsr, - BinOpFrag<(and node:$LHS, node:$RHS)>, 1>; + BinOpFrag<(and node:$LHS, node:$RHS)>, "AND", 1>; defm ORR : AsI1_bin_irs<0b1100, "orr", IIC_iBITi, IIC_iBITr, IIC_iBITsr, - BinOpFrag<(or node:$LHS, node:$RHS)>, 1>; + BinOpFrag<(or node:$LHS, node:$RHS)>, "ORR", 1>; defm EOR : AsI1_bin_irs<0b0001, "eor", IIC_iBITi, IIC_iBITr, IIC_iBITsr, - BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>; + BinOpFrag<(xor node:$LHS, node:$RHS)>, "EOR", 1>; defm BIC : AsI1_bin_irs<0b1110, "bic", IIC_iBITi, IIC_iBITr, IIC_iBITsr, - BinOpFrag<(and node:$LHS, (not node:$RHS))>>; + BinOpFrag<(and node:$LHS, (not node:$RHS))>, "BIC">; def BFC : I<(outs GPR:$Rd), (ins GPR:$src, bf_inv_mask_imm:$imm), AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi, diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index 44fbc02..0b14976 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -34,9 +34,10 @@ def imm0_7_neg : PatLeaf<(i32 imm), [{ return (uint32_t)-N->getZExtValue() < 8; }], imm_neg_XFORM>; -def imm0_255 : ImmLeaf<i32, [{ - return Imm >= 0 && Imm < 256; -}]>; +def imm0_255_asmoperand : AsmOperandClass { let Name = "Imm0_255"; } +def imm0_255 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 256; }]> { + let ParserMatchClass = imm0_255_asmoperand; +} def imm0_255_comp : PatLeaf<(i32 imm), [{ return ~((uint32_t)N->getZExtValue()) < 256; }]>; @@ -407,15 +408,8 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { // FIXME: remove when we have a way to marking a MI with these properties. let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1, hasExtraDefRegAllocReq = 1 in -def tPOP_RET : T1I<(outs), (ins pred:$p, reglist:$regs, variable_ops), - IIC_iPop_Br, - "pop${p}\t$regs", []>, - T1Misc<{1,1,0,?,?,?,?}> { - // A8.6.121 - bits<16> regs; - let Inst{8} = regs{15}; // registers = P:'0000000':register_list - let Inst{7-0} = regs{7-0}; -} +def tPOP_RET : tPseudoInst<(outs), (ins pred:$p, reglist:$regs, variable_ops), + Size2Bytes, IIC_iPop_Br, []>; // All calls clobber the non-callee saved registers. SP is marked as a use to // prevent stack-pointer assignments that appear immediately before calls from @@ -685,19 +679,6 @@ def tLDRspi : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_sp:$addr), IIC_iLoad_i, let Inst{7-0} = addr; } -// Special instruction for restore. It cannot clobber condition register -// when it's expanded by eliminateCallFramePseudoInstr(). -let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1 in -// FIXME: Pseudo for tLDRspi -def tRestore : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoad_i, - "ldr", "\t$dst, $addr", []>, - T1LdStSP<{1,?,?}> { - bits<3> Rt; - bits<8> addr; - let Inst{10-8} = Rt; - let Inst{7-0} = addr; -} - // Load tconstpool // FIXME: Use ldr.n to work around a Darwin assembler bug. let canFoldAsLoad = 1, isReMaterializable = 1 in @@ -754,19 +735,6 @@ def tSTRspi : T1pIs<(outs), (ins tGPR:$Rt, t_addrmode_sp:$addr), IIC_iStore_i, let Inst{7-0} = addr; } -let mayStore = 1, neverHasSideEffects = 1 in -// Special instruction for spill. It cannot clobber condition register when it's -// expanded by eliminateCallFramePseudoInstr(). 
-// FIXME: Pseudo for tSTRspi -def tSpill : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStore_i, - "str", "\t$src, $addr", []>, - T1LdStSP<{0,?,?}> { - bits<3> Rt; - bits<8> addr; - let Inst{10-8} = Rt; - let Inst{7-0} = addr; -} - //===----------------------------------------------------------------------===// // Load / store multiple Instructions. // @@ -1072,7 +1040,7 @@ def tLSRrr : // A8.6.91 // Move register let isMoveImm = 1 in -def tMOVi8 : T1sI<(outs tGPR:$Rd), (ins i32imm:$imm8), IIC_iMOVi, +def tMOVi8 : T1sI<(outs tGPR:$Rd), (ins imm0_255:$imm8), IIC_iMOVi, "mov", "\t$Rd, $imm8", [(set tGPR:$Rd, imm0_255:$imm8)]>, T1General<{1,0,0,?,?}> { @@ -1086,15 +1054,15 @@ def tMOVi8 : T1sI<(outs tGPR:$Rd), (ins i32imm:$imm8), IIC_iMOVi, // TODO: A7-73: MOV(2) - mov setting flag. let neverHasSideEffects = 1 in { -// FIXME: Make this predicable. -def tMOVr : T1I<(outs tGPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr, - "mov\t$Rd, $Rm", []>, - T1Special<0b1000> { +def tMOVr : Thumb1pI<(outs GPR:$Rd), (ins GPR:$Rm), AddrModeNone, + Size2Bytes, IIC_iMOVr, + "mov", "\t$Rd, $Rm", "", []>, + T1Special<{1,0,?,?}> { // A8.6.97 bits<4> Rd; bits<4> Rm; - // Bits {7-6} are encoded by the T1Special value. - let Inst{5-3} = Rm{2-0}; + let Inst{7} = Rd{3}; + let Inst{6-3} = Rm; let Inst{2-0} = Rd{2-0}; } let Defs = [CPSR] in @@ -1107,39 +1075,6 @@ def tMOVSr : T1I<(outs tGPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr, let Inst{5-3} = Rm; let Inst{2-0} = Rd; } - -// FIXME: Make these predicable. -def tMOVgpr2tgpr : T1I<(outs tGPR:$Rd), (ins GPR:$Rm), IIC_iMOVr, - "mov\t$Rd, $Rm", []>, - T1Special<{1,0,0,?}> { - // A8.6.97 - bits<4> Rd; - bits<4> Rm; - // Bit {7} is encoded by the T1Special value. - let Inst{6-3} = Rm; - let Inst{2-0} = Rd{2-0}; -} -def tMOVtgpr2gpr : T1I<(outs GPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr, - "mov\t$Rd, $Rm", []>, - T1Special<{1,0,?,0}> { - // A8.6.97 - bits<4> Rd; - bits<4> Rm; - // Bit {6} is encoded by the T1Special value. - let Inst{7} = Rd{3}; - let Inst{5-3} = Rm{2-0}; - let Inst{2-0} = Rd{2-0}; -} -def tMOVgpr2gpr : T1I<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVr, - "mov\t$Rd, $Rm", []>, - T1Special<{1,0,?,?}> { - // A8.6.97 - bits<4> Rd; - bits<4> Rm; - let Inst{7} = Rd{3}; - let Inst{6-3} = Rm; - let Inst{2-0} = Rd{2-0}; -} } // neverHasSideEffects // Multiply register @@ -1424,13 +1359,11 @@ def tCDP : T1Cop<(outs), (ins p_imm:$cop, i32imm:$opc1, // // __aeabi_read_tp preserves the registers r1-r3. -let isCall = 1, Defs = [R0, LR], Uses = [SP] in -def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br, - "bl\t__aeabi_read_tp", - [(set R0, ARMthread_pointer)]> { - // Encoding is 0xf7fffffe. - let Inst = 0xf7fffffe; -} +// This is a pseudo inst so that we can get the encoding right, +// complete with fixup for the aeabi_read_tp function. +let isCall = 1, Defs = [R0, R12, LR, CPSR], Uses = [SP] in +def tTPsoft : tPseudoInst<(outs), (ins), Size4Bytes, IIC_Br, + [(set R0, ARMthread_pointer)]>; //===----------------------------------------------------------------------===// // SJLJ Exception handling intrinsics diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index 090670b..d49b282 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -44,9 +44,11 @@ def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{ // t2_so_imm - Match a 32-bit immediate operand, which is an // 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit // immediate splatted into multiple bytes of the word. 
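// For reference (values illustrative, not part of this patch), constants that
// ARM_AM::getT2SOImmVal accepts include the plain 8-bit, splat and
// rotated-8-bit forms, e.g.
//   0x000000AB, 0x00AB00AB, 0xAB00AB00, 0xABABABAB, 0xAB000000, 0x000003FC
// whereas a value such as 0x00ABCDEF is rejected (the call returns -1), so it
// will not match this operand class in the asm parser either.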
+def t2_so_imm_asmoperand : AsmOperandClass { let Name = "T2SOImm"; } def t2_so_imm : Operand<i32>, ImmLeaf<i32, [{ return ARM_AM::getT2SOImmVal(Imm) != -1; }]> { + let ParserMatchClass = t2_so_imm_asmoperand; let EncoderMethod = "getT2SOImmOpValue"; } @@ -463,7 +465,8 @@ multiclass T2I_un_irs<bits<4> opcod, string opc, /// changed to modify CPSR. multiclass T2I_bin_irs<bits<4> opcod, string opc, InstrItinClass iii, InstrItinClass iir, InstrItinClass iis, - PatFrag opnode, bit Commutable = 0, string wide = ""> { + PatFrag opnode, string baseOpc, bit Commutable = 0, + string wide = ""> { // shifted imm def ri : T2sTwoRegImm< (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), iii, @@ -495,14 +498,31 @@ multiclass T2I_bin_irs<bits<4> opcod, string opc, let Inst{26-25} = 0b01; let Inst{24-21} = opcod; } + // Assembly aliases for optional destination operand when it's the same + // as the source operand. + def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"), + (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rdn, rGPR:$Rdn, + t2_so_imm:$imm, pred:$p, + cc_out:$s)>, + Requires<[IsThumb2]>; + def : InstAlias<!strconcat(opc, "${s}${p}", wide, " $Rdn, $Rm"), + (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rdn, rGPR:$Rdn, + rGPR:$Rm, pred:$p, + cc_out:$s)>, + Requires<[IsThumb2]>; + def : InstAlias<!strconcat(opc, "${s}${p}", wide, " $Rdn, $shift"), + (!cast<Instruction>(!strconcat(baseOpc, "rs")) rGPR:$Rdn, rGPR:$Rdn, + t2_so_reg:$shift, pred:$p, + cc_out:$s)>, + Requires<[IsThumb2]>; } /// T2I_bin_w_irs - Same as T2I_bin_irs except these operations need -// the ".w" prefix to indicate that they are wide. +// the ".w" suffix to indicate that they are wide. multiclass T2I_bin_w_irs<bits<4> opcod, string opc, InstrItinClass iii, InstrItinClass iir, InstrItinClass iis, - PatFrag opnode, bit Commutable = 0> : - T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, Commutable, ".w">; + PatFrag opnode, string baseOpc, bit Commutable = 0> : + T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, baseOpc, Commutable, ".w">; /// T2I_rbin_is - Same as T2I_bin_irs except the order of operands are /// reversed. The 'rr' form is only defined for the disassembler; for codegen @@ -1149,63 +1169,6 @@ def t2LEApcrelJT : t2PseudoInst<(outs rGPR:$Rd), []>; -// FIXME: None of these add/sub SP special instructions should be necessary -// at all for thumb2 since they use the same encodings as the generic -// add/sub instructions. In thumb1 we need them since they have dedicated -// encodings. At the least, they should be pseudo instructions. 
-// ADD r, sp, {so_imm|i12} -let isCodeGenOnly = 1 in { -def t2ADDrSPi : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm), - IIC_iALUi, "add", ".w\t$Rd, $Rn, $imm", []> { - let Inst{31-27} = 0b11110; - let Inst{25} = 0; - let Inst{24-21} = 0b1000; - let Inst{15} = 0; -} -def t2ADDrSPi12 : T2TwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, imm0_4095:$imm), - IIC_iALUi, "addw", "\t$Rd, $Rn, $imm", []> { - let Inst{31-27} = 0b11110; - let Inst{25-20} = 0b100000; - let Inst{15} = 0; -} - -// ADD r, sp, so_reg -def t2ADDrSPs : T2sTwoRegShiftedReg< - (outs GPR:$Rd), (ins GPR:$Rn, t2_so_reg:$ShiftedRm), - IIC_iALUsi, "add", ".w\t$Rd, $Rn, $ShiftedRm", []> { - let Inst{31-27} = 0b11101; - let Inst{26-25} = 0b01; - let Inst{24-21} = 0b1000; - let Inst{15} = 0; -} - -// SUB r, sp, {so_imm|i12} -def t2SUBrSPi : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm), - IIC_iALUi, "sub", ".w\t$Rd, $Rn, $imm", []> { - let Inst{31-27} = 0b11110; - let Inst{25} = 0; - let Inst{24-21} = 0b1101; - let Inst{15} = 0; -} -def t2SUBrSPi12 : T2TwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, imm0_4095:$imm), - IIC_iALUi, "subw", "\t$Rd, $Rn, $imm", []> { - let Inst{31-27} = 0b11110; - let Inst{25-20} = 0b101010; - let Inst{15} = 0; -} - -// SUB r, sp, so_reg -def t2SUBrSPs : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_reg:$imm), - IIC_iALUsi, - "sub", "\t$Rd, $Rn, $imm", []> { - let Inst{31-27} = 0b11101; - let Inst{26-25} = 0b01; - let Inst{24-21} = 0b1101; - let Inst{19-16} = 0b1101; // Rn = sp - let Inst{15} = 0; -} -} // end isCodeGenOnly = 1 - //===----------------------------------------------------------------------===// // Load / store Instructions. // @@ -1645,6 +1608,10 @@ def t2MOVi : T2sOneRegImm<(outs rGPR:$Rd), (ins t2_so_imm:$imm), IIC_iMOVi, let Inst{15} = 0; } +def : InstAlias<"mov${s}${p} $Rd, $imm", (t2MOVi rGPR:$Rd, t2_so_imm:$imm, + pred:$p, cc_out:$s)>, + Requires<[IsThumb2]>; + let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins i32imm_hilo16:$imm), IIC_iMOVi, "movw", "\t$Rd, $imm", @@ -2063,17 +2030,18 @@ def t2MOVsra_flag : T2TwoRegShiftImm< defm t2AND : T2I_bin_w_irs<0b0000, "and", IIC_iBITi, IIC_iBITr, IIC_iBITsi, - BinOpFrag<(and node:$LHS, node:$RHS)>, 1>; + BinOpFrag<(and node:$LHS, node:$RHS)>, "t2AND", 1>; defm t2ORR : T2I_bin_w_irs<0b0010, "orr", IIC_iBITi, IIC_iBITr, IIC_iBITsi, - BinOpFrag<(or node:$LHS, node:$RHS)>, 1>; + BinOpFrag<(or node:$LHS, node:$RHS)>, "t2ORR", 1>; defm t2EOR : T2I_bin_w_irs<0b0100, "eor", IIC_iBITi, IIC_iBITr, IIC_iBITsi, - BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>; + BinOpFrag<(xor node:$LHS, node:$RHS)>, "t2EOR", 1>; defm t2BIC : T2I_bin_w_irs<0b0001, "bic", IIC_iBITi, IIC_iBITr, IIC_iBITsi, - BinOpFrag<(and node:$LHS, (not node:$RHS))>>; + BinOpFrag<(and node:$LHS, (not node:$RHS))>, + "t2BIC">; class T2BitFI<dag oops, dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern> @@ -2173,7 +2141,8 @@ let Constraints = "$src = $Rd" in { defm t2ORN : T2I_bin_irs<0b0011, "orn", IIC_iBITi, IIC_iBITr, IIC_iBITsi, - BinOpFrag<(or node:$LHS, (not node:$RHS))>, 0, "">; + BinOpFrag<(or node:$LHS, (not node:$RHS))>, + "t2ORN", 0, "">; // Prefer over of t2EORri ra, rb, -1 because mvn has 16-bit version let AddedComplexity = 1 in @@ -2709,6 +2678,8 @@ def t2MOVCCr : T2TwoReg< let Inst{7-4} = 0b0000; } +// FIXME: Pseudo-ize these. For now, just mark codegen only. 
+let isCodeGenOnly = 1 in { let isMoveImm = 1 in def t2MOVCCi : T2OneRegImm<(outs rGPR:$Rd), (ins rGPR:$false, t2_so_imm:$imm), IIC_iCMOVi, "mov", ".w\t$Rd, $imm", @@ -2789,6 +2760,7 @@ def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd), IIC_iCMOVsi, "ror", ".w\t$Rd, $Rm, $imm", []>, RegConstraint<"$false = $Rd">; } // neverHasSideEffects +} // isCodeGenOnly = 1 //===----------------------------------------------------------------------===// // Atomic operations intrinsics @@ -2937,22 +2909,6 @@ def t2CLREX : T2XI<(outs), (ins), NoItinerary, "clrex", } //===----------------------------------------------------------------------===// -// TLS Instructions -// - -// __aeabi_read_tp preserves the registers r1-r3. -let isCall = 1, - Defs = [R0, R12, LR, CPSR], Uses = [SP] in { - def t2TPsoft : T2XI<(outs), (ins), IIC_Br, - "bl\t__aeabi_read_tp", - [(set R0, ARMthread_pointer)]> { - let Inst{31-27} = 0b11110; - let Inst{15-14} = 0b11; - let Inst{12} = 1; - } -} - -//===----------------------------------------------------------------------===// // SJLJ Exception handling intrinsics // eh_sjlj_setjmp() is an instruction sequence to store the return // address and save #0 in R0 for the non-longjmp case. @@ -2990,28 +2946,13 @@ let Defs = // // FIXME: remove when we have a way to marking a MI with these properties. -// FIXME: $dst1 should be a def. But the extra ops must be in the end of the -// operand list. // FIXME: Should pc be an implicit operand like PICADD, etc? let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1, hasExtraDefRegAllocReq = 1, isCodeGenOnly = 1 in -def t2LDMIA_RET: T2XIt<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, - reglist:$regs, variable_ops), - IIC_iLoad_mBr, - "ldmia${p}.w\t$Rn!, $regs", - "$Rn = $wb", []> { - bits<4> Rn; - bits<16> regs; - - let Inst{31-27} = 0b11101; - let Inst{26-25} = 0b00; - let Inst{24-23} = 0b01; // Increment After - let Inst{22} = 0; - let Inst{21} = 1; // Writeback - let Inst{20} = 1; - let Inst{19-16} = Rn; - let Inst{15-0} = regs; -} +def t2LDMIA_RET: t2PseudoInst<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, + reglist:$regs, variable_ops), + Size4Bytes, IIC_iLoad_mBr, []>, + RegConstraint<"$Rn = $wb">; let isBranch = 1, isTerminator = 1, isBarrier = 1 in { let isPredicable = 1 in diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td index b4c3239..d2aaa97 100644 --- a/lib/Target/ARM/ARMInstrVFP.td +++ b/lib/Target/ARM/ARMInstrVFP.td @@ -166,6 +166,15 @@ defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpLoad_m, IIC_fpLoad_mu>; def : MnemonicAlias<"vldm", "vldmia">; def : MnemonicAlias<"vstm", "vstmia">; +def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>, + Requires<[HasVFP2]>; +def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>, + Requires<[HasVFP2]>; +def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>, + Requires<[HasVFP2]>; +def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>, + Requires<[HasVFP2]>; + // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index f4645f1..c6efea1 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -329,13 +329,9 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB, if (NewBase == 0) return false; } - int BaseOpc = !isThumb2 - ? ARM::ADDri - : ((Base == ARM::SP) ? 
ARM::t2ADDrSPi : ARM::t2ADDri); + int BaseOpc = !isThumb2 ? ARM::ADDri : ARM::t2ADDri; if (Offset < 0) { - BaseOpc = !isThumb2 - ? ARM::SUBri - : ((Base == ARM::SP) ? ARM::t2SUBrSPi : ARM::t2SUBri); + BaseOpc = !isThumb2 ? ARM::SUBri : ARM::t2SUBri; Offset = - Offset; } int ImmedOffset = isThumb2 @@ -516,8 +512,6 @@ static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base, if (!MI) return false; if (MI->getOpcode() != ARM::t2SUBri && - MI->getOpcode() != ARM::t2SUBrSPi && - MI->getOpcode() != ARM::t2SUBrSPi12 && MI->getOpcode() != ARM::tSUBspi && MI->getOpcode() != ARM::SUBri) return false; @@ -541,8 +535,6 @@ static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base, if (!MI) return false; if (MI->getOpcode() != ARM::t2ADDri && - MI->getOpcode() != ARM::t2ADDrSPi && - MI->getOpcode() != ARM::t2ADDrSPi12 && MI->getOpcode() != ARM::tADDspi && MI->getOpcode() != ARM::ADDri) return false; @@ -1461,19 +1453,19 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base, while (++I != E) { if (I->isDebugValue() || MemOps.count(&*I)) continue; - const TargetInstrDesc &TID = I->getDesc(); - if (TID.isCall() || TID.isTerminator() || I->hasUnmodeledSideEffects()) + const MCInstrDesc &MCID = I->getDesc(); + if (MCID.isCall() || MCID.isTerminator() || I->hasUnmodeledSideEffects()) return false; - if (isLd && TID.mayStore()) + if (isLd && MCID.mayStore()) return false; if (!isLd) { - if (TID.mayLoad()) + if (MCID.mayLoad()) return false; // It's not safe to move the first 'str' down. // str r1, [r0] // strh r5, [r0] // str r4, [r0, #+4] - if (TID.mayStore()) + if (MCID.mayStore()) return false; } for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) { @@ -1672,14 +1664,14 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB, Ops.pop_back(); Ops.pop_back(); - const TargetInstrDesc &TID = TII->get(NewOpc); - const TargetRegisterClass *TRC = TID.OpInfo[0].getRegClass(TRI); + const MCInstrDesc &MCID = TII->get(NewOpc); + const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI); MRI->constrainRegClass(EvenReg, TRC); MRI->constrainRegClass(OddReg, TRC); // Form the pair instruction. if (isLd) { - MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, TID) + MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID) .addReg(EvenReg, RegState::Define) .addReg(OddReg, RegState::Define) .addReg(BaseReg); @@ -1691,7 +1683,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB, MIB.addImm(Offset).addImm(Pred).addReg(PredReg); ++NumLDRDFormed; } else { - MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, TID) + MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID) .addReg(EvenReg) .addReg(OddReg) .addReg(BaseReg); @@ -1742,8 +1734,8 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) { while (MBBI != E) { for (; MBBI != E; ++MBBI) { MachineInstr *MI = MBBI; - const TargetInstrDesc &TID = MI->getDesc(); - if (TID.isCall() || TID.isTerminator()) { + const MCInstrDesc &MCID = MI->getDesc(); + if (MCID.isCall() || MCID.isTerminator()) { // Stop at barriers. ++MBBI; break; diff --git a/lib/Target/ARM/ARMMCCodeEmitter.cpp b/lib/Target/ARM/ARMMCCodeEmitter.cpp index c5f727d..4fcba11 100644 --- a/lib/Target/ARM/ARMMCCodeEmitter.cpp +++ b/lib/Target/ARM/ARMMCCodeEmitter.cpp @@ -1274,7 +1274,7 @@ void ARMMCCodeEmitter:: EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const { // Pseudo instructions don't get encoded. 
- const TargetInstrDesc &Desc = TII.get(MI.getOpcode()); + const MCInstrDesc &Desc = TII.get(MI.getOpcode()); uint64_t TSFlags = Desc.TSFlags; if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo) return; diff --git a/lib/Target/ARM/ARMMachObjectWriter.cpp b/lib/Target/ARM/ARMMachObjectWriter.cpp index 4c35d0b..a36e47d 100644 --- a/lib/Target/ARM/ARMMachObjectWriter.cpp +++ b/lib/Target/ARM/ARMMachObjectWriter.cpp @@ -8,19 +8,376 @@ //===----------------------------------------------------------------------===// #include "ARM.h" +#include "ARMFixupKinds.h" +#include "llvm/ADT/Twine.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCAsmLayout.h" #include "llvm/MC/MCMachObjectWriter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCFixup.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Object/MachOFormat.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Target/TargetAsmBackend.h" using namespace llvm; +using namespace llvm::object; namespace { class ARMMachObjectWriter : public MCMachObjectTargetWriter { + void RecordARMScatteredRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + unsigned Log2Size, + uint64_t &FixedValue); + void RecordARMMovwMovtRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, MCValue Target, + uint64_t &FixedValue); + public: ARMMachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype) : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype, /*UseAggressiveSymbolFolding=*/true) {} + + void RecordRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, const MCAsmLayout &Layout, + const MCFragment *Fragment, const MCFixup &Fixup, + MCValue Target, uint64_t &FixedValue); }; } +static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType, + unsigned &Log2Size) { + RelocType = unsigned(macho::RIT_Vanilla); + Log2Size = ~0U; + + switch (Kind) { + default: + return false; + + case FK_Data_1: + Log2Size = llvm::Log2_32(1); + return true; + case FK_Data_2: + Log2Size = llvm::Log2_32(2); + return true; + case FK_Data_4: + Log2Size = llvm::Log2_32(4); + return true; + case FK_Data_8: + Log2Size = llvm::Log2_32(8); + return true; + + // Handle 24-bit branch kinds. + case ARM::fixup_arm_ldst_pcrel_12: + case ARM::fixup_arm_pcrel_10: + case ARM::fixup_arm_adr_pcrel_12: + case ARM::fixup_arm_condbranch: + case ARM::fixup_arm_uncondbranch: + RelocType = unsigned(macho::RIT_ARM_Branch24Bit); + // Report as 'long', even though that is not quite accurate. + Log2Size = llvm::Log2_32(4); + return true; + + // Handle Thumb branches. + case ARM::fixup_arm_thumb_br: + RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit); + Log2Size = llvm::Log2_32(2); + return true; + + case ARM::fixup_t2_uncondbranch: + case ARM::fixup_arm_thumb_bl: + case ARM::fixup_arm_thumb_blx: + RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit); + Log2Size = llvm::Log2_32(4); + return true; + + case ARM::fixup_arm_movt_hi16: + case ARM::fixup_arm_movt_hi16_pcrel: + case ARM::fixup_t2_movt_hi16: + case ARM::fixup_t2_movt_hi16_pcrel: + RelocType = unsigned(macho::RIT_ARM_HalfDifference); + // Report as 'long', even though that is not quite accurate. 
+ Log2Size = llvm::Log2_32(4); + return true; + + case ARM::fixup_arm_movw_lo16: + case ARM::fixup_arm_movw_lo16_pcrel: + case ARM::fixup_t2_movw_lo16: + case ARM::fixup_t2_movw_lo16_pcrel: + RelocType = unsigned(macho::RIT_ARM_Half); + // Report as 'long', even though that is not quite accurate. + Log2Size = llvm::Log2_32(4); + return true; + } +} + +void ARMMachObjectWriter:: +RecordARMMovwMovtRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + uint64_t &FixedValue) { + uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset(); + unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind()); + unsigned Type = macho::RIT_ARM_Half; + + // See <reloc.h>. + const MCSymbol *A = &Target.getSymA()->getSymbol(); + MCSymbolData *A_SD = &Asm.getSymbolData(*A); + + if (!A_SD->getFragment()) + report_fatal_error("symbol '" + A->getName() + + "' can not be undefined in a subtraction expression"); + + uint32_t Value = Writer->getSymbolAddress(A_SD, Layout); + uint32_t Value2 = 0; + uint64_t SecAddr = + Writer->getSectionAddress(A_SD->getFragment()->getParent()); + FixedValue += SecAddr; + + if (const MCSymbolRefExpr *B = Target.getSymB()) { + MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol()); + + if (!B_SD->getFragment()) + report_fatal_error("symbol '" + B->getSymbol().getName() + + "' can not be undefined in a subtraction expression"); + + // Select the appropriate difference relocation type. + Type = macho::RIT_ARM_HalfDifference; + Value2 = Writer->getSymbolAddress(B_SD, Layout); + FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent()); + } + + // Relocations are written out in reverse order, so the PAIR comes first. + // ARM_RELOC_HALF and ARM_RELOC_HALF_SECTDIFF abuse the r_length field: + // + // For these two r_type relocations they always have a pair following them and + // the r_length bits are used differently. The encoding of the r_length is as + // follows: + // low bit of r_length: + // 0 - :lower16: for movw instructions + // 1 - :upper16: for movt instructions + // high bit of r_length: + // 0 - arm instructions + // 1 - thumb instructions + // the other half of the relocated expression is in the following pair + // relocation entry in the the low 16 bits of r_address field. + unsigned ThumbBit = 0; + unsigned MovtBit = 0; + switch ((unsigned)Fixup.getKind()) { + default: break; + case ARM::fixup_arm_movt_hi16: + case ARM::fixup_arm_movt_hi16_pcrel: + MovtBit = 1; + break; + case ARM::fixup_t2_movt_hi16: + case ARM::fixup_t2_movt_hi16_pcrel: + MovtBit = 1; + // Fallthrough + case ARM::fixup_t2_movw_lo16: + case ARM::fixup_t2_movw_lo16_pcrel: + ThumbBit = 1; + break; + } + + + if (Type == macho::RIT_ARM_HalfDifference) { + uint32_t OtherHalf = MovtBit + ? 
(FixedValue & 0xffff) : ((FixedValue & 0xffff0000) >> 16); + + macho::RelocationEntry MRE; + MRE.Word0 = ((OtherHalf << 0) | + (macho::RIT_Pair << 24) | + (MovtBit << 28) | + (ThumbBit << 29) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value2; + Writer->addRelocation(Fragment->getParent(), MRE); + } + + macho::RelocationEntry MRE; + MRE.Word0 = ((FixupOffset << 0) | + (Type << 24) | + (MovtBit << 28) | + (ThumbBit << 29) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value; + Writer->addRelocation(Fragment->getParent(), MRE); +} + +void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + unsigned Log2Size, + uint64_t &FixedValue) { + uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset(); + unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind()); + unsigned Type = macho::RIT_Vanilla; + + // See <reloc.h>. + const MCSymbol *A = &Target.getSymA()->getSymbol(); + MCSymbolData *A_SD = &Asm.getSymbolData(*A); + + if (!A_SD->getFragment()) + report_fatal_error("symbol '" + A->getName() + + "' can not be undefined in a subtraction expression"); + + uint32_t Value = Writer->getSymbolAddress(A_SD, Layout); + uint64_t SecAddr = Writer->getSectionAddress(A_SD->getFragment()->getParent()); + FixedValue += SecAddr; + uint32_t Value2 = 0; + + if (const MCSymbolRefExpr *B = Target.getSymB()) { + MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol()); + + if (!B_SD->getFragment()) + report_fatal_error("symbol '" + B->getSymbol().getName() + + "' can not be undefined in a subtraction expression"); + + // Select the appropriate difference relocation type. + Type = macho::RIT_Difference; + Value2 = Writer->getSymbolAddress(B_SD, Layout); + FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent()); + } + + // Relocations are written out in reverse order, so the PAIR comes first. + if (Type == macho::RIT_Difference || + Type == macho::RIT_Generic_LocalDifference) { + macho::RelocationEntry MRE; + MRE.Word0 = ((0 << 0) | + (macho::RIT_Pair << 24) | + (Log2Size << 28) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value2; + Writer->addRelocation(Fragment->getParent(), MRE); + } + + macho::RelocationEntry MRE; + MRE.Word0 = ((FixupOffset << 0) | + (Type << 24) | + (Log2Size << 28) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value; + Writer->addRelocation(Fragment->getParent(), MRE); +} + +void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + uint64_t &FixedValue) { + unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind()); + unsigned Log2Size; + unsigned RelocType = macho::RIT_Vanilla; + if (!getARMFixupKindMachOInfo(Fixup.getKind(), RelocType, Log2Size)) { + report_fatal_error("unknown ARM fixup kind!"); + return; + } + + // If this is a difference or a defined symbol plus an offset, then we need a + // scattered relocation entry. Differences always require scattered + // relocations. 
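// Illustration (assembler input, not from this patch): a fixup over a symbol
// difference such as
//   .long _table_end - _table_start
// has Target.getSymB() set, so it cannot be described by a plain
// relocation_info entry; it is routed below either to the movw/movt half
// path (ARM_RELOC_HALF with its paired entry) or to the generic scattered
// path.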
+ if (Target.getSymB()) { + if (RelocType == macho::RIT_ARM_Half || + RelocType == macho::RIT_ARM_HalfDifference) + return RecordARMMovwMovtRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, FixedValue); + return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, Log2Size, FixedValue); + } + + // Get the symbol data, if any. + MCSymbolData *SD = 0; + if (Target.getSymA()) + SD = &Asm.getSymbolData(Target.getSymA()->getSymbol()); + + // FIXME: For other platforms, we need to use scattered relocations for + // internal relocations with offsets. If this is an internal relocation with + // an offset, it also needs a scattered relocation entry. + // + // Is this right for ARM? + uint32_t Offset = Target.getConstant(); + if (IsPCRel && RelocType == macho::RIT_Vanilla) + Offset += 1 << Log2Size; + if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD)) + return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, Log2Size, FixedValue); + + // See <reloc.h>. + uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset(); + unsigned Index = 0; + unsigned IsExtern = 0; + unsigned Type = 0; + + if (Target.isAbsolute()) { // constant + // FIXME! + report_fatal_error("FIXME: relocations to absolute targets " + "not yet implemented"); + } else { + // Resolve constant variables. + if (SD->getSymbol().isVariable()) { + int64_t Res; + if (SD->getSymbol().getVariableValue()->EvaluateAsAbsolute( + Res, Layout, Writer->getSectionAddressMap())) { + FixedValue = Res; + return; + } + } + + // Check whether we need an external or internal relocation. + if (Writer->doesSymbolRequireExternRelocation(SD)) { + IsExtern = 1; + Index = SD->getIndex(); + + // For external relocations, make sure to offset the fixup value to + // compensate for the addend of the symbol address, if it was + // undefined. This occurs with weak definitions, for example. + if (!SD->Symbol->isUndefined()) + FixedValue -= Layout.getSymbolOffset(SD); + } else { + // The index is the section ordinal (1-based). + const MCSectionData &SymSD = Asm.getSectionData( + SD->getSymbol().getSection()); + Index = SymSD.getOrdinal() + 1; + FixedValue += Writer->getSectionAddress(&SymSD); + } + if (IsPCRel) + FixedValue -= Writer->getSectionAddress(Fragment->getParent()); + + // The type is determined by the fixup kind. + Type = RelocType; + } + + // struct relocation_info (8 bytes) + macho::RelocationEntry MRE; + MRE.Word0 = FixupOffset; + MRE.Word1 = ((Index << 0) | + (IsPCRel << 24) | + (Log2Size << 25) | + (IsExtern << 27) | + (Type << 28)); + Writer->addRelocation(Fragment->getParent(), MRE); +} + MCObjectWriter *llvm::createARMMachObjectWriter(raw_ostream &OS, bool Is64Bit, uint32_t CPUType, diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td index 7741410..76eb496 100644 --- a/lib/Target/ARM/ARMRegisterInfo.td +++ b/lib/Target/ARM/ARMRegisterInfo.td @@ -228,6 +228,9 @@ def rGPR : RegisterClass<"ARM", [i32], 32, (sub GPR, SP, PC)> { // the general GPR register class above (MOV, e.g.) def tGPR : RegisterClass<"ARM", [i32], 32, (trunc GPR, 8)>; +// The high registers in thumb mode, R8-R15. +def hGPR : RegisterClass<"ARM", [i32], 32, (sub GPR, tGPR)>; + // For tail calls, we can't use callee-saved registers, as they are restored // to the saved value before the tail call, which would clobber a call address. 
// Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp index c6f266b..694b313 100644 --- a/lib/Target/ARM/ARMSubtarget.cpp +++ b/lib/Target/ARM/ARMSubtarget.cpp @@ -15,7 +15,6 @@ #include "ARMGenSubtarget.inc" #include "ARMBaseRegisterInfo.h" #include "llvm/GlobalValue.h" -#include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" #include "llvm/ADT/SmallVector.h" using namespace llvm; @@ -31,8 +30,8 @@ static cl::opt<bool> StrictAlign("arm-strict-align", cl::Hidden, cl::desc("Disallow all unaligned memory accesses")); -ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS, - bool isT) +ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU, + const std::string &FS, bool isT) : ARMArchVersion(V4) , ARMProcFamily(Others) , ARMFPUType(None) @@ -57,21 +56,19 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS, , FPOnlySP(false) , AllowsUnalignedMem(false) , stackAlignment(4) - , CPUString("generic") + , CPUString(CPU) , TargetTriple(TT) , TargetABI(ARM_ABI_APCS) { - // Default to soft float ABI - if (FloatABIType == FloatABI::Default) - FloatABIType = FloatABI::Soft; - // Determine default and user specified characteristics // When no arch is specified either by CPU or by attributes, make the default // ARMv4T. const char *ARMArchFeature = ""; + if (CPUString.empty()) + CPUString = "generic"; if (CPUString == "generic" && (FS.empty() || FS == "generic")) { ARMArchVersion = V4T; - ARMArchFeature = ",+v4t"; + ARMArchFeature = "+v4t"; } // Set the boolean corresponding to the current target triple, or the default @@ -90,29 +87,29 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS, unsigned SubVer = TT[Idx]; if (SubVer >= '7' && SubVer <= '9') { ARMArchVersion = V7A; - ARMArchFeature = ",+v7a"; + ARMArchFeature = "+v7a"; if (Len >= Idx+2 && TT[Idx+1] == 'm') { ARMArchVersion = V7M; - ARMArchFeature = ",+v7m"; + ARMArchFeature = "+v7m"; } } else if (SubVer == '6') { ARMArchVersion = V6; - ARMArchFeature = ",+v6"; + ARMArchFeature = "+v6"; if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2') { ARMArchVersion = V6T2; - ARMArchFeature = ",+v6t2"; + ARMArchFeature = "+v6t2"; } } else if (SubVer == '5') { ARMArchVersion = V5T; - ARMArchFeature = ",+v5t"; + ARMArchFeature = "+v5t"; if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e') { ARMArchVersion = V5TE; - ARMArchFeature = ",+v5te"; + ARMArchFeature = "+v5te"; } } else if (SubVer == '4') { if (Len >= Idx+2 && TT[Idx+1] == 't') { ARMArchVersion = V4T; - ARMArchFeature = ",+v4t"; + ARMArchFeature = "+v4t"; } else { ARMArchVersion = V4; ARMArchFeature = ""; @@ -123,18 +120,15 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS, if (TT.find("eabi") != std::string::npos) TargetABI = ARM_ABI_AAPCS; - // Parse features string. If the first entry in FS (the CPU) is missing, - // insert the architecture feature derived from the target triple. This is - // important for setting features that are implied based on the architecture - // version. - std::string FSWithArch; - if (FS.empty()) - FSWithArch = std::string(ARMArchFeature); - else if (FS.find(',') == 0) - FSWithArch = std::string(ARMArchFeature) + FS; - else + // Insert the architecture feature derived from the target triple into the + // feature string. This is important for setting features that are implied + // based on the architecture version. 
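// A minimal standalone sketch of the composition rule described above (the
// helper name is hypothetical and only illustrates the string handling; the
// real code below operates on ARMArchFeature/FS directly):
//
//   static std::string composeFeatureString(const std::string &ArchFeature,
//                                           const std::string &FS) {
//     if (ArchFeature.empty()) return FS;      // no arch-implied features
//     if (FS.empty()) return ArchFeature;      // e.g. "+v7a"
//     return ArchFeature + "," + FS;           // e.g. "+v7a,+neon"
//   }
//
// So an "armv7" triple with FS "+neon" hands "+v7a,+neon" to
// ParseSubtargetFeatures, and an empty FS hands just "+v7a".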
+ std::string FSWithArch = std::string(ARMArchFeature); + if (FSWithArch.empty()) FSWithArch = FS; - CPUString = ParseSubtargetFeatures(FSWithArch, CPUString); + else if (!FS.empty()) + FSWithArch = FSWithArch + "," + FS; + ParseSubtargetFeatures(FSWithArch, CPUString); // After parsing Itineraries, set ItinData.IssueWidth. computeIssueWidth(); diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index 0271c87..7c93173 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -14,9 +14,8 @@ #ifndef ARMSUBTARGET_H #define ARMSUBTARGET_H -#include "llvm/Target/TargetInstrItineraries.h" -#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetSubtarget.h" +#include "llvm/MC/MCInstrItineraries.h" #include "llvm/ADT/Triple.h" #include <string> @@ -154,7 +153,8 @@ protected: /// This constructor initializes the data members to match that /// of the specified triple. /// - ARMSubtarget(const std::string &TT, const std::string &FS, bool isThumb); + ARMSubtarget(const std::string &TT, const std::string &CPU, + const std::string &FS, bool isThumb); /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size /// that still makes it profitable to inline the call. @@ -165,8 +165,7 @@ protected: } /// ParseSubtargetFeatures - Parses features string setting specified /// subtarget options. Definition of function is auto generated by tblgen. - std::string ParseSubtargetFeatures(const std::string &FS, - const std::string &CPU); + void ParseSubtargetFeatures(const std::string &FS, const std::string &CPU); void computeIssueWidth(); diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp index 29aa4f7..80e7d55 100644 --- a/lib/Target/ARM/ARMTargetMachine.cpp +++ b/lib/Target/ARM/ARMTargetMachine.cpp @@ -78,18 +78,24 @@ extern "C" void LLVMInitializeARMTarget() { /// ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const std::string &TT, + const std::string &CPU, const std::string &FS, bool isThumb) : LLVMTargetMachine(T, TT), - Subtarget(TT, FS, isThumb), + Subtarget(TT, CPU, FS, isThumb), JITInfo(), InstrItins(Subtarget.getInstrItineraryData()) { DefRelocModel = getRelocationModel(); + + // Default to soft float ABI + if (FloatABIType == FloatABI::Default) + FloatABIType = FloatABI::Soft; } ARMTargetMachine::ARMTargetMachine(const Target &T, const std::string &TT, + const std::string &CPU, const std::string &FS) - : ARMBaseTargetMachine(T, TT, FS, false), InstrInfo(Subtarget), + : ARMBaseTargetMachine(T, TT, CPU, FS, false), InstrInfo(Subtarget), DataLayout(Subtarget.isAPCS_ABI() ? std::string("e-p:32:32-f64:32:64-i64:32:64-" "v128:32:128-v64:32:64-n32") : @@ -105,8 +111,9 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, const std::string &TT, } ThumbTargetMachine::ThumbTargetMachine(const Target &T, const std::string &TT, + const std::string &CPU, const std::string &FS) - : ARMBaseTargetMachine(T, TT, FS, true), + : ARMBaseTargetMachine(T, TT, CPU, FS, true), InstrInfo(Subtarget.hasThumb2() ? 
((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget)) : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))), diff --git a/lib/Target/ARM/ARMTargetMachine.h b/lib/Target/ARM/ARMTargetMachine.h index e0aa149..a4a7927 100644 --- a/lib/Target/ARM/ARMTargetMachine.h +++ b/lib/Target/ARM/ARMTargetMachine.h @@ -41,7 +41,8 @@ private: public: ARMBaseTargetMachine(const Target &T, const std::string &TT, - const std::string &FS, bool isThumb); + const std::string &CPU, const std::string &FS, + bool isThumb); virtual ARMJITInfo *getJITInfo() { return &JITInfo; } virtual const ARMSubtarget *getSubtargetImpl() const { return &Subtarget; } @@ -70,7 +71,7 @@ class ARMTargetMachine : public ARMBaseTargetMachine { ARMFrameLowering FrameLowering; public: ARMTargetMachine(const Target &T, const std::string &TT, - const std::string &FS); + const std::string &CPU, const std::string &FS); virtual const ARMRegisterInfo *getRegisterInfo() const { return &InstrInfo.getRegisterInfo(); @@ -109,7 +110,7 @@ class ThumbTargetMachine : public ARMBaseTargetMachine { OwningPtr<ARMFrameLowering> FrameLowering; public: ThumbTargetMachine(const Target &T, const std::string &TT, - const std::string &FS); + const std::string &CPU, const std::string &FS); /// returns either Thumb1RegisterInfo or Thumb2RegisterInfo virtual const ARMBaseRegisterInfo *getRegisterInfo() const { diff --git a/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp b/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp index 2428ce1..d9a5fa2 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp @@ -87,8 +87,9 @@ public: : ARMBaseAsmLexer(T, MAI) { std::string tripleString("arm-unknown-unknown"); std::string featureString; + std::string CPU; OwningPtr<const TargetMachine> - targetMachine(T.createTargetMachine(tripleString, featureString)); + targetMachine(T.createTargetMachine(tripleString, CPU, featureString)); InitRegisterMap(targetMachine->getRegisterInfo()); } }; @@ -99,8 +100,9 @@ public: : ARMBaseAsmLexer(T, MAI) { std::string tripleString("thumb-unknown-unknown"); std::string featureString; + std::string CPU; OwningPtr<const TargetMachine> - targetMachine(T.createTargetMachine(tripleString, featureString)); + targetMachine(T.createTargetMachine(tripleString, CPU, featureString)); InitRegisterMap(targetMachine->getRegisterInfo()); } }; diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 4bc12c9..6952c38 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -350,6 +350,22 @@ public: bool isCondCode() const { return Kind == CondCode; } bool isCCOut() const { return Kind == CCOut; } bool isImm() const { return Kind == Immediate; } + bool isImm0_255() const { + if (Kind != Immediate) + return false; + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + if (!CE) return false; + int64_t Value = CE->getValue(); + return Value >= 0 && Value < 256; + } + bool isT2SOImm() const { + if (Kind != Immediate) + return false; + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + if (!CE) return false; + int64_t Value = CE->getValue(); + return ARM_AM::getT2SOImmVal(Value) != -1; + } bool isReg() const { return Kind == Register; } bool isRegList() const { return Kind == RegisterList; } bool isDPRRegList() const { return Kind == DPRRegisterList; } @@ -515,6 +531,16 @@ public: addExpr(Inst, getImm()); } + void addImm0_255Operands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + 
addExpr(Inst, getImm()); + } + + void addT2SOImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + addExpr(Inst, getImm()); + } + void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); @@ -1761,7 +1787,7 @@ static StringRef SplitMnemonic(StringRef Mnemonic, Mnemonic == "vcle" || (Mnemonic == "smlal" || Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" || Mnemonic == "vpadal" || - Mnemonic == "vqdmlal")) + Mnemonic == "vqdmlal" || Mnemonic == "bics")) return Mnemonic; // First, split out any predication code. @@ -1769,7 +1795,9 @@ static StringRef SplitMnemonic(StringRef Mnemonic, .Case("eq", ARMCC::EQ) .Case("ne", ARMCC::NE) .Case("hs", ARMCC::HS) + .Case("cs", ARMCC::HS) .Case("lo", ARMCC::LO) + .Case("cc", ARMCC::LO) .Case("mi", ARMCC::MI) .Case("pl", ARMCC::PL) .Case("vs", ARMCC::VS) @@ -1824,6 +1852,7 @@ static StringRef SplitMnemonic(StringRef Mnemonic, void ARMAsmParser:: GetMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) { + bool isThumbOne = TM.getSubtarget<ARMSubtarget>().isThumb1Only(); bool isThumb = TM.getSubtarget<ARMSubtarget>().isThumb(); if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || @@ -1834,7 +1863,7 @@ GetMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "mla" || Mnemonic == "umull" || Mnemonic == "eor" || Mnemonic == "smlal" || - (Mnemonic == "mov" && !isThumb)) { + (Mnemonic == "mov" && !isThumbOne)) { CanAcceptCarrySet = true; } else { CanAcceptCarrySet = false; @@ -1853,8 +1882,7 @@ GetMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, if (isThumb) if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || - Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp" || - Mnemonic == "mov") + Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") CanAcceptPredicationCode = false; } diff --git a/lib/Target/ARM/CMakeLists.txt b/lib/Target/ARM/CMakeLists.txt index edc0054..b1d4f54 100644 --- a/lib/Target/ARM/CMakeLists.txt +++ b/lib/Target/ARM/CMakeLists.txt @@ -1,10 +1,7 @@ set(LLVM_TARGET_DEFINITIONS ARM.td) -tablegen(ARMGenRegisterInfo.h.inc -gen-register-desc-header) -tablegen(ARMGenRegisterNames.inc -gen-register-enums) -tablegen(ARMGenRegisterInfo.inc -gen-register-desc) -tablegen(ARMGenInstrNames.inc -gen-instr-enums) -tablegen(ARMGenInstrInfo.inc -gen-instr-desc) +tablegen(ARMGenRegisterInfo.inc -gen-register-info) +tablegen(ARMGenInstrInfo.inc -gen-instr-info) tablegen(ARMGenCodeEmitter.inc -gen-emitter) tablegen(ARMGenMCCodeEmitter.inc -gen-emitter -mc-emitter) tablegen(ARMGenAsmWriter.inc -gen-asm-writer) diff --git a/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp b/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp index 271ca8c..fe165b0 100644 --- a/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp +++ b/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp @@ -24,8 +24,8 @@ //#define DEBUG(X) do { X; } while (0) /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const -/// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s -/// describing the operand info for each ARMInsts[i]. +/// MCInstrDesc ARMInsts[] definition and the MCOperandInfo[]'s describing the +/// operand info for each ARMInsts[i]. 
/// /// Together with an instruction's encoding format, we can take advantage of the /// NumOperands and the OpInfo fields of the target instruction description in @@ -46,10 +46,10 @@ /// dag DefaultOps = (ops (i32 14), (i32 zero_reg)); /// } /// -/// which is manifested by the TargetOperandInfo[] of: +/// which is manifested by the MCOperandInfo[] of: /// -/// { 0, 0|(1<<TOI::Predicate), 0 }, -/// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 } +/// { 0, 0|(1<<MCOI::Predicate), 0 }, +/// { ARM::CCRRegClassID, 0|(1<<MCOI::Predicate), 0 } /// /// So the first predicate MCOperand corresponds to the immediate part of the /// ARM condition field (Inst{31-28}), and the second predicate MCOperand @@ -66,11 +66,12 @@ /// dag DefaultOps = (ops (i32 zero_reg)); /// } /// -/// which is manifested by the one TargetOperandInfo of: +/// which is manifested by the one MCOperandInfo of: /// -/// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 } +/// { ARM::CCRRegClassID, 0|(1<<MCOI::OptionalDef), 0 } /// /// And this maps to one MCOperand with the regsiter kind of ARM::CPSR. +#define GET_INSTRINFO_MC_DESC #include "ARMGenInstrInfo.inc" using namespace llvm; @@ -588,9 +589,9 @@ static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) { static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - unsigned short NumDefs = TID.getNumDefs(); - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + unsigned short NumDefs = MCID.getNumDefs(); + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -739,9 +740,9 @@ static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn, if (PW) { MI.addOperand(MCOperand::CreateReg(0)); ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub; - const TargetInstrDesc &TID = ARMInsts[Opcode]; + const MCInstrDesc &MCID = ARMInsts[Opcode]; unsigned IndexMode = - (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift; + (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift; unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2, ARM_AM::no_shift, IndexMode); MI.addOperand(MCOperand::CreateImm(Offset)); @@ -802,7 +803,7 @@ static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn, if (CoprocessorOpcode(Opcode)) return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; // MRS and MRSsys take one GPR reg Rd. 
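The hunks above, and most of the disassembler hunks that follow, apply one mechanical migration: TargetInstrDesc becomes MCInstrDesc, TargetOperandInfo becomes MCOperandInfo, and the TOI:: flag namespace becomes MCOI::, with ARMInsts[] now emitted under GET_INSTRINFO_MC_DESC. A minimal sketch of the predicate-slot check this table lookup enables, assuming the surrounding file's includes and a hypothetical helper name:

// Hypothetical helper mirroring how the Disassemble*Frm routines consult the
// renamed descriptor table. Returns true if operands Idx and Idx+1 form the
// standard ARM predicate pair: an immediate condition code followed by a
// CCR register slot (CPSR or reg0).
static bool hasPredicateSlots(unsigned Opcode, unsigned Idx) {
  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo || Idx + 1 >= MCID.getNumOperands())
    return false;
  return OpInfo[Idx].isPredicate() && OpInfo[Idx].RegClass < 0 &&
         OpInfo[Idx + 1].isPredicate() &&
         OpInfo[Idx + 1].RegClass == ARM::CCRRegClassID;
}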
@@ -901,7 +902,7 @@ static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; unsigned &OpIdx = NumOpsAdded; @@ -976,10 +977,10 @@ static bool BadRegsDPFrm(unsigned Opcode, uint32_t insn) { static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - unsigned short NumDefs = TID.getNumDefs(); - bool isUnary = isUnaryDP(TID.TSFlags); - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + unsigned short NumDefs = MCID.getNumDefs(); + bool isUnary = isUnaryDP(MCID.TSFlags); + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1041,7 +1042,7 @@ static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn, } // If this is a two-address operand, skip it, e.g., MOVCCr operand 1. - if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) { + if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) { MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx; } @@ -1089,10 +1090,10 @@ static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - unsigned short NumDefs = TID.getNumDefs(); - bool isUnary = isUnaryDP(TID.TSFlags); - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + unsigned short NumDefs = MCID.getNumDefs(); + bool isUnary = isUnaryDP(MCID.TSFlags); + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1118,7 +1119,7 @@ static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn, } // If this is a two-address operand, skip it, e.g., MOVCCs operand 1. - if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) { + if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) { MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx; } @@ -1244,17 +1245,17 @@ static bool BadRegsLdStFrm(unsigned Opcode, uint32_t insn, bool Store, bool WBac static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - bool isPrePost = isPrePostLdSt(TID.TSFlags); - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + bool isPrePost = isPrePostLdSt(MCID.TSFlags); + const MCOperandInfo *OpInfo = MCID.OpInfo; if (!OpInfo) return false; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; - assert(((!isStore && TID.getNumDefs() > 0) || - (isStore && (TID.getNumDefs() == 0 || isPrePost))) + assert(((!isStore && MCID.getNumDefs() > 0) || + (isStore && (MCID.getNumDefs() == 0 || isPrePost))) && "Invalid arguments"); // Operand 0 of a pre- and post-indexed store is the address base writeback. 
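The load/store and data-processing hunks in this file repeat one idiom around the renamed constraint query: if MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) is not -1, the operand is re-added from an earlier MCOperand (or a reg0 placeholder) instead of being decoded again. A hedged sketch of that idiom, with a hypothetical helper name and no claim that such a helper exists in the tree:

// Sketch of the shared tied-operand handling: reuse the earlier operand the
// constraint points at (e.g. the base-register writeback), otherwise add the
// freshly decoded register.
static void addTiedOrDecodedReg(MCInst &MI, const MCInstrDesc &MCID,
                                unsigned &OpIdx, unsigned DecodedReg) {
  int Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO);
  if (Idx != -1)
    MI.addOperand(MI.getOperand(Idx));
  else
    MI.addOperand(MCOperand::CreateReg(DecodedReg));
  ++OpIdx;
}

This is why the many asserts in these hunks switch from TOI::TIED_TO to MCOI::TIED_TO with no behavioural change.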
@@ -1291,7 +1292,7 @@ static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn, assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID && "Reg operand expected"); - assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) + assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) && "Index mode or tied_to operand expected"); MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn)))); @@ -1308,7 +1309,7 @@ static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn, ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub; unsigned IndexMode = - (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift; + (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift; if (getIBit(insn) == 0) { // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2). // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already @@ -1379,17 +1380,17 @@ static bool HasDualReg(unsigned Opcode) { static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - bool isPrePost = isPrePostLdSt(TID.TSFlags); - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + bool isPrePost = isPrePostLdSt(MCID.TSFlags); + const MCOperandInfo *OpInfo = MCID.OpInfo; if (!OpInfo) return false; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; - assert(((!isStore && TID.getNumDefs() > 0) || - (isStore && (TID.getNumDefs() == 0 || isPrePost))) + assert(((!isStore && MCID.getNumDefs() > 0) || + (isStore && (MCID.getNumDefs() == 0 || isPrePost))) && "Invalid arguments"); // Operand 0 of a pre- and post-indexed store is the address base writeback. @@ -1433,7 +1434,7 @@ static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn, assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID && "Reg operand expected"); - assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) + assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) && "Offset mode or tied_to operand expected"); MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn)))); @@ -1451,7 +1452,7 @@ static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn, ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? 
ARM_AM::add : ARM_AM::sub; unsigned IndexMode = - (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift; + (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift; if (getAM3IBit(insn) == 1) { MI.addOperand(MCOperand::CreateReg(0)); @@ -1539,7 +1540,7 @@ static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; unsigned &OpIdx = NumOpsAdded; @@ -1591,7 +1592,7 @@ static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1653,8 +1654,8 @@ static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn, if (decodeRd(insn) == 15 || decodeRm(insn) == 15) return false; - const TargetInstrDesc &TID = ARMInsts[Opcode]; - NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands + const MCInstrDesc &MCID = ARMInsts[Opcode]; + NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands // Disassemble register def. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID, @@ -1696,7 +1697,7 @@ static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn, if (decodeRd(insn) == 15 || decodeRm(insn) == 15) return false; - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1802,7 +1803,7 @@ static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn, assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1842,8 +1843,8 @@ static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn, assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3"); - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1858,7 +1859,7 @@ static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn, ++OpIdx; // Skip tied_to operand constraint. 
- if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) { + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) { assert(NumOps >= 4 && "Expect >=4 operands"); MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx; @@ -1886,8 +1887,8 @@ static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn, assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2"); - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; if (!OpInfo) return false; bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297 @@ -1903,7 +1904,7 @@ static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn, getRegisterEnum(B, RegClassID, decodeVFPRd(insn, SP)))); - assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 && + assert(MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 && "Tied to operand expected"); MI.addOperand(MI.getOperand(0)); @@ -1961,7 +1962,7 @@ static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn, assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned &OpIdx = NumOpsAdded; MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID, @@ -2011,7 +2012,7 @@ static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn, assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -2136,7 +2137,7 @@ static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -2402,8 +2403,8 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced, unsigned alignment, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; // At least one DPR register plus addressing mode #6. 
assert(NumOps >= 3 && "Expect >= 3 operands"); @@ -2507,7 +2508,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn, } while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) { - assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 && + assert(MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1 && "Tied to operand expected"); MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx; @@ -2757,8 +2758,8 @@ static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; assert(NumOps >= 2 && (OpInfo[0].RegClass == ARM::DPRRegClassID || @@ -2848,8 +2849,8 @@ enum N2VFlag { static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opc]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opc]; + const MCOperandInfo *OpInfo = MCID.OpInfo; assert(NumOps >= 2 && (OpInfo[0].RegClass == ARM::DPRRegClassID || @@ -2878,7 +2879,7 @@ static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn, ++OpIdx; // VPADAL... - if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) { + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) { // TIED_TO operand. MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx; @@ -2892,7 +2893,7 @@ static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn, // VZIP and others have two TIED_TO reg operands. int Idx; while (OpIdx < NumOps && - (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) { + (Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) { // Add TIED_TO operand. MI.addOperand(MI.getOperand(Idx)); ++OpIdx; @@ -2945,8 +2946,8 @@ static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn, static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; assert(NumOps >= 3 && (OpInfo[0].RegClass == ARM::DPRRegClassID || @@ -2964,7 +2965,7 @@ static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn, decodeNEONRd(insn)))); ++OpIdx; - if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) { + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) { // TIED_TO operand. MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx; @@ -3044,8 +3045,8 @@ enum N3VFlag { static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs. assert(NumOps >= 3 && @@ -3076,7 +3077,7 @@ static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn, ++OpIdx; // VABA, VABAL, VBSLd, VBSLq, ... 
- if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) { + if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) { // TIED_TO operand. MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx; @@ -3163,8 +3164,8 @@ static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode, static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; if (!OpInfo) return false; assert(NumOps >= 3 && @@ -3192,7 +3193,7 @@ static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn, // Process tied_to operand constraint. int Idx; - if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) { + if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) { MI.addOperand(MI.getOperand(Idx)); ++OpIdx; } @@ -3221,11 +3222,11 @@ static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; if (!OpInfo) return false; - assert(TID.getNumDefs() == 1 && NumOps >= 3 && + assert(MCID.getNumDefs() == 1 && NumOps >= 3 && OpInfo[0].RegClass == ARM::GPRRegClassID && OpInfo[1].RegClass == ARM::DPRRegClassID && OpInfo[2].RegClass < 0 && @@ -3255,14 +3256,14 @@ static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; if (!OpInfo) return false; - assert(TID.getNumDefs() == 1 && NumOps >= 3 && + assert(MCID.getNumDefs() == 1 && NumOps >= 3 && OpInfo[0].RegClass == ARM::DPRRegClassID && OpInfo[1].RegClass == ARM::DPRRegClassID && - TID.getOperandConstraint(1, TOI::TIED_TO) != -1 && + MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 && OpInfo[2].RegClass == ARM::GPRRegClassID && OpInfo[3].RegClass < 0 && "Expect >= 3 operands with one dst operand"); @@ -3294,7 +3295,7 @@ static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; assert(NumOps >= 2 && (OpInfo[0].RegClass == ARM::DPRRegClassID || @@ -3604,11 +3605,11 @@ bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode, assert(NumOpsRemaining > 0 && "Invalid argument"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned Idx = MI.getNumOperands(); // First, we check whether this instr specifies the PredicateOperand through - // a pair of TargetOperandInfos with isPredicate() property. + // a pair of MCOperandInfos with isPredicate() property. 
if (NumOpsRemaining >= 2 && OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() && OpInfo[Idx].RegClass < 0 && @@ -3636,13 +3637,13 @@ bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode, assert(NumOpsRemaining > 0 && "Invalid argument"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; const std::string &Name = ARMInsts[Opcode].Name; unsigned Idx = MI.getNumOperands(); uint64_t TSFlags = ARMInsts[Opcode].TSFlags; // First, we check whether this instr specifies the PredicateOperand through - // a pair of TargetOperandInfos with isPredicate() property. + // a pair of MCOperandInfos with isPredicate() property. if (NumOpsRemaining >= 2 && OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() && OpInfo[Idx].RegClass < 0 && diff --git a/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h b/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h index 9639c8a..834c6f6 100644 --- a/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h +++ b/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h @@ -350,7 +350,7 @@ static inline unsigned decodeRotate(uint32_t insn) { static bool DisassembleThumb1General(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -425,8 +425,8 @@ static bool DisassembleThumb1General(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb1DP(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -454,7 +454,7 @@ static bool DisassembleThumb1DP(MCInst &MI, unsigned Opcode, uint32_t insn, assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::tGPRRegClassID && "Thumb reg operand expected"); int Idx; - if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) { + if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) { // The reg operand is tied to the first reg operand. MI.addOperand(MI.getOperand(Idx)); ++OpIdx; @@ -511,8 +511,8 @@ static bool DisassembleThumb1Special(MCInst &MI, unsigned Opcode, uint32_t insn, return true; } - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -530,7 +530,7 @@ static bool DisassembleThumb1Special(MCInst &MI, unsigned Opcode, uint32_t insn, assert(OpIdx < NumOps && "More operands expected"); int Idx; - if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) { + if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) { // The reg operand is tied to the first reg operand. 
MI.addOperand(MI.getOperand(Idx)); ++OpIdx; @@ -554,7 +554,7 @@ static bool DisassembleThumb1Special(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb1LdPC(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID && @@ -602,7 +602,7 @@ static bool DisassembleThumb1LdPC(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps >= 2 && @@ -630,8 +630,8 @@ static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode, static bool DisassembleThumb1LdSt(unsigned opA, MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; assert(NumOps >= 2 @@ -680,7 +680,7 @@ static bool DisassembleThumb1LdStSP(MCInst &MI, unsigned Opcode, uint32_t insn, assert((Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) && "Unexpected opcode"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps >= 3 && @@ -708,7 +708,7 @@ static bool DisassembleThumb1AddPCi(MCInst &MI, unsigned Opcode, uint32_t insn, assert(Opcode == ARM::tADDrPCi && "Unexpected opcode"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID && @@ -733,7 +733,7 @@ static bool DisassembleThumb1AddSPi(MCInst &MI, unsigned Opcode, uint32_t insn, assert(Opcode == ARM::tADDrSPi && "Unexpected opcode"); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps >= 3 && @@ -810,7 +810,7 @@ static bool DisassembleThumb1Misc(MCInst &MI, unsigned Opcode, uint32_t insn, if (Opcode == ARM::tPUSH || Opcode == ARM::tPOP) return DisassembleThumb1PushPop(MI, Opcode, insn, NumOps, NumOpsAdded, B); - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; // Predicate operands are handled elsewhere. 
if (NumOps == 2 && @@ -958,7 +958,7 @@ static bool DisassembleThumb1CondBr(MCInst &MI, unsigned Opcode, uint32_t insn, if (Opcode == ARM::tTRAP) return true; - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps == 3 && OpInfo[0].RegClass < 0 && @@ -989,7 +989,7 @@ static bool DisassembleThumb1CondBr(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb1Br(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps == 1 && OpInfo[0].RegClass < 0 && "1 imm operand expected"); @@ -1226,7 +1226,7 @@ static bool DisassembleThumb2LdStMul(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb2LdStEx(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; unsigned &OpIdx = NumOpsAdded; @@ -1316,7 +1316,7 @@ static bool DisassembleThumb2LdStEx(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb2LdStDual(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; if (!OpInfo) return false; assert(NumOps >= 4 @@ -1423,8 +1423,8 @@ static inline bool Thumb2ShiftOpcode(unsigned Opcode) { static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; // Special case handling. @@ -1467,7 +1467,7 @@ static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn, if (ThreeReg) { int Idx; - if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) { + if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) { // Process tied_to operand constraint. MI.addOperand(MI.getOperand(Idx)); ++OpIdx; @@ -1521,8 +1521,8 @@ static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb2DPModImm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1550,7 +1550,7 @@ static bool DisassembleThumb2DPModImm(MCInst &MI, unsigned Opcode, return false; } int Idx; - if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) { + if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) { // The reg operand is tied to the first reg operand. 
MI.addOperand(MI.getOperand(Idx)); } else { @@ -1590,8 +1590,8 @@ static inline bool Thumb2SaturateOpcode(unsigned Opcode) { /// o t2SSAT16, t2USAT16: Rs sat_pos Rn static bool DisassembleThumb2Sat(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands + const MCInstrDesc &MCID = ARMInsts[Opcode]; + NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands // Disassemble the register def. MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::rGPRRegClassID, @@ -1635,8 +1635,8 @@ static bool DisassembleThumb2Sat(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb2DPBinImm(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -1659,7 +1659,7 @@ static bool DisassembleThumb2DPBinImm(MCInst &MI, unsigned Opcode, if (TwoReg) { assert(NumOps >= 3 && "Expect >= 3 operands"); int Idx; - if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) { + if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) { // Process tied_to operand constraint. MI.addOperand(MI.getOperand(Idx)); } else { @@ -1907,8 +1907,8 @@ static bool DisassembleThumb2PreLoad(MCInst &MI, unsigned Opcode, uint32_t insn, // t2PLDs: Rn Rm imm2=Inst{5-4} // Same pattern applies for t2PLDW* and t2PLI*. - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -2073,8 +2073,8 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode, // See, for example, A6.3.7 Load word: Table A6-18 Load word. if (Load && Rn == 15) return DisassembleThumb2Ldpci(MI, Opcode, insn, NumOps, NumOpsAdded, B); - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -2085,7 +2085,7 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode, "Expect >= 3 operands and first two as reg operands"); bool ThreeReg = (OpInfo[2].RegClass > 0); - bool TIED_TO = ThreeReg && TID.getOperandConstraint(2, TOI::TIED_TO) != -1; + bool TIED_TO = ThreeReg && MCID.getOperandConstraint(2, MCOI::TIED_TO) != -1; bool Imm12 = !ThreeReg && slice(insn, 23, 23) == 1; // ARMInstrThumb2.td // Build the register operands, followed by the immediate. 
@@ -2160,8 +2160,8 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode, static bool DisassembleThumb2DPReg(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetInstrDesc &TID = ARMInsts[Opcode]; - const TargetOperandInfo *OpInfo = TID.OpInfo; + const MCInstrDesc &MCID = ARMInsts[Opcode]; + const MCOperandInfo *OpInfo = MCID.OpInfo; unsigned &OpIdx = NumOpsAdded; OpIdx = 0; @@ -2214,7 +2214,7 @@ static bool DisassembleThumb2DPReg(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb2Mul(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; assert(NumOps >= 3 && OpInfo[0].RegClass == ARM::rGPRRegClassID && @@ -2259,7 +2259,7 @@ static bool DisassembleThumb2Mul(MCInst &MI, unsigned Opcode, uint32_t insn, static bool DisassembleThumb2LongMul(MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) { - const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo; assert(NumOps >= 3 && OpInfo[0].RegClass == ARM::rGPRRegClassID && diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp index f6d0242..2df0053 100644 --- a/lib/Target/ARM/MLxExpansionPass.cpp +++ b/lib/Target/ARM/MLxExpansionPass.cpp @@ -137,11 +137,11 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const { bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const { // FIXME: Detect integer instructions properly. - const TargetInstrDesc &TID = MI->getDesc(); - unsigned Domain = TID.TSFlags & ARMII::DomainMask; - if (TID.mayStore()) + const MCInstrDesc &MCID = MI->getDesc(); + unsigned Domain = MCID.TSFlags & ARMII::DomainMask; + if (MCID.mayStore()) return false; - unsigned Opcode = TID.getOpcode(); + unsigned Opcode = MCID.getOpcode(); if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD) return false; if ((Domain & ARMII::DomainVFP) || (Domain & ARMII::DomainNEON)) @@ -218,18 +218,18 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI, ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NextOp).getImm(); unsigned PredReg = MI->getOperand(++NextOp).getReg(); - const TargetInstrDesc &TID1 = TII->get(MulOpc); - const TargetInstrDesc &TID2 = TII->get(AddSubOpc); - unsigned TmpReg = MRI->createVirtualRegister(TID1.getRegClass(0, TRI)); + const MCInstrDesc &MCID1 = TII->get(MulOpc); + const MCInstrDesc &MCID2 = TII->get(AddSubOpc); + unsigned TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0, TRI)); - MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), TID1, TmpReg) + MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), MCID1, TmpReg) .addReg(Src1Reg, getKillRegState(Src1Kill)) .addReg(Src2Reg, getKillRegState(Src2Kill)); if (HasLane) MIB.addImm(LaneImm); MIB.addImm(Pred).addReg(PredReg); - MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), TID2) + MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), MCID2) .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstDead)); if (NegAcc) { @@ -273,15 +273,15 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) { continue; } - const TargetInstrDesc &TID = MI->getDesc(); - if (TID.isBarrier()) { + const MCInstrDesc &MCID = MI->getDesc(); + if (MCID.isBarrier()) { clearStack(); Skip = 0; ++MII; continue; } - unsigned Domain = 
TID.TSFlags & ARMII::DomainMask; + unsigned Domain = MCID.TSFlags & ARMII::DomainMask; if (Domain == ARMII::DomainGeneral) { if (++Skip == 2) // Assume dual issues of non-VFP / NEON instructions. @@ -291,7 +291,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) { unsigned MulOpc, AddSubOpc; bool NegAcc, HasLane; - if (!TII->isFpMLxInstruction(TID.getOpcode(), + if (!TII->isFpMLxInstruction(MCID.getOpcode(), MulOpc, AddSubOpc, NegAcc, HasLane) || !FindMLxHazard(MI)) pushStack(MI); diff --git a/lib/Target/ARM/Makefile b/lib/Target/ARM/Makefile index 65a6494..6472c53 100644 --- a/lib/Target/ARM/Makefile +++ b/lib/Target/ARM/Makefile @@ -12,9 +12,8 @@ LIBRARYNAME = LLVMARMCodeGen TARGET = ARM # Make sure that tblgen is run, first thing. -BUILT_SOURCES = ARMGenRegisterInfo.h.inc ARMGenRegisterNames.inc \ - ARMGenRegisterInfo.inc ARMGenInstrNames.inc \ - ARMGenInstrInfo.inc ARMGenAsmWriter.inc ARMGenAsmMatcher.inc \ +BUILT_SOURCES = ARMGenRegisterInfo.inc ARMGenInstrInfo.inc \ + ARMGenAsmWriter.inc ARMGenAsmMatcher.inc \ ARMGenDAGISel.inc ARMGenSubtarget.inc \ ARMGenCodeEmitter.inc ARMGenCallingConv.inc \ ARMGenDecoderTables.inc ARMGenEDInfo.inc \ diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp index e56d481..48211d8 100644 --- a/lib/Target/ARM/Thumb1FrameLowering.cpp +++ b/lib/Target/ARM/Thumb1FrameLowering.cpp @@ -160,7 +160,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const { // will be allocated after this, so we can still use the base pointer // to reference locals. if (RegInfo->hasBasePointer(MF)) - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), BasePtr).addReg(ARM::SP); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), BasePtr) + .addReg(ARM::SP)); // If the frame has variable sized objects then the epilogue must restore // the sp from fp. 
We can assume there's an FP here since hasFP already @@ -177,7 +178,7 @@ static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) { } static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) { - if (MI->getOpcode() == ARM::tRestore && + if (MI->getOpcode() == ARM::tLDRspi && MI->getOperand(1).isFI() && isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs)) return true; @@ -239,11 +240,13 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF, "No scratch register to restore SP from FP!"); emitThumbRegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes, TII, *RegInfo); - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP) - .addReg(ARM::R4); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), + ARM::SP) + .addReg(ARM::R4)); } else - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP) - .addReg(FramePtr); + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), + ARM::SP) + .addReg(FramePtr)); } else { if (MBBI->getOpcode() == ARM::tBX_RET && &MBB.front() != MBBI && diff --git a/lib/Target/ARM/Thumb1InstrInfo.cpp b/lib/Target/ARM/Thumb1InstrInfo.cpp index 3fbb433..218311d 100644 --- a/lib/Target/ARM/Thumb1InstrInfo.cpp +++ b/lib/Target/ARM/Thumb1InstrInfo.cpp @@ -13,7 +13,6 @@ #include "Thumb1InstrInfo.h" #include "ARM.h" -#include "ARMGenInstrInfo.inc" #include "ARMMachineFunctionInfo.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -37,18 +36,8 @@ void Thumb1InstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const { - bool tDest = ARM::tGPRRegClass.contains(DestReg); - bool tSrc = ARM::tGPRRegClass.contains(SrcReg); - unsigned Opc = ARM::tMOVgpr2gpr; - if (tDest && tSrc) - Opc = ARM::tMOVr; - else if (tSrc) - Opc = ARM::tMOVtgpr2gpr; - else if (tDest) - Opc = ARM::tMOVgpr2tgpr; - - BuildMI(MBB, I, DL, get(Opc), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc))); assert(ARM::GPRRegClass.contains(DestReg, SrcReg) && "Thumb1 can only copy GPR registers"); } @@ -76,7 +65,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineMemOperand::MOStore, MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tSpill)) + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tSTRspi)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); } @@ -105,7 +94,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg) + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tLDRspi), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); } } diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp index 6bf5650..4eb0b6c 100644 --- a/lib/Target/ARM/Thumb1RegisterInfo.cpp +++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp @@ -239,13 +239,13 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB, unsigned Chunk = (1 << 3) - 1; unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes; Bytes -= ThisVal; - const TargetInstrDesc &TID = TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3); + const MCInstrDesc &MCID = TII.get(isSub ? 
ARM::tSUBi3 : ARM::tADDi3); const MachineInstrBuilder MIB = - AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg).setMIFlags(MIFlags)); + AddDefaultT1CC(BuildMI(MBB, MBBI, dl, MCID, DestReg).setMIFlags(MIFlags)); AddDefaultPred(MIB.addReg(BaseReg, RegState::Kill).addImm(ThisVal)); } else { - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg) - .addReg(BaseReg, RegState::Kill) + AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg) + .addReg(BaseReg, RegState::Kill)) .setMIFlags(MIFlags); } BaseReg = DestReg; @@ -291,8 +291,8 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB, } if (ExtraOpc) { - const TargetInstrDesc &TID = TII.get(ExtraOpc); - AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg)) + const MCInstrDesc &MCID = TII.get(ExtraOpc); + AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, MCID, DestReg)) .addReg(DestReg, RegState::Kill) .addImm(((unsigned)NumBytes) & 3) .setMIFlags(MIFlags)); @@ -360,8 +360,8 @@ static void emitThumbConstant(MachineBasicBlock &MBB, if (Imm > 0) emitThumbRegPlusImmediate(MBB, MBBI, dl, DestReg, DestReg, Imm, TII, MRI); if (isSub) { - const TargetInstrDesc &TID = TII.get(ARM::tRSB); - AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg)) + const MCInstrDesc &MCID = TII.get(ARM::tRSB); + AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, MCID, DestReg)) .addReg(DestReg, RegState::Kill)); } } @@ -377,11 +377,9 @@ static void removeOperands(MachineInstr &MI, unsigned i) { static unsigned convertToNonSPOpcode(unsigned Opcode) { switch (Opcode) { case ARM::tLDRspi: - case ARM::tRestore: // FIXME: Should this opcode be here? return ARM::tLDRi; case ARM::tSTRspi: - case ARM::tSpill: // FIXME: Should this opcode be here? return ARM::tSTRi; } @@ -396,7 +394,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx, MachineBasicBlock &MBB = *MI.getParent(); DebugLoc dl = MI.getDebugLoc(); unsigned Opcode = MI.getOpcode(); - const TargetInstrDesc &Desc = MI.getDesc(); + const MCInstrDesc &Desc = MI.getDesc(); unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); if (Opcode == ARM::tADDrSPi) { @@ -419,13 +417,12 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx, unsigned PredReg; if (Offset == 0 && getInstrPredicate(&MI, PredReg) == ARMCC::AL) { // Turn it into a move. - MI.setDesc(TII.get(ARM::tMOVgpr2tgpr)); + MI.setDesc(TII.get(ARM::tMOVr)); MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); - // Remove offset and remaining explicit predicate operands. - do MI.RemoveOperand(FrameRegIdx+1); - while (MI.getNumOperands() > FrameRegIdx+1 && - (!MI.getOperand(FrameRegIdx+1).isReg() || - !MI.getOperand(FrameRegIdx+1).isImm())); + // Remove offset and add predicate operands. + MI.RemoveOperand(FrameRegIdx+1); + MachineInstrBuilder MIB(&MI); + AddDefaultPred(MIB); return true; } @@ -524,7 +521,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx, // If this is a thumb spill / restore, we will be using a constpool load to // materialize the offset. - if (Opcode == ARM::tRestore || Opcode == ARM::tSpill) { + if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) { ImmOp.ChangeToImmediate(0); } else { // Otherwise, it didn't fit. Pull in what we can to simplify the immed. @@ -567,8 +564,9 @@ Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB, // the function, the offset will be negative. Use R12 instead since that's // a call clobbered register that we know won't be used in Thumb1 mode. 
DebugLoc DL; - BuildMI(MBB, I, DL, TII.get(ARM::tMOVtgpr2gpr)). - addReg(ARM::R12, RegState::Define).addReg(Reg, RegState::Kill); + AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr)) + .addReg(ARM::R12, RegState::Define) + .addReg(Reg, RegState::Kill)); // The UseMI is where we would like to restore the register. If there's // interference with R12 before then, however, we'll need to restore it @@ -591,8 +589,8 @@ Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB, } } // Restore the register from R12 - BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVgpr2tgpr)). - addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill); + AddDefaultPred(BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr)). + addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill)); return true; } @@ -653,7 +651,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, assert(Offset && "This code isn't needed if offset already handled!"); unsigned Opcode = MI.getOpcode(); - const TargetInstrDesc &Desc = MI.getDesc(); + const MCInstrDesc &Desc = MI.getDesc(); // Remove predicate first. int PIdx = MI.findFirstPredOperandIdx(); @@ -664,7 +662,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // Use the destination register to materialize sp + offset. unsigned TmpReg = MI.getOperand(0).getReg(); bool UseRR = false; - if (Opcode == ARM::tRestore) { + if (Opcode == ARM::tLDRspi) { if (FrameReg == ARM::SP) emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg, Offset, false, TII, *this); @@ -687,7 +685,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, VReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass); bool UseRR = false; - if (Opcode == ARM::tSpill) { + if (Opcode == ARM::tSTRspi) { if (FrameReg == ARM::SP) emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg, Offset, false, TII, *this); diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp index 45e6937..360ec00 100644 --- a/lib/Target/ARM/Thumb2ITBlockPass.cpp +++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp @@ -98,9 +98,6 @@ static bool isCopy(MachineInstr *MI) { case ARM::MOVr: case ARM::MOVr_TC: case ARM::tMOVr: - case ARM::tMOVgpr2tgpr: - case ARM::tMOVtgpr2gpr: - case ARM::tMOVgpr2gpr: case ARM::t2MOVr: return true; } diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp index d169dbb..51b56aa 100644 --- a/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -15,7 +15,6 @@ #include "ARM.h" #include "ARMConstantPoolValue.h" #include "ARMAddressingModes.h" -#include "ARMGenInstrInfo.inc" #include "ARMMachineFunctionInfo.h" #include "Thumb2InstrInfo.h" #include "llvm/CodeGen/MachineFrameInfo.h" @@ -113,18 +112,8 @@ void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB, if (!ARM::GPRRegClass.contains(DestReg, SrcReg)) return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc); - bool tDest = ARM::tGPRRegClass.contains(DestReg); - bool tSrc = ARM::tGPRRegClass.contains(SrcReg); - unsigned Opc = ARM::tMOVgpr2gpr; - if (tDest && tSrc) - Opc = ARM::tMOVr; - else if (tSrc) - Opc = ARM::tMOVtgpr2gpr; - else if (tDest) - Opc = ARM::tMOVgpr2tgpr; - - BuildMI(MBB, I, DL, get(Opc), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc))); } void Thumb2InstrInfo:: @@ -232,8 +221,8 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB, unsigned Opc = 0; if (DestReg == 
 ARM::SP && BaseReg != ARM::SP) {
        // mov sp, rn. Note t2MOVr cannot be used.
-        BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr),DestReg)
-          .addReg(BaseReg).setMIFlags(MIFlags);
+        AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),DestReg)
+          .addReg(BaseReg).setMIFlags(MIFlags));
        BaseReg = ARM::SP;
        continue;
      }
@@ -252,7 +241,7 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
     }
 
     // sub rd, sp, so_imm
-    Opc = isSub ? ARM::t2SUBrSPi : ARM::t2ADDrSPi;
+    Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
     if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
       NumBytes = 0;
     } else {
@@ -396,7 +385,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
   unsigned Opcode = MI.getOpcode();
-  const TargetInstrDesc &Desc = MI.getDesc();
+  const MCInstrDesc &Desc = MI.getDesc();
   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
   bool isSub = false;
 
@@ -410,25 +399,24 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
     unsigned PredReg;
     if (Offset == 0 && getInstrPredicate(&MI, PredReg) == ARMCC::AL) {
       // Turn it into a move.
-      MI.setDesc(TII.get(ARM::tMOVgpr2gpr));
+      MI.setDesc(TII.get(ARM::tMOVr));
       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
       // Remove offset and remaining explicit predicate operands.
       do MI.RemoveOperand(FrameRegIdx+1);
-      while (MI.getNumOperands() > FrameRegIdx+1 &&
-             (!MI.getOperand(FrameRegIdx+1).isReg() ||
-              !MI.getOperand(FrameRegIdx+1).isImm()));
+      while (MI.getNumOperands() > FrameRegIdx+1);
+      MachineInstrBuilder MIB(&MI);
+      AddDefaultPred(MIB);
       return true;
     }
 
-    bool isSP = FrameReg == ARM::SP;
     bool HasCCOut = Opcode != ARM::t2ADDri12;
 
     if (Offset < 0) {
       Offset = -Offset;
       isSub = true;
-      MI.setDesc(TII.get(isSP ? ARM::t2SUBrSPi : ARM::t2SUBri));
+      MI.setDesc(TII.get(ARM::t2SUBri));
     } else {
-      MI.setDesc(TII.get(isSP ? ARM::t2ADDrSPi : ARM::t2ADDri));
+      MI.setDesc(TII.get(ARM::t2ADDri));
     }
 
     // Common case: small offset, fits into instruction.
@@ -444,9 +432,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
     // Another common case: imm12.
     if (Offset < 4096 &&
         (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
-      unsigned NewOpc = isSP
-        ? (isSub ? ARM::t2SUBrSPi12 : ARM::t2ADDrSPi12)
-        : (isSub ? ARM::t2SUBri12 : ARM::t2ADDri12);
+      unsigned NewOpc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
       MI.setDesc(TII.get(NewOpc));
       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
       MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
@@ -579,8 +565,7 @@ void
 Thumb2InstrInfo::scheduleTwoAddrSource(MachineInstr *SrcMI,
                                        MachineInstr *UseMI,
                                        const TargetRegisterInfo &TRI) const {
-  if (SrcMI->getOpcode() != ARM::tMOVgpr2gpr ||
-      SrcMI->getOperand(1).isKill())
+  if (SrcMI->getOpcode() != ARM::tMOVr || SrcMI->getOperand(1).isKill())
     return;
 
   unsigned PredReg = 0;
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index ce2e966..24a037c 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -57,10 +57,8 @@ namespace {
   static const ReduceEntry ReduceTable[] = {
     // Wide, Narrow1, Narrow2, imm1,imm2, lo1, lo2, P/C, PF, S
     { ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0,0, 0,0 },
-    { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0,0 },
+    { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0,1 },
     { ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0,0 },
-    // Note: immediate scale is 4.
-    { ARM::t2ADDrSPi,ARM::tADDrSPi,0, 8, 0, 1, 0, 1,0, 0,1 },
     { ARM::t2ADDSri,ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 0,1 },
     { ARM::t2ADDSrr,ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 0,1 },
     { ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 1,0 },
@@ -84,7 +82,7 @@ namespace {
     { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 0,0 },
     { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 0,1 },
     // FIXME: Do we need the 16-bit 'S' variant?
-    { ARM::t2MOVr,ARM::tMOVgpr2gpr,0, 0, 0, 0, 0, 1,0, 0,0 },
+    { ARM::t2MOVr,ARM::tMOVr, 0, 0, 0, 0, 0, 1,0, 0,0 },
     { ARM::t2MOVCCr,0, ARM::tMOVCCr, 0, 0, 0, 0, 0,1, 0,0 },
     { ARM::t2MOVCCi,0, ARM::tMOVCCi, 0, 8, 0, 1, 0,1, 0,0 },
     { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 1,0 },
@@ -189,8 +187,8 @@ Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
   }
 }
 
-static bool HasImplicitCPSRDef(const TargetInstrDesc &TID) {
-  for (const unsigned *Regs = TID.ImplicitDefs; *Regs; ++Regs)
+static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
+  for (const unsigned *Regs = MCID.ImplicitDefs; *Regs; ++Regs)
     if (*Regs == ARM::CPSR)
       return true;
   return false;
@@ -291,7 +289,7 @@ static bool VerifyLowRegs(MachineInstr *MI) {
                  Opc == ARM::t2LDMDB || Opc == ARM::t2LDMIA_UPD ||
                  Opc == ARM::t2LDMDB_UPD);
   bool isLROk = (Opc == ARM::t2STMIA_UPD || Opc == ARM::t2STMDB_UPD);
-  bool isSPOk = isPCOk || isLROk || (Opc == ARM::t2ADDrSPi);
+  bool isSPOk = isPCOk || isLROk;
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg() || MO.isImplicit())
       continue;
@@ -481,14 +479,54 @@ bool
 Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry, bool LiveCPSR,
                                 MachineInstr *CPSRDef) {
+  unsigned Opc = MI->getOpcode();
+  if (Opc == ARM::t2ADDri) {
+    // If the source register is SP, try to reduce to tADDrSPi, otherwise
+    // it's a normal reduce.
+    if (MI->getOperand(1).getReg() != ARM::SP) {
+      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef))
+        return true;
+      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
+    }
+    // Try to reduce to tADDrSPi.
+    unsigned Imm = MI->getOperand(2).getImm();
+    // The immediate must be in range, the destination register must be a low
+    // reg, the predicate must be "always" and the condition flags must not
+    // be being set.
+    if (Imm & 3 || Imm > 1024)
+      return false;
+    if (!isARMLowRegister(MI->getOperand(0).getReg()))
+      return false;
+    if (MI->getOperand(3).getImm() != ARMCC::AL)
+      return false;
+    const MCInstrDesc &MCID = MI->getDesc();
+    if (MCID.hasOptionalDef() &&
+        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
+      return false;
+
+    MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(),
+                                      TII->get(ARM::tADDrSPi))
+      .addOperand(MI->getOperand(0))
+      .addOperand(MI->getOperand(1))
+      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
+
+    // Transfer MI flags.
+    MIB.setMIFlags(MI->getFlags());
+
+    DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " <<*MIB);
+
+    MBB.erase(MI);
+    ++NumNarrows;
+    return true;
+  }
+
   if (Entry.LowRegs1 && !VerifyLowRegs(MI))
     return false;
 
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (TID.mayLoad() || TID.mayStore())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (MCID.mayLoad() || MCID.mayStore())
     return ReduceLoadStore(MBB, MI, Entry);
 
-  unsigned Opc = MI->getOpcode();
   switch (Opc) {
   default: break;
   case ARM::t2ADDSri:
@@ -531,13 +569,6 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
         return true;
     return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
   }
-  case ARM::t2ADDrSPi: {
-    static const ReduceEntry NarrowEntry =
-      { ARM::t2ADDrSPi,ARM::tADDspi, 0, 7, 0, 1, 0, 1, 0, 0,1 };
-    if (MI->getOperand(0).getReg() == ARM::SP)
-      return ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, CPSRDef);
-    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
-  }
   }
   return false;
 }
@@ -576,23 +607,23 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
   }
 
   // Check if it's possible / necessary to transfer the predicate.
-  const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc2);
+  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
   unsigned PredReg = 0;
   ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
   bool SkipPred = false;
   if (Pred != ARMCC::AL) {
-    if (!NewTID.isPredicable())
+    if (!NewMCID.isPredicable())
       // Can't transfer predicate, fail.
       return false;
   } else {
-    SkipPred = !NewTID.isPredicable();
+    SkipPred = !NewMCID.isPredicable();
   }
 
   bool HasCC = false;
   bool CCDead = false;
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (TID.hasOptionalDef()) {
-    unsigned NumOps = TID.getNumOperands();
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (MCID.hasOptionalDef()) {
+    unsigned NumOps = MCID.getNumOperands();
     HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
     if (HasCC && MI->getOperand(NumOps-1).isDead())
       CCDead = true;
@@ -602,15 +633,15 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
 
   // Avoid adding a false dependency on partial flag update by some 16-bit
   // instructions which has the 's' bit set.
-  if (Entry.PartFlag && NewTID.hasOptionalDef() && HasCC &&
+  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
       canAddPseudoFlagDep(CPSRDef, MI))
     return false;
 
   // Add the 16-bit instruction.
   DebugLoc dl = MI->getDebugLoc();
-  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
+  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewMCID);
   MIB.addOperand(MI->getOperand(0));
-  if (NewTID.hasOptionalDef()) {
+  if (NewMCID.hasOptionalDef()) {
     if (HasCC)
       AddDefaultT1CC(MIB, CCDead);
     else
@@ -618,11 +649,11 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
   }
 
   // Transfer the rest of operands.
-  unsigned NumOps = TID.getNumOperands();
+  unsigned NumOps = MCID.getNumOperands();
   for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
-    if (i < NumOps && TID.OpInfo[i].isOptionalDef())
+    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
       continue;
-    if (SkipPred && TID.OpInfo[i].isPredicate())
+    if (SkipPred && MCID.OpInfo[i].isPredicate())
       continue;
     MIB.addOperand(MI->getOperand(i));
   }
@@ -645,47 +676,44 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
     return false;
 
   unsigned Limit = ~0U;
-  unsigned Scale = (Entry.WideOpc == ARM::t2ADDrSPi) ? 4 : 1;
   if (Entry.Imm1Limit)
-    Limit = ((1 << Entry.Imm1Limit) - 1) * Scale;
+    Limit = (1 << Entry.Imm1Limit) - 1;
 
-  const TargetInstrDesc &TID = MI->getDesc();
-  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
-    if (TID.OpInfo[i].isPredicate())
+  const MCInstrDesc &MCID = MI->getDesc();
+  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
+    if (MCID.OpInfo[i].isPredicate())
       continue;
     const MachineOperand &MO = MI->getOperand(i);
     if (MO.isReg()) {
       unsigned Reg = MO.getReg();
       if (!Reg || Reg == ARM::CPSR)
         continue;
-      if (Entry.WideOpc == ARM::t2ADDrSPi && Reg == ARM::SP)
-        continue;
       if (Entry.LowRegs1 && !isARMLowRegister(Reg))
         return false;
     } else if (MO.isImm() &&
-               !TID.OpInfo[i].isPredicate()) {
-      if (((unsigned)MO.getImm()) > Limit || (MO.getImm() & (Scale-1)) != 0)
+               !MCID.OpInfo[i].isPredicate()) {
+      if (((unsigned)MO.getImm()) > Limit)
         return false;
     }
   }
 
   // Check if it's possible / necessary to transfer the predicate.
-  const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc1);
+  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
   unsigned PredReg = 0;
   ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
   bool SkipPred = false;
   if (Pred != ARMCC::AL) {
-    if (!NewTID.isPredicable())
+    if (!NewMCID.isPredicable())
       // Can't transfer predicate, fail.
       return false;
   } else {
-    SkipPred = !NewTID.isPredicable();
+    SkipPred = !NewMCID.isPredicable();
   }
 
   bool HasCC = false;
   bool CCDead = false;
-  if (TID.hasOptionalDef()) {
-    unsigned NumOps = TID.getNumOperands();
+  if (MCID.hasOptionalDef()) {
+    unsigned NumOps = MCID.getNumOperands();
     HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
     if (HasCC && MI->getOperand(NumOps-1).isDead())
       CCDead = true;
@@ -695,15 +723,15 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
 
   // Avoid adding a false dependency on partial flag update by some 16-bit
   // instructions which has the 's' bit set.
-  if (Entry.PartFlag && NewTID.hasOptionalDef() && HasCC &&
+  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
       canAddPseudoFlagDep(CPSRDef, MI))
     return false;
 
   // Add the 16-bit instruction.
   DebugLoc dl = MI->getDebugLoc();
-  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
+  MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewMCID);
   MIB.addOperand(MI->getOperand(0));
-  if (NewTID.hasOptionalDef()) {
+  if (NewMCID.hasOptionalDef()) {
     if (HasCC)
       AddDefaultT1CC(MIB, CCDead);
     else
@@ -711,29 +739,25 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
   }
 
   // Transfer the rest of operands.
-  unsigned NumOps = TID.getNumOperands();
+  unsigned NumOps = MCID.getNumOperands();
   for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
-    if (i < NumOps && TID.OpInfo[i].isOptionalDef())
+    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
       continue;
-    if ((TID.getOpcode() == ARM::t2RSBSri ||
-         TID.getOpcode() == ARM::t2RSBri) && i == 2)
+    if ((MCID.getOpcode() == ARM::t2RSBSri ||
+         MCID.getOpcode() == ARM::t2RSBri) && i == 2)
       // Skip the zero immediate operand, it's now implicit.
       continue;
-    bool isPred = (i < NumOps && TID.OpInfo[i].isPredicate());
+    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
     if (SkipPred && isPred)
       continue;
     const MachineOperand &MO = MI->getOperand(i);
-    if (Scale > 1 && !isPred && MO.isImm())
-      MIB.addImm(MO.getImm() / Scale);
-    else {
-      if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
-        // Skip implicit def of CPSR. Either it's modeled as an optional
-        // def now or it's already an implicit def on the new instruction.
-        continue;
-      MIB.addOperand(MO);
-    }
+    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
+      // Skip implicit def of CPSR. Either it's modeled as an optional
+      // def now or it's already an implicit def on the new instruction.
+      continue;
+    MIB.addOperand(MO);
   }
-  if (!TID.isPredicable() && NewTID.isPredicable())
+  if (!MCID.isPredicable() && NewMCID.isPredicable())
     AddDefaultPred(MIB);
 
   // Transfer MI flags.