author    | Stephen Hines <srhines@google.com> | 2013-08-07 15:07:10 -0700
committer | Stephen Hines <srhines@google.com> | 2013-08-07 15:07:10 -0700
commit    | fab2daa4a1127ecb217abe2b07c1769122b6fee1 (patch)
tree      | 268ebfd1963fd98ba412e76819afdf95a7d4267b /lib/Target
parent    | 8197ac1c1a0a91baa70c4dea8cb488f254ef974c (diff)
parent    | 10251753b6897adcd22cc981c0cc42f348c109de (diff)
Merge commit '10251753b6897adcd22cc981c0cc42f348c109de' into merge-20130807
Conflicts:
lib/Archive/ArchiveReader.cpp
lib/Support/Unix/PathV2.inc
Change-Id: I29d8c1e321a4a380b6013f00bac6a8e4b593cc4e
Diffstat (limited to 'lib/Target')
451 files changed, 25485 insertions, 22840 deletions
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.cpp b/lib/Target/AArch64/AArch64AsmPrinter.cpp index 47ebb82..9498722 100644 --- a/lib/Target/AArch64/AArch64AsmPrinter.cpp +++ b/lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -27,16 +27,6 @@ using namespace llvm; -MachineLocation -AArch64AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const { - // See emitFrameIndexDebugValue in InstrInfo for where this instruction is - // expected to be created. - assert(MI->getNumOperands() == 4 && MI->getOperand(0).isReg() - && MI->getOperand(1).isImm() && "unexpected custom DBG_VALUE"); - return MachineLocation(MI->getOperand(0).getReg(), - MI->getOperand(1).getImm()); -} - /// Try to print a floating-point register as if it belonged to a specified /// register-class. For example the inline asm operand modifier "b" requires its /// argument to be printed as "bN". @@ -271,24 +261,6 @@ bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, return false; } -void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI, - raw_ostream &OS) { - unsigned NOps = MI->getNumOperands(); - assert(NOps==4); - OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: "; - // cast away const; DIetc do not take const operands for some reason. - DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata())); - OS << V.getName(); - OS << " <- "; - // Frame address. Currently handles register +- offset only. - assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm()); - OS << '[' << AArch64InstPrinter::getRegisterName(MI->getOperand(0).getReg()); - OS << '+' << MI->getOperand(1).getImm(); - OS << ']'; - OS << "+" << MI->getOperand(NOps - 2).getImm(); -} - - #include "AArch64GenMCPseudoLowering.inc" void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) { @@ -296,18 +268,6 @@ void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) { if (emitPseudoExpansionLowering(OutStreamer, MI)) return; - switch (MI->getOpcode()) { - case AArch64::DBG_VALUE: { - if (isVerbose() && OutStreamer.hasRawTextSupport()) { - SmallString<128> TmpStr; - raw_svector_ostream OS(TmpStr); - PrintDebugValueComment(MI, OS); - OutStreamer.EmitRawText(StringRef(OS.str())); - } - return; - } - } - MCInst TmpInst; LowerAArch64MachineInstrToMCInst(MI, TmpInst, *this); OutStreamer.EmitInstruction(TmpInst); @@ -329,7 +289,7 @@ void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) { for (unsigned i = 0, e = Stubs.size(); i != e; ++i) { OutStreamer.EmitLabel(Stubs[i].first); OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(), - TD->getPointerSize(0), 0); + TD->getPointerSize(0)); } Stubs.clear(); } diff --git a/lib/Target/AArch64/AArch64AsmPrinter.h b/lib/Target/AArch64/AArch64AsmPrinter.h index af0c9fe..824f003 100644 --- a/lib/Target/AArch64/AArch64AsmPrinter.h +++ b/lib/Target/AArch64/AArch64AsmPrinter.h @@ -55,8 +55,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64AsmPrinter : public AsmPrinter { unsigned AsmVariant, const char *ExtraCode, raw_ostream &O); - void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); - /// printSymbolicAddress - Given some kind of reasonably bare symbolic /// reference, print out the appropriate asm string to represent it. 
If /// appropriate, a relocation-specifier will be produced, composed of a @@ -67,8 +65,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64AsmPrinter : public AsmPrinter { bool PrintImmediatePrefix, StringRef Suffix, raw_ostream &O); - MachineLocation getDebugValueLocation(const MachineInstr *MI) const; - virtual const char *getPassName() const { return "AArch64 Assembly Printer"; } diff --git a/lib/Target/AArch64/AArch64CallingConv.td b/lib/Target/AArch64/AArch64CallingConv.td index b880d83..bff7eeb 100644 --- a/lib/Target/AArch64/AArch64CallingConv.td +++ b/lib/Target/AArch64/AArch64CallingConv.td @@ -61,7 +61,7 @@ def CC_A64_APCS : CallingConv<[ // Vectors and Floating-point types. CCIfType<[v2i8], CCBitConvertToType<f16>>, CCIfType<[v4i8, v2i16], CCBitConvertToType<f32>>, - CCIfType<[v8i8, v4i16, v2i32, v2f32], CCBitConvertToType<f64>>, + CCIfType<[v8i8, v4i16, v2i32, v2f32, v1i64], CCBitConvertToType<f64>>, CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCBitConvertToType<f128>>, diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp index 8b907b2..7318230 100644 --- a/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -54,7 +54,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const { DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); MachineModuleInfo &MMI = MF.getMMI(); - const MCRegisterInfo &MRI = MMI.getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); bool NeedsFrameMoves = MMI.hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry(); @@ -97,7 +97,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const { .addSym(SPLabel); MachineLocation Dst(MachineLocation::VirtualFP); - unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true); + unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true); MMI.addFrameInst( MCCFIInstruction::createDefCfa(SPLabel, Reg, -NumInitialBytes)); } @@ -132,7 +132,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const { MCSymbol *FPLabel = MMI.getContext().CreateTempSymbol(); BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL)) .addSym(FPLabel); - unsigned Reg = MRI.getDwarfRegNum(AArch64::X29, true); + unsigned Reg = MRI->getDwarfRegNum(AArch64::X29, true); unsigned Offset = MFI->getObjectOffset(X29FrameIdx); MMI.addFrameInst(MCCFIInstruction::createDefCfa(FPLabel, Reg, Offset)); } @@ -165,7 +165,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const { .addSym(CSLabel); MachineLocation Dst(MachineLocation::VirtualFP); - unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true); + unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true); unsigned Offset = NumResidualBytes + NumInitialBytes; MMI.addFrameInst(MCCFIInstruction::createDefCfa(CSLabel, Reg, -Offset)); } @@ -183,7 +183,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const { for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(), E = CSI.end(); I != E; ++I) { unsigned Offset = MFI->getObjectOffset(I->getFrameIdx()); - unsigned Reg = MRI.getDwarfRegNum(I->getReg(), true); + unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true); MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, Reg, Offset)); } } @@ -425,7 +425,7 @@ AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI, - LoadStoreMethod PossClasses[], + const 
LoadStoreMethod PossClasses[], unsigned NumClasses) const { DebugLoc DL = MBB.findDebugLoc(MBBI); MachineFunction &MF = *MBB.getParent(); @@ -528,11 +528,11 @@ AArch64FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB, if (CSI.empty()) return false; - static LoadStoreMethod PossibleClasses[] = { + static const LoadStoreMethod PossibleClasses[] = { {&AArch64::GPR64RegClass, AArch64::LSPair64_STR, AArch64::LS64_STR}, {&AArch64::FPR64RegClass, AArch64::LSFPPair64_STR, AArch64::LSFP64_STR}, }; - unsigned NumClasses = llvm::array_lengthof(PossibleClasses); + const unsigned NumClasses = llvm::array_lengthof(PossibleClasses); emitFrameMemOps(/* isPrologue = */ true, MBB, MBBI, CSI, TRI, PossibleClasses, NumClasses); @@ -549,11 +549,11 @@ AArch64FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, if (CSI.empty()) return false; - static LoadStoreMethod PossibleClasses[] = { + static const LoadStoreMethod PossibleClasses[] = { {&AArch64::GPR64RegClass, AArch64::LSPair64_LDR, AArch64::LS64_LDR}, {&AArch64::FPR64RegClass, AArch64::LSFPPair64_LDR, AArch64::LSFP64_LDR}, }; - unsigned NumClasses = llvm::array_lengthof(PossibleClasses); + const unsigned NumClasses = llvm::array_lengthof(PossibleClasses); emitFrameMemOps(/* isPrologue = */ false, MBB, MBBI, CSI, TRI, PossibleClasses, NumClasses); diff --git a/lib/Target/AArch64/AArch64FrameLowering.h b/lib/Target/AArch64/AArch64FrameLowering.h index 45ea0ec..032dd90 100644 --- a/lib/Target/AArch64/AArch64FrameLowering.h +++ b/lib/Target/AArch64/AArch64FrameLowering.h @@ -90,7 +90,7 @@ public: MachineBasicBlock::iterator MI, const std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI, - LoadStoreMethod PossibleClasses[], + const LoadStoreMethod PossibleClasses[], unsigned NumClasses) const; diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 2e37cb4..ee819e0 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -70,10 +70,11 @@ public: /// Used for pre-lowered address-reference nodes, so we already know /// the fields match. This operand's job is simply to add an - /// appropriate shift operand (i.e. 0) to the MOVZ/MOVK instruction. + /// appropriate shift operand to the MOVZ/MOVK instruction. 
+ template<unsigned LogShift> bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) { Imm = N; - Shift = CurDAG->getTargetConstant(0, MVT::i32); + Shift = CurDAG->getTargetConstant(LogShift, MVT::i32); return true; } @@ -258,15 +259,15 @@ AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL, LitAddr = CurDAG->getMachineNode( AArch64::MOVZxii, DL, PtrVT, CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3), - CurDAG->getTargetConstant(0, MVT::i32)); + CurDAG->getTargetConstant(3, MVT::i32)); LitAddr = CurDAG->getMachineNode( AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0), CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC), - CurDAG->getTargetConstant(0, MVT::i32)); + CurDAG->getTargetConstant(2, MVT::i32)); LitAddr = CurDAG->getMachineNode( AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0), CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC), - CurDAG->getTargetConstant(0, MVT::i32)); + CurDAG->getTargetConstant(1, MVT::i32)); LitAddr = CurDAG->getMachineNode( AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0), CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC), diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 5a53339..44b691b 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -42,6 +42,8 @@ static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) { AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) { + const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>(); + // SIMD compares set the entire lane's bits to 1 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); @@ -53,6 +55,21 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) addRegisterClass(MVT::f64, &AArch64::FPR64RegClass); addRegisterClass(MVT::f128, &AArch64::FPR128RegClass); + if (Subtarget->hasNEON()) { + // And the vectors + addRegisterClass(MVT::v8i8, &AArch64::VPR64RegClass); + addRegisterClass(MVT::v4i16, &AArch64::VPR64RegClass); + addRegisterClass(MVT::v2i32, &AArch64::VPR64RegClass); + addRegisterClass(MVT::v1i64, &AArch64::VPR64RegClass); + addRegisterClass(MVT::v2f32, &AArch64::VPR64RegClass); + addRegisterClass(MVT::v16i8, &AArch64::VPR128RegClass); + addRegisterClass(MVT::v8i16, &AArch64::VPR128RegClass); + addRegisterClass(MVT::v4i32, &AArch64::VPR128RegClass); + addRegisterClass(MVT::v2i64, &AArch64::VPR128RegClass); + addRegisterClass(MVT::v4f32, &AArch64::VPR128RegClass); + addRegisterClass(MVT::v2f64, &AArch64::VPR128RegClass); + } + computeRegisterProperties(); // We combine OR nodes for bitfield and NEON BSL operations. 
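The LogShift change above is easiest to check with concrete chunk arithmetic: each MOVZ/MOVK in the G3..G0 chain targets one 16-bit group of the 64-bit value, and the instruction's shift field must now carry the group index (a logical shift of 16 * LogShift bits) rather than the old hard-coded 0. A minimal standalone sketch; the sample address and print format are illustrative, not from the patch:

```cpp
#include <cstdint>
#include <cstdio>

// Prints the sequence the G3..G0 pattern builds: MOVZ writes the top
// 16-bit group, each MOVK patches one lower group at shift 16 * LogShift.
int main() {
  const uint64_t Addr = 0x0123456789abcdefULL; // illustrative address
  for (int LogShift = 3; LogShift >= 0; --LogShift) {
    unsigned Group = (Addr >> (16 * LogShift)) & 0xffff;
    std::printf("%s x0, #0x%04x, lsl #%d\n",
                LogShift == 3 ? "movz" : "movk", Group, 16 * LogShift);
  }
  return 0;
}
```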
@@ -249,11 +266,33 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) setTruncStoreAction(MVT::f64, MVT::f16, Expand); setTruncStoreAction(MVT::f32, MVT::f16, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); - setExceptionPointerRegister(AArch64::X0); setExceptionSelectorRegister(AArch64::X1); + + if (Subtarget->hasNEON()) { + setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); + + setOperationAction(ISD::SETCC, MVT::v8i8, Custom); + setOperationAction(ISD::SETCC, MVT::v16i8, Custom); + setOperationAction(ISD::SETCC, MVT::v4i16, Custom); + setOperationAction(ISD::SETCC, MVT::v8i16, Custom); + setOperationAction(ISD::SETCC, MVT::v2i32, Custom); + setOperationAction(ISD::SETCC, MVT::v4i32, Custom); + setOperationAction(ISD::SETCC, MVT::v2i64, Custom); + setOperationAction(ISD::SETCC, MVT::v2f32, Custom); + setOperationAction(ISD::SETCC, MVT::v4f32, Custom); + setOperationAction(ISD::SETCC, MVT::v2f64, Custom); + } } EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { @@ -267,16 +306,16 @@ EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord, unsigned &LdrOpc, unsigned &StrOpc) { - static unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword, - AArch64::LDXR_word, AArch64::LDXR_dword}; - static unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword, - AArch64::LDAXR_word, AArch64::LDAXR_dword}; - static unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword, - AArch64::STXR_word, AArch64::STXR_dword}; - static unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword, - AArch64::STLXR_word, AArch64::STLXR_dword}; - - unsigned *LoadOps, *StoreOps; + static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword, + AArch64::LDXR_word, AArch64::LDXR_dword}; + static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword, + AArch64::LDAXR_word, AArch64::LDAXR_dword}; + static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword, + AArch64::STXR_word, AArch64::STXR_dword}; + static const unsigned StoreRels[] = {AArch64::STLXR_byte,AArch64::STLXR_hword, + AArch64::STLXR_word, AArch64::STLXR_dword}; + + const unsigned *LoadOps, *StoreOps; if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent) LoadOps = LoadAcqs; else @@ -780,7 +819,22 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge"; case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall"; - default: return NULL; + case AArch64ISD::NEON_BSL: + return "AArch64ISD::NEON_BSL"; + case AArch64ISD::NEON_MOVIMM: + return "AArch64ISD::NEON_MOVIMM"; + case AArch64ISD::NEON_MVNIMM: + return "AArch64ISD::NEON_MVNIMM"; + case 
AArch64ISD::NEON_FMOVIMM: + return "AArch64ISD::NEON_FMOVIMM"; + case AArch64ISD::NEON_CMP: + return "AArch64ISD::NEON_CMP"; + case AArch64ISD::NEON_CMPZ: + return "AArch64ISD::NEON_CMPZ"; + case AArch64ISD::NEON_TST: + return "AArch64ISD::NEON_TST"; + default: + return NULL; } } @@ -1082,9 +1136,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &IsTailCall = CLI.IsTailCall; @@ -2051,7 +2105,7 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, AArch64II::MO_TPREL_G0_NC); TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, - DAG.getTargetConstant(0, MVT::i32)), 0); + DAG.getTargetConstant(1, MVT::i32)), 0); TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, TPOff, LoVar, DAG.getTargetConstant(0, MVT::i32)), 0); @@ -2233,6 +2287,213 @@ AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { DAG.getConstant(A64CC::NE, MVT::i32)); } +static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) { + SDLoc DL(Op); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); + EVT VT = Op.getValueType(); + bool Invert = false; + SDValue Op0, Op1; + unsigned Opcode; + + if (LHS.getValueType().isInteger()) { + + // Attempt to use Vector Integer Compare Mask Test instruction. + // TST = icmp ne (and (op0, op1), zero). + if (CC == ISD::SETNE) { + if (((LHS.getOpcode() == ISD::AND) && + ISD::isBuildVectorAllZeros(RHS.getNode())) || + ((RHS.getOpcode() == ISD::AND) && + ISD::isBuildVectorAllZeros(LHS.getNode()))) { + + SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS; + SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0)); + SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1)); + return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS); + } + } + + // Attempt to use Vector Integer Compare Mask against Zero instr (Signed). + // Note: Compare against Zero does not support unsigned predicates. + if ((ISD::isBuildVectorAllZeros(RHS.getNode()) || + ISD::isBuildVectorAllZeros(LHS.getNode())) && + !isUnsignedIntSetCC(CC)) { + + // If LHS is the zero value, swap operands and CondCode. + if (ISD::isBuildVectorAllZeros(LHS.getNode())) { + CC = getSetCCSwappedOperands(CC); + Op0 = RHS; + } else + Op0 = LHS; + + // Ensure valid CondCode for Compare Mask against Zero instruction: + // EQ, GE, GT, LE, LT. + if (ISD::SETNE == CC) { + Invert = true; + CC = ISD::SETEQ; + } + + // Using constant type to differentiate integer and FP compares with zero. + Op1 = DAG.getConstant(0, MVT::i32); + Opcode = AArch64ISD::NEON_CMPZ; + + } else { + // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned). + // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT. 
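As a scalar model of the integer cases just introduced: NEON_TST makes a lane all-ones exactly when (op0 & op1) is non-zero in that lane, which is why `icmp ne (and x, y), 0` can select it, and SETNE against zero lowers as an inverted SETEQ. A hedged per-lane sketch (8-bit lanes; helper names are illustrative):

```cpp
#include <cstdint>

// Per-lane model of the integer compare forms above, on 8-bit lanes.
uint8_t cmtst_lane(uint8_t a, uint8_t b) {  // NEON_TST
  return (a & b) != 0 ? 0xff : 0x00;
}
uint8_t cmeqz_lane(uint8_t a) {             // NEON_CMPZ with SETEQ
  return a == 0 ? 0xff : 0x00;
}
uint8_t cmnez_lane(uint8_t a) {             // SETNE = NOT(CMEQ zero), as lowered
  return static_cast<uint8_t>(~cmeqz_lane(a));
}
```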
+ bool Swap = false; + switch (CC) { + default: + llvm_unreachable("Illegal integer comparison."); + case ISD::SETEQ: + case ISD::SETGT: + case ISD::SETGE: + case ISD::SETUGT: + case ISD::SETUGE: + break; + case ISD::SETNE: + Invert = true; + CC = ISD::SETEQ; + break; + case ISD::SETULT: + case ISD::SETULE: + case ISD::SETLT: + case ISD::SETLE: + Swap = true; + CC = getSetCCSwappedOperands(CC); + } + + if (Swap) + std::swap(LHS, RHS); + + Opcode = AArch64ISD::NEON_CMP; + Op0 = LHS; + Op1 = RHS; + } + + // Generate Compare Mask instr or Compare Mask against Zero instr. + SDValue NeonCmp = + DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); + + if (Invert) + NeonCmp = DAG.getNOT(DL, NeonCmp, VT); + + return NeonCmp; + } + + // Now handle Floating Point cases. + // Attempt to use Vector Floating Point Compare Mask against Zero instruction. + if (ISD::isBuildVectorAllZeros(RHS.getNode()) || + ISD::isBuildVectorAllZeros(LHS.getNode())) { + + // If LHS is the zero value, swap operands and CondCode. + if (ISD::isBuildVectorAllZeros(LHS.getNode())) { + CC = getSetCCSwappedOperands(CC); + Op0 = RHS; + } else + Op0 = LHS; + + // Using constant type to differentiate integer and FP compares with zero. + Op1 = DAG.getConstantFP(0, MVT::f32); + Opcode = AArch64ISD::NEON_CMPZ; + } else { + // Attempt to use Vector Floating Point Compare Mask instruction. + Op0 = LHS; + Op1 = RHS; + Opcode = AArch64ISD::NEON_CMP; + } + + SDValue NeonCmpAlt; + // Some register compares have to be implemented with swapped CC and operands, + // e.g.: OLT implemented as OGT with swapped operands. + bool SwapIfRegArgs = false; + + // Ensure valid CondCode for FP Compare Mask against Zero instruction: + // EQ, GE, GT, LE, LT. + // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT. + switch (CC) { + default: + llvm_unreachable("Illegal FP comparison"); + case ISD::SETUNE: + case ISD::SETNE: + Invert = true; // Fallthrough + case ISD::SETOEQ: + case ISD::SETEQ: + CC = ISD::SETEQ; + break; + case ISD::SETOLT: + case ISD::SETLT: + CC = ISD::SETLT; + SwapIfRegArgs = true; + break; + case ISD::SETOGT: + case ISD::SETGT: + CC = ISD::SETGT; + break; + case ISD::SETOLE: + case ISD::SETLE: + CC = ISD::SETLE; + SwapIfRegArgs = true; + break; + case ISD::SETOGE: + case ISD::SETGE: + CC = ISD::SETGE; + break; + case ISD::SETUGE: + Invert = true; + CC = ISD::SETLT; + SwapIfRegArgs = true; + break; + case ISD::SETULE: + Invert = true; + CC = ISD::SETGT; + break; + case ISD::SETUGT: + Invert = true; + CC = ISD::SETLE; + SwapIfRegArgs = true; + break; + case ISD::SETULT: + Invert = true; + CC = ISD::SETGE; + break; + case ISD::SETUEQ: + Invert = true; // Fallthrough + case ISD::SETONE: + // Expand this to (OGT |OLT). + NeonCmpAlt = + DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT)); + CC = ISD::SETLT; + SwapIfRegArgs = true; + break; + case ISD::SETUO: + Invert = true; // Fallthrough + case ISD::SETO: + // Expand this to (OGE | OLT). 
+ NeonCmpAlt = + DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE)); + CC = ISD::SETLT; + SwapIfRegArgs = true; + break; + } + + if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) { + CC = getSetCCSwappedOperands(CC); + std::swap(Op0, Op1); + } + + // Generate FP Compare Mask instr or FP Compare Mask against Zero instr + SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); + + if (NeonCmpAlt.getNode()) + NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt); + + if (Invert) + NeonCmp = DAG.getNOT(DL, NeonCmp, VT); + + return NeonCmp; +} + // (SETCC lhs, rhs, condcode) SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { @@ -2242,6 +2503,9 @@ AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); EVT VT = Op.getValueType(); + if (VT.isVector()) + return LowerVectorSETCC(Op, DAG); + if (LHS.getValueType() == MVT::f128) { // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS // for the rest of the function (some i32 or i64 values). @@ -2398,11 +2662,155 @@ AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::SETCC: return LowerSETCC(Op, DAG); case ISD::VACOPY: return LowerVACOPY(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG); + case ISD::BUILD_VECTOR: + return LowerBUILD_VECTOR(Op, DAG, getSubtarget()); } return SDValue(); } +/// Check if the specified splat value corresponds to a valid vector constant +/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If +/// so, return the encoded 8-bit immediate and the OpCmode instruction fields +/// values. +static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, + unsigned SplatBitSize, SelectionDAG &DAG, + bool is128Bits, NeonModImmType type, EVT &VT, + unsigned &Imm, unsigned &OpCmode) { + switch (SplatBitSize) { + default: + llvm_unreachable("unexpected size for isNeonModifiedImm"); + case 8: { + if (type != Neon_Mov_Imm) + return false; + assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); + // Neon movi per byte: Op=0, Cmode=1110. + OpCmode = 0xe; + Imm = SplatBits; + VT = is128Bits ? MVT::v16i8 : MVT::v8i8; + break; + } + case 16: { + // Neon move inst per halfword + VT = is128Bits ? MVT::v8i16 : MVT::v4i16; + if ((SplatBits & ~0xff) == 0) { + // Value = 0x00nn is 0x00nn LSL 0 + // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000 + // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001 + // Op=x, Cmode=100y + Imm = SplatBits; + OpCmode = 0x8; + break; + } + if ((SplatBits & ~0xff00) == 0) { + // Value = 0xnn00 is 0x00nn LSL 8 + // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010 + // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011 + // Op=x, Cmode=101x + Imm = SplatBits >> 8; + OpCmode = 0xa; + break; + } + // can't handle any other + return false; + } + + case 32: { + // First the LSL variants (MSL is unusable by some interested instructions). + + // Neon move instr per word, shift zeros + VT = is128Bits ? 
MVT::v4i32 : MVT::v2i32; + if ((SplatBits & ~0xff) == 0) { + // Value = 0x000000nn is 0x000000nn LSL 0 + // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000 + // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001 + // Op=x, Cmode=000x + Imm = SplatBits; + OpCmode = 0; + break; + } + if ((SplatBits & ~0xff00) == 0) { + // Value = 0x0000nn00 is 0x000000nn LSL 8 + // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010 + // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011 + // Op=x, Cmode=001x + Imm = SplatBits >> 8; + OpCmode = 0x2; + break; + } + if ((SplatBits & ~0xff0000) == 0) { + // Value = 0x00nn0000 is 0x000000nn LSL 16 + // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100 + // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101 + // Op=x, Cmode=010x + Imm = SplatBits >> 16; + OpCmode = 0x4; + break; + } + if ((SplatBits & ~0xff000000) == 0) { + // Value = 0xnn000000 is 0x000000nn LSL 24 + // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110 + // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111 + // Op=x, Cmode=011x + Imm = SplatBits >> 24; + OpCmode = 0x6; + break; + } + + // Now the MSL immediates. + + // Neon move instr per word, shift ones + if ((SplatBits & ~0xffff) == 0 && + ((SplatBits | SplatUndef) & 0xff) == 0xff) { + // Value = 0x0000nnff is 0x000000nn MSL 8 + // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100 + // Op=x, Cmode=1100 + Imm = SplatBits >> 8; + OpCmode = 0xc; + break; + } + if ((SplatBits & ~0xffffff) == 0 && + ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { + // Value = 0x00nnffff is 0x000000nn MSL 16 + // movi: Op=1, Cmode= 1101; mvni: Op=1, Cmode= 1101 + // Op=x, Cmode=1101 + Imm = SplatBits >> 16; + OpCmode = 0xd; + break; + } + // can't handle any other + return false; + } + + case 64: { + if (type != Neon_Mov_Imm) + return false; + // Neon move instr bytemask, where each byte is either 0x00 or 0xff. + // movi Op=1, Cmode=1110. + OpCmode = 0x1e; + uint64_t BitMask = 0xff; + uint64_t Val = 0; + unsigned ImmMask = 1; + Imm = 0; + for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { + if (((SplatBits | SplatUndef) & BitMask) == BitMask) { + Val |= BitMask; + Imm |= ImmMask; + } else if ((SplatBits & BitMask) != 0) { + return false; + } + BitMask <<= 8; + ImmMask <<= 1; + } + SplatBits = Val; + VT = is128Bits ? MVT::v2i64 : MVT::v1i64; + break; + } + } + + return true; +} + static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { @@ -2728,6 +3136,7 @@ static SDValue PerformORCombine(SDNode *N, const AArch64Subtarget *Subtarget) { SelectionDAG &DAG = DCI.DAG; + SDLoc DL(N); EVT VT = N->getValueType(0); if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) @@ -2748,6 +3157,44 @@ static SDValue PerformORCombine(SDNode *N, if (Res.getNode()) return Res; + if (!Subtarget->hasNEON()) + return SDValue(); + + // Attempt to use vector immediate-form BSL + // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 
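(The rewrite named in the comment above is plain bit arithmetic: every result bit takes B where the constant mask A has a 1 and C where A has a 0. A one-line scalar model, names illustrative:)

```cpp
#include <cstdint>

// (B & A) | (C & ~A): selects B where mask A is 1, C where A is 0.
uint64_t bsl_model(uint64_t a, uint64_t b, uint64_t c) {
  return (b & a) | (c & ~a);
}
```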
+ + SDValue N0 = N->getOperand(0); + if (N0.getOpcode() != ISD::AND) + return SDValue(); + + SDValue N1 = N->getOperand(1); + if (N1.getOpcode() != ISD::AND) + return SDValue(); + + if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) { + APInt SplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); + APInt SplatBits0; + if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, + HasAnyUndefs) && + !HasAnyUndefs) { + BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); + APInt SplatBits1; + if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, + HasAnyUndefs) && + !HasAnyUndefs && SplatBits0 == ~SplatBits1) { + // Canonicalize the vector type to make instruction selection simpler. + EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8; + SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT, + N0->getOperand(1), N0->getOperand(0), + N1->getOperand(0)); + return DAG.getNode(ISD::BITCAST, DL, VT, Result); + } + } + } + return SDValue(); } @@ -2801,6 +3248,97 @@ AArch64TargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); } +bool +AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { + VT = VT.getScalarType(); + + if (!VT.isSimple()) + return false; + + switch (VT.getSimpleVT().SimpleTy) { + case MVT::f16: + case MVT::f32: + case MVT::f64: + return true; + case MVT::f128: + return false; + default: + break; + } + + return false; +} + +// If this is a case we can't handle, return null and let the default +// expansion code take care of it. +SDValue +AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, + const AArch64Subtarget *ST) const { + + BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); + SDLoc DL(Op); + EVT VT = Op.getValueType(); + + APInt SplatBits, SplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + + // Note we favor lowering MOVI over MVNI. + // This has implications on the definition of patterns in TableGen to select + // BIC immediate instructions but not ORR immediate instructions. + // If this lowering order is changed, TableGen patterns for BIC immediate and + // ORR immediate instructions have to be updated. 
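Before following the splat into isNeonModifiedImm below, the halfword case is worth seeing in isolation: a 16-bit splat is encodable only if one of its bytes is zero, and OpCmode selects the shift. A reduced sketch of just that case; the function name is illustrative, but the constants match the comment tables in the hunk above:

```cpp
#include <cstdint>

// Reduced model of the SplatBitSize == 16 case of isNeonModifiedImm:
//   0x00nn -> Imm = nn, OpCmode = 0b1000 (LSL 0)
//   0xnn00 -> Imm = nn, OpCmode = 0b1010 (LSL 8)
// A halfword with both bytes non-zero is not encodable.
bool encodeHalfwordSplat(uint16_t Splat, unsigned &Imm, unsigned &OpCmode) {
  if ((Splat & ~0x00ffu) == 0) { Imm = Splat;      OpCmode = 0x8; return true; }
  if ((Splat & ~0xff00u) == 0) { Imm = Splat >> 8; OpCmode = 0xa; return true; }
  return false;
}
```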
+ if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { + if (SplatBitSize <= 64) { + // First attempt to use vector immediate-form MOVI + EVT NeonMovVT; + unsigned Imm = 0; + unsigned OpCmode = 0; + + if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), + SplatBitSize, DAG, VT.is128BitVector(), + Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) { + SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); + SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); + + if (ImmVal.getNode() && OpCmodeVal.getNode()) { + SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT, + ImmVal, OpCmodeVal); + return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); + } + } + + // Then attempt to use vector immediate-form MVNI + uint64_t NegatedImm = (~SplatBits).getZExtValue(); + if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, + DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT, + Imm, OpCmode)) { + SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); + SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); + if (ImmVal.getNode() && OpCmodeVal.getNode()) { + SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT, + ImmVal, OpCmodeVal); + return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); + } + } + + // Attempt to use vector immediate-form FMOV + if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) || + (VT == MVT::v2f64 && SplatBitSize == 64)) { + APFloat RealVal( + SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble, + SplatBits); + uint32_t ImmVal; + if (A64Imms::isFPImm(RealVal, ImmVal)) { + SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); + return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val); + } + } + } + } + return SDValue(); +} + AArch64TargetLowering::ConstraintType AArch64TargetLowering::getConstraintType(const std::string &Constraint) const { if (Constraint.size() == 1) { @@ -2932,7 +3470,7 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op, std::pair<unsigned, const TargetRegisterClass*> AArch64TargetLowering::getRegForInlineAsmConstraint( const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { case 'r': diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h index edef68b..67a908e 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.h +++ b/lib/Target/AArch64/AArch64ISelLowering.h @@ -111,7 +111,28 @@ namespace AArch64ISD { // created using the small memory model style: i.e. adrp/add or // adrp/mem-op. This exists to prevent bare TargetAddresses which may never // get selected. - WrapperSmall + WrapperSmall, + + // Vector bitwise select + NEON_BSL, + + // Vector move immediate + NEON_MOVIMM, + + // Vector Move Inverted Immediate + NEON_MVNIMM, + + // Vector FP move immediate + NEON_FMOVIMM, + + // Vector compare + NEON_CMP, + + // Vector compare zero + NEON_CMPZ, + + // Vector compare bitwise test + NEON_TST }; } @@ -148,9 +169,11 @@ public: SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; - void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, - SDLoc DL, SDValue &Chain) const; + SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, + const AArch64Subtarget *ST) const; + void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL, + SDValue &Chain) const; /// IsEligibleForTailCallOptimization - Check whether the call is eligible /// for tail call optimization. 
Targets which want to do tail call @@ -229,11 +252,11 @@ public: virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; - /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than - /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to - /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd - /// is expanded to mul + add. - virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; } + /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster + /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be + /// expanded to FMAs when this method returns true, otherwise fmuladd is + /// expanded to fmul + fadd. + virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const; ConstraintType getConstraintType(const std::string &Constraint) const; @@ -245,7 +268,7 @@ public: SelectionDAG &DAG) const; std::pair<unsigned, const TargetRegisterClass*> - getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const; + getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const; private: const InstrItineraryData *Itins; @@ -253,6 +276,10 @@ private: return &getTargetMachine().getSubtarget<AArch64Subtarget>(); } }; +enum NeonModImmType { + Neon_Mov_Imm, + Neon_Mvn_Imm +}; } // namespace llvm #endif // LLVM_TARGET_AARCH64_ISELLOWERING_H diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td index 9dd122f..09451fd 100644 --- a/lib/Target/AArch64/AArch64InstrFormats.td +++ b/lib/Target/AArch64/AArch64InstrFormats.td @@ -959,3 +959,96 @@ class A64I_Breg<bits<4> opc, bits<5> op2, bits<6> op3, bits<5> op4, let Inst{4-0} = op4; } + +//===----------------------------------------------------------------------===// +// +// Neon Instruction Format Definitions. 
+// + +let Predicates = [HasNEON] in { + +class NeonInstAlias<string Asm, dag Result, bit Emit = 0b1> + : InstAlias<Asm, Result, Emit> { +} + +// Format AdvSIMD 3 vector registers with same vector type +class NeonI_3VSame<bit q, bit u, bits<2> size, bits<5> opcode, + dag outs, dag ins, string asmstr, + list<dag> patterns, InstrItinClass itin> + : A64InstRdnm<outs, ins, asmstr, patterns, itin> +{ + let Inst{31} = 0b0; + let Inst{30} = q; + let Inst{29} = u; + let Inst{28-24} = 0b01110; + let Inst{23-22} = size; + let Inst{21} = 0b1; + // Inherit Rm in 20-16 + let Inst{15-11} = opcode; + let Inst{10} = 0b1; + // Inherit Rn in 9-5 + // Inherit Rd in 4-0 +} + +// Format AdvSIMD 1 vector register with modified immediate +class NeonI_1VModImm<bit q, bit op, + dag outs, dag ins, string asmstr, + list<dag> patterns, InstrItinClass itin> + : A64InstRd<outs,ins, asmstr, patterns, itin> +{ + bits<8> Imm; + bits<4> cmode; + let Inst{31} = 0b0; + let Inst{30} = q; + let Inst{29} = op; + let Inst{28-19} = 0b0111100000; + let Inst{15-12} = cmode; + let Inst{11} = 0b0; // o2 + let Inst{10} = 1; + // Inherit Rd in 4-0 + let Inst{18-16} = Imm{7-5}; // imm a:b:c + let Inst{9-5} = Imm{4-0}; // imm d:e:f:g:h +} + +// Format AdvSIMD 3 scalar registers with same type + +class NeonI_Scalar3Same<bit u, bits<2> size, bits<5> opcode, + dag outs, dag ins, string asmstr, + list<dag> patterns, InstrItinClass itin> + : A64InstRdnm<outs, ins, asmstr, patterns, itin> +{ + let Inst{31} = 0b0; + let Inst{30} = 0b1; + let Inst{29} = u; + let Inst{28-24} = 0b11110; + let Inst{23-22} = size; + let Inst{21} = 0b1; + // Inherit Rm in 20-16 + let Inst{15-11} = opcode; + let Inst{10} = 0b1; + // Inherit Rn in 9-5 + // Inherit Rd in 4-0 +} + + +// Format AdvSIMD 2 vector registers miscellaneous +class NeonI_2VMisc<bit q, bit u, bits<2> size, bits<5> opcode, + dag outs, dag ins, string asmstr, + list<dag> patterns, InstrItinClass itin> + : A64InstRdn<outs, ins, asmstr, patterns, itin> +{ + let Inst{31} = 0b0; + let Inst{30} = q; + let Inst{29} = u; + let Inst{28-24} = 0b01110; + let Inst{23-22} = size; + let Inst{21-17} = 0b10000; + let Inst{16-12} = opcode; + let Inst{11-10} = 0b10; + + // Inherit Rn in 9-5 + // Inherit Rd in 4-0 +} + +} + diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp index f90bcef..d8f45eb 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -116,17 +116,6 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB, .addImm(0); } -MachineInstr * -AArch64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx, - uint64_t Offset, const MDNode *MDPtr, - DebugLoc DL) const { - MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE)) - .addFrameIndex(FrameIx).addImm(0) - .addImm(Offset) - .addMetadata(MDPtr); - return &*MIB; -} - /// Does the Opcode represent a conditional branch that we can remove and re-add /// at the end of a basic block? 
static bool isCondBranch(unsigned Opc) { diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h index 22a2ab4..620ecc9 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.h +++ b/lib/Target/AArch64/AArch64InstrInfo.h @@ -43,10 +43,6 @@ public: unsigned DestReg, unsigned SrcReg, bool KillSrc) const; - MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx, - uint64_t Offset, const MDNode *MDPtr, - DebugLoc DL) const; - void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td index d2cfc7d..07289b0 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.td +++ b/lib/Target/AArch64/AArch64InstrInfo.td @@ -11,6 +11,17 @@ // //===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// ARM Instruction Predicate Definitions. +// +def HasNEON : Predicate<"Subtarget->hasNEON()">, + AssemblerPredicate<"FeatureNEON", "neon">; +def HasCrypto : Predicate<"Subtarget->hasCrypto()">, + AssemblerPredicate<"FeatureCrypto","crypto">; + +// Use fused MAC if more precision in FP computation is allowed. +def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion ==" + " FPOpFusion::Fast)">; include "AArch64InstrFormats.td" //===----------------------------------------------------------------------===// @@ -2173,6 +2184,29 @@ def FMSUBdddd : A64I_fpdp3Impl<"fmsub", FPR64, f64, 0b01, 0b0, 0b1, fmsub>; def FNMADDdddd : A64I_fpdp3Impl<"fnmadd", FPR64, f64, 0b01, 0b1, 0b0, fnmadd>; def FNMSUBdddd : A64I_fpdp3Impl<"fnmsub", FPR64, f64, 0b01, 0b1, 0b1, fnmsub>; +// Extra patterns for when we're allowed to optimise separate multiplication and +// addition. 
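These extra patterns apply only under FPOpFusion::Fast, i.e. when the compiler may contract a separate multiply and add into a single fused operation with one rounding. At the source level the distinction looks roughly like this (a hedged C++ illustration; flag spellings vary by frontend):

```cpp
#include <cmath>

// Under FPOpFusion::Fast (e.g. clang's -ffp-contract=fast) the backend may
// select a fused multiply-add such as FMADDssss here: one rounding, not two.
float mul_add(float a, float b, float c) { return a * b + c; }

// std::fma always requests the fused operation, independent of fusion mode.
float mul_add_fused(float a, float b, float c) { return std::fma(a, b, c); }
```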
+let Predicates = [UseFusedMAC] in { +def : Pat<(fadd FPR32:$Ra, (fmul FPR32:$Rn, FPR32:$Rm)), + (FMADDssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>; +def : Pat<(fsub FPR32:$Ra, (fmul FPR32:$Rn, FPR32:$Rm)), + (FMSUBssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>; +def : Pat<(fsub (fmul FPR32:$Rn, FPR32:$Rm), FPR32:$Ra), + (FNMADDssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>; +def : Pat<(fsub (fneg FPR32:$Ra), (fmul FPR32:$Rn, FPR32:$Rm)), + (FNMSUBssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>; + +def : Pat<(fadd FPR64:$Ra, (fmul FPR64:$Rn, FPR64:$Rm)), + (FMADDdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>; +def : Pat<(fsub FPR64:$Ra, (fmul FPR64:$Rn, FPR64:$Rm)), + (FMSUBdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>; +def : Pat<(fsub (fmul FPR64:$Rn, FPR64:$Rm), FPR64:$Ra), + (FNMADDdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>; +def : Pat<(fsub (fneg FPR64:$Ra), (fmul FPR64:$Rn, FPR64:$Rm)), + (FNMSUBdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>; +} + + //===----------------------------------------------------------------------===// // Floating-point <-> fixed-point conversion instructions //===----------------------------------------------------------------------===// @@ -3974,14 +4008,17 @@ def : movalias<MOVZxii, GPR64, movz64_movimm>; def : movalias<MOVNwii, GPR32, movn32_movimm>; def : movalias<MOVNxii, GPR64, movn64_movimm>; -def movw_addressref : ComplexPattern<i64, 2, "SelectMOVWAddressRef">; +def movw_addressref_g0 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<0>">; +def movw_addressref_g1 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<1>">; +def movw_addressref_g2 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<2>">; +def movw_addressref_g3 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<3>">; -def : Pat<(A64WrapperLarge movw_addressref:$G3, movw_addressref:$G2, - movw_addressref:$G1, movw_addressref:$G0), - (MOVKxii (MOVKxii (MOVKxii (MOVZxii movw_addressref:$G3), - movw_addressref:$G2), - movw_addressref:$G1), - movw_addressref:$G0)>; +def : Pat<(A64WrapperLarge movw_addressref_g3:$G3, movw_addressref_g2:$G2, + movw_addressref_g1:$G1, movw_addressref_g0:$G0), + (MOVKxii (MOVKxii (MOVKxii (MOVZxii movw_addressref_g3:$G3), + movw_addressref_g2:$G2), + movw_addressref_g1:$G1), + movw_addressref_g0:$G0)>; //===----------------------------------------------------------------------===// // PC-relative addressing instructions @@ -5120,3 +5157,9 @@ defm : regoff_pats<"Xm", (add i64:$Rn, i64:$Rm), defm : regoff_pats<"Xm", (add i64:$Rn, (shl i64:$Rm, SHIFT)), (i64 i64:$Rn), (i64 i64:$Rm), (i64 3)>; + +//===----------------------------------------------------------------------===// +// Advanced SIMD (NEON) Support +// + +include "AArch64InstrNEON.td"
\ No newline at end of file diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td new file mode 100644 index 0000000..98b9e3e --- /dev/null +++ b/lib/Target/AArch64/AArch64InstrNEON.td @@ -0,0 +1,1634 @@ +//===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the AArch64 NEON instruction set. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// NEON-specific DAG Nodes. +//===----------------------------------------------------------------------===// +def Neon_bsl : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3, + [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, + SDTCisSameAs<0, 3>]>>; + +// (outs Result), (ins Imm, OpCmode) +def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>; + +def Neon_movi : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>; + +def Neon_mvni : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>; + +// (outs Result), (ins Imm) +def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1, + [SDTCisVec<0>, SDTCisVT<1, i32>]>>; + +// (outs Result), (ins LHS, RHS, CondCode) +def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3, + [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>; + +// (outs Result), (ins LHS, 0/0.0 constant, CondCode) +def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3, + [SDTCisVec<0>, SDTCisVec<1>]>>; + +// (outs Result), (ins LHS, RHS) +def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2, + [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>; + +//===----------------------------------------------------------------------===// +// Multiclasses +//===----------------------------------------------------------------------===// + +multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size, bits<5> opcode, + string asmop, SDPatternOperator opnode8B, + SDPatternOperator opnode16B, + bit Commutable = 0> +{ + let isCommutable = Commutable in { + def _8B : NeonI_3VSame<0b0, u, size, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm), + asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b", + [(set (v8i8 VPR64:$Rd), + (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))], + NoItinerary>; + + def _16B : NeonI_3VSame<0b1, u, size, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm), + asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b", + [(set (v16i8 VPR128:$Rd), + (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))], + NoItinerary>; + } + +} + +multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode, + string asmop, SDPatternOperator opnode, + bit Commutable = 0> +{ + let isCommutable = Commutable in { + def _4H : NeonI_3VSame<0b0, u, 0b01, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm), + asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h", + [(set (v4i16 VPR64:$Rd), + (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))], + NoItinerary>; + + def _8H : NeonI_3VSame<0b1, u, 0b01, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm), + asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h", + [(set (v8i16 VPR128:$Rd), + (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))], + NoItinerary>; + + def _2S : NeonI_3VSame<0b0, u, 0b10, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm), + asmop # 
"\t$Rd.2s, $Rn.2s, $Rm.2s", + [(set (v2i32 VPR64:$Rd), + (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))], + NoItinerary>; + + def _4S : NeonI_3VSame<0b1, u, 0b10, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm), + asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s", + [(set (v4i32 VPR128:$Rd), + (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))], + NoItinerary>; + } +} +multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode, + string asmop, SDPatternOperator opnode, + bit Commutable = 0> + : NeonI_3VSame_HS_sizes<u, opcode, asmop, opnode, Commutable> +{ + let isCommutable = Commutable in { + def _8B : NeonI_3VSame<0b0, u, 0b00, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm), + asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b", + [(set (v8i8 VPR64:$Rd), + (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))], + NoItinerary>; + + def _16B : NeonI_3VSame<0b1, u, 0b00, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm), + asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b", + [(set (v16i8 VPR128:$Rd), + (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))], + NoItinerary>; + } +} + +multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode, + string asmop, SDPatternOperator opnode, + bit Commutable = 0> + : NeonI_3VSame_BHS_sizes<u, opcode, asmop, opnode, Commutable> +{ + let isCommutable = Commutable in { + def _2D : NeonI_3VSame<0b1, u, 0b11, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm), + asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d", + [(set (v2i64 VPR128:$Rd), + (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))], + NoItinerary>; + } +} + +// Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types, +// but Result types can be integer or floating point types. +multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode, + string asmop, SDPatternOperator opnode2S, + SDPatternOperator opnode4S, + SDPatternOperator opnode2D, + ValueType ResTy2S, ValueType ResTy4S, + ValueType ResTy2D, bit Commutable = 0> +{ + let isCommutable = Commutable in { + def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm), + asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s", + [(set (ResTy2S VPR64:$Rd), + (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))], + NoItinerary>; + + def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm), + asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s", + [(set (ResTy4S VPR128:$Rd), + (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))], + NoItinerary>; + + def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm), + asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d", + [(set (ResTy2D VPR128:$Rd), + (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))], + NoItinerary>; + } +} + +//===----------------------------------------------------------------------===// +// Instruction Definitions +//===----------------------------------------------------------------------===// + +// Vector Arithmetic Instructions + +// Vector Add (Integer and Floating-Point) + +defm ADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>; +defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd, + v2f32, v4f32, v2f64, 1>; + +// Vector Sub (Integer and Floating-Point) + +defm SUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>; +defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub, + v2f32, v4f32, v2f64, 0>; + +// Vector Multiply (Integer and Floating-Point) + +defm MULvvv : NeonI_3VSame_BHS_sizes<0b0, 
0b10011, "mul", mul, 1>; +defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul, + v2f32, v4f32, v2f64, 1>; + +// Vector Multiply (Polynomial) + +defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul", + int_arm_neon_vmulp, int_arm_neon_vmulp, 1>; + +// Vector Multiply-accumulate and Multiply-subtract (Integer) + +// class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and +// two operands constraints. +class NeonI_3VSame_Constraint_impl<string asmop, string asmlane, + RegisterClass VPRC, ValueType OpTy, bit q, bit u, bits<2> size, bits<5> opcode, + SDPatternOperator opnode> + : NeonI_3VSame<q, u, size, opcode, + (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm), + asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane, + [(set (OpTy VPRC:$Rd), + (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))], + NoItinerary> { + let Constraints = "$src = $Rd"; +} + +def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm), + (add node:$Ra, (mul node:$Rn, node:$Rm))>; + +def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm), + (sub node:$Ra, (mul node:$Rn, node:$Rm))>; + + +def MLAvvv_8B: NeonI_3VSame_Constraint_impl<"mla", ".8b", VPR64, v8i8, + 0b0, 0b0, 0b00, 0b10010, Neon_mla>; +def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8, + 0b1, 0b0, 0b00, 0b10010, Neon_mla>; +def MLAvvv_4H: NeonI_3VSame_Constraint_impl<"mla", ".4h", VPR64, v4i16, + 0b0, 0b0, 0b01, 0b10010, Neon_mla>; +def MLAvvv_8H: NeonI_3VSame_Constraint_impl<"mla", ".8h", VPR128, v8i16, + 0b1, 0b0, 0b01, 0b10010, Neon_mla>; +def MLAvvv_2S: NeonI_3VSame_Constraint_impl<"mla", ".2s", VPR64, v2i32, + 0b0, 0b0, 0b10, 0b10010, Neon_mla>; +def MLAvvv_4S: NeonI_3VSame_Constraint_impl<"mla", ".4s", VPR128, v4i32, + 0b1, 0b0, 0b10, 0b10010, Neon_mla>; + +def MLSvvv_8B: NeonI_3VSame_Constraint_impl<"mls", ".8b", VPR64, v8i8, + 0b0, 0b1, 0b00, 0b10010, Neon_mls>; +def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8, + 0b1, 0b1, 0b00, 0b10010, Neon_mls>; +def MLSvvv_4H: NeonI_3VSame_Constraint_impl<"mls", ".4h", VPR64, v4i16, + 0b0, 0b1, 0b01, 0b10010, Neon_mls>; +def MLSvvv_8H: NeonI_3VSame_Constraint_impl<"mls", ".8h", VPR128, v8i16, + 0b1, 0b1, 0b01, 0b10010, Neon_mls>; +def MLSvvv_2S: NeonI_3VSame_Constraint_impl<"mls", ".2s", VPR64, v2i32, + 0b0, 0b1, 0b10, 0b10010, Neon_mls>; +def MLSvvv_4S: NeonI_3VSame_Constraint_impl<"mls", ".4s", VPR128, v4i32, + 0b1, 0b1, 0b10, 0b10010, Neon_mls>; + +// Vector Multiply-accumulate and Multiply-subtract (Floating Point) + +def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm), + (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>; + +def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm), + (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>; + +let Predicates = [HasNEON, UseFusedMAC] in { +def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s", VPR64, v2f32, + 0b0, 0b0, 0b00, 0b11001, Neon_fmla>; +def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s", VPR128, v4f32, + 0b1, 0b0, 0b00, 0b11001, Neon_fmla>; +def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d", VPR128, v2f64, + 0b1, 0b0, 0b01, 0b11001, Neon_fmla>; + +def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s", VPR64, v2f32, + 0b0, 0b0, 0b10, 0b11001, Neon_fmls>; +def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s", VPR128, v4f32, + 0b1, 0b0, 0b10, 0b11001, Neon_fmls>; +def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d", VPR128, v2f64, + 0b1, 0b0, 0b11, 0b11001, Neon_fmls>; +} + +// We're also 
allowed to match the fma instruction regardless of compile +// options. +def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)), + (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>; +def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)), + (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>; +def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)), + (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>; + +def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)), + (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>; +def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)), + (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>; +def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)), + (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>; + +// Vector Divide (Floating-Point) + +defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv, + v2f32, v4f32, v2f64, 0>; + +// Vector Bitwise Operations + +// Vector Bitwise AND + +defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>; + +// Vector Bitwise Exclusive OR + +defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>; + +// Vector Bitwise OR + +defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>; + +// ORR disassembled as MOV if Vn==Vm + +// Vector Move - register +// Alias for ORR if Vn=Vm and it is the preferred syntax +def : NeonInstAlias<"mov $Rd.8b, $Rn.8b", + (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn)>; +def : NeonInstAlias<"mov $Rd.16b, $Rn.16b", + (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn)>; + +def Neon_immAllOnes: PatLeaf<(Neon_movi (i32 timm), (i32 imm)), [{ + ConstantSDNode *ImmConstVal = cast<ConstantSDNode>(N->getOperand(0)); + ConstantSDNode *OpCmodeConstVal = cast<ConstantSDNode>(N->getOperand(1)); + unsigned EltBits; + uint64_t EltVal = A64Imms::decodeNeonModImm(ImmConstVal->getZExtValue(), + OpCmodeConstVal->getZExtValue(), EltBits); + return (EltBits == 8 && EltVal == 0xff); +}]>; + + +def Neon_not8B : PatFrag<(ops node:$in), + (xor node:$in, (bitconvert (v8i8 Neon_immAllOnes)))>; +def Neon_not16B : PatFrag<(ops node:$in), + (xor node:$in, (bitconvert (v16i8 Neon_immAllOnes)))>; + +def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm), + (or node:$Rn, (Neon_not8B node:$Rm))>; + +def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm), + (or node:$Rn, (Neon_not16B node:$Rm))>; + +def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm), + (and node:$Rn, (Neon_not8B node:$Rm))>; + +def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm), + (and node:$Rn, (Neon_not16B node:$Rm))>; + + +// Vector Bitwise OR NOT - register + +defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn", + Neon_orn8B, Neon_orn16B, 0>; + +// Vector Bitwise Bit Clear (AND NOT) - register + +defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic", + Neon_bic8B, Neon_bic16B, 0>; + +multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B, + SDPatternOperator opnode16B, + Instruction INST8B, + Instruction INST16B> { + def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)), + (INST8B VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)), + (INST8B VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)), + (INST8B VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)), + (INST16B VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)), + (INST16B VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)), + (INST16B VPR128:$Rn, VPR128:$Rm)>; +} + +// 
Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN +defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>; +defm : Neon_bitwise2V_patterns<or, or, ORRvvv_8B, ORRvvv_16B>; +defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>; +defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>; +defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>; + +// Vector Bitwise Select +def BSLvvv_8B : NeonI_3VSame_Constraint_impl<"bsl", ".8b", VPR64, v8i8, + 0b0, 0b1, 0b01, 0b00011, Neon_bsl>; + +def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8, + 0b1, 0b1, 0b01, 0b00011, Neon_bsl>; + +multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode, + Instruction INST8B, + Instruction INST16B> { + // Disassociate type from instruction definition + def : Pat<(v2i32 (opnode VPR64:$src,VPR64:$Rn, VPR64:$Rm)), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + + // Allow to match BSL instruction pattern with non-constant operand + def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd), + (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))), + (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd), + (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))), + (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd), + (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))), + (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd), + (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))), + (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd), + (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))), + (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd), + (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))), + (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd), + (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))), + (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd), + (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))), + (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>; + + // Allow to match llvm.arm.* intrinsics. 
+ def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src), + (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src), + (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src), + (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src), + (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src), + (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))), + (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>; + def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src), + (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src), + (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src), + (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src), + (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src), + (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; + def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src), + (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))), + (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>; +} + +// Additional patterns for bitwise instruction BSL +defm: Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>; + +def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm), + (Neon_bsl node:$src, node:$Rn, node:$Rm), + [{ (void)N; return false; }]>; + +// Vector Bitwise Insert if True + +def BITvvv_8B : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64, v8i8, + 0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>; +def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8, + 0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>; + +// Vector Bitwise Insert if False + +def BIFvvv_8B : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64, v8i8, + 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>; +def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8, + 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>; + +// Vector Absolute Difference and Accumulate (Signed, Unsigned) + +def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm), + (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>; +def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm), + (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>; + +// Vector Absolute Difference and Accumulate (Unsigned) +def UABAvvv_8B : NeonI_3VSame_Constraint_impl<"uaba", ".8b", VPR64, v8i8, + 0b0, 0b1, 0b00, 0b01111, Neon_uaba>; +def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8, + 0b1, 0b1, 0b00, 0b01111, Neon_uaba>; +def UABAvvv_4H : NeonI_3VSame_Constraint_impl<"uaba", ".4h", VPR64, v4i16, + 0b0, 0b1, 0b01, 0b01111, Neon_uaba>; +def UABAvvv_8H : NeonI_3VSame_Constraint_impl<"uaba", ".8h", VPR128, v8i16, + 0b1, 0b1, 0b01, 0b01111, Neon_uaba>; +def UABAvvv_2S : NeonI_3VSame_Constraint_impl<"uaba", ".2s", VPR64, v2i32, + 0b0, 0b1, 0b10, 0b01111, Neon_uaba>; +def UABAvvv_4S : NeonI_3VSame_Constraint_impl<"uaba", ".4s", VPR128, v4i32, + 0b1, 0b1, 0b10, 0b01111, Neon_uaba>; + +// Vector Absolute Difference 
and Accumulate (Signed) +def SABAvvv_8B : NeonI_3VSame_Constraint_impl<"saba", ".8b", VPR64, v8i8, + 0b0, 0b0, 0b00, 0b01111, Neon_saba>; +def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8, + 0b1, 0b0, 0b00, 0b01111, Neon_saba>; +def SABAvvv_4H : NeonI_3VSame_Constraint_impl<"saba", ".4h", VPR64, v4i16, + 0b0, 0b0, 0b01, 0b01111, Neon_saba>; +def SABAvvv_8H : NeonI_3VSame_Constraint_impl<"saba", ".8h", VPR128, v8i16, + 0b1, 0b0, 0b01, 0b01111, Neon_saba>; +def SABAvvv_2S : NeonI_3VSame_Constraint_impl<"saba", ".2s", VPR64, v2i32, + 0b0, 0b0, 0b10, 0b01111, Neon_saba>; +def SABAvvv_4S : NeonI_3VSame_Constraint_impl<"saba", ".4s", VPR128, v4i32, + 0b1, 0b0, 0b10, 0b01111, Neon_saba>; + + +// Vector Absolute Difference (Signed, Unsigned) +defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>; +defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>; + +// Vector Absolute Difference (Floating Point) +defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd", + int_arm_neon_vabds, int_arm_neon_vabds, + int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>; + +// Vector Reciprocal Step (Floating Point) +defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps", + int_arm_neon_vrecps, int_arm_neon_vrecps, + int_arm_neon_vrecps, + v2f32, v4f32, v2f64, 0>; + +// Vector Reciprocal Square Root Step (Floating Point) +defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", + int_arm_neon_vrsqrts, + int_arm_neon_vrsqrts, + int_arm_neon_vrsqrts, + v2f32, v4f32, v2f64, 0>; + +// Vector Comparisons + +def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs), + (Neon_cmp node:$lhs, node:$rhs, SETEQ)>; +def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs), + (Neon_cmp node:$lhs, node:$rhs, SETUGE)>; +def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs), + (Neon_cmp node:$lhs, node:$rhs, SETGE)>; +def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs), + (Neon_cmp node:$lhs, node:$rhs, SETUGT)>; +def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs), + (Neon_cmp node:$lhs, node:$rhs, SETGT)>; + +// NeonI_compare_aliases class: swaps register operands to implement +// comparison aliases, e.g., CMLE is alias for CMGE with operands reversed. +class NeonI_compare_aliases<string asmop, string asmlane, + Instruction inst, RegisterClass VPRC> + : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane # + ", $Rm" # asmlane, + (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>; + +// Vector Comparisons (Integer) + +// Vector Compare Mask Equal (Integer) +let isCommutable =1 in { +defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>; +} + +// Vector Compare Mask Higher or Same (Unsigned Integer) +defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>; + +// Vector Compare Mask Greater Than or Equal (Integer) +defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>; + +// Vector Compare Mask Higher (Unsigned Integer) +defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>; + +// Vector Compare Mask Greater Than (Integer) +defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>; + +// Vector Compare Mask Bitwise Test (Integer) +defm CMTSTvvv: NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>; + +// Vector Compare Mask Less or Same (Unsigned Integer) +// CMLS is alias for CMHS with operands reversed. 
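The NeonI_compare_aliases class implements the "reversed" comparisons purely at the assembly level: CMLS Rd, Rn, Rm is accepted and printed, but encodes as CMHS Rd, Rm, Rn. A minimal lane-wise reference model in C++ (hypothetical helper names, not backend code) shows why swapping the source operands is sufficient; the alias definitions for CMLS follow, and the same swap covers CMLE, CMLO, and CMLT.

    #include <cstdint>
    #include <cstdio>

    // All-ones/all-zeros mask per lane, as the CM* instructions produce.
    static uint8_t cmhs_lane(uint8_t rn, uint8_t rm) {  // unsigned >=
      return rn >= rm ? 0xff : 0x00;
    }
    // CMLS (unsigned <=) is CMHS with the source operands swapped.
    static uint8_t cmls_lane(uint8_t rn, uint8_t rm) {
      return cmhs_lane(rm, rn);
    }

    int main() {
      printf("%02x %02x\n", cmls_lane(1, 2), cmls_lane(3, 2)); // ff 00
      return 0;
    }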
+def CMLSvvv_8B : NeonI_compare_aliases<"cmls", ".8b", CMHSvvv_8B, VPR64>; +def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>; +def CMLSvvv_4H : NeonI_compare_aliases<"cmls", ".4h", CMHSvvv_4H, VPR64>; +def CMLSvvv_8H : NeonI_compare_aliases<"cmls", ".8h", CMHSvvv_8H, VPR128>; +def CMLSvvv_2S : NeonI_compare_aliases<"cmls", ".2s", CMHSvvv_2S, VPR64>; +def CMLSvvv_4S : NeonI_compare_aliases<"cmls", ".4s", CMHSvvv_4S, VPR128>; +def CMLSvvv_2D : NeonI_compare_aliases<"cmls", ".2d", CMHSvvv_2D, VPR128>; + +// Vector Compare Mask Less Than or Equal (Integer) +// CMLE is alias for CMGE with operands reversed. +def CMLEvvv_8B : NeonI_compare_aliases<"cmle", ".8b", CMGEvvv_8B, VPR64>; +def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>; +def CMLEvvv_4H : NeonI_compare_aliases<"cmle", ".4h", CMGEvvv_4H, VPR64>; +def CMLEvvv_8H : NeonI_compare_aliases<"cmle", ".8h", CMGEvvv_8H, VPR128>; +def CMLEvvv_2S : NeonI_compare_aliases<"cmle", ".2s", CMGEvvv_2S, VPR64>; +def CMLEvvv_4S : NeonI_compare_aliases<"cmle", ".4s", CMGEvvv_4S, VPR128>; +def CMLEvvv_2D : NeonI_compare_aliases<"cmle", ".2d", CMGEvvv_2D, VPR128>; + +// Vector Compare Mask Lower (Unsigned Integer) +// CMLO is alias for CMHI with operands reversed. +def CMLOvvv_8B : NeonI_compare_aliases<"cmlo", ".8b", CMHIvvv_8B, VPR64>; +def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>; +def CMLOvvv_4H : NeonI_compare_aliases<"cmlo", ".4h", CMHIvvv_4H, VPR64>; +def CMLOvvv_8H : NeonI_compare_aliases<"cmlo", ".8h", CMHIvvv_8H, VPR128>; +def CMLOvvv_2S : NeonI_compare_aliases<"cmlo", ".2s", CMHIvvv_2S, VPR64>; +def CMLOvvv_4S : NeonI_compare_aliases<"cmlo", ".4s", CMHIvvv_4S, VPR128>; +def CMLOvvv_2D : NeonI_compare_aliases<"cmlo", ".2d", CMHIvvv_2D, VPR128>; + +// Vector Compare Mask Less Than (Integer) +// CMLT is alias for CMGT with operands reversed. 
+def CMLTvvv_8B : NeonI_compare_aliases<"cmlt", ".8b", CMGTvvv_8B, VPR64>; +def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>; +def CMLTvvv_4H : NeonI_compare_aliases<"cmlt", ".4h", CMGTvvv_4H, VPR64>; +def CMLTvvv_8H : NeonI_compare_aliases<"cmlt", ".8h", CMGTvvv_8H, VPR128>; +def CMLTvvv_2S : NeonI_compare_aliases<"cmlt", ".2s", CMGTvvv_2S, VPR64>; +def CMLTvvv_4S : NeonI_compare_aliases<"cmlt", ".4s", CMGTvvv_4S, VPR128>; +def CMLTvvv_2D : NeonI_compare_aliases<"cmlt", ".2d", CMGTvvv_2D, VPR128>; + + +def neon_uimm0_asmoperand : AsmOperandClass +{ + let Name = "UImm0"; + let PredicateMethod = "isUImm<0>"; + let RenderMethod = "addImmOperands"; +} + +def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> { + let ParserMatchClass = neon_uimm0_asmoperand; + let PrintMethod = "printNeonUImm0Operand"; + +} + +multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC> +{ + def _8B : NeonI_2VMisc<0b0, u, 0b00, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm), + asmop # "\t$Rd.8b, $Rn.8b, $Imm", + [(set (v8i8 VPR64:$Rd), + (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))], + NoItinerary>; + + def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm), + asmop # "\t$Rd.16b, $Rn.16b, $Imm", + [(set (v16i8 VPR128:$Rd), + (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))], + NoItinerary>; + + def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm), + asmop # "\t$Rd.4h, $Rn.4h, $Imm", + [(set (v4i16 VPR64:$Rd), + (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))], + NoItinerary>; + + def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm), + asmop # "\t$Rd.8h, $Rn.8h, $Imm", + [(set (v8i16 VPR128:$Rd), + (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))], + NoItinerary>; + + def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm), + asmop # "\t$Rd.2s, $Rn.2s, $Imm", + [(set (v2i32 VPR64:$Rd), + (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))], + NoItinerary>; + + def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm), + asmop # "\t$Rd.4s, $Rn.4s, $Imm", + [(set (v4i32 VPR128:$Rd), + (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))], + NoItinerary>; + + def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm), + asmop # "\t$Rd.2d, $Rn.2d, $Imm", + [(set (v2i64 VPR128:$Rd), + (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))], + NoItinerary>; +} + +// Vector Compare Mask Equal to Zero (Integer) +defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>; + +// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer) +defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>; + +// Vector Compare Mask Greater Than Zero (Signed Integer) +defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>; + +// Vector Compare Mask Less Than or Equal To Zero (Signed Integer) +defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>; + +// Vector Compare Mask Less Than Zero (Signed Integer) +defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>; + +// Vector Comparisons (Floating Point) + +// Vector Compare Mask Equal (Floating Point) +let isCommutable =1 in { +defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq, + Neon_cmeq, Neon_cmeq, + v2i32, v4i32, v2i64, 0>; +} + +// 
Vector Compare Mask Greater Than Or Equal (Floating Point) +defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge, + Neon_cmge, Neon_cmge, + v2i32, v4i32, v2i64, 0>; + +// Vector Compare Mask Greater Than (Floating Point) +defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt, + Neon_cmgt, Neon_cmgt, + v2i32, v4i32, v2i64, 0>; + +// Vector Compare Mask Less Than Or Equal (Floating Point) +// FCMLE is alias for FCMGE with operands reversed. +def FCMLEvvv_2S : NeonI_compare_aliases<"fcmle", ".2s", FCMGEvvv_2S, VPR64>; +def FCMLEvvv_4S : NeonI_compare_aliases<"fcmle", ".4s", FCMGEvvv_4S, VPR128>; +def FCMLEvvv_2D : NeonI_compare_aliases<"fcmle", ".2d", FCMGEvvv_2D, VPR128>; + +// Vector Compare Mask Less Than (Floating Point) +// FCMLT is alias for FCMGT with operands reversed. +def FCMLTvvv_2S : NeonI_compare_aliases<"fcmlt", ".2s", FCMGTvvv_2S, VPR64>; +def FCMLTvvv_4S : NeonI_compare_aliases<"fcmlt", ".4s", FCMGTvvv_4S, VPR128>; +def FCMLTvvv_2D : NeonI_compare_aliases<"fcmlt", ".2d", FCMGTvvv_2D, VPR128>; + + +multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode, + string asmop, CondCode CC> +{ + def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode, + (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm), + asmop # "\t$Rd.2s, $Rn.2s, $FPImm", + [(set (v2i32 VPR64:$Rd), + (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))], + NoItinerary>; + + def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm), + asmop # "\t$Rd.4s, $Rn.4s, $FPImm", + [(set (v4i32 VPR128:$Rd), + (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))], + NoItinerary>; + + def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode, + (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm), + asmop # "\t$Rd.2d, $Rn.2d, $FPImm", + [(set (v2i64 VPR128:$Rd), + (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))], + NoItinerary>; +} + +// Vector Compare Mask Equal to Zero (Floating Point) +defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>; + +// Vector Compare Mask Greater Than or Equal to Zero (Floating Point) +defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>; + +// Vector Compare Mask Greater Than Zero (Floating Point) +defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>; + +// Vector Compare Mask Less Than or Equal To Zero (Floating Point) +defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>; + +// Vector Compare Mask Less Than Zero (Floating Point) +defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>; + +// Vector Absolute Comparisons (Floating Point) + +// Vector Absolute Compare Mask Greater Than Or Equal (Floating Point) +defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge", + int_arm_neon_vacged, int_arm_neon_vacgeq, + int_aarch64_neon_vacgeq, + v2i32, v4i32, v2i64, 0>; + +// Vector Absolute Compare Mask Greater Than (Floating Point) +defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt", + int_arm_neon_vacgtd, int_arm_neon_vacgtq, + int_aarch64_neon_vacgtq, + v2i32, v4i32, v2i64, 0>; + +// Vector Absolute Compare Mask Less Than Or Equal (Floating Point) +// FACLE is alias for FACGE with operands reversed. 
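FACGE and FACGT compare absolute values, so each result lane is an all-ones mask when |Rn| >= |Rm| (respectively >). A one-lane reference model in the same spirit as the earlier sketch (hypothetical helper, not backend code); the FACLE/FACLT aliases below then reuse the operand-swap trick:

    #include <cmath>
    #include <cstdint>

    // Hypothetical reference model for one f32 lane of FACGE.
    static uint32_t facge_lane(float rn, float rm) {
      return std::fabs(rn) >= std::fabs(rm) ? 0xffffffffu : 0u;
    }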
+def FACLEvvv_2S : NeonI_compare_aliases<"facle", ".2s", FACGEvvv_2S, VPR64>;
+def FACLEvvv_4S : NeonI_compare_aliases<"facle", ".4s", FACGEvvv_4S, VPR128>;
+def FACLEvvv_2D : NeonI_compare_aliases<"facle", ".2d", FACGEvvv_2D, VPR128>;
+
+// Vector Absolute Compare Mask Less Than (Floating Point)
+// FACLT is alias for FACGT with operands reversed.
+def FACLTvvv_2S : NeonI_compare_aliases<"faclt", ".2s", FACGTvvv_2S, VPR64>;
+def FACLTvvv_4S : NeonI_compare_aliases<"faclt", ".4s", FACGTvvv_4S, VPR128>;
+def FACLTvvv_2D : NeonI_compare_aliases<"faclt", ".2d", FACGTvvv_2D, VPR128>;
+
+// Vector halving add (Integer Signed, Unsigned)
+defm SHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
+                                       int_arm_neon_vhadds, 1>;
+defm UHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
+                                       int_arm_neon_vhaddu, 1>;
+
+// Vector halving sub (Integer Signed, Unsigned)
+defm SHSUBvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
+                                       int_arm_neon_vhsubs, 0>;
+defm UHSUBvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
+                                       int_arm_neon_vhsubu, 0>;
+
+// Vector rounding halving add (Integer Signed, Unsigned)
+defm SRHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
+                                        int_arm_neon_vrhadds, 1>;
+defm URHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
+                                        int_arm_neon_vrhaddu, 1>;
+
+// Vector Saturating add (Integer Signed, Unsigned)
+defm SQADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
+                                        int_arm_neon_vqadds, 1>;
+defm UQADDvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
+                                        int_arm_neon_vqaddu, 1>;
+
+// Vector Saturating sub (Integer Signed, Unsigned)
+defm SQSUBvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
+                                        int_arm_neon_vqsubs, 1>;
+defm UQSUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
+                                        int_arm_neon_vqsubu, 1>;
+
+// Vector Shift Left (Signed and Unsigned Integer)
+defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
+                                       int_arm_neon_vshifts, 1>;
+defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
+                                       int_arm_neon_vshiftu, 1>;
+
+// Vector Saturating Shift Left (Signed and Unsigned Integer)
+defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
+                                        int_arm_neon_vqshifts, 1>;
+defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
+                                        int_arm_neon_vqshiftu, 1>;
+
+// Vector Rounding Shift Left (Signed and Unsigned Integer)
+defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
+                                        int_arm_neon_vrshifts, 1>;
+defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
+                                        int_arm_neon_vrshiftu, 1>;
+
+// Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
+defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
+                                         int_arm_neon_vqrshifts, 1>;
+defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
+                                         int_arm_neon_vqrshiftu, 1>;
+
+// Vector Maximum (Signed and Unsigned Integer)
+defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
+defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
+
+// Vector Minimum (Signed and Unsigned Integer)
+defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
+defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
+
+// Vector Maximum (Floating Point)
+defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
+                                     int_arm_neon_vmaxs, int_arm_neon_vmaxs,
+                                     int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
+
+// Vector Minimum (Floating Point)
+defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
+                                     int_arm_neon_vmins, int_arm_neon_vmins,
+                                     int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
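FMAX returns a NaN whenever either input is NaN, while the FMAXNM family below implements IEEE-754 maxNum and prefers the numeric operand over a quiet NaN. C's fmax has the maxNum behaviour, which makes the difference easy to demonstrate (reference model only, not backend code):

    #include <cmath>
    #include <cstdio>

    int main() {
      float qnan = std::nanf("");
      // std::fmax == maxNum semantics: the number wins over a quiet NaN.
      printf("%f\n", std::fmax(qnan, 1.0f));   // 1.000000 (FMAXNM-like)
      // Plain FMAX semantics propagate the NaN instead:
      float fmax_like = (qnan > 1.0f) ? qnan : ((1.0f > qnan) ? 1.0f : qnan);
      printf("%f\n", fmax_like);               // nan
      return 0;
    }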
+// Vector maxNum (Floating Point) - prefer a number over a quiet NaN
+defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
+                                       int_aarch64_neon_vmaxnm,
+                                       int_aarch64_neon_vmaxnm,
+                                       int_aarch64_neon_vmaxnm,
+                                       v2f32, v4f32, v2f64, 1>;
+
+// Vector minNum (Floating Point) - prefer a number over a quiet NaN
+defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
+                                       int_aarch64_neon_vminnm,
+                                       int_aarch64_neon_vminnm,
+                                       int_aarch64_neon_vminnm,
+                                       v2f32, v4f32, v2f64, 1>;
+
+// Vector Maximum Pairwise (Signed and Unsigned Integer)
+defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
+defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
+
+// Vector Minimum Pairwise (Signed and Unsigned Integer)
+defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
+defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
+
+// Vector Maximum Pairwise (Floating Point)
+defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
+                                      int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
+                                      int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
+
+// Vector Minimum Pairwise (Floating Point)
+defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
+                                      int_arm_neon_vpmins, int_arm_neon_vpmins,
+                                      int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
+
+// Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
+defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
+                                        int_aarch64_neon_vpmaxnm,
+                                        int_aarch64_neon_vpmaxnm,
+                                        int_aarch64_neon_vpmaxnm,
+                                        v2f32, v4f32, v2f64, 1>;
+
+// Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
+defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
+                                        int_aarch64_neon_vpminnm,
+                                        int_aarch64_neon_vpminnm,
+                                        int_aarch64_neon_vpminnm,
+                                        v2f32, v4f32, v2f64, 1>;
+
+// Vector Addition Pairwise (Integer)
+defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
+
+// Vector Addition Pairwise (Floating Point)
+defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
+                                   int_arm_neon_vpadd,
+                                   int_arm_neon_vpadd,
+                                   int_arm_neon_vpadd,
+                                   v2f32, v4f32, v2f64, 1>;
+
+// Vector Saturating Doubling Multiply High
+defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
+                                        int_arm_neon_vqdmulh, 1>;
+
+// Vector Saturating Rounding Doubling Multiply High
+defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
+                                         int_arm_neon_vqrdmulh, 1>;
+
+// Vector Multiply Extended (Floating Point)
+defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
+                                      int_aarch64_neon_vmulx,
+                                      int_aarch64_neon_vmulx,
+                                      int_aarch64_neon_vmulx,
+                                      v2f32, v4f32, v2f64, 1>;
+
+// Vector Immediate Instructions
+
+multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
+{
+  def _asmoperand : AsmOperandClass
+  {
+    let Name = "NeonMovImmShift" # PREFIX;
+    let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
+    let PredicateMethod = "isNeonMovImmShift" # PREFIX;
+  }
+}
+
+// Definition of vector immediates shift operands
+
+// The selectable use-cases extract the shift operation
+// information from the OpCmode fields encoded in the immediate.
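For reference, the shift information recovered here follows the ARMv8 AdvSIMD modified-immediate encoding: the cmode field selects LSL #0/8/16/24 for 32-bit lanes, LSL #0/8 for 16-bit lanes, and MSL #8/16. A hedged C++ sketch of that mapping (the authoritative decoder is A64Imms::decodeNeonModShiftImm, whose exact signature may differ); the XForm and operand classes that consume this decoding follow.

    #include <cstdint>

    enum ShiftKind { LSL, MSL, NONE };

    // Sketch of AdvSIMD modified-immediate shift decoding (assumed behaviour).
    static ShiftKind decodeModImmShift(unsigned cmode, unsigned &amount) {
      if ((cmode & 0x9) == 0x0) {   // 0xx0: 32-bit lanes, LSL #(0/8/16/24)
        amount = ((cmode >> 1) & 0x3) * 8;
        return LSL;
      }
      if ((cmode & 0xd) == 0x8) {   // 10x0: 16-bit lanes, LSL #(0/8)
        amount = ((cmode >> 1) & 0x1) * 8;
        return LSL;
      }
      if ((cmode & 0xe) == 0xc) {   // 110x: MSL #(8/16), ones shifted in
        amount = 8 << (cmode & 0x1);
        return MSL;
      }
      return NONE;                  // per-byte, bytemask and fmov cases
    }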
+def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{ + uint64_t OpCmode = N->getZExtValue(); + unsigned ShiftImm; + unsigned ShiftOnesIn; + unsigned HasShift = + A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn); + if (!HasShift) return SDValue(); + return CurDAG->getTargetConstant(ShiftImm, MVT::i32); +}]>; + +// Vector immediates shift operands which accept LSL and MSL +// shift operators with shift value in the range of 0, 8, 16, 24 (LSL), +// or 0, 8 (LSLH) or 8, 16 (MSL). +defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">; +defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">; +// LSLH restricts shift amount to 0, 8 out of 0, 8, 16, 24 +defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">; + +multiclass neon_mov_imm_shift_operands<string PREFIX, + string HALF, string ISHALF, code pred> +{ + def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM> + { + let PrintMethod = + "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">"; + let DecoderMethod = + "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">"; + let ParserMatchClass = + !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand"); + } +} + +defm neon_mov_imm_LSL : neon_mov_imm_shift_operands<"LSL", "", "false", [{ + unsigned ShiftImm; + unsigned ShiftOnesIn; + unsigned HasShift = + A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn); + return (HasShift && !ShiftOnesIn); +}]>; + +defm neon_mov_imm_MSL : neon_mov_imm_shift_operands<"MSL", "", "false", [{ + unsigned ShiftImm; + unsigned ShiftOnesIn; + unsigned HasShift = + A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn); + return (HasShift && ShiftOnesIn); +}]>; + +defm neon_mov_imm_LSLH : neon_mov_imm_shift_operands<"LSL", "H", "true", [{ + unsigned ShiftImm; + unsigned ShiftOnesIn; + unsigned HasShift = + A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn); + return (HasShift && !ShiftOnesIn); +}]>; + +def neon_uimm8_asmoperand : AsmOperandClass +{ + let Name = "UImm8"; + let PredicateMethod = "isUImm<8>"; + let RenderMethod = "addImmOperands"; +} + +def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> { + let ParserMatchClass = neon_uimm8_asmoperand; + let PrintMethod = "printNeonUImm8Operand"; +} + +def neon_uimm64_mask_asmoperand : AsmOperandClass +{ + let Name = "NeonUImm64Mask"; + let PredicateMethod = "isNeonUImm64Mask"; + let RenderMethod = "addNeonUImm64MaskOperands"; +} + +// MCOperand for 64-bit bytemask with each byte having only the +// value 0x00 and 0xff is encoded as an unsigned 8-bit value +def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> { + let ParserMatchClass = neon_uimm64_mask_asmoperand; + let PrintMethod = "printNeonUImm64MaskOperand"; +} + +multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op, + SDPatternOperator opnode> +{ + // shift zeros, per word + def _2S : NeonI_1VModImm<0b0, op, + (outs VPR64:$Rd), + (ins neon_uimm8:$Imm, + neon_mov_imm_LSL_operand:$Simm), + !strconcat(asmop, " $Rd.2s, $Imm$Simm"), + [(set (v2i32 VPR64:$Rd), + (v2i32 (opnode (timm:$Imm), + (neon_mov_imm_LSL_operand:$Simm))))], + NoItinerary> { + bits<2> Simm; + let cmode = {0b0, Simm{1}, Simm{0}, 0b0}; + } + + def _4S : NeonI_1VModImm<0b1, op, + (outs VPR128:$Rd), + (ins neon_uimm8:$Imm, + neon_mov_imm_LSL_operand:$Simm), + !strconcat(asmop, " $Rd.4s, $Imm$Simm"), + [(set (v4i32 VPR128:$Rd), + (v4i32 (opnode (timm:$Imm), + (neon_mov_imm_LSL_operand:$Simm))))], + NoItinerary> { + bits<2> Simm; + let 
cmode = {0b0, Simm{1}, Simm{0}, 0b0}; + } + + // shift zeros, per halfword + def _4H : NeonI_1VModImm<0b0, op, + (outs VPR64:$Rd), + (ins neon_uimm8:$Imm, + neon_mov_imm_LSLH_operand:$Simm), + !strconcat(asmop, " $Rd.4h, $Imm$Simm"), + [(set (v4i16 VPR64:$Rd), + (v4i16 (opnode (timm:$Imm), + (neon_mov_imm_LSLH_operand:$Simm))))], + NoItinerary> { + bit Simm; + let cmode = {0b1, 0b0, Simm, 0b0}; + } + + def _8H : NeonI_1VModImm<0b1, op, + (outs VPR128:$Rd), + (ins neon_uimm8:$Imm, + neon_mov_imm_LSLH_operand:$Simm), + !strconcat(asmop, " $Rd.8h, $Imm$Simm"), + [(set (v8i16 VPR128:$Rd), + (v8i16 (opnode (timm:$Imm), + (neon_mov_imm_LSLH_operand:$Simm))))], + NoItinerary> { + bit Simm; + let cmode = {0b1, 0b0, Simm, 0b0}; + } +} + +multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op, + SDPatternOperator opnode, + SDPatternOperator neonopnode> +{ + let Constraints = "$src = $Rd" in { + // shift zeros, per word + def _2S : NeonI_1VModImm<0b0, op, + (outs VPR64:$Rd), + (ins VPR64:$src, neon_uimm8:$Imm, + neon_mov_imm_LSL_operand:$Simm), + !strconcat(asmop, " $Rd.2s, $Imm$Simm"), + [(set (v2i32 VPR64:$Rd), + (v2i32 (opnode (v2i32 VPR64:$src), + (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm, + neon_mov_imm_LSL_operand:$Simm)))))))], + NoItinerary> { + bits<2> Simm; + let cmode = {0b0, Simm{1}, Simm{0}, 0b1}; + } + + def _4S : NeonI_1VModImm<0b1, op, + (outs VPR128:$Rd), + (ins VPR128:$src, neon_uimm8:$Imm, + neon_mov_imm_LSL_operand:$Simm), + !strconcat(asmop, " $Rd.4s, $Imm$Simm"), + [(set (v4i32 VPR128:$Rd), + (v4i32 (opnode (v4i32 VPR128:$src), + (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm, + neon_mov_imm_LSL_operand:$Simm)))))))], + NoItinerary> { + bits<2> Simm; + let cmode = {0b0, Simm{1}, Simm{0}, 0b1}; + } + + // shift zeros, per halfword + def _4H : NeonI_1VModImm<0b0, op, + (outs VPR64:$Rd), + (ins VPR64:$src, neon_uimm8:$Imm, + neon_mov_imm_LSLH_operand:$Simm), + !strconcat(asmop, " $Rd.4h, $Imm$Simm"), + [(set (v4i16 VPR64:$Rd), + (v4i16 (opnode (v4i16 VPR64:$src), + (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm, + neon_mov_imm_LSL_operand:$Simm)))))))], + NoItinerary> { + bit Simm; + let cmode = {0b1, 0b0, Simm, 0b1}; + } + + def _8H : NeonI_1VModImm<0b1, op, + (outs VPR128:$Rd), + (ins VPR128:$src, neon_uimm8:$Imm, + neon_mov_imm_LSLH_operand:$Simm), + !strconcat(asmop, " $Rd.8h, $Imm$Simm"), + [(set (v8i16 VPR128:$Rd), + (v8i16 (opnode (v8i16 VPR128:$src), + (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm, + neon_mov_imm_LSL_operand:$Simm)))))))], + NoItinerary> { + bit Simm; + let cmode = {0b1, 0b0, Simm, 0b1}; + } + } +} + +multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op, + SDPatternOperator opnode> +{ + // shift ones, per word + def _2S : NeonI_1VModImm<0b0, op, + (outs VPR64:$Rd), + (ins neon_uimm8:$Imm, + neon_mov_imm_MSL_operand:$Simm), + !strconcat(asmop, " $Rd.2s, $Imm$Simm"), + [(set (v2i32 VPR64:$Rd), + (v2i32 (opnode (timm:$Imm), + (neon_mov_imm_MSL_operand:$Simm))))], + NoItinerary> { + bit Simm; + let cmode = {0b1, 0b1, 0b0, Simm}; + } + + def _4S : NeonI_1VModImm<0b1, op, + (outs VPR128:$Rd), + (ins neon_uimm8:$Imm, + neon_mov_imm_MSL_operand:$Simm), + !strconcat(asmop, " $Rd.4s, $Imm$Simm"), + [(set (v4i32 VPR128:$Rd), + (v4i32 (opnode (timm:$Imm), + (neon_mov_imm_MSL_operand:$Simm))))], + NoItinerary> { + bit Simm; + let cmode = {0b1, 0b1, 0b0, Simm}; + } +} + +// Vector Move Immediate Shifted +let isReMaterializable = 1 in { +defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>; +} + +// Vector Move Inverted Immediate 
Shifted
+let isReMaterializable = 1 in {
+defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
+}
+
+// Vector Bitwise Bit Clear (AND NOT) - immediate
+let isReMaterializable = 1 in {
+defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
+                                                         and, Neon_mvni>;
+}
+
+// Vector Bitwise OR - immediate
+
+let isReMaterializable = 1 in {
+defm ORRvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
+                                                         or, Neon_movi>;
+}
+
+// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
+// LowerBUILD_VECTOR favors lowering MOVI over MVNI.
+// BIC immediate instruction selection requires additional patterns to
+// transform Neon_movi operands into BIC immediate operands
+
+def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
+  uint64_t OpCmode = N->getZExtValue();
+  unsigned ShiftImm;
+  unsigned ShiftOnesIn;
+  (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
+  // LSLH restricts shift amount to 0, 8 which are encoded as 0 and 1
+  // Transform encoded shift amount 0 to 1 and 1 to 0.
+  return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
+}]>;
+
+def neon_mov_imm_LSLH_transform_operand
+  : ImmLeaf<i32, [{
+    unsigned ShiftImm;
+    unsigned ShiftOnesIn;
+    unsigned HasShift =
+      A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
+    return (HasShift && !ShiftOnesIn); }],
+  neon_mov_imm_LSLH_transform_XFORM>;
+
+// Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
+// Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
+def : Pat<(v4i16 (and VPR64:$src,
+            (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
+          (BICvi_lsl_4H VPR64:$src, 0,
+            neon_mov_imm_LSLH_transform_operand:$Simm)>;
+
+// Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0x00, LSL 8)
+// Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
+def : Pat<(v8i16 (and VPR128:$src,
+            (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
+          (BICvi_lsl_8H VPR128:$src, 0,
+            neon_mov_imm_LSLH_transform_operand:$Simm)>;
+
+
+multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
+                                   SDPatternOperator neonopnode,
+                                   Instruction INST4H,
+                                   Instruction INST8H> {
+  def : Pat<(v8i8 (opnode VPR64:$src,
+                    (bitconvert(v4i16 (neonopnode timm:$Imm,
+                      neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST4H VPR64:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v1i64 (opnode VPR64:$src,
+                    (bitconvert(v4i16 (neonopnode timm:$Imm,
+                      neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST4H VPR64:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
+
+  def : Pat<(v16i8 (opnode VPR128:$src,
+                    (bitconvert(v8i16 (neonopnode timm:$Imm,
+                      neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST8H VPR128:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v4i32 (opnode VPR128:$src,
+                    (bitconvert(v8i16 (neonopnode timm:$Imm,
+                      neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST8H VPR128:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v2i64 (opnode VPR128:$src,
+                    (bitconvert(v8i16 (neonopnode timm:$Imm,
+                      neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST8H VPR128:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
+}
+
+// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
+defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
+
+// Additional patterns for Vector Bitwise OR - immediate
+defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
+
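The shift-swap XForm above is just mask complementation at work: clearing the high byte of each 16-bit lane (AND with 0x00ff, i.e. MOVI #0xff, LSL #0) is the same as BIC with #0xff shifted by 8, and vice versa. A quick C++ check of the identity (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t lane = 0; lane <= 0xffff; ++lane) {
        uint16_t a = static_cast<uint16_t>(lane);
        // AND with MOVI(0xff, LSL #0) ...
        uint16_t and_form = a & 0x00ff;
        // ... equals BIC with the byte moved to the other half (LSL #8).
        uint16_t bic_form = a & static_cast<uint16_t>(~(0x00ffu << 8));
        assert(and_form == bic_form);
      }
      return 0;
    }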
+// Vector Move Immediate Masked
+let isReMaterializable = 1 in {
+defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
+}
+
+// Vector Move Inverted Immediate Masked
+let isReMaterializable = 1 in {
+defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
+}
+
+class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
+                                Instruction inst, RegisterClass VPRC>
+  : NeonInstAlias<!strconcat(asmop, " $Rd" # asmlane # ", $Imm"),
+                  (inst VPRC:$Rd, neon_uimm8:$Imm, 0), 0b0>;
+
+// Aliases for Vector Move Immediate Shifted
+def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
+
+// Aliases for Vector Move Inverted Immediate Shifted
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
+
+// Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
+def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
+
+// Aliases for Vector Bitwise OR - immediate
+def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
+def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
+def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
+
+// Vector Move Immediate - per byte
+let isReMaterializable = 1 in {
+def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
+                               (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
+                               "movi\t$Rd.8b, $Imm",
+                               [(set (v8i8 VPR64:$Rd),
+                                  (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
+                               NoItinerary> {
+  let cmode = 0b1110;
+}
+
+def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
+                                (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
+                                "movi\t$Rd.16b, $Imm",
+                                [(set (v16i8 VPR128:$Rd),
+                                   (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
+                                NoItinerary> {
+  let cmode = 0b1110;
+}
+}
+
+// Vector Move Immediate - bytemask, per double word
+let isReMaterializable = 1 in {
+def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
+                               (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
+                               "movi\t$Rd.2d, $Imm",
+                               [(set (v2i64 VPR128:$Rd),
+                                  (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
+                               NoItinerary> {
+  let cmode = 0b1110;
+}
+}
+
+// Vector Move Immediate - bytemask, one doubleword
+
+let isReMaterializable = 1 in {
+def MOVIdi : NeonI_1VModImm<0b0, 0b1,
+                            (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
+                            "movi\t$Rd, $Imm",
+                            [(set (f64 FPR64:$Rd),
+                               (f64 (bitconvert
+                                 (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))))],
+                            NoItinerary> {
+  let cmode = 0b1110;
+}
+}
+
+// Vector Floating Point Move Immediate
+
+class NeonI_FMOV_impl<string asmlane, RegisterClass VPRC, ValueType OpTy,
+                      Operand immOpType, bit q, bit op>
+  : NeonI_1VModImm<q, op,
+                   (outs VPRC:$Rd), (ins immOpType:$Imm),
+                   "fmov\t$Rd" # asmlane # ", $Imm",
+                   [(set (OpTy VPRC:$Rd),
+                      (OpTy (Neon_fmovi (timm:$Imm))))],
+                   NoItinerary> {
+  let cmode = 0b1111;
+  }
+
+let isReMaterializable = 1 in {
+def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64, v2f32, fmov32_operand, 0b0, 0b0>;
+def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
+def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
+}
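fmov32_operand and fmov64_operand accept only the small set of values expressible as an 8-bit modified FP immediate (roughly ±n/16 × 2^r with 16 <= n <= 31 and -3 <= r <= 4). A sketch of the 32-bit expansion, following the architectural VFPExpandImm pseudocode (an assumption for illustration, not code from this patch); expandFPImm8(0x70) yields 1.0f, the canonical encodable constant:

    #include <cstdint>
    #include <cstring>

    // Expand an AdvSIMD/VFP 8-bit immediate (abcdefgh) to an IEEE-754 single:
    // sign = a, exponent = NOT(b):bbbbb:cd, fraction = efgh followed by zeros.
    static float expandFPImm8(uint8_t imm8) {
      uint32_t sign = (imm8 >> 7) & 0x1;
      uint32_t b    = (imm8 >> 6) & 0x1;
      uint32_t cd   = (imm8 >> 4) & 0x3;
      uint32_t efgh =  imm8       & 0xf;
      uint32_t exp  = ((b ^ 1) << 7) | (b ? 0x7c : 0x00) | cd; // 8 exponent bits
      uint32_t bits = (sign << 31) | (exp << 23) | (efgh << 19);
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }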
+
+// Scalar Arithmetic
+
+class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
+  : NeonI_Scalar3Same<u, 0b11, opcode,
+                      (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
+                      !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                      [],
+                      NoItinerary>;
+
+multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
+                                        string asmop, bit Commutable = 0>
+{
+  let isCommutable = Commutable in {
+    def bbb : NeonI_Scalar3Same<u, 0b00, opcode,
+                                (outs FPR8:$Rd), (ins FPR8:$Rn, FPR8:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+    def hhh : NeonI_Scalar3Same<u, 0b01, opcode,
+                                (outs FPR16:$Rd), (ins FPR16:$Rn, FPR16:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+    def sss : NeonI_Scalar3Same<u, 0b10, opcode,
+                                (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+    def ddd : NeonI_Scalar3Same<u, 0b11, opcode,
+                                (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+  }
+}
+
+class Neon_Scalar_D_size_patterns<SDPatternOperator opnode, Instruction INSTD>
+  : Pat<(v1i64 (opnode (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
+        (SUBREG_TO_REG (i64 0),
+          (INSTD (EXTRACT_SUBREG VPR64:$Rn, sub_64),
+                 (EXTRACT_SUBREG VPR64:$Rm, sub_64)),
+          sub_64)>;
+
+
+// Scalar Integer Add
+let isCommutable = 1 in {
+def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
+}
+
+// Scalar Integer Sub
+def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
+
+// Pattern for Scalar Integer Add and Sub with D register
+def : Neon_Scalar_D_size_patterns<add, ADDddd>;
+def : Neon_Scalar_D_size_patterns<sub, SUBddd>;
+
+// Scalar Integer Saturating Add (Signed, Unsigned)
+defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
+defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
+
+// Scalar Integer Saturating Sub (Signed, Unsigned)
+defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
+defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
+
+// Patterns for Scalar Integer Saturating Add, Sub with D register only
+def : Neon_Scalar_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
+def : Neon_Scalar_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
+def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
+def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
+
+// Scalar Integer Shift Left (Signed, Unsigned)
+def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
+def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
+
+// Scalar Integer Saturating Shift Left (Signed, Unsigned)
+defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
+defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
+
+// Scalar Integer Rounding Shift Left (Signed, Unsigned)
+def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
+def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
+
+// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
+defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
+defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
+
+// Patterns for Scalar Integer Shift Left, Saturating Shift Left,
+// Rounding Shift Left, Rounding Saturating Shift Left with D register only
+def : Neon_Scalar_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
+def : Neon_Scalar_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
+def : 
Neon_Scalar_D_size_patterns<shl, SSHLddd>; +def : Neon_Scalar_D_size_patterns<shl, USHLddd>; +def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>; +def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>; +def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>; +def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>; +def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>; +def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>; + + +//===----------------------------------------------------------------------===// +// Non-Instruction Patterns +//===----------------------------------------------------------------------===// + +// 64-bit vector bitcasts... + +def : Pat<(v1i64 (bitconvert (v8i8 VPR64:$src))), (v1i64 VPR64:$src)>; +def : Pat<(v2f32 (bitconvert (v8i8 VPR64:$src))), (v2f32 VPR64:$src)>; +def : Pat<(v2i32 (bitconvert (v8i8 VPR64:$src))), (v2i32 VPR64:$src)>; +def : Pat<(v4i16 (bitconvert (v8i8 VPR64:$src))), (v4i16 VPR64:$src)>; + +def : Pat<(v1i64 (bitconvert (v4i16 VPR64:$src))), (v1i64 VPR64:$src)>; +def : Pat<(v2i32 (bitconvert (v4i16 VPR64:$src))), (v2i32 VPR64:$src)>; +def : Pat<(v2f32 (bitconvert (v4i16 VPR64:$src))), (v2f32 VPR64:$src)>; +def : Pat<(v8i8 (bitconvert (v4i16 VPR64:$src))), (v8i8 VPR64:$src)>; + +def : Pat<(v1i64 (bitconvert (v2i32 VPR64:$src))), (v1i64 VPR64:$src)>; +def : Pat<(v2f32 (bitconvert (v2i32 VPR64:$src))), (v2f32 VPR64:$src)>; +def : Pat<(v4i16 (bitconvert (v2i32 VPR64:$src))), (v4i16 VPR64:$src)>; +def : Pat<(v8i8 (bitconvert (v2i32 VPR64:$src))), (v8i8 VPR64:$src)>; + +def : Pat<(v1i64 (bitconvert (v2f32 VPR64:$src))), (v1i64 VPR64:$src)>; +def : Pat<(v2i32 (bitconvert (v2f32 VPR64:$src))), (v2i32 VPR64:$src)>; +def : Pat<(v4i16 (bitconvert (v2f32 VPR64:$src))), (v4i16 VPR64:$src)>; +def : Pat<(v8i8 (bitconvert (v2f32 VPR64:$src))), (v8i8 VPR64:$src)>; + +def : Pat<(v2f32 (bitconvert (v1i64 VPR64:$src))), (v2f32 VPR64:$src)>; +def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>; +def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>; +def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>; + +// ..and 128-bit vector bitcasts... 
+ +def : Pat<(v2f64 (bitconvert (v16i8 VPR128:$src))), (v2f64 VPR128:$src)>; +def : Pat<(v2i64 (bitconvert (v16i8 VPR128:$src))), (v2i64 VPR128:$src)>; +def : Pat<(v4f32 (bitconvert (v16i8 VPR128:$src))), (v4f32 VPR128:$src)>; +def : Pat<(v4i32 (bitconvert (v16i8 VPR128:$src))), (v4i32 VPR128:$src)>; +def : Pat<(v8i16 (bitconvert (v16i8 VPR128:$src))), (v8i16 VPR128:$src)>; + +def : Pat<(v2f64 (bitconvert (v8i16 VPR128:$src))), (v2f64 VPR128:$src)>; +def : Pat<(v2i64 (bitconvert (v8i16 VPR128:$src))), (v2i64 VPR128:$src)>; +def : Pat<(v4i32 (bitconvert (v8i16 VPR128:$src))), (v4i32 VPR128:$src)>; +def : Pat<(v4f32 (bitconvert (v8i16 VPR128:$src))), (v4f32 VPR128:$src)>; +def : Pat<(v16i8 (bitconvert (v8i16 VPR128:$src))), (v16i8 VPR128:$src)>; + +def : Pat<(v2f64 (bitconvert (v4i32 VPR128:$src))), (v2f64 VPR128:$src)>; +def : Pat<(v2i64 (bitconvert (v4i32 VPR128:$src))), (v2i64 VPR128:$src)>; +def : Pat<(v4f32 (bitconvert (v4i32 VPR128:$src))), (v4f32 VPR128:$src)>; +def : Pat<(v8i16 (bitconvert (v4i32 VPR128:$src))), (v8i16 VPR128:$src)>; +def : Pat<(v16i8 (bitconvert (v4i32 VPR128:$src))), (v16i8 VPR128:$src)>; + +def : Pat<(v2f64 (bitconvert (v4f32 VPR128:$src))), (v2f64 VPR128:$src)>; +def : Pat<(v2i64 (bitconvert (v4f32 VPR128:$src))), (v2i64 VPR128:$src)>; +def : Pat<(v4i32 (bitconvert (v4f32 VPR128:$src))), (v4i32 VPR128:$src)>; +def : Pat<(v8i16 (bitconvert (v4f32 VPR128:$src))), (v8i16 VPR128:$src)>; +def : Pat<(v16i8 (bitconvert (v4f32 VPR128:$src))), (v16i8 VPR128:$src)>; + +def : Pat<(v2f64 (bitconvert (v2i64 VPR128:$src))), (v2f64 VPR128:$src)>; +def : Pat<(v4f32 (bitconvert (v2i64 VPR128:$src))), (v4f32 VPR128:$src)>; +def : Pat<(v4i32 (bitconvert (v2i64 VPR128:$src))), (v4i32 VPR128:$src)>; +def : Pat<(v8i16 (bitconvert (v2i64 VPR128:$src))), (v8i16 VPR128:$src)>; +def : Pat<(v16i8 (bitconvert (v2i64 VPR128:$src))), (v16i8 VPR128:$src)>; + +def : Pat<(v2i64 (bitconvert (v2f64 VPR128:$src))), (v2i64 VPR128:$src)>; +def : Pat<(v4f32 (bitconvert (v2f64 VPR128:$src))), (v4f32 VPR128:$src)>; +def : Pat<(v4i32 (bitconvert (v2f64 VPR128:$src))), (v4i32 VPR128:$src)>; +def : Pat<(v8i16 (bitconvert (v2f64 VPR128:$src))), (v8i16 VPR128:$src)>; +def : Pat<(v16i8 (bitconvert (v2f64 VPR128:$src))), (v16i8 VPR128:$src)>; + + +// ...and scalar bitcasts... 
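The scalar bitcast patterns that follow reduce to pure register reinterpretation: an f64 and any 64-bit vector type share the same D register, so the "conversion" is an EXTRACT_SUBREG/SUBREG_TO_REG pair with no data movement. In C++ terms this is the semantics of std::bit_cast (illustration only, C++20):

    #include <array>
    #include <bit>
    #include <cstdint>

    // Reinterpreting a 64-bit vector as f64 keeps the bits, as the
    // EXTRACT_SUBREG-based patterns below do at register level.
    static double asF64(std::array<uint32_t, 2> v2i32) {
      return std::bit_cast<double>(v2i32);
    }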
+ +def : Pat<(f64 (bitconvert (v8i8 VPR64:$src))), + (f64 (EXTRACT_SUBREG (v8i8 VPR64:$src), sub_64))>; +def : Pat<(f64 (bitconvert (v4i16 VPR64:$src))), + (f64 (EXTRACT_SUBREG (v4i16 VPR64:$src), sub_64))>; +def : Pat<(f64 (bitconvert (v2i32 VPR64:$src))), + (f64 (EXTRACT_SUBREG (v2i32 VPR64:$src), sub_64))>; +def : Pat<(f64 (bitconvert (v2f32 VPR64:$src))), + (f64 (EXTRACT_SUBREG (v2f32 VPR64:$src), sub_64))>; +def : Pat<(f64 (bitconvert (v1i64 VPR64:$src))), + (f64 (EXTRACT_SUBREG (v1i64 VPR64:$src), sub_64))>; +def : Pat<(f128 (bitconvert (v16i8 VPR128:$src))), + (f128 (EXTRACT_SUBREG (v16i8 VPR128:$src), sub_alias))>; +def : Pat<(f128 (bitconvert (v8i16 VPR128:$src))), + (f128 (EXTRACT_SUBREG (v8i16 VPR128:$src), sub_alias))>; +def : Pat<(f128 (bitconvert (v4i32 VPR128:$src))), + (f128 (EXTRACT_SUBREG (v4i32 VPR128:$src), sub_alias))>; +def : Pat<(f128 (bitconvert (v2i64 VPR128:$src))), + (f128 (EXTRACT_SUBREG (v2i64 VPR128:$src), sub_alias))>; +def : Pat<(f128 (bitconvert (v4f32 VPR128:$src))), + (f128 (EXTRACT_SUBREG (v4f32 VPR128:$src), sub_alias))>; +def : Pat<(f128 (bitconvert (v2f64 VPR128:$src))), + (f128 (EXTRACT_SUBREG (v2f64 VPR128:$src), sub_alias))>; + +def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), + (v8i8 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>; +def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), + (v4i16 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>; +def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), + (v2i32 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>; +def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), + (v2f32 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>; +def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), + (v1i64 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>; +def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), + (v16i8 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src), + sub_alias))>; +def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), + (v8i16 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src), + sub_alias))>; +def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), + (v4i32 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src), + sub_alias))>; +def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), + (v2i64 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src), + sub_alias))>; +def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), + (v4f32 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src), + sub_alias))>; +def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), + (v2f64 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src), + sub_alias))>; diff --git a/lib/Target/AArch64/AArch64MCInstLower.cpp b/lib/Target/AArch64/AArch64MCInstLower.cpp index 3d22330..7ce5ce3 100644 --- a/lib/Target/AArch64/AArch64MCInstLower.cpp +++ b/lib/Target/AArch64/AArch64MCInstLower.cpp @@ -109,6 +109,11 @@ bool AArch64AsmPrinter::lowerOperand(const MachineOperand &MO, case MachineOperand::MO_Immediate: MCOp = MCOperand::CreateImm(MO.getImm()); break; + case MachineOperand::MO_FPImmediate: { + assert(MO.getFPImm()->isZero() && "Only fp imm 0.0 is supported"); + MCOp = MCOperand::CreateFPImm(0.0); + break; + } case MachineOperand::MO_BlockAddress: MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress())); break; diff --git a/lib/Target/AArch64/AArch64RegisterInfo.td b/lib/Target/AArch64/AArch64RegisterInfo.td index cc2bb61..b3a81b1 100644 --- a/lib/Target/AArch64/AArch64RegisterInfo.td +++ b/lib/Target/AArch64/AArch64RegisterInfo.td @@ -185,7 +185,7 @@ foreach Index = 0-31 in { // These two classes contain the same registers, which should be reasonably // sensible for MC and allocation purposes, but allows 
them to be treated // separately for things like stack spilling. -def VPR64 : RegisterClass<"AArch64", [v2f32, v2i32, v4i16, v8i8], 64, +def VPR64 : RegisterClass<"AArch64", [v2f32, v2i32, v4i16, v8i8, v1i64], 64, (sequence "V%u", 0, 31)>; def VPR128 : RegisterClass<"AArch64", diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp index d17b738..d71bb4e 100644 --- a/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/lib/Target/AArch64/AArch64Subtarget.cpp @@ -26,10 +26,8 @@ using namespace llvm; AArch64Subtarget::AArch64Subtarget(StringRef TT, StringRef CPU, StringRef FS) - : AArch64GenSubtargetInfo(TT, CPU, FS) - , HasNEON(true) - , HasCrypto(true) - , TargetTriple(TT) { + : AArch64GenSubtargetInfo(TT, CPU, FS), HasNEON(false), HasCrypto(false), + TargetTriple(TT) { ParseSubtargetFeatures(CPU, FS); } diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h index 2e9205f..35a7c8d 100644 --- a/lib/Target/AArch64/AArch64Subtarget.h +++ b/lib/Target/AArch64/AArch64Subtarget.h @@ -48,6 +48,9 @@ public: bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); } bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; } + bool hasNEON() const { return HasNEON; } + + bool hasCrypto() const { return HasCrypto; } }; } // End llvm namespace diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp index 69bb80a..43e91ac 100644 --- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -454,7 +454,7 @@ public: } bool isMOVN32Imm() const { - static AArch64MCExpr::VariantKind PermittedModifiers[] = { + static const AArch64MCExpr::VariantKind PermittedModifiers[] = { AArch64MCExpr::VK_AARCH64_SABS_G0, AArch64MCExpr::VK_AARCH64_SABS_G1, AArch64MCExpr::VK_AARCH64_DTPREL_G1, @@ -463,13 +463,13 @@ public: AArch64MCExpr::VK_AARCH64_TPREL_G1, AArch64MCExpr::VK_AARCH64_TPREL_G0, }; - unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); + const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); return isMoveWideImm(32, PermittedModifiers, NumModifiers); } bool isMOVN64Imm() const { - static AArch64MCExpr::VariantKind PermittedModifiers[] = { + static const AArch64MCExpr::VariantKind PermittedModifiers[] = { AArch64MCExpr::VK_AARCH64_SABS_G0, AArch64MCExpr::VK_AARCH64_SABS_G1, AArch64MCExpr::VK_AARCH64_SABS_G2, @@ -481,14 +481,14 @@ public: AArch64MCExpr::VK_AARCH64_TPREL_G1, AArch64MCExpr::VK_AARCH64_TPREL_G0, }; - unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); + const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); return isMoveWideImm(64, PermittedModifiers, NumModifiers); } bool isMOVZ32Imm() const { - static AArch64MCExpr::VariantKind PermittedModifiers[] = { + static const AArch64MCExpr::VariantKind PermittedModifiers[] = { AArch64MCExpr::VK_AARCH64_ABS_G0, AArch64MCExpr::VK_AARCH64_ABS_G1, AArch64MCExpr::VK_AARCH64_SABS_G0, @@ -499,13 +499,13 @@ public: AArch64MCExpr::VK_AARCH64_TPREL_G1, AArch64MCExpr::VK_AARCH64_TPREL_G0, }; - unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); + const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); return isMoveWideImm(32, PermittedModifiers, NumModifiers); } bool isMOVZ64Imm() const { - static AArch64MCExpr::VariantKind PermittedModifiers[] = { + static const AArch64MCExpr::VariantKind PermittedModifiers[] = { AArch64MCExpr::VK_AARCH64_ABS_G0, 
AArch64MCExpr::VK_AARCH64_ABS_G1, AArch64MCExpr::VK_AARCH64_ABS_G2, @@ -521,13 +521,13 @@ public: AArch64MCExpr::VK_AARCH64_TPREL_G1, AArch64MCExpr::VK_AARCH64_TPREL_G0, }; - unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); + const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); return isMoveWideImm(64, PermittedModifiers, NumModifiers); } bool isMOVK32Imm() const { - static AArch64MCExpr::VariantKind PermittedModifiers[] = { + static const AArch64MCExpr::VariantKind PermittedModifiers[] = { AArch64MCExpr::VK_AARCH64_ABS_G0_NC, AArch64MCExpr::VK_AARCH64_ABS_G1_NC, AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC, @@ -536,13 +536,13 @@ public: AArch64MCExpr::VK_AARCH64_TPREL_G1_NC, AArch64MCExpr::VK_AARCH64_TPREL_G0_NC, }; - unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); + const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); return isMoveWideImm(32, PermittedModifiers, NumModifiers); } bool isMOVK64Imm() const { - static AArch64MCExpr::VariantKind PermittedModifiers[] = { + static const AArch64MCExpr::VariantKind PermittedModifiers[] = { AArch64MCExpr::VK_AARCH64_ABS_G0_NC, AArch64MCExpr::VK_AARCH64_ABS_G1_NC, AArch64MCExpr::VK_AARCH64_ABS_G2_NC, @@ -553,13 +553,13 @@ public: AArch64MCExpr::VK_AARCH64_TPREL_G1_NC, AArch64MCExpr::VK_AARCH64_TPREL_G0_NC, }; - unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); + const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers); return isMoveWideImm(64, PermittedModifiers, NumModifiers); } bool isMoveWideImm(unsigned RegWidth, - AArch64MCExpr::VariantKind *PermittedModifiers, + const AArch64MCExpr::VariantKind *PermittedModifiers, unsigned NumModifiers) const { if (!isImmWithLSL()) return false; @@ -664,8 +664,42 @@ public: return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4; } - template<int MemSize> bool isSImm7Scaled() const { - if (!isImm()) return false; + bool isNeonMovImmShiftLSL() const { + if (!isShiftOrExtend()) + return false; + + if (ShiftExtend.ShiftType != A64SE::LSL) + return false; + + // Valid shift amount is 0, 8, 16 and 24. + return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24; + } + + bool isNeonMovImmShiftLSLH() const { + if (!isShiftOrExtend()) + return false; + + if (ShiftExtend.ShiftType != A64SE::LSL) + return false; + + // Valid shift amount is 0 and 8. + return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8; + } + + bool isNeonMovImmShiftMSL() const { + if (!isShiftOrExtend()) + return false; + + if (ShiftExtend.ShiftType != A64SE::MSL) + return false; + + // Valid shift amount is 8 and 16. + return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16; + } + + template <int MemSize> bool isSImm7Scaled() const { + if (!isImm()) + return false; const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); if (!CE) return false; @@ -705,10 +739,27 @@ public: return isa<MCConstantExpr>(getImm()); } + bool isNeonUImm64Mask() const { + if (!isImm()) + return false; + + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + if (!CE) + return false; + + uint64_t Value = CE->getValue(); + + // i64 value with each byte being either 0x00 or 0xff. 
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8) + if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) + return false; + return true; + } + static AArch64Operand *CreateImmWithLSL(const MCExpr *Val, unsigned ShiftAmount, bool ImplicitAmount, - SMLoc S, SMLoc E) { + SMLoc S,SMLoc E) { AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E); Op->ImmWithLSL.Val = Val; Op->ImmWithLSL.ShiftAmount = ShiftAmount; @@ -1026,6 +1077,40 @@ public: Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount)); } + // For Vector Immediates shifted imm operands. + void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + + if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24) + llvm_unreachable("Invalid shift amount for vector immediate inst."); + + // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3. + int64_t Imm = ShiftExtend.Amount / 8; + Inst.addOperand(MCOperand::CreateImm(Imm)); + } + + void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + + if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8) + llvm_unreachable("Invalid shift amount for vector immediate inst."); + + // Encode LSLH shift amount 0, 8 as 0, 1. + int64_t Imm = ShiftExtend.Amount / 8; + Inst.addOperand(MCOperand::CreateImm(Imm)); + } + + void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + + if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16) + llvm_unreachable("Invalid shift amount for vector immediate inst."); + + // Encode MSL shift amount 8, 16 as 0, 1. + int64_t Imm = ShiftExtend.Amount / 8 - 1; + Inst.addOperand(MCOperand::CreateImm(Imm)); + } + // For the extend in load-store (register offset) instructions. template<unsigned MemSize> void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const { @@ -1065,6 +1150,20 @@ public: Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount)); } + + void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + + // A bit from each byte in the constant forms the encoded immediate + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + uint64_t Value = CE->getValue(); + + unsigned Imm = 0; + for (unsigned i = 0; i < 8; ++i, Value >>= 8) { + Imm |= (Value & 1) << i; + } + Inst.addOperand(MCOperand::CreateImm(Imm)); + } }; } // end anonymous namespace. 
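The three isNeonMovImmShift* predicates above and their addNeonMovImmShift*Operands counterparts agree on one small rule per shift flavour. The following standalone sketch (names are illustrative, not part of the patch) condenses the amount-to-encoding mapping they implement:

#include <cassert>
#include <cstdint>

enum class NeonShift { LSL, LSLH, MSL };

// Map an assembler shift amount to the encoded field, with the same
// validity rules as the predicates above: LSL takes 0/8/16/24 -> 0..3,
// LSLH takes 0/8 -> 0..1, and MSL takes 8/16 -> 0..1.
int64_t encodeNeonMovImmShift(NeonShift Kind, unsigned Amount) {
  switch (Kind) {
  case NeonShift::LSL:
    assert(Amount % 8 == 0 && Amount <= 24 && "invalid LSL amount");
    return Amount / 8;
  case NeonShift::LSLH:
    assert((Amount == 0 || Amount == 8) && "invalid LSLH amount");
    return Amount / 8;
  case NeonShift::MSL:
    assert((Amount == 8 || Amount == 16) && "invalid MSL amount");
    return Amount / 8 - 1;
  }
  return -1; // unreachable: all enumerators handled above
}

So an operand written as msl #8 carries ShiftExtend.Amount == 8 and is emitted with encoded field 0, matching addNeonMovImmShiftMSLOperands above.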
@@ -1660,20 +1759,21 @@ AArch64AsmParser::ParseShiftExtend( std::string LowerID = IDVal.lower(); A64SE::ShiftExtSpecifiers Spec = - StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID) - .Case("lsl", A64SE::LSL) - .Case("lsr", A64SE::LSR) - .Case("asr", A64SE::ASR) - .Case("ror", A64SE::ROR) - .Case("uxtb", A64SE::UXTB) - .Case("uxth", A64SE::UXTH) - .Case("uxtw", A64SE::UXTW) - .Case("uxtx", A64SE::UXTX) - .Case("sxtb", A64SE::SXTB) - .Case("sxth", A64SE::SXTH) - .Case("sxtw", A64SE::SXTW) - .Case("sxtx", A64SE::SXTX) - .Default(A64SE::Invalid); + StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID) + .Case("lsl", A64SE::LSL) + .Case("msl", A64SE::MSL) + .Case("lsr", A64SE::LSR) + .Case("asr", A64SE::ASR) + .Case("ror", A64SE::ROR) + .Case("uxtb", A64SE::UXTB) + .Case("uxth", A64SE::UXTH) + .Case("uxtw", A64SE::UXTW) + .Case("uxtx", A64SE::UXTX) + .Case("sxtb", A64SE::SXTB) + .Case("sxth", A64SE::SXTH) + .Case("sxtw", A64SE::SXTW) + .Case("sxtx", A64SE::SXTX) + .Default(A64SE::Invalid); if (Spec == A64SE::Invalid) return MatchOperand_NoMatch; @@ -1683,8 +1783,8 @@ AArch64AsmParser::ParseShiftExtend( S = Parser.getTok().getLoc(); Parser.Lex(); - if (Spec != A64SE::LSL && Spec != A64SE::LSR && - Spec != A64SE::ASR && Spec != A64SE::ROR) { + if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR && + Spec != A64SE::ROR && Spec != A64SE::MSL) { // The shift amount can be omitted for the extending versions, but not real // shifts: // add x0, x0, x0, uxtb @@ -1918,7 +2018,7 @@ bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) { if (getParser().parseExpression(Value)) return true; - getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); + getParser().getStreamer().EmitValue(Value, Size); if (getLexer().is(AsmToken::EndOfStatement)) break; @@ -2019,7 +2119,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, "expected compatible register or floating-point constant"); case Match_FPZero: return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(), - "expected floating-point constant #0.0"); + "expected floating-point constant #0.0 or invalid register type"); case Match_Label: return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(), "expected label or encodable integer pc offset"); diff --git a/lib/Target/AArch64/CMakeLists.txt b/lib/Target/AArch64/CMakeLists.txt index 8164d6f..0f2e816 100644 --- a/lib/Target/AArch64/CMakeLists.txt +++ b/lib/Target/AArch64/CMakeLists.txt @@ -28,6 +28,8 @@ add_llvm_target(AArch64CodeGen AArch64TargetObjectFile.cpp ) +add_dependencies(LLVMAArch64CodeGen AArch64CommonTableGen) + add_subdirectory(AsmParser) add_subdirectory(Disassembler) add_subdirectory(InstPrinter) diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp index 1c397b5..a88a8e8 100644 --- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp +++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp @@ -38,7 +38,7 @@ typedef MCDisassembler::DecodeStatus DecodeStatus; namespace { /// AArch64 disassembler for all AArch64 platforms. class AArch64Disassembler : public MCDisassembler { - const MCRegisterInfo *RegInfo; + OwningPtr<const MCRegisterInfo> RegInfo; public: /// Initializes the disassembler. /// @@ -46,8 +46,7 @@ public: : MCDisassembler(STI), RegInfo(Info) { } - ~AArch64Disassembler() { - } + ~AArch64Disassembler() {} /// See MCDisassembler. 
DecodeStatus getInstruction(MCInst &instr, @@ -57,7 +56,7 @@ public: raw_ostream &vStream, raw_ostream &cStream) const; - const MCRegisterInfo *getRegInfo() const { return RegInfo; } + const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); } }; } @@ -86,6 +85,9 @@ static DecodeStatus DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo, static DecodeStatus DecodeFPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeVPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo, + uint64_t Address, + const void *Decoder); static DecodeStatus DecodeVPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo, uint64_t Address, const void *Decoder); @@ -127,6 +129,10 @@ static DecodeStatus DecodeRegExtendOperand(llvm::MCInst &Inst, unsigned ShiftAmount, uint64_t Address, const void *Decoder); +template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf> +static DecodeStatus +DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount, + uint64_t Address, const void *Decoder); static DecodeStatus Decode32BitShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount, @@ -337,9 +343,20 @@ DecodeFPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo, return MCDisassembler::Success; } +static DecodeStatus DecodeVPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + + uint16_t Register = getReg(Decoder, AArch64::VPR64RegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Register)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeVPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo, - uint64_t Address, const void *Decoder) { + uint64_t Address, const void *Decoder) { if (RegNo > 31) return MCDisassembler::Fail; @@ -800,4 +817,24 @@ extern "C" void LLVMInitializeAArch64Disassembler() { createAArch64Disassembler); } +template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf> +static DecodeStatus +DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount, + uint64_t Address, const void *Decoder) { + bool IsLSL = false; + if (Ext == A64SE::LSL) + IsLSL = true; + else if (Ext != A64SE::MSL) + return MCDisassembler::Fail; + + // MSL and LSLH accept encoded shift amount 0 or 1. + if ((!IsLSL || (IsLSL && IsHalf)) && ShiftAmount != 0 && ShiftAmount != 1) + return MCDisassembler::Fail; + // LSL accepts encoded shift amount 0, 1, 2 or 3.
+ if (IsLSL && ShiftAmount > 3) + return MCDisassembler::Fail; + + Inst.addOperand(MCOperand::CreateImm(ShiftAmount)); + return MCDisassembler::Success; +} diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp index 82ce80c..b624331 100644 --- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp +++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp @@ -406,3 +406,84 @@ void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O, printAnnotation(O, Annot); } + +template <A64SE::ShiftExtSpecifiers Ext, bool isHalf> +void AArch64InstPrinter::printNeonMovImmShiftOperand(const MCInst *MI, + unsigned OpNum, + raw_ostream &O) { + const MCOperand &MO = MI->getOperand(OpNum); + + assert(MO.isImm() && + "Immediate operand required for Neon vector immediate inst."); + + bool IsLSL = false; + if (Ext == A64SE::LSL) + IsLSL = true; + else if (Ext != A64SE::MSL) + llvm_unreachable("Invalid shift specifier in movi instruction"); + + int64_t Imm = MO.getImm(); + + // MSL and LSLH accept encoded shift amount 0 or 1. + if ((!IsLSL || (IsLSL && isHalf)) && Imm != 0 && Imm != 1) + llvm_unreachable("Invalid shift amount in movi instruction"); + + // LSL accepts encoded shift amount 0, 1, 2 or 3. + if (IsLSL && (Imm < 0 || Imm > 3)) + llvm_unreachable("Invalid shift amount in movi instruction"); + + // Print shift amount as multiple of 8 with MSL encoded shift amount + // 0 and 1 printed as 8 and 16. + if (!IsLSL) + Imm++; + Imm *= 8; + + // LSL #0 is not printed + if (IsLSL) { + if (Imm == 0) + return; + O << ", lsl"; + } else + O << ", msl"; + + O << " #" << Imm; +} + +void AArch64InstPrinter::printNeonUImm0Operand(const MCInst *MI, unsigned OpNum, + raw_ostream &o) { + o << "#0x0"; +} + +void AArch64InstPrinter::printNeonUImm8Operand(const MCInst *MI, unsigned OpNum, + raw_ostream &O) { + const MCOperand &MOUImm = MI->getOperand(OpNum); + + assert(MOUImm.isImm() && + "Immediate operand required for Neon vector immediate inst."); + + unsigned Imm = MOUImm.getImm(); + + O << "#0x"; + O.write_hex(Imm); +} + +void AArch64InstPrinter::printNeonUImm64MaskOperand(const MCInst *MI, + unsigned OpNum, + raw_ostream &O) { + const MCOperand &MOUImm8 = MI->getOperand(OpNum); + + assert(MOUImm8.isImm() && + "Immediate operand required for Neon vector immediate bytemask inst."); + + uint32_t UImm8 = MOUImm8.getImm(); + uint64_t Mask = 0; + + // Replicates 0x00 or 0xff byte in a 64-bit vector + for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) { + if ((UImm8 >> ByteNum) & 1) + Mask |= (uint64_t)0xff << (8 * ByteNum); + } + + O << "#0x"; + O.write_hex(Mask); +} diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h index 639fa86..f7439be 100644 --- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h +++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h @@ -164,9 +164,14 @@ public: return RegNo == AArch64::XSP || RegNo == AArch64::WSP; } - + template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf> + void printNeonMovImmShiftOperand(const MCInst *MI, unsigned OpNum, + raw_ostream &O); + void printNeonUImm0Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + void printNeonUImm8Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + void printNeonUImm64MaskOperand(const MCInst *MI, unsigned OpNum, + raw_ostream &O); }; - } #endif diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp index
3b811df..104e4d2 100644 --- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp +++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp @@ -85,18 +85,17 @@ public: /// This is one of the functions used to emit data into an ELF section, so the /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d) /// if necessary. - virtual void EmitBytes(StringRef Data, unsigned AddrSpace) { + virtual void EmitBytes(StringRef Data) { EmitDataMappingSymbol(); - MCELFStreamer::EmitBytes(Data, AddrSpace); + MCELFStreamer::EmitBytes(Data); } /// This is one of the functions used to emit data into an ELF section, so the /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d) /// if necessary. - virtual void EmitValueImpl(const MCExpr *Value, unsigned Size, - unsigned AddrSpace) { + virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) { EmitDataMappingSymbol(); - MCELFStreamer::EmitValueImpl(Value, Size, AddrSpace); + MCELFStreamer::EmitValueImpl(Value, Size); } private: diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp index a5c591e..b9770b3 100644 --- a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp +++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp @@ -152,10 +152,10 @@ getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx, switch (Expr->getKind()) { default: llvm_unreachable("Unexpected operand modifier"); case AArch64MCExpr::VK_AARCH64_LO12: { - unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12, - AArch64::fixup_a64_ldst16_lo12, - AArch64::fixup_a64_ldst32_lo12, - AArch64::fixup_a64_ldst64_lo12, + static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12, + AArch64::fixup_a64_ldst16_lo12, + AArch64::fixup_a64_ldst32_lo12, + AArch64::fixup_a64_ldst64_lo12, AArch64::fixup_a64_ldst128_lo12 }; assert(MemSize <= 16 && "Invalid fixup for operation"); FixupKind = FixupsBySize[Log2_32(MemSize)]; @@ -166,19 +166,23 @@ getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx, FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc; break; case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: { - unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12, - AArch64::fixup_a64_ldst16_dtprel_lo12, - AArch64::fixup_a64_ldst32_dtprel_lo12, - AArch64::fixup_a64_ldst64_dtprel_lo12 }; + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_dtprel_lo12, + AArch64::fixup_a64_ldst16_dtprel_lo12, + AArch64::fixup_a64_ldst32_dtprel_lo12, + AArch64::fixup_a64_ldst64_dtprel_lo12 + }; assert(MemSize <= 8 && "Invalid fixup for operation"); FixupKind = FixupsBySize[Log2_32(MemSize)]; break; } case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: { - unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12_nc, - AArch64::fixup_a64_ldst16_dtprel_lo12_nc, - AArch64::fixup_a64_ldst32_dtprel_lo12_nc, - AArch64::fixup_a64_ldst64_dtprel_lo12_nc }; + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_dtprel_lo12_nc, + AArch64::fixup_a64_ldst16_dtprel_lo12_nc, + AArch64::fixup_a64_ldst32_dtprel_lo12_nc, + AArch64::fixup_a64_ldst64_dtprel_lo12_nc + }; assert(MemSize <= 8 && "Invalid fixup for operation"); FixupKind = FixupsBySize[Log2_32(MemSize)]; break; @@ -188,19 +192,23 @@ getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx, FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc; break; case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{ - unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12, - AArch64::fixup_a64_ldst16_tprel_lo12, 
- AArch64::fixup_a64_ldst32_tprel_lo12, - AArch64::fixup_a64_ldst64_tprel_lo12 }; + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_tprel_lo12, + AArch64::fixup_a64_ldst16_tprel_lo12, + AArch64::fixup_a64_ldst32_tprel_lo12, + AArch64::fixup_a64_ldst64_tprel_lo12 + }; assert(MemSize <= 8 && "Invalid fixup for operation"); FixupKind = FixupsBySize[Log2_32(MemSize)]; break; } case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: { - unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12_nc, - AArch64::fixup_a64_ldst16_tprel_lo12_nc, - AArch64::fixup_a64_ldst32_tprel_lo12_nc, - AArch64::fixup_a64_ldst64_tprel_lo12_nc }; + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_tprel_lo12_nc, + AArch64::fixup_a64_ldst16_tprel_lo12_nc, + AArch64::fixup_a64_ldst32_tprel_lo12_nc, + AArch64::fixup_a64_ldst64_tprel_lo12_nc + }; assert(MemSize <= 8 && "Invalid fixup for operation"); FixupKind = FixupsBySize[Log2_32(MemSize)]; break; @@ -346,7 +354,7 @@ AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups) const { if (MO.isReg()) { - return Ctx.getRegisterInfo().getEncodingValue(MO.getReg()); + return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()); } else if (MO.isImm()) { return static_cast<unsigned>(MO.getImm()); } diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp index 48d4819..58fc95c 100644 --- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp +++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp @@ -40,7 +40,7 @@ MCSubtargetInfo *AArch64_MC::createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) { MCSubtargetInfo *X = new MCSubtargetInfo(); - InitAArch64MCSubtargetInfo(X, TT, CPU, ""); + InitAArch64MCSubtargetInfo(X, TT, CPU, FS); return X; } diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp index 79865f6..2a97cd6 100644 --- a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp +++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp @@ -1105,3 +1105,69 @@ bool A64Imms::isOnlyMOVNImm(int RegWidth, uint64_t Value, return isMOVNImm(RegWidth, Value, UImm16, Shift); } + +// decodeNeonModShiftImm - Decode a Neon OpCmode value into +// the shift amount and the shift type (shift zeros or ones in) and +// returns whether the OpCmode value implies a shift operation. +bool A64Imms::decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm, + unsigned &ShiftOnesIn) { + ShiftImm = 0; + ShiftOnesIn = false; + bool HasShift = true; + + if (OpCmode == 0xe) { + // movi byte + HasShift = false; + } else if (OpCmode == 0x1e) { + // movi 64-bit bytemask + HasShift = false; + } else if ((OpCmode & 0xc) == 0x8) { + // shift zeros, per halfword + ShiftImm = ((OpCmode & 0x2) >> 1); + } else if ((OpCmode & 0x8) == 0) { + // shift zeros, per word + ShiftImm = ((OpCmode & 0x6) >> 1); + } else if ((OpCmode & 0xe) == 0xc) { + // shift ones, per word + ShiftOnesIn = true; + ShiftImm = (OpCmode & 0x1); + } else { + // per byte, per bytemask + llvm_unreachable("Unsupported Neon modified immediate"); + } + + return HasShift; +} + +// decodeNeonModImm - Decode a NEON modified immediate and OpCmode values +// into the element value and the element size in bits.
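The isNeonUImm64Mask/addNeonUImm64MaskOperands pair earlier in the patch, printNeonUImm64MaskOperand in the instruction printer, and the OpCmode == 0x1e case of the function that follows all agree on one representation: a 64-bit constant whose bytes are each 0x00 or 0xff, compressed to one bit per byte. A minimal round-trip sketch (standalone, with illustrative names):

#include <cassert>
#include <cstdint>

// Compress: keep one bit per byte; each byte must be 0x00 or 0xff.
unsigned encodeBytemask(uint64_t Value) {
  unsigned Imm = 0;
  for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
    assert(((Value & 0xff) == 0 || (Value & 0xff) == 0xff) && "bad byte");
    Imm |= (Value & 1) << i;
  }
  return Imm;
}

// Expand: replicate each set bit back into a 0xff byte.
uint64_t decodeBytemask(unsigned UImm8) {
  uint64_t Mask = 0;
  for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum)
    if ((UImm8 >> ByteNum) & 1)
      Mask |= (uint64_t)0xff << (8 * ByteNum);
  return Mask;
}

For example, encodeBytemask(0x00ff00ff00ff00ffULL) yields 0x55, and decodeBytemask(0x55) restores the original constant.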
+uint64_t A64Imms::decodeNeonModImm(unsigned Val, unsigned OpCmode, + unsigned &EltBits) { + uint64_t DecodedVal = Val; + EltBits = 0; + + if (OpCmode == 0xe) { + // movi byte + EltBits = 8; + } else if (OpCmode == 0x1e) { + // movi 64-bit bytemask + DecodedVal = 0; + for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) { + if ((Val >> ByteNum) & 1) + DecodedVal |= (uint64_t)0xff << (8 * ByteNum); + } + EltBits = 64; + } else if ((OpCmode & 0xc) == 0x8) { + // shift zeros, per halfword + EltBits = 16; + } else if ((OpCmode & 0x8) == 0) { + // shift zeros, per word + EltBits = 32; + } else if ((OpCmode & 0xe) == 0xc) { + // shift ones, per word + EltBits = 32; + } else { + llvm_unreachable("Unsupported Neon modified immediate"); + } + return DecodedVal; +} diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/lib/Target/AArch64/Utils/AArch64BaseInfo.h index 9a1ca61..e675efc 100644 --- a/lib/Target/AArch64/Utils/AArch64BaseInfo.h +++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.h @@ -289,6 +289,7 @@ namespace A64SE { enum ShiftExtSpecifiers { Invalid = -1, LSL, + MSL, LSR, ASR, ROR, @@ -1068,7 +1069,10 @@ namespace A64Imms { // MOVN but *not* with a MOVZ (because that would take priority). bool isOnlyMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift); -} + uint64_t decodeNeonModImm(unsigned Val, unsigned OpCmode, unsigned &EltBits); + bool decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm, + unsigned &ShiftOnesIn); + } } // end namespace llvm; diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp index f0d4dbe..e8c2f7c 100644 --- a/lib/Target/ARM/A15SDOptimizer.cpp +++ b/lib/Target/ARM/A15SDOptimizer.cpp @@ -615,7 +615,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) { SmallVector<unsigned, 8> Defs = getReadDPRs(MI); bool Modified = false; - for (SmallVector<unsigned, 8>::iterator I = Defs.begin(), E = Defs.end(); + for (SmallVectorImpl<unsigned>::iterator I = Defs.begin(), E = Defs.end(); I != E; ++I) { // Follow the def-use chain for this DPR through COPYs, and also through // PHIs (which are essentially multi-way COPYs). 
It is because of PHIs that @@ -630,7 +630,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) { elideCopiesAndPHIs(Def, DefSrcs); - for (SmallVector<MachineInstr*, 8>::iterator II = DefSrcs.begin(), + for (SmallVectorImpl<MachineInstr *>::iterator II = DefSrcs.begin(), EE = DefSrcs.end(); II != EE; ++II) { MachineInstr *MI = *II; @@ -655,7 +655,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) { if (NewReg != 0) { Modified = true; - for (SmallVector<MachineOperand*, 8>::const_iterator I = Uses.begin(), + for (SmallVectorImpl<MachineOperand *>::const_iterator I = Uses.begin(), E = Uses.end(); I != E; ++I) { DEBUG(dbgs() << "Replacing operand " << **I << " with " diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td index 1bc9d6b..e5da3a5 100644 --- a/lib/Target/ARM/ARM.td +++ b/lib/Target/ARM/ARM.td @@ -45,6 +45,9 @@ def FeatureFP16 : SubtargetFeature<"fp16", "HasFP16", "true", def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true", "Enable VFP4 instructions", [FeatureVFP3, FeatureFP16]>; +def FeatureV8FP : SubtargetFeature<"v8fp", "HasV8FP", + "true", "Enable ARMv8 FP", + [FeatureVFP4]>; def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true", "Restrict VFP3 to 16 double registers">; def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true", @@ -138,6 +141,9 @@ def HasV6T2Ops : SubtargetFeature<"v6t2", "HasV6T2Ops", "true", def HasV7Ops : SubtargetFeature<"v7", "HasV7Ops", "true", "Support ARM v7 instructions", [HasV6T2Ops, FeaturePerfMon]>; +def HasV8Ops : SubtargetFeature<"v8", "HasV8Ops", "true", + "Support ARM v8 instructions", + [HasV7Ops]>; //===----------------------------------------------------------------------===// // ARM Processors supported. @@ -173,7 +179,7 @@ def ProcSwift : SubtargetFeature<"swift", "ARMProcFamily", "Swift", // FIXME: It has not been determined if A15 has these features. def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15", "Cortex-A15 ARM processors", - [FeatureT2XtPk, FeatureFP16, + [FeatureT2XtPk, FeatureFP16, FeatureVFP4, FeatureAvoidPartialCPSR, FeatureTrustZone]>; def ProcR5 : SubtargetFeature<"r5", "ARMProcFamily", "CortexR5", @@ -291,6 +297,9 @@ def : ProcessorModel<"swift", SwiftModel, FeatureDB, FeatureDSPThumb2, FeatureHasRAS]>; +// V8 Processors +def : ProcNoItin<"cortex-a53", [HasV8Ops]>; + //===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index 13ec208..13a22b1 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -213,77 +213,67 @@ namespace { } // end of anonymous namespace -MachineLocation ARMAsmPrinter:: -getDebugValueLocation(const MachineInstr *MI) const { - MachineLocation Location; - assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!"); - // Frame address. Currently handles register +- offset only. - if (MI->getOperand(0).isReg() && MI->getOperand(1).isImm()) - Location.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm()); - else { - DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n"); - } - return Location; -} - /// EmitDwarfRegOp - Emit dwarf register operation. 
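The rewrite below keeps the existing fallback scheme for ARM registers that have no DWARF number of their own. As a reading aid, here is a compact sketch of the S-register case it emits (the helper and struct are illustrative, not part of the patch):

// DWARF location for S-register N (N in [0, 31]): S[2x] and S[2x+1] are the
// low and high 32-bit pieces of DWARF register 256+x, i.e.
// DW_OP_regx(256 + (N >> 1)) followed by DW_OP_bit_piece(32, (N & 1) ? 32 : 0).
struct SRegDwarfLoc {
  unsigned Rx;        // operand of DW_OP_regx
  unsigned BitOffset; // second operand of DW_OP_bit_piece(32, BitOffset)
};

SRegDwarfLoc dwarfLocForSReg(unsigned SReg) {
  return SRegDwarfLoc{256 + (SReg >> 1), (SReg & 1u) ? 32u : 0u};
}

Q registers are handled analogously in the same function, as two 8-byte DW_OP_piece chunks of the consecutive DWARF registers 256+2x and 256+2x+1.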
-void ARMAsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc) const { +void ARMAsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc, + bool Indirect) const { const TargetRegisterInfo *RI = TM.getRegisterInfo(); - if (RI->getDwarfRegNum(MLoc.getReg(), false) != -1) - AsmPrinter::EmitDwarfRegOp(MLoc); - else { - unsigned Reg = MLoc.getReg(); - if (Reg >= ARM::S0 && Reg <= ARM::S31) { - assert(ARM::S0 + 31 == ARM::S31 && "Unexpected ARM S register numbering"); - // S registers are described as bit-pieces of a register - // S[2x] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 0) - // S[2x+1] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 32) - - unsigned SReg = Reg - ARM::S0; - bool odd = SReg & 0x1; - unsigned Rx = 256 + (SReg >> 1); - - OutStreamer.AddComment("DW_OP_regx for S register"); - EmitInt8(dwarf::DW_OP_regx); - - OutStreamer.AddComment(Twine(SReg)); - EmitULEB128(Rx); - - if (odd) { - OutStreamer.AddComment("DW_OP_bit_piece 32 32"); - EmitInt8(dwarf::DW_OP_bit_piece); - EmitULEB128(32); - EmitULEB128(32); - } else { - OutStreamer.AddComment("DW_OP_bit_piece 32 0"); - EmitInt8(dwarf::DW_OP_bit_piece); - EmitULEB128(32); - EmitULEB128(0); - } - } else if (Reg >= ARM::Q0 && Reg <= ARM::Q15) { - assert(ARM::Q0 + 15 == ARM::Q15 && "Unexpected ARM Q register numbering"); - // Q registers Q0-Q15 are described by composing two D registers together. - // Qx = DW_OP_regx(256+2x) DW_OP_piece(8) DW_OP_regx(256+2x+1) - // DW_OP_piece(8) - - unsigned QReg = Reg - ARM::Q0; - unsigned D1 = 256 + 2 * QReg; - unsigned D2 = D1 + 1; - - OutStreamer.AddComment("DW_OP_regx for Q register: D1"); - EmitInt8(dwarf::DW_OP_regx); - EmitULEB128(D1); - OutStreamer.AddComment("DW_OP_piece 8"); - EmitInt8(dwarf::DW_OP_piece); - EmitULEB128(8); - - OutStreamer.AddComment("DW_OP_regx for Q register: D2"); - EmitInt8(dwarf::DW_OP_regx); - EmitULEB128(D2); - OutStreamer.AddComment("DW_OP_piece 8"); - EmitInt8(dwarf::DW_OP_piece); - EmitULEB128(8); + if (RI->getDwarfRegNum(MLoc.getReg(), false) != -1) { + AsmPrinter::EmitDwarfRegOp(MLoc, Indirect); + return; + } + assert(MLoc.isReg() && !Indirect && + "This doesn't support offset/indirection - implement it if needed"); + unsigned Reg = MLoc.getReg(); + if (Reg >= ARM::S0 && Reg <= ARM::S31) { + assert(ARM::S0 + 31 == ARM::S31 && "Unexpected ARM S register numbering"); + // S registers are described as bit-pieces of a register + // S[2x] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 0) + // S[2x+1] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 32) + + unsigned SReg = Reg - ARM::S0; + bool odd = SReg & 0x1; + unsigned Rx = 256 + (SReg >> 1); + + OutStreamer.AddComment("DW_OP_regx for S register"); + EmitInt8(dwarf::DW_OP_regx); + + OutStreamer.AddComment(Twine(SReg)); + EmitULEB128(Rx); + + if (odd) { + OutStreamer.AddComment("DW_OP_bit_piece 32 32"); + EmitInt8(dwarf::DW_OP_bit_piece); + EmitULEB128(32); + EmitULEB128(32); + } else { + OutStreamer.AddComment("DW_OP_bit_piece 32 0"); + EmitInt8(dwarf::DW_OP_bit_piece); + EmitULEB128(32); + EmitULEB128(0); } + } else if (Reg >= ARM::Q0 && Reg <= ARM::Q15) { + assert(ARM::Q0 + 15 == ARM::Q15 && "Unexpected ARM Q register numbering"); + // Q registers Q0-Q15 are described by composing two D registers together. 
+ // Qx = DW_OP_regx(256+2x) DW_OP_piece(8) DW_OP_regx(256+2x+1) + // DW_OP_piece(8) + + unsigned QReg = Reg - ARM::Q0; + unsigned D1 = 256 + 2 * QReg; + unsigned D2 = D1 + 1; + + OutStreamer.AddComment("DW_OP_regx for Q register: D1"); + EmitInt8(dwarf::DW_OP_regx); + EmitULEB128(D1); + OutStreamer.AddComment("DW_OP_piece 8"); + EmitInt8(dwarf::DW_OP_piece); + EmitULEB128(8); + + OutStreamer.AddComment("DW_OP_regx for Q register: D2"); + EmitInt8(dwarf::DW_OP_regx); + EmitULEB128(D2); + OutStreamer.AddComment("DW_OP_piece 8"); + EmitInt8(dwarf::DW_OP_piece); + EmitULEB128(8); } } @@ -474,8 +464,14 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, // This takes advantage of the 2 operand-ness of ldm/stm and that we've // already got the operands in registers that are operands to the // inline asm statement. - - O << "{" << ARMInstPrinter::getRegisterName(RegBegin); + O << "{"; + if (ARM::GPRPairRegClass.contains(RegBegin)) { + const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo(); + unsigned Reg0 = TRI->getSubReg(RegBegin, ARM::gsub_0); + O << ARMInstPrinter::getRegisterName(Reg0) << ", ";; + RegBegin = TRI->getSubReg(RegBegin, ARM::gsub_1); + } + O << ARMInstPrinter::getRegisterName(RegBegin); // FIXME: The register allocator not only may not have given us the // registers in sequence, but may not be in ascending registers. This @@ -501,6 +497,20 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, return true; unsigned Flags = FlagsOP.getImm(); unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + unsigned RC; + InlineAsm::hasRegClassConstraint(Flags, RC); + if (RC == ARM::GPRPairRegClassID) { + if (NumVals != 1) + return true; + const MachineOperand &MO = MI->getOperand(OpNum); + if (!MO.isReg()) + return true; + const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo(); + unsigned Reg = TRI->getSubReg(MO.getReg(), ExtraCode[0] == 'Q' ? + ARM::gsub_0 : ARM::gsub_1); + O << ARMInstPrinter::getRegisterName(Reg); + return false; + } if (NumVals != 2) return true; unsigned RegOp = ExtraCode[0] == 'Q' ? OpNum : OpNum + 1; @@ -759,16 +769,9 @@ void ARMAsmPrinter::emitAttributes() { ARMBuildAttrs::Allowed); AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed); - } else if (CPUString == "generic") { - // For a generic CPU, we assume a standard v7a architecture in Subtarget. 
- AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7); - AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch_profile, - ARMBuildAttrs::ApplicationProfile); - AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use, - ARMBuildAttrs::Allowed); - AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use, - ARMBuildAttrs::AllowThumb32); - } else if (Subtarget->hasV7Ops()) { + } else if (Subtarget->hasV8Ops()) + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v8); + else if (Subtarget->hasV7Ops()) { AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7); AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::AllowThumb32); @@ -782,6 +785,8 @@ void ARMAsmPrinter::emitAttributes() { AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5T); else if (Subtarget->hasV4TOps()) AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4T); + else + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4); if (Subtarget->hasNEON() && emitFPU) { /* NEON is not exactly a VFP architecture, but GAS emit one of @@ -796,8 +801,14 @@ void ARMAsmPrinter::emitAttributes() { emitFPU = false; } - /* VFPv4 + .fpu */ - if (Subtarget->hasVFP4()) { + /* V8FP + .fpu */ + if (Subtarget->hasV8FP()) { + AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch, + ARMBuildAttrs::AllowV8FPA); + if (emitFPU) + AttrEmitter->EmitTextAttribute(ARMBuildAttrs::VFP_arch, "v8fp"); + /* VFPv4 + .fpu */ + } else if (Subtarget->hasVFP4()) { AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch, ARMBuildAttrs::AllowFPv4A); if (emitFPU) @@ -821,8 +832,12 @@ void ARMAsmPrinter::emitAttributes() { /* TODO: ARMBuildAttrs::Allowed is not completely accurate, * since NEON can have 1 (allowed) or 2 (MAC operations) */ if (Subtarget->hasNEON()) { - AttrEmitter->EmitAttribute(ARMBuildAttrs::Advanced_SIMD_arch, - ARMBuildAttrs::Allowed); + if (Subtarget->hasV8Ops()) + AttrEmitter->EmitAttribute(ARMBuildAttrs::Advanced_SIMD_arch, + ARMBuildAttrs::AllowedNeonV8); + else + AttrEmitter->EmitAttribute(ARMBuildAttrs::Advanced_SIMD_arch, + ARMBuildAttrs::Allowed); } // Signal various FP modes. @@ -1092,23 +1107,6 @@ void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) { OutStreamer.EmitDataRegion(MCDR_DataRegionEnd); } -void ARMAsmPrinter::PrintDebugValueComment(const MachineInstr *MI, - raw_ostream &OS) { - unsigned NOps = MI->getNumOperands(); - assert(NOps==4); - OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: "; - // cast away const; DIetc do not take const operands for some reason. - DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata())); - OS << V.getName(); - OS << " <- "; - // Frame address. Currently handles register +- offset only. 
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm()); - OS << '['; printOperand(MI, 0, OS); OS << '+'; printOperand(MI, 1, OS); - OS << ']'; - OS << "+"; - printOperand(MI, NOps-2, OS); -} - void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) { assert(MI->getFlag(MachineInstr::FrameSetup) && "Only instruction which are involved into frame setup code are allowed"); @@ -1272,15 +1270,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { unsigned Opc = MI->getOpcode(); switch (Opc) { case ARM::t2MOVi32imm: llvm_unreachable("Should be lowered by thumb2it pass"); - case ARM::DBG_VALUE: { - if (isVerbose() && OutStreamer.hasRawTextSupport()) { - SmallString<128> TmpStr; - raw_svector_ostream OS(TmpStr); - PrintDebugValueComment(MI, OS); - OutStreamer.EmitRawText(StringRef(OS.str())); - } - return; - } + case ARM::DBG_VALUE: llvm_unreachable("Should be handled by generic printing"); case ARM::LEApcrel: case ARM::tLEApcrel: case ARM::t2LEApcrel: { diff --git a/lib/Target/ARM/ARMAsmPrinter.h b/lib/Target/ARM/ARMAsmPrinter.h index c945e4f..de72e06 100644 --- a/lib/Target/ARM/ARMAsmPrinter.h +++ b/lib/Target/ARM/ARMAsmPrinter.h @@ -97,13 +97,9 @@ private: const MachineInstr *MI); public: - void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); - - virtual MachineLocation - getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE; - /// EmitDwarfRegOp - Emit dwarf register operation. - virtual void EmitDwarfRegOp(const MachineLocation &MLoc) const LLVM_OVERRIDE; + virtual void EmitDwarfRegOp(const MachineLocation &MLoc, bool Indirect) const + LLVM_OVERRIDE; virtual unsigned getISAEncoding() LLVM_OVERRIDE { // ARM/Darwin adds ISA to the DWARF info for each function. diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index ad14475..977d936 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -272,104 +272,90 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const { - // If the block has no terminators, it just falls into the block after it. + TBB = 0; + FBB = 0; + MachineBasicBlock::iterator I = MBB.end(); if (I == MBB.begin()) - return false; + return false; // Empty blocks are easy. --I; - while (I->isDebugValue()) { - if (I == MBB.begin()) - return false; - --I; - } - // Get the last instruction in the block. - MachineInstr *LastInst = I; - unsigned LastOpc = LastInst->getOpcode(); + // Walk backwards from the end of the basic block until the branch is + // analyzed or we give up. + while (isPredicated(I) || I->isTerminator()) { - // Check if it's an indirect branch first, this should return 'unanalyzable' - // even if it's predicated. - if (isIndirectBranchOpcode(LastOpc)) - return true; - - if (!isUnpredicatedTerminator(I)) - return false; + // Flag to be raised on unanalyzeable instructions. This is useful in cases + // where we want to clean up on the end of the basic block before we bail + // out. + bool CantAnalyze = false; - // If there is only one terminator instruction, process it. - if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { - if (isUncondBranchOpcode(LastOpc)) { - TBB = LastInst->getOperand(0).getMBB(); - return false; + // Skip over DEBUG values and predicated nonterminators. 
+ while (I->isDebugValue() || !I->isTerminator()) { + if (I == MBB.begin()) + return false; + --I; } - if (isCondBranchOpcode(LastOpc)) { - // Block ends with fall-through condbranch. - TBB = LastInst->getOperand(0).getMBB(); - Cond.push_back(LastInst->getOperand(1)); - Cond.push_back(LastInst->getOperand(2)); - return false; + + if (isIndirectBranchOpcode(I->getOpcode()) || + isJumpTableBranchOpcode(I->getOpcode())) { + // Indirect branches and jump tables can't be analyzed, but we still want + // to clean up any instructions at the tail of the basic block. + CantAnalyze = true; + } else if (isUncondBranchOpcode(I->getOpcode())) { + TBB = I->getOperand(0).getMBB(); + } else if (isCondBranchOpcode(I->getOpcode())) { + // Bail out if we encounter multiple conditional branches. + if (!Cond.empty()) + return true; + + assert(!FBB && "FBB should have been null."); + FBB = TBB; + TBB = I->getOperand(0).getMBB(); + Cond.push_back(I->getOperand(1)); + Cond.push_back(I->getOperand(2)); + } else if (I->isReturn()) { + // Returns can't be analyzed, but we should run cleanup. + CantAnalyze = !isPredicated(I); + } else { + // We encountered other unrecognized terminator. Bail out immediately. + return true; } - return true; // Can't handle indirect branch. - } - // Get the instruction before it if it is a terminator. - MachineInstr *SecondLastInst = I; - unsigned SecondLastOpc = SecondLastInst->getOpcode(); - - // If AllowModify is true and the block ends with two or more unconditional - // branches, delete all but the first unconditional branch. - if (AllowModify && isUncondBranchOpcode(LastOpc)) { - while (isUncondBranchOpcode(SecondLastOpc)) { - LastInst->eraseFromParent(); - LastInst = SecondLastInst; - LastOpc = LastInst->getOpcode(); - if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { - // Return now the only terminator is an unconditional branch. - TBB = LastInst->getOperand(0).getMBB(); - return false; - } else { - SecondLastInst = I; - SecondLastOpc = SecondLastInst->getOpcode(); + // Cleanup code - to be run for unpredicated unconditional branches and + // returns. + if (!isPredicated(I) && + (isUncondBranchOpcode(I->getOpcode()) || + isIndirectBranchOpcode(I->getOpcode()) || + isJumpTableBranchOpcode(I->getOpcode()) || + I->isReturn())) { + // Forget any previous condition branch information - it no longer applies. + Cond.clear(); + FBB = 0; + + // If we can modify the function, delete everything below this + // unconditional branch. + if (AllowModify) { + MachineBasicBlock::iterator DI = llvm::next(I); + while (DI != MBB.end()) { + MachineInstr *InstToDelete = DI; + ++DI; + InstToDelete->eraseFromParent(); + } } } - } - - // If there are three terminators, we don't know what sort of block this is. - if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I)) - return true; - // If the block ends with a B and a Bcc, handle it. - if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { - TBB = SecondLastInst->getOperand(0).getMBB(); - Cond.push_back(SecondLastInst->getOperand(1)); - Cond.push_back(SecondLastInst->getOperand(2)); - FBB = LastInst->getOperand(0).getMBB(); - return false; - } + if (CantAnalyze) + return true; - // If the block ends with two unconditional branches, handle it. The second - // one is not executed, so remove it. 
- if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { - TBB = SecondLastInst->getOperand(0).getMBB(); - I = LastInst; - if (AllowModify) - I->eraseFromParent(); - return false; - } + if (I == MBB.begin()) + return false; - // ...likewise if it ends with a branch table followed by an unconditional - // branch. The branch folder can create these, and we must get rid of them for - // correctness of Thumb constant islands. - if ((isJumpTableBranchOpcode(SecondLastOpc) || - isIndirectBranchOpcode(SecondLastOpc)) && - isUncondBranchOpcode(LastOpc)) { - I = LastInst; - if (AllowModify) - I->eraseFromParent(); - return true; + --I; } - // Otherwise, can't handle this. - return true; + // We made it past the terminators without bailing out - we must have + // analyzed this branch successfully. + return false; } @@ -745,6 +731,9 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, if (Opc == ARM::VORRq) Mov.addReg(Src); Mov = AddDefaultPred(Mov); + // MOVr can set CC. + if (Opc == ARM::MOVr) + Mov = AddDefaultCC(Mov); } // Add implicit super-register defs and kills to the last instruction. Mov->addRegisterDefined(DestReg, TRI); @@ -1213,16 +1202,6 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{ return true; } -MachineInstr* -ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const { - MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE)) - .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr); - return &*MIB; -} - /// Create a copy of a const pool value. Update CPI to the new index and return /// the label UID. static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { @@ -3684,8 +3663,7 @@ hasHighOperandLatency(const InstrItineraryData *ItinData, return true; // Hoist VFP / NEON instructions with 4 or higher latency. - int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx, - /*FindMin=*/false); + int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx); if (Latency < 0) Latency = getInstrLatency(ItinData, DefMI); if (Latency <= 3) diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 4ca3d7b..96f8637 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -125,12 +125,6 @@ public: virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const; - virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, - uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const; - virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 7c03055..58c06e3 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -58,30 +58,44 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false); } - if (ghcCall) { - return CSR_GHC_SaveList; - } - else { - return (STI.isTargetIOS() && !STI.isAAPCS_ABI()) - ? CSR_iOS_SaveList : CSR_AAPCS_SaveList; - } + if (ghcCall) + // GHC set of callee saved regs is empty as all those regs are + // used for passing STG regs around + return CSR_NoRegs_SaveList; + else + return (STI.isTargetIOS() && !STI.isAAPCS_ABI()) + ? 
CSR_iOS_SaveList : CSR_AAPCS_SaveList; } const uint32_t* -ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const { +ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { + if (CC == CallingConv::GHC) + // This is academic because all GHC calls are (supposed to be) tail calls + return CSR_NoRegs_RegMask; return (STI.isTargetIOS() && !STI.isAAPCS_ABI()) ? CSR_iOS_RegMask : CSR_AAPCS_RegMask; } const uint32_t* -ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID) const { - return (STI.isTargetIOS() && !STI.isAAPCS_ABI()) - ? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask; +ARMBaseRegisterInfo::getNoPreservedMask() const { + return CSR_NoRegs_RegMask; } const uint32_t* -ARMBaseRegisterInfo::getNoPreservedMask() const { - return CSR_NoRegs_RegMask; +ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID CC) const { + // This should return a register mask that is the same as that returned by + // getCallPreservedMask but that additionally preserves the register used for + // the first i32 argument (which must also be the register used to return a + // single i32 return value) + // + // In case that the calling convention does not use the same register for + // both or otherwise does not want to enable this optimization, the function + // should return NULL + if (CC == CallingConv::GHC) + // This is academic because all GHC calls are (supposed to be) tail calls + return NULL; + return (STI.isTargetIOS() && !STI.isAAPCS_ABI()) + ? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask; } BitVector ARMBaseRegisterInfo:: @@ -309,7 +323,7 @@ bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const { // 1. Dynamic stack realignment is explicitly disabled, // 2. This is a Thumb1 function (it's not useful, so we don't bother), or // 3. There are VLAs in the function and the base pointer is disabled. - if (!MF.getTarget().Options.RealignStack) + if (MF.getFunction()->hasFnAttribute("no-realign-stack")) return false; if (AFI->isThumb1OnlyFunction()) return false; @@ -702,12 +716,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, } #endif // NDEBUG - // Special handling of dbg_value instructions. - if (MI.isDebugValue()) { - MI.getOperand(FIOperandNum). ChangeToRegister(FrameReg, false /*isDef*/); - MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); - return; - } + assert(!MI.isDebugValue() && "DBG_VALUEs should be handled in target-independent code"); // Modify MI as necessary to handle as much of 'Offset' as possible bool Done = false; diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.h b/lib/Target/ARM/ARMBaseRegisterInfo.h index 03b3682..cdaad05 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.h +++ b/lib/Target/ARM/ARMBaseRegisterInfo.h @@ -94,9 +94,18 @@ public: /// Code Generation virtual methods... const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const; const uint32_t *getCallPreservedMask(CallingConv::ID) const; - const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const; const uint32_t *getNoPreservedMask() const; + /// getThisReturnPreservedMask - Returns a call preserved mask specific to the + /// case that 'returned' is on an i32 first argument if the calling convention + /// is one that can (partially) model this attribute with a preserved mask + /// (i.e.
it is a calling convention that uses the same register for the first + /// i32 argument and an i32 return value) + /// + /// Should return NULL in the case that the calling convention does not have + /// this property + const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const; + BitVector getReservedRegs(const MachineFunction &MF) const; const TargetRegisterClass* diff --git a/lib/Target/ARM/ARMBuildAttrs.h b/lib/Target/ARM/ARMBuildAttrs.h index 11bd6a4..f614dca 100644 --- a/lib/Target/ARM/ARMBuildAttrs.h +++ b/lib/Target/ARM/ARMBuildAttrs.h @@ -89,7 +89,8 @@ namespace ARMBuildAttrs { v7 = 10, // e.g. Cortex A8, Cortex M3 v6_M = 11, // e.g. Cortex M1 v6S_M = 12, // v6_M with the System extensions - v7E_M = 13 // v7_M with DSP extensions + v7E_M = 13, // v7_M with DSP extensions + v8 = 14 // v8, AArch32 }; enum CPUArchProfile { // (=7), uleb128 @@ -105,6 +106,7 @@ namespace ARMBuildAttrs { //ARMISAUse (=8), uleb128 and THUMBISAUse (=9), uleb128 Not_Allowed = 0, Allowed = 1, + AllowedNeonV8 = 3, // FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10) AllowFPv2 = 2, // v2 FP ISA permitted (implies use of the v1 FP ISA) @@ -112,6 +114,8 @@ namespace ARMBuildAttrs { AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31 AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA) AllowFPv4B = 6, // v4 FP ISA was permitted, but only D0-D15, S0-S31 + AllowV8FPA = 7, // Use of the ARM v8-A FP ISA was permitted + AllowV8FPB = 8, // Use of the ARM v8-A FP ISA was permitted, but only D0-D15, S0-S31 // Tag_WMMX_arch, (=11), uleb128 AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions) diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td index 8ff666e..89c5223 100644 --- a/lib/Target/ARM/ARMCallingConv.td +++ b/lib/Target/ARM/ARMCallingConv.td @@ -208,9 +208,3 @@ def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>; def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS_ThisReturn, R9))>; - -// GHC set of callee saved regs is empty as all those regs are -// used for passing STG regs around -// add is a workaround for not being able to compile empty list: -// def CSR_GHC : CalleeSavedRegs<()>; -def CSR_GHC : CalleeSavedRegs<(add)>; diff --git a/lib/Target/ARM/ARMCodeEmitter.cpp b/lib/Target/ARM/ARMCodeEmitter.cpp index 4a157d7..96eb764 100644 --- a/lib/Target/ARM/ARMCodeEmitter.cpp +++ b/lib/Target/ARM/ARMCodeEmitter.cpp @@ -171,6 +171,8 @@ namespace { const { return 0; } unsigned NEONThumb2DupPostEncoder(const MachineInstr &MI,unsigned Val) const { return 0; } + unsigned NEONThumb2V8PostEncoder(const MachineInstr &MI,unsigned Val) + const { return 0; } unsigned VFPThumb2PostEncoder(const MachineInstr&MI, unsigned Val) const { if (IsThumb) { diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index a4de941..ed054aa 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -42,7 +42,6 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/GetElementPtrTypeIterator.h" -#include "llvm/Support/MathExtras.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetMachine.h" @@ -630,6 +629,11 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) { (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned DestReg = createResultReg(RC); + // FastISel TLS support on non-Darwin is broken, punt to SelectionDAG. 
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); + bool IsThreadLocal = GVar && GVar->isThreadLocal(); + if (!Subtarget->isTargetDarwin() && IsThreadLocal) return 0; + // Use movw+movt when possible, it avoids constant pool entries. // Darwin targets don't support movt with Reloc::Static, see // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support @@ -816,22 +820,19 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { switch (Opcode) { default: break; - case Instruction::BitCast: { + case Instruction::BitCast: // Look through bitcasts. return ARMComputeAddress(U->getOperand(0), Addr); - } - case Instruction::IntToPtr: { + case Instruction::IntToPtr: // Look past no-op inttoptrs. if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) return ARMComputeAddress(U->getOperand(0), Addr); break; - } - case Instruction::PtrToInt: { + case Instruction::PtrToInt: // Look past no-op ptrtoints. if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) return ARMComputeAddress(U->getOperand(0), Addr); break; - } case Instruction::GetElementPtr: { Address SavedAddr = Addr; int TmpOffset = Addr.Offset; @@ -2184,10 +2185,14 @@ unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { } unsigned ARMFastISel::getLibcallReg(const Twine &Name) { + // Manually compute the global's type to avoid building it when unnecessary. + Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); + EVT LCREVT = TLI.getValueType(GVTy); + if (!LCREVT.isSimple()) return 0; + GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false, GlobalValue::ExternalLinkage, 0, Name); - EVT LCREVT = TLI.getValueType(GV->getType()); - if (!LCREVT.isSimple()) return 0; + assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); } @@ -2629,34 +2634,46 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, }; // Table governing the instruction(s) to be emitted. - static const struct { - // First entry for each of the following is sext, second zext. - uint16_t Opc[2]; - uint8_t Imm[2]; // All instructions have either a shift or a mask. - uint8_t hasS[2]; // Some instructions have an S bit, always set it to 0. - } OpcTbl[2][2][3] = { + static const struct InstructionTable { + uint32_t Opc : 16; + uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. + uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. + uint32_t Imm : 8; // All instructions have either a shift or a mask. + } IT[2][2][3][2] = { { // Two instructions (first is left shift, second is in this table). 
- { // ARM - /* 1 */ { { ARM::ASRi, ARM::LSRi }, { 31, 31 }, { 1, 1 } }, - /* 8 */ { { ARM::ASRi, ARM::LSRi }, { 24, 24 }, { 1, 1 } }, - /* 16 */ { { ARM::ASRi, ARM::LSRi }, { 16, 16 }, { 1, 1 } } + { // ARM Opc S Shift Imm + /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 }, + /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } }, + /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 }, + /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } }, + /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 }, + /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } } }, - { // Thumb - /* 1 */ { { ARM::tASRri, ARM::tLSRri }, { 31, 31 }, { 0, 0 } }, - /* 8 */ { { ARM::tASRri, ARM::tLSRri }, { 24, 24 }, { 0, 0 } }, - /* 16 */ { { ARM::tASRri, ARM::tLSRri }, { 16, 16 }, { 0, 0 } } + { // Thumb Opc S Shift Imm + /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 }, + /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } }, + /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 }, + /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } }, + /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 }, + /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } } } }, { // Single instruction. - { // ARM - /* 1 */ { { ARM::KILL, ARM::ANDri }, { 0, 1 }, { 0, 1 } }, - /* 8 */ { { ARM::SXTB, ARM::ANDri }, { 0, 255 }, { 0, 1 } }, - /* 16 */ { { ARM::SXTH, ARM::UXTH }, { 0, 0 }, { 0, 0 } } + { // ARM Opc S Shift Imm + /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, + /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } }, + /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 }, + /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } }, + /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 }, + /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } } }, - { // Thumb - /* 1 */ { { ARM::KILL, ARM::t2ANDri }, { 0, 1 }, { 0, 1 } }, - /* 8 */ { { ARM::t2SXTB, ARM::t2ANDri }, { 0, 255 }, { 0, 1 } }, - /* 16 */ { { ARM::t2SXTH, ARM::t2UXTH }, { 0, 0 }, { 0, 0 } } + { // Thumb Opc S Shift Imm + /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, + /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } }, + /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 }, + /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } }, + /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 }, + /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } } } } }; @@ -2671,20 +2688,28 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, "other sizes unimplemented"); bool hasV6Ops = Subtarget->hasV6Ops(); - unsigned Bitness = countTrailingZeros(SrcBits) >> 1; // {1,8,16}=>{0,1,2} + unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2} assert((Bitness < 3) && "sanity-check table bounds"); bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt]; const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr]; - unsigned Opc = OpcTbl[isSingleInstr][isThumb2][Bitness].Opc[isZExt]; + const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt]; + unsigned Opc = ITP->Opc; assert(ARM::KILL != Opc && "Invalid table entry"); - unsigned Imm = OpcTbl[isSingleInstr][isThumb2][Bitness].Imm[isZExt]; - unsigned hasS = OpcTbl[isSingleInstr][isThumb2][Bitness].hasS[isZExt]; + unsigned hasS = ITP->hasS; + ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift; + assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) && + "only MOVsi has shift operand addressing mode"); + unsigned Imm = ITP->Imm; // 
16-bit Thumb instructions always set CPSR (unless they're in an IT block). bool setsCPSR = &ARM::tGPRRegClass == RC; - unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::LSLi; + unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi; unsigned ResultReg; + // MOVsi encodes shift and immediate in shift operand addressing mode. + // The following condition has the same value when emitting two + // instruction sequences: both are shifts. + bool ImmIsSO = (Shift != ARM_AM::no_shift); // Either one or two instructions are emitted. // They're always of the form: @@ -2697,13 +2722,16 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { ResultReg = createResultReg(RC); - unsigned Opcode = ((0 == Instr) && !isSingleInstr) ? LSLOpc : Opc; + bool isLsl = (0 == Instr) && !isSingleInstr; + unsigned Opcode = isLsl ? LSLOpc : Opc; + ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift; + unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; bool isKill = 1 == Instr; MachineInstrBuilder MIB = BuildMI( *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg); if (setsCPSR) MIB.addReg(ARM::CPSR, RegState::Define); - AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(Imm)); + AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); if (hasS) AddDefaultCC(MIB); // Second instruction consumes the first's result. @@ -3025,8 +3053,6 @@ bool ARMFastISel::FastLowerArguments() { Idx = 0; for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I, ++Idx) { - if (I->use_empty()) - continue; unsigned SrcReg = GPRArgRegs[Idx]; unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. @@ -3044,13 +3070,23 @@ bool ARMFastISel::FastLowerArguments() { namespace llvm { FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) { - // Completely untested on non-iOS. const TargetMachine &TM = funcInfo.MF->getTarget(); - // Darwin and thumb1 only for now. const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>(); - if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only()) + // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl. + bool UseFastISel = false; + UseFastISel |= Subtarget->isTargetIOS() && !Subtarget->isThumb1Only(); + UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb(); + UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb(); + + if (UseFastISel) { + // iOS always has a FP for backtracking, force other targets + // to keep their FP when doing FastISel. The emitted code is + // currently superior, and in cases like test-suite's lencod + // FastISel isn't quite correct when FP is eliminated. + TM.Options.NoFramePointerElim = true; return new ARMFastISel(funcInfo, libInfo); + } return 0; } } diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 962368d..4ca3af6 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -61,7 +61,6 @@ enum AddrMode2Type { class ARMDAGToDAGISel : public SelectionDAGISel { ARMBaseTargetMachine &TM; - const ARMBaseInstrInfo *TII; /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can /// make the right decision when generating code for different targets. 
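Backing up to the ARMEmitIntExt change above: the new InstructionTable routes both the one- and two-instruction paths through ARM_AM::getSORegOpc, which packs the shift kind and amount into the single shift-operand immediate that MOVsi takes. A sketch of that packing follows; the enumerator values and field layout mirror what ARMAddressingModes.h defines, but treat them as illustrative rather than authoritative:

// Shift kinds as in ARM_AM (values shown for illustration).
enum ShiftOpc { no_shift = 0, asr, lsl, lsr, ror, rrx };

// Pack shift kind and amount into one immediate operand for MOVsi.
inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
  return ShOp | (Imm << 3);
}

// A 16-bit sign extension on ARM is emitted as MOVsi lsl #16 followed by
// MOVsi asr #16, so the two encoded shift-operand immediates are:
unsigned Lsl16 = getSORegOpc(lsl, 16);
unsigned Asr16 = getSORegOpc(asr, 16);

This is why ImmEnc above is computed as getSORegOpc(ShiftAM, Imm) only when the selected opcode actually uses shift-operand addressing (ImmIsSO), and is the raw mask immediate otherwise.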
@@ -71,7 +70,6 @@ public: explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOpt::Level OptLevel) : SelectionDAGISel(tm, OptLevel), TM(tm), - TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())), Subtarget(&TM.getSubtarget<ARMSubtarget>()) { } @@ -177,6 +175,7 @@ public: SDValue &OffImm); bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base, SDValue &OffReg, SDValue &ShImm); + bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm); inline bool is_so_imm(unsigned Imm) const { return ARM_AM::getSOImmVal(Imm) != -1; @@ -423,7 +422,7 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const { if (!CheckVMLxHazard) return true; - if (!Subtarget->isCortexA8() && !Subtarget->isLikeA9() && + if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9() && !Subtarget->isSwift()) return true; @@ -434,6 +433,9 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const { if (Use->getOpcode() == ISD::CopyToReg) return true; if (Use->isMachineOpcode()) { + const ARMBaseInstrInfo *TII = + static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo()); + const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode()); if (MCID.mayStore()) return true; @@ -533,7 +535,8 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N, if (N.getOpcode() == ISD::FrameIndex) { // Match frame index. int FI = cast<FrameIndexSDNode>(N)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); OffImm = CurDAG->getTargetConstant(0, MVT::i32); return true; } @@ -557,7 +560,8 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N, Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(Base)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); return true; @@ -703,7 +707,8 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N, Base = N; if (N.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(N)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } else if (N.getOpcode() == ARMISD::Wrapper && !(Subtarget->useMovt() && N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) { @@ -724,7 +729,8 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N, Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(Base)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } Offset = CurDAG->getRegister(0, MVT::i32); @@ -901,7 +907,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N, Base = N; if (N.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(N)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } Offset = CurDAG->getRegister(0, MVT::i32); Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32); @@ -915,7 +922,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N, Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(Base)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + 
getTargetLowering()->getPointerTy()); } Offset = CurDAG->getRegister(0, MVT::i32); @@ -960,7 +968,8 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N, Base = N; if (N.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(N)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } else if (N.getOpcode() == ARMISD::Wrapper && !(Subtarget->useMovt() && N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) { @@ -978,7 +987,8 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N, Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(Base)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } ARM_AM::AddrOpc AddSub = ARM_AM::add; @@ -1202,7 +1212,8 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm) { if (N.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(N)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); OffImm = CurDAG->getTargetConstant(0, MVT::i32); return true; } @@ -1219,7 +1230,8 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N, Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(Base)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); return true; @@ -1267,7 +1279,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N, if (N.getOpcode() == ISD::FrameIndex) { // Match frame index. int FI = cast<FrameIndexSDNode>(N)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); OffImm = CurDAG->getTargetConstant(0, MVT::i32); return true; } @@ -1297,7 +1310,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N, Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(Base)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); return true; @@ -1326,7 +1340,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N, Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(Base)->getIndex(); - Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); } OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); return true; @@ -1403,6 +1418,34 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N, return true; } +bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base, + SDValue &OffImm) { + // This *must* succeed since it's used for the irreplaceable ldrex and strex + // instructions. 
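// For example (illustrative values, not from the patch): (add %base, 8)
// folds to Base = %base with OffImm = 2, since the byte offset is encoded
// divided by 4, while (add %base, 6) or (add %base, 1024) keep Base = N and
// OffImm = 0, because only 4-byte-aligned offsets up to 1020 fit the
// encoding and the function must still report success.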
+ Base = N; + OffImm = CurDAG->getTargetConstant(0, MVT::i32); + + if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N)) + return true; + + ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1)); + if (!RHS) + return true; + + uint32_t RHSC = (int)RHS->getZExtValue(); + if (RHSC > 1020 || RHSC % 4 != 0) + return true; + + Base = N.getOperand(0); + if (Base.getOpcode() == ISD::FrameIndex) { + int FI = cast<FrameIndexSDNode>(Base)->getIndex(); + Base = CurDAG->getTargetFrameIndex(FI, getTargetLowering()->getPointerTy()); + } + + OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32); + return true; +} + //===--------------------------------------------------------------------===// /// getAL - Returns an ARMCC::AL immediate node. @@ -2587,7 +2630,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { SDValue CPIdx = CurDAG->getTargetConstantPool(ConstantInt::get( Type::getInt32Ty(*CurDAG->getContext()), Val), - TLI->getPointerTy()); + getTargetLowering()->getPointerTy()); SDNode *ResNode; if (Subtarget->isThumb1Only()) { @@ -2617,7 +2660,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { case ISD::FrameIndex: { // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm. int FI = cast<FrameIndexSDNode>(N)->getIndex(); - SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy()); + SDValue TFI = CurDAG->getTargetFrameIndex(FI, + getTargetLowering()->getPointerTy()); if (Subtarget->isThumb1Only()) { SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; @@ -3449,24 +3493,20 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ bool Changed = false; unsigned NumOps = N->getNumOperands(); - ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>( - N->getOperand(InlineAsm::Op_AsmString)); - StringRef AsmString = StringRef(S->getSymbol()); - // Normally, i64 data is bound to two arbitrary GPRs for "%r" constraint. // However, some instructions (e.g. ldrexd/strexd in ARM mode) require // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs // respectively. Since there is no constraint to explicitly specify a - // reg pair, we search %H operand inside the asm string. If it is found, the - // transformation below enforces a GPRPair reg class for "%r" for 64-bit data. - if (AsmString.find(":H}") == StringRef::npos) - return NULL; + // reg pair, we use GPRPair reg class for "%r" for 64-bit data. For Thumb, + // the 64-bit data may be referred to by H, Q, R modifiers, so we still pack + // them into a GPRPair. SDLoc dl(N); - SDValue Glue = N->getOperand(NumOps-1); + SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) : SDValue(0,0); + SmallVector<bool, 8> OpChanged; // Glue node will be appended late. - for(unsigned i = 0; i < NumOps -1; ++i) { + for(unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) { SDValue op = N->getOperand(i); AsmNodeOperands.push_back(op); @@ -3480,17 +3520,38 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ else continue; + // Immediate operands to inline asm in the SelectionDAG are modeled with + // two operands. The first is a constant of value InlineAsm::Kind_Imm, and + // the second is a constant with the value of the immediate. If we get here + // and we have a Kind_Imm, skip the next operand, and continue. 
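// For example, an "i" constraint with value 42 reaches this loop as two
// operands: the Kind_Imm flag word followed by a constant 42; both are
// copied through unchanged by the code below.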
+ if (Kind == InlineAsm::Kind_Imm) { + SDValue op = N->getOperand(++i); + AsmNodeOperands.push_back(op); + continue; + } + + unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag); + if (NumRegs) + OpChanged.push_back(false); + + unsigned DefIdx = 0; + bool IsTiedToChangedOp = false; + // If it's a use that is tied with a previous def, it has no + // reg class constraint. + if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx)) + IsTiedToChangedOp = OpChanged[DefIdx]; + if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef && Kind != InlineAsm::Kind_RegDefEarlyClobber) continue; - unsigned RegNum = InlineAsm::getNumOperandRegisters(Flag); unsigned RC; bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC); - if (!HasRC || RC != ARM::GPRRegClassID || RegNum != 2) + if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID)) + || NumRegs != 2) continue; - assert((i+2 < NumOps-1) && "Invalid number of operands in inline asm"); + assert((i+2 < NumOps) && "Invalid number of operands in inline asm"); SDValue V0 = N->getOperand(i+1); SDValue V1 = N->getOperand(i+2); unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg(); @@ -3551,6 +3612,7 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ Changed = true; if(PairedReg.getNode()) { + OpChanged[OpChanged.size() -1 ] = true; Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/); Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID); // Replace the current flag. @@ -3563,7 +3625,8 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ } } - AsmNodeOperands.push_back(Glue); + if (Glue.getNode()) + AsmNodeOperands.push_back(Glue); if (!Changed) return NULL; diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index ec0e9c2..caec11e 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -74,7 +74,7 @@ namespace { class ARMCCState : public CCState { public: ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF, - const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs, + const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs, LLVMContext &C, ParmContext PC) : CCState(CC, isVarArg, MF, TM, locs, C) { assert(((PC == Call) || (PC == Prologue)) && @@ -693,10 +693,36 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setOperationAction(ISD::SDIV, MVT::i32, Expand); setOperationAction(ISD::UDIV, MVT::i32, Expand); } + + // FIXME: Also set divmod for SREM on EABI setOperationAction(ISD::SREM, MVT::i32, Expand); setOperationAction(ISD::UREM, MVT::i32, Expand); - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); + // Register based DivRem for AEABI (RTABI 4.2) + if (Subtarget->isTargetAEABI()) { + setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod"); + setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod"); + setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod"); + setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod"); + setLibcallName(RTLIB::UDIVREM_I8, "__aeabi_uidivmod"); + setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod"); + setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod"); + setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod"); + + setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS); + setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS); + setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS); + setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS); + setLibcallCallingConv(RTLIB::UDIVREM_I8, 
CallingConv::ARM_AAPCS); + setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS); + setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS); + setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS); + + setOperationAction(ISD::SDIVREM, MVT::i32, Custom); + setOperationAction(ISD::UDIVREM, MVT::i32, Custom); + } else { + setOperationAction(ISD::SDIVREM, MVT::i32, Expand); + setOperationAction(ISD::UDIVREM, MVT::i32, Expand); + } setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); setOperationAction(ISD::ConstantPool, MVT::i32, Custom); @@ -717,8 +743,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) if (!Subtarget->isTargetDarwin()) { // Non-Darwin platforms may return values in these registers via the // personality function. - setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); setExceptionPointerRegister(ARM::R0); setExceptionSelectorRegister(ARM::R1); } @@ -1068,6 +1092,19 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; + + case ARMISD::ATOMADD64_DAG: return "ATOMADD64_DAG"; + case ARMISD::ATOMSUB64_DAG: return "ATOMSUB64_DAG"; + case ARMISD::ATOMOR64_DAG: return "ATOMOR64_DAG"; + case ARMISD::ATOMXOR64_DAG: return "ATOMXOR64_DAG"; + case ARMISD::ATOMAND64_DAG: return "ATOMAND64_DAG"; + case ARMISD::ATOMNAND64_DAG: return "ATOMNAND64_DAG"; + case ARMISD::ATOMSWAP64_DAG: return "ATOMSWAP64_DAG"; + case ARMISD::ATOMCMPXCHG64_DAG: return "ATOMCMPXCHG64_DAG"; + case ARMISD::ATOMMIN64_DAG: return "ATOMMIN64_DAG"; + case ARMISD::ATOMUMIN64_DAG: return "ATOMUMIN64_DAG"; + case ARMISD::ATOMMAX64_DAG: return "ATOMMAX64_DAG"; + case ARMISD::ATOMUMAX64_DAG: return "ATOMUMAX64_DAG"; } } @@ -1332,7 +1369,7 @@ void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG, RegsToPassVector &RegsToPass, CCValAssign &VA, CCValAssign &NextVA, SDValue &StackPtr, - SmallVector<SDValue, 8> &MemOpChains, + SmallVectorImpl<SDValue> &MemOpChains, ISD::ArgFlagsTy Flags) const { SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, @@ -1360,9 +1397,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -1711,10 +1748,17 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, const uint32_t *Mask; const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI); - if (isThisReturn) - // For 'this' returns, use the R0-preserving mask + if (isThisReturn) { + // For 'this' returns, use the R0-preserving mask if applicable Mask = ARI->getThisReturnPreservedMask(CallConv); - else + if (!Mask) { + // Set isThisReturn to false if the calling convention is not one that + // allows 'returned' to be modeled in this way, so LowerCallResult does + // not try to pass 'this' straight through + isThisReturn = false; + Mask = 
ARI->getCallPreservedMask(CallConv); + } + } else Mask = ARI->getCallPreservedMask(CallConv); assert(Mask && "Missing call preserved mask for calling convention"); @@ -2550,8 +2594,18 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, DAG.getConstant(0, MVT::i32)); } + ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); + AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); + unsigned Domain = ARM_MB::ISH; + if (Subtarget->isSwift() && Ord == Release) { + // Swift happens to implement ISHST barriers in a way that's compatible with + // Release semantics but weaker than ISH so we'd be fools not to use + // it. Beware: other processors probably don't! + Domain = ARM_MB::ISHST; + } + return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), - DAG.getConstant(ARM_MB::ISH, MVT::i32)); + DAG.getConstant(Domain, MVT::i32)); } static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, @@ -2717,7 +2771,7 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, lastRegToSaveIndex = REnd - ARM::R0; } else { firstRegToSaveIndex = CCInfo.getFirstUnallocated - (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); + (GPRArgRegs, array_lengthof(GPRArgRegs)); lastRegToSaveIndex = 4; } @@ -4620,7 +4674,9 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, if (ValueCounts.size() == 0) return DAG.getUNDEF(VT); - if (isOnlyLowElement) + // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. + // Keep going if we are hitting this case. + if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); unsigned EltSize = VT.getVectorElementType().getSizeInBits(); @@ -4719,6 +4775,24 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, return DAG.getNode(ISD::BITCAST, dl, VT, Val); } + // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we + // know the default expansion would otherwise fall back on something even + // worse. For a vector with one or two non-undef values, that's + // scalar_to_vector for the elements followed by a shuffle (provided the + // shuffle is valid for the target) and materialization element by element + // on the stack followed by a load for everything else. + if (!isConstant && !usesOnlyOneValue) { + SDValue Vec = DAG.getUNDEF(VT); + for (unsigned i = 0 ; i < NumElts; ++i) { + SDValue V = Op.getOperand(i); + if (V.getOpcode() == ISD::UNDEF) + continue; + SDValue LaneIdx = DAG.getConstant(i, MVT::i32); + Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); + } + return Vec; + } + return SDValue(); } @@ -5830,6 +5904,8 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); case ISD::ATOMIC_LOAD: case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); + case ISD::SDIVREM: + case ISD::UDIVREM: return LowerDivRem(Op, DAG); } } @@ -7948,8 +8024,11 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, assert(AddcNode->getNumValues() == 2 && AddcNode->getValueType(0) == MVT::i32 && - AddcNode->getValueType(1) == MVT::Glue && - "Expect ADDC with two result values: i32, glue"); + "Expect ADDC with two result values. First: i32"); + + // Check that we have a glued ADDC node. + if (AddcNode->getValueType(1) != MVT::Glue) + return SDValue(); // Check that the ADDC adds the low result of the S/UMUL_LOHI. 
if (AddcOp0->getOpcode() != ISD::UMUL_LOHI && @@ -8328,22 +8407,29 @@ static SDValue PerformORCombine(SDNode *N, unsigned SplatBitSize; bool HasAnyUndefs; + APInt SplatBits0, SplatBits1; BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); - APInt SplatBits0; + BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); + // Ensure that the second operands of both ands are constants if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, - HasAnyUndefs) && !HasAnyUndefs) { - BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); - APInt SplatBits1; - if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, - HasAnyUndefs) && !HasAnyUndefs && - SplatBits0 == ~SplatBits1) { - // Canonicalize the vector type to make instruction selection simpler. - EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; - SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, - N0->getOperand(1), N0->getOperand(0), - N1->getOperand(0)); - return DAG.getNode(ISD::BITCAST, dl, VT, Result); - } + HasAnyUndefs) && !HasAnyUndefs) { + if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, + HasAnyUndefs) && !HasAnyUndefs) { + // Ensure that the bit width of the constants is the same and that + // the splat arguments are logical inverses as per the pattern we + // are trying to simplify. + if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && + SplatBits0 == ~SplatBits1) { + // Canonicalize the vector type to make instruction selection + // simpler. + EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; + SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, + N0->getOperand(1), + N0->getOperand(0), + N1->getOperand(0)); + return DAG.getNode(ISD::BITCAST, dl, VT, Result); + } + } } } @@ -8753,6 +8839,98 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N, return DAG.getNode(ISD::BITCAST, dl, VT, BV); } +/// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. +static SDValue +PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { + // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. + // At that time, we may have inserted bitcasts from integer to float. + // If these bitcasts have survived DAGCombine, change the lowering of this + // BUILD_VECTOR into something more vector friendly, i.e., one that does not + // force the use of floating point types. + + // Make sure we can change the type of the vector. + // This is possible iff: + // 1. The vector is only used in a bitcast to an integer type. I.e., + // 1.1. Vector is used only once. + // 1.2. Use is a bit convert to an integer type. + // 2. The size of its operands is 32 bits (64 bits are not legal). + EVT VT = N->getValueType(0); + EVT EltVT = VT.getVectorElementType(); + + // Check 1.1. and 2. + if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) + return SDValue(); + + // By construction, the input type must be float. + assert(EltVT == MVT::f32 && "Unexpected type!"); + + // Check 1.2. + SDNode *Use = *N->use_begin(); + if (Use->getOpcode() != ISD::BITCAST || + Use->getValueType(0).isFloatingPoint()) + return SDValue(); + + // Check profitability. + // The model is: if more than half of the relevant operands are bitcast from + // i32, turn the build_vector into a sequence of insert_vector_elt. + // Relevant operands are everything that is not statically + // (i.e., at compile time) bitcasted. 
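// For instance, in a v4f32 ARMISD::BUILD_VECTOR with two (bitcast i32 ...)
// operands, one undef operand and one plain f32 value, the counts below come
// out as NumOfRelevantElts = 3 and NumOfBitCastedElts = 2; since 2 > 3/2,
// the combine is considered profitable.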
+ unsigned NumOfBitCastedElts = 0; + unsigned NumElts = VT.getVectorNumElements(); + unsigned NumOfRelevantElts = NumElts; + for (unsigned Idx = 0; Idx < NumElts; ++Idx) { + SDValue Elt = N->getOperand(Idx); + if (Elt->getOpcode() == ISD::BITCAST) { + // Assume only bit cast to i32 will go away. + if (Elt->getOperand(0).getValueType() == MVT::i32) + ++NumOfBitCastedElts; + } else if (Elt.getOpcode() == ISD::UNDEF || isa<ConstantSDNode>(Elt)) + // Constants are statically casted, thus do not count them as + // relevant operands. + --NumOfRelevantElts; + } + + // Check if more than half of the elements require a non-free bitcast. + if (NumOfBitCastedElts <= NumOfRelevantElts / 2) + return SDValue(); + + SelectionDAG &DAG = DCI.DAG; + // Create the new vector type. + EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); + // Check if the type is legal. + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); + if (!TLI.isTypeLegal(VecVT)) + return SDValue(); + + // Combine: + // ARMISD::BUILD_VECTOR E1, E2, ..., EN. + // => BITCAST INSERT_VECTOR_ELT + // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), + // (BITCAST EN), N. + SDValue Vec = DAG.getUNDEF(VecVT); + SDLoc dl(N); + for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { + SDValue V = N->getOperand(Idx); + if (V.getOpcode() == ISD::UNDEF) + continue; + if (V.getOpcode() == ISD::BITCAST && + V->getOperand(0).getValueType() == MVT::i32) + // Fold obvious case. + V = V.getOperand(0); + else { + V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); + // Make the DAGCombiner fold the bitcasts. + DCI.AddToWorklist(V.getNode()); + } + SDValue LaneIdx = DAG.getConstant(Idx, MVT::i32); + Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); + } + Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); + // Make the DAGCombiner fold the bitcasts. + DCI.AddToWorklist(Vec.getNode()); + return Vec; +} + /// PerformInsertEltCombine - Target-specific dag combine xforms for /// ISD::INSERT_VECTOR_ELT. static SDValue PerformInsertEltCombine(SDNode *N, @@ -9131,12 +9309,27 @@ static SDValue PerformVCVTCombine(SDNode *N, !isConstVecPow2(ConstVec, isSigned, C)) return SDValue(); + MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); + MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); + if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) { + // These instructions only exist converting from f32 to i32. We can handle + // smaller integers by generating an extra truncate, but larger ones would + // be lossy. + return SDValue(); + } + unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : Intrinsic::arm_neon_vcvtfp2fxu; - return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), - N->getValueType(0), - DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, - DAG.getConstant(Log2_64(C), MVT::i32)); + unsigned NumLanes = Op.getValueType().getVectorNumElements(); + SDValue FixConv = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), + NumLanes == 2 ? 
MVT::v2i32 : MVT::v4i32, + DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, + DAG.getConstant(Log2_64(C), MVT::i32)); + + if (IntTy.getSizeInBits() < FloatTy.getSizeInBits()) + FixConv = DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0), FixConv); + + return FixConv; } /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) @@ -9167,12 +9360,28 @@ static SDValue PerformVDIVCombine(SDNode *N, !isConstVecPow2(ConstVec, isSigned, C)) return SDValue(); + MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); + MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); + if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) { + // These instructions only exist converting from i32 to f32. We can handle + // smaller integers by generating an extra extend, but larger ones would + // be lossy. + return SDValue(); + } + + SDValue ConvInput = Op.getOperand(0); + unsigned NumLanes = Op.getValueType().getVectorNumElements(); + if (IntTy.getSizeInBits() < FloatTy.getSizeInBits()) + ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, + SDLoc(N), NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, + ConvInput); + unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : Intrinsic::arm_neon_vcvtfxu2fp; return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), Op.getValueType(), DAG.getConstant(IntrinsicOpcode, MVT::i32), - Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); + ConvInput, DAG.getConstant(Log2_64(C), MVT::i32)); } /// getVShiftImm - Check if this is a valid build_vector for the immediate @@ -9658,6 +9867,8 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, case ARMISD::VLD3DUP: case ARMISD::VLD4DUP: return CombineBaseUpdate(N, DCI); + case ARMISD::BUILD_VECTOR: + return PerformARMBUILD_VECTORCombine(N, DCI); case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { @@ -9782,6 +9993,21 @@ bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { return false; } +bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { + if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) + return false; + + if (!isTypeLegal(EVT::getEVT(Ty1))) + return false; + + assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); + + // Assuming the caller doesn't have a zeroext or signext return parameter, + // truncation all the way down to i1 is valid. + return true; +} + + static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { if (V < 0) return false; @@ -10181,9 +10407,19 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth) const { - KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); + unsigned BitWidth = KnownOne.getBitWidth(); + KnownZero = KnownOne = APInt(BitWidth, 0); switch (Op.getOpcode()) { default: break; + case ARMISD::ADDC: + case ARMISD::ADDE: + case ARMISD::SUBC: + case ARMISD::SUBE: + // These nodes' second result is a boolean + if (Op.getResNo() == 0) + break; + KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); + break; case ARMISD::CMOV: { // Bits are known zero/one if known on the LHS and RHS. 
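// (The carry/borrow results handled above can only be 0 or 1, which is
// exactly what the getHighBitsSet mask encodes.) For CMOV, if e.g. both
// inputs have their top 16 bits known zero, so does the result: the final
// known bits are the intersection of the two sides computed below.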
DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); @@ -10297,7 +10533,7 @@ ARMTargetLowering::getSingleConstraintMatchWeight( typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; RCPair ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { // GCC ARM Constraint Letters switch (Constraint[0]) { @@ -10506,6 +10742,54 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); } +SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { + assert(Subtarget->isTargetAEABI() && "Register-based DivRem lowering only"); + unsigned Opcode = Op->getOpcode(); + assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && + "Invalid opcode for Div/Rem lowering"); + bool isSigned = (Opcode == ISD::SDIVREM); + EVT VT = Op->getValueType(0); + Type *Ty = VT.getTypeForEVT(*DAG.getContext()); + + RTLIB::Libcall LC; + switch (VT.getSimpleVT().SimpleTy) { + default: llvm_unreachable("Unexpected request for libcall!"); + case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; + case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; + case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; + case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; + } + + SDValue InChain = DAG.getEntryNode(); + + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) { + EVT ArgVT = Op->getOperand(i).getValueType(); + Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); + Entry.Node = Op->getOperand(i); + Entry.Ty = ArgTy; + Entry.isSExt = isSigned; + Entry.isZExt = !isSigned; + Args.push_back(Entry); + } + + SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), + getPointerTy()); + + Type *RetTy = (Type*)StructType::get(Ty, Ty, NULL); + + SDLoc dl(Op); + TargetLowering:: + CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, true, + 0, getLibcallCallingConv(LC), /*isTailCall=*/false, + /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, + Callee, Args, DAG, dl); + std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); + + return CallInfo.first; +} + bool ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { // The ARM target isn't yet aware of offsets. 
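The LowerDivRem hook added above leans on the AEABI run-time contract: __aeabi_idivmod and __aeabi_uidivmod return the quotient and the remainder together (in r0/r1 under AAPCS), which is why the libcall's return type is built as StructType::get(Ty, Ty, NULL) and a single call can feed both an sdiv and an srem. A rough sketch of that contract; the struct and wrapper names below are illustrative, not from the patch:

struct IDivModResult { int quot; int rem; }; // returned in r0/r1 under AAPCS
extern "C" IDivModResult __aeabi_idivmod(int numerator, int denominator);

// What a lowered (sdiv, srem) pair amounts to after this change:
void divAndMod(int a, int b, int &q, int &r) {
  IDivModResult res = __aeabi_idivmod(a, b); // one call yields both values
  q = res.quot;
  r = res.rem;
}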
@@ -10591,6 +10875,30 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, Info.writeMem = true; return true; } + case Intrinsic::arm_ldrex: { + PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.memVT = MVT::getVT(PtrTy->getElementType()); + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType()); + Info.vol = true; + Info.readMem = true; + Info.writeMem = false; + return true; + } + case Intrinsic::arm_strex: { + PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.memVT = MVT::getVT(PtrTy->getElementType()); + Info.ptrVal = I.getArgOperand(1); + Info.offset = 0; + Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType()); + Info.vol = true; + Info.readMem = false; + Info.writeMem = true; + return true; + } case Intrinsic::arm_strexd: { Info.opc = ISD::INTRINSIC_W_CHAIN; Info.memVT = MVT::i64; diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 2b65019..44c769f 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -298,6 +298,9 @@ namespace llvm { using TargetLowering::isZExtFree; virtual bool isZExtFree(SDValue Val, EVT VT2) const; + virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const; + + /// isLegalAddressingMode - Return true if the addressing mode represented /// by AM is legal for this target, for a load/store of the specified type. virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const; @@ -349,7 +352,7 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; + MVT VT) const; /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. If hasMemory is @@ -417,7 +420,7 @@ namespace llvm { RegsToPassVector &RegsToPass, CCValAssign &VA, CCValAssign &NextVA, SDValue &StackPtr, - SmallVector<SDValue, 8> &MemOpChains, + SmallVectorImpl<SDValue> &MemOpChains, ISD::ArgFlagsTy Flags) const; SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, SDValue &Root, SelectionDAG &DAG, @@ -457,6 +460,18 @@ namespace llvm { const ARMSubtarget *ST) const; SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST) const; + SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const; + + /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster + /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be + /// expanded to FMAs when this method returns true, otherwise fmuladd is + /// expanded to fmul + fadd. + /// + /// ARM supports both fused and unfused multiply-add operations; we already + /// lower a pair of fmul and fadd to the latter so it's not clear that there + /// would be a gain or that the gain would be worthwhile enough to risk + /// correctness bugs. 
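/// For example, (fadd (fmul a, b), c) is already matched to the unfused
/// VMLA-style patterns, so returning false here chiefly means that
/// @llvm.fmuladd keeps expanding to fmul + fadd rather than to a fused VFMA.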
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; } SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td index bd9a212..1349476 100644 --- a/lib/Target/ARM/ARMInstrFormats.td +++ b/lib/Target/ARM/ARMInstrFormats.td @@ -1230,8 +1230,9 @@ class T2JTI<dag oops, dag iops, InstrItinClass itin, : Thumb2XI<oops, iops, AddrModeNone, 0, itin, asm, "", pattern>; // Move to/from coprocessor instructions -class T2Cop<bits<4> opc, dag oops, dag iops, string asm, list<dag> pattern> - : T2XI <oops, iops, NoItinerary, asm, pattern>, Requires<[IsThumb2]> { +class T2Cop<bits<4> opc, dag oops, dag iops, string opcstr, string asm, + list<dag> pattern> + : T2I <oops, iops, NoItinerary, opcstr, asm, pattern>, Requires<[IsThumb2]> { let Inst{31-28} = opc; } @@ -1521,6 +1522,32 @@ class ADuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4, let Inst{4} = opcod5; } +// Double precision, unary, not-predicated +class ADuInp<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4, + bit opcod5, dag oops, dag iops, InstrItinClass itin, + string asm, list<dag> pattern> + : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone, VFPUnaryFrm, itin, asm, "", pattern> { + // Instruction operands. + bits<5> Dd; + bits<5> Dm; + + let Inst{31-28} = 0b1111; + + // Encode instruction operands. + let Inst{3-0} = Dm{3-0}; + let Inst{5} = Dm{4}; + let Inst{15-12} = Dd{3-0}; + let Inst{22} = Dd{4}; + + let Inst{27-23} = opcod1; + let Inst{21-20} = opcod2; + let Inst{19-16} = opcod3; + let Inst{11-9} = 0b101; + let Inst{8} = 1; // Double precision + let Inst{7-6} = opcod4; + let Inst{4} = opcod5; +} + // Double precision, binary class ADbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, dag iops, InstrItinClass itin, string opc, string asm, @@ -1547,7 +1574,36 @@ class ADbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, let Inst{4} = op4; } -// Single precision, unary +// FP, binary, not predicated +class ADbInp<bits<5> opcod1, bits<2> opcod2, bit opcod3, dag oops, dag iops, + InstrItinClass itin, string asm, list<dag> pattern> + : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone, VFPBinaryFrm, itin, + asm, "", pattern> +{ + // Instruction operands. + bits<5> Dd; + bits<5> Dn; + bits<5> Dm; + + let Inst{31-28} = 0b1111; + + // Encode instruction operands. + let Inst{3-0} = Dm{3-0}; + let Inst{5} = Dm{4}; + let Inst{19-16} = Dn{3-0}; + let Inst{7} = Dn{4}; + let Inst{15-12} = Dd{3-0}; + let Inst{22} = Dd{4}; + + let Inst{27-23} = opcod1; + let Inst{21-20} = opcod2; + let Inst{11-9} = 0b101; + let Inst{8} = 1; // double precision + let Inst{6} = opcod3; + let Inst{4} = 0; +} + +// Single precision, unary, predicated class ASuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4, bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern> @@ -1571,6 +1627,33 @@ class ASuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4, let Inst{4} = opcod5; } +// Single precision, unary, non-predicated +class ASuInp<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4, + bit opcod5, dag oops, dag iops, InstrItinClass itin, + string asm, list<dag> pattern> + : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone, + VFPUnaryFrm, itin, asm, "", pattern> { + // Instruction operands. + bits<5> Sd; + bits<5> Sm; + + let Inst{31-28} = 0b1111; + + // Encode instruction operands. 
+ let Inst{3-0} = Sm{4-1}; + let Inst{5} = Sm{0}; + let Inst{15-12} = Sd{4-1}; + let Inst{22} = Sd{0}; + + let Inst{27-23} = opcod1; + let Inst{21-20} = opcod2; + let Inst{19-16} = opcod3; + let Inst{11-9} = 0b101; + let Inst{8} = 0; // Single precision + let Inst{7-6} = opcod4; + let Inst{4} = opcod5; +} + // Single precision unary, if no NEON. Same as ASuI except not available if // NEON is enabled. class ASuIn<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4, @@ -1606,6 +1689,35 @@ class ASbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, dag iops, let Inst{4} = op4; } +// Single precision, binary, not predicated +class ASbInp<bits<5> opcod1, bits<2> opcod2, bit opcod3, dag oops, dag iops, + InstrItinClass itin, string asm, list<dag> pattern> + : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone, + VFPBinaryFrm, itin, asm, "", pattern> +{ + // Instruction operands. + bits<5> Sd; + bits<5> Sn; + bits<5> Sm; + + let Inst{31-28} = 0b1111; + + // Encode instruction operands. + let Inst{3-0} = Sm{4-1}; + let Inst{5} = Sm{0}; + let Inst{19-16} = Sn{4-1}; + let Inst{7} = Sn{0}; + let Inst{15-12} = Sd{4-1}; + let Inst{22} = Sd{0}; + + let Inst{27-23} = opcod1; + let Inst{21-20} = opcod2; + let Inst{11-9} = 0b101; + let Inst{8} = 0; // Single precision + let Inst{6} = opcod3; + let Inst{4} = 0; +} + // Single precision binary, if no NEON. Same as ASbI except not available if // NEON is enabled. class ASbIn<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, @@ -1718,6 +1830,21 @@ class NeonXI<dag oops, dag iops, AddrMode am, IndexMode im, Format f, let DecoderNamespace = "NEON"; } +// Same as NeonI except it is not predicated +class NeonInp<dag oops, dag iops, AddrMode am, IndexMode im, Format f, + InstrItinClass itin, string opc, string dt, string asm, string cstr, + list<dag> pattern> + : InstARM<am, 4, im, f, NeonDomain, cstr, itin> { + let OutOperandList = oops; + let InOperandList = iops; + let AsmString = !strconcat(opc, ".", dt, "\t", asm); + let Pattern = pattern; + list<Predicate> Predicates = [HasNEON]; + let DecoderNamespace = "NEON"; + + let Inst{31-28} = 0b1111; +} + class NLdSt<bit op23, bits<2> op21_20, bits<4> op11_8, bits<4> op7_4, dag oops, dag iops, InstrItinClass itin, string opc, string dt, string asm, string cstr, list<dag> pattern> @@ -1837,6 +1964,35 @@ class N2V<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16, let Inst{5} = Vm{4}; } +// Same as N2V but not predicated. +class N2Vnp<bits<2> op17_16, bits<3> op10_8, bit op7, bit op6, + dag oops, dag iops, InstrItinClass itin, string OpcodeStr, + string Dt, ValueType ResTy, ValueType OpTy, list<dag> pattern> + : NeonInp<oops, iops, AddrModeNone, IndexModeNone, N2RegFrm, itin, + OpcodeStr, Dt, "$Vd, $Vm", "", pattern> { + bits<5> Vd; + bits<5> Vm; + + // Encode instruction operands + let Inst{22} = Vd{4}; + let Inst{15-12} = Vd{3-0}; + let Inst{5} = Vm{4}; + let Inst{3-0} = Vm{3-0}; + + // Encode constant bits + let Inst{27-23} = 0b00111; + let Inst{21-20} = 0b11; + let Inst{19-18} = 0b10; + let Inst{17-16} = op17_16; + let Inst{11} = 0; + let Inst{10-8} = op10_8; + let Inst{7} = op7; + let Inst{6} = op6; + let Inst{4} = 0; + + let DecoderNamespace = "NEON"; +} + // Same as N2V except it doesn't have a datatype suffix. 
class N2VX<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16, bits<5> op11_7, bit op6, bit op4, @@ -1918,6 +2074,32 @@ class N3V<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4, let Inst{5} = Vm{4}; } +class N3Vnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6, + bit op4, dag oops, dag iops,Format f, InstrItinClass itin, + string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy, + SDPatternOperator IntOp, bit Commutable, list<dag> pattern> + : NeonInp<oops, iops, AddrModeNone, IndexModeNone, f, itin, OpcodeStr, + Dt, "$Vd, $Vn, $Vm", "", pattern> { + bits<5> Vd; + bits<5> Vn; + bits<5> Vm; + + // Encode instruction operands + let Inst{22} = Vd{4}; + let Inst{15-12} = Vd{3-0}; + let Inst{19-16} = Vn{3-0}; + let Inst{7} = Vn{4}; + let Inst{5} = Vm{4}; + let Inst{3-0} = Vm{3-0}; + + // Encode constant bits + let Inst{27-23} = op27_23; + let Inst{21-20} = op21_20; + let Inst{11-8} = op11_8; + let Inst{6} = op6; + let Inst{4} = op4; +} + class N3VLane32<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4, dag oops, dag iops, Format f, InstrItinClass itin, string opc, string dt, string asm, string cstr, diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp index 8062111..8cdb853 100644 --- a/lib/Target/ARM/ARMInstrInfo.cpp +++ b/lib/Target/ARM/ARMInstrInfo.cpp @@ -106,13 +106,14 @@ namespace { if (TM->getRelocationModel() != Reloc::PIC_) return false; - LLVMContext* Context = &MF.getFunction()->getContext(); - GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false, - GlobalValue::ExternalLinkage, 0, - "_GLOBAL_OFFSET_TABLE_"); - unsigned Id = AFI->createPICLabelUId(); - ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id); - unsigned Align = TM->getDataLayout()->getPrefTypeAlignment(GV->getType()); + LLVMContext *Context = &MF.getFunction()->getContext(); + unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); + unsigned PCAdj = TM->getSubtarget<ARMSubtarget>().isThumb() ? 
4 : 8; + ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create( + *Context, "_GLOBAL_OFFSET_TABLE_", ARMPCLabelIndex, PCAdj); + + unsigned Align = TM->getDataLayout() + ->getPrefTypeAlignment(Type::getInt32PtrTy(*Context)); unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(CPV, Align); MachineBasicBlock &FirstMBB = MF.front(); diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index da815d5..c243402 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -194,6 +194,8 @@ def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">, def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">; def HasV7 : Predicate<"Subtarget->hasV7Ops()">, AssemblerPredicate<"HasV7Ops", "armv7">; +def HasV8 : Predicate<"Subtarget->hasV8Ops()">, + AssemblerPredicate<"HasV8Ops", "armv8">; def NoVFP : Predicate<"!Subtarget->hasVFP2()">; def HasVFP2 : Predicate<"Subtarget->hasVFP2()">, AssemblerPredicate<"FeatureVFP2", "VFP2">; @@ -201,6 +203,8 @@ def HasVFP3 : Predicate<"Subtarget->hasVFP3()">, AssemblerPredicate<"FeatureVFP3", "VFP3">; def HasVFP4 : Predicate<"Subtarget->hasVFP4()">, AssemblerPredicate<"FeatureVFP4", "VFP4">; +def HasV8FP : Predicate<"Subtarget->hasV8FP()">, + AssemblerPredicate<"FeatureV8FP", "V8FP">; def HasNEON : Predicate<"Subtarget->hasNEON()">, AssemblerPredicate<"FeatureNEON", "NEON">; def HasFP16 : Predicate<"Subtarget->hasFP16()">, @@ -258,7 +262,9 @@ def UseMulOps : Predicate<"Subtarget->useMulOps()">; def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion ==" " FPOpFusion::Fast) && " "!Subtarget->isTargetDarwin()">; -def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4() || " +def DontUseFusedMAC : Predicate<"!(TM.Options.AllowFPOpFusion ==" + " FPOpFusion::Fast &&" + " Subtarget->hasVFP4()) || " "Subtarget->isTargetDarwin()">; // VGETLNi32 is microcoded on Swift - prefer VMOV. @@ -275,8 +281,8 @@ def HasSlowVDUP32 : Predicate<"Subtarget->isSwift()">; def UseVMOVSR : Predicate<"Subtarget->isCortexA9() || !Subtarget->useNEONForSinglePrecisionFP()">; def DontUseVMOVSR : Predicate<"!Subtarget->isCortexA9() && Subtarget->useNEONForSinglePrecisionFP()">; -def IsLE : Predicate<"TLI->isLittleEndian()">; -def IsBE : Predicate<"TLI->isBigEndian()">; +def IsLE : Predicate<"getTargetLowering()->isLittleEndian()">; +def IsBE : Predicate<"getTargetLowering()->isBigEndian()">; //===----------------------------------------------------------------------===// // ARM Flag Definitions. @@ -456,7 +462,7 @@ def AdrLabelAsmOperand : AsmOperandClass { let Name = "AdrLabel"; } def adrlabel : Operand<i32> { let EncoderMethod = "getAdrLabelOpValue"; let ParserMatchClass = AdrLabelAsmOperand; - let PrintMethod = "printAdrLabelOperand"; + let PrintMethod = "printAdrLabelOperand<0>"; } def neon_vcvt_imm32 : Operand<i32> { @@ -1007,11 +1013,6 @@ def p_imm : Operand<i32> { let DecoderMethod = "DecodeCoprocessor"; } -def pf_imm : Operand<i32> { - let PrintMethod = "printPImmediate"; - let ParserMatchClass = CoprocNumAsmOperand; -} - def CoprocRegAsmOperand : AsmOperandClass { let Name = "CoprocReg"; let ParserMethod = "parseCoprocRegOperand"; @@ -1735,12 +1736,13 @@ def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel", // The 16-bit operand $val can be used by a debugger to store more information // about the breakpoint. 
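// Note: with the unpredicated base class below, bits 31-28 are hard-wired to
// 0b1110 (AL), so for example "bkpt #0" always encodes as 0xe1200070 and can
// no longer take a condition code.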
-def BKPT : AI<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary, - "bkpt", "\t$val", []>, Requires<[IsARM]> { +def BKPT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary, + "bkpt", "\t$val", []>, Requires<[IsARM]> { bits<16> val; let Inst{3-0} = val{3-0}; let Inst{19-8} = val{15-4}; let Inst{27-20} = 0b00010010; + let Inst{31-28} = 0xe; // AL let Inst{7-4} = 0b0111; } @@ -2293,7 +2295,6 @@ multiclass AI2_ldridx<bit isByte, string opc, let Inst{19-16} = addr{16-13}; let Inst{11-0} = addr{11-0}; let DecoderMethod = "DecodeLDRPreImm"; - let AsmMatchConverter = "cvtLdWriteBackRegAddrModeImm12"; } def _PRE_REG : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb), @@ -2306,7 +2307,6 @@ multiclass AI2_ldridx<bit isByte, string opc, let Inst{11-0} = addr{11-0}; let Inst{4} = 0; let DecoderMethod = "DecodeLDRPreReg"; - let AsmMatchConverter = "cvtLdWriteBackRegAddrMode2"; } def _POST_REG : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb), @@ -2364,7 +2364,6 @@ multiclass AI3_ldridx<bits<4> op, string opc, InstrItinClass itin> { let Inst{19-16} = addr{12-9}; // Rn let Inst{11-8} = addr{7-4}; // imm7_4/zero let Inst{3-0} = addr{3-0}; // imm3_0/Rm - let AsmMatchConverter = "cvtLdWriteBackRegAddrMode3"; let DecoderMethod = "DecodeAddrMode3Instruction"; } def _POST : AI3ldstidx<op, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb), @@ -2400,7 +2399,6 @@ def LDRD_PRE : AI3ldstidx<0b1101, 0, 1, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb), let Inst{11-8} = addr{7-4}; // imm7_4/zero let Inst{3-0} = addr{3-0}; // imm3_0/Rm let DecoderMethod = "DecodeAddrMode3Instruction"; - let AsmMatchConverter = "cvtLdrdPre"; } def LDRD_POST: AI3ldstidx<0b1101, 0, 0, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb), (ins addr_offset_none:$addr, am3offset:$offset), @@ -2503,7 +2501,6 @@ multiclass AI3ldrT<bits<4> op, string opc> { let Inst{22} = 1; let Inst{11-8} = offset{7-4}; let Inst{3-0} = offset{3-0}; - let AsmMatchConverter = "cvtLdExtTWriteBackImm"; } def r : AI3ldstidxT<op, 1, (outs GPRnopc:$Rt, GPRnopc:$base_wb), (ins addr_offset_none:$addr, postidx_reg:$Rm), @@ -2515,7 +2512,6 @@ multiclass AI3ldrT<bits<4> op, string opc> { let Inst{11-8} = 0; let Unpredictable{11-8} = 0b1111; let Inst{3-0} = Rm{3-0}; - let AsmMatchConverter = "cvtLdExtTWriteBackReg"; let DecoderMethod = "DecodeLDR"; } } @@ -2553,7 +2549,6 @@ multiclass AI2_stridx<bit isByte, string opc, let Inst{23} = addr{12}; // U (add = ('U' == 1)) let Inst{19-16} = addr{16-13}; // Rn let Inst{11-0} = addr{11-0}; // imm12 - let AsmMatchConverter = "cvtStWriteBackRegAddrModeImm12"; let DecoderMethod = "DecodeSTRPreImm"; } @@ -2567,7 +2562,6 @@ multiclass AI2_stridx<bit isByte, string opc, let Inst{19-16} = addr{16-13}; // Rn let Inst{11-0} = addr{11-0}; let Inst{4} = 0; // Inst{4} = 0 - let AsmMatchConverter = "cvtStWriteBackRegAddrMode2"; let DecoderMethod = "DecodeSTRPreReg"; } def _POST_REG : AI2ldstidx<0, isByte, 0, (outs GPR:$Rn_wb), @@ -2676,7 +2670,6 @@ def STRH_PRE : AI3ldstidx<0b1011, 0, 1, (outs GPR:$Rn_wb), let Inst{19-16} = addr{12-9}; // Rn let Inst{11-8} = addr{7-4}; // imm7_4/zero let Inst{3-0} = addr{3-0}; // imm3_0/Rm - let AsmMatchConverter = "cvtStWriteBackRegAddrMode3"; let DecoderMethod = "DecodeAddrMode3Instruction"; } @@ -2710,7 +2703,6 @@ def STRD_PRE : AI3ldstidx<0b1111, 0, 1, (outs GPR:$Rn_wb), let Inst{11-8} = addr{7-4}; // imm7_4/zero let Inst{3-0} = addr{3-0}; // imm3_0/Rm let DecoderMethod = "DecodeAddrMode3Instruction"; - let AsmMatchConverter = "cvtStrdPre"; } def STRD_POST: AI3ldstidx<0b1111, 0, 0, (outs GPR:$Rn_wb), @@ -2817,7 +2809,6 @@ 
multiclass AI3strT<bits<4> op, string opc> { let Inst{22} = 1; let Inst{11-8} = offset{7-4}; let Inst{3-0} = offset{3-0}; - let AsmMatchConverter = "cvtStExtTWriteBackImm"; } def r : AI3ldstidxT<op, 0, (outs GPR:$base_wb), (ins GPR:$Rt, addr_offset_none:$addr, postidx_reg:$Rm), @@ -2828,7 +2819,6 @@ multiclass AI3strT<bits<4> op, string opc> { let Inst{22} = 0; let Inst{11-8} = 0; let Inst{3-0} = Rm{3-0}; - let AsmMatchConverter = "cvtStExtTWriteBackReg"; } } @@ -4011,8 +4001,13 @@ def PKHTB : APKHI<0b01101000, 1, (outs GPRnopc:$Rd), // Alternate cases for PKHTB where identities eliminate some nodes. Note that // a shift amount of 0 is *not legal* here, it is PKHBT instead. +// We also can not replace a srl (17..31) by an arithmetic shift we would use in +// pkhtb src1, src2, asr (17..31). +def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000), + (srl GPRnopc:$src2, imm16:$sh)), + (PKHTB GPRnopc:$src1, GPRnopc:$src2, imm16:$sh)>; def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000), - (srl GPRnopc:$src2, imm16_31:$sh)), + (sra GPRnopc:$src2, imm16_31:$sh)), (PKHTB GPRnopc:$src1, GPRnopc:$src2, imm16_31:$sh)>; def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000), (and (srl GPRnopc:$src2, imm1_15:$sh), 0xFFFF)), @@ -4378,14 +4373,44 @@ let usesCustomInserter = 1 in { [(ARMcopystructbyval GPR:$dst, GPR:$src, imm:$size, imm:$alignment)]>; } +def ldrex_1 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{ + return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; + +def ldrex_2 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{ + return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; + +def ldrex_4 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{ + return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + +def strex_1 : PatFrag<(ops node:$val, node:$ptr), + (int_arm_strex node:$val, node:$ptr), [{ + return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; + +def strex_2 : PatFrag<(ops node:$val, node:$ptr), + (int_arm_strex node:$val, node:$ptr), [{ + return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; + +def strex_4 : PatFrag<(ops node:$val, node:$ptr), + (int_arm_strex node:$val, node:$ptr), [{ + return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + let mayLoad = 1 in { def LDREXB : AIldrex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr), NoItinerary, - "ldrexb", "\t$Rt, $addr", []>; + "ldrexb", "\t$Rt, $addr", + [(set GPR:$Rt, (ldrex_1 addr_offset_none:$addr))]>; def LDREXH : AIldrex<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr), - NoItinerary, "ldrexh", "\t$Rt, $addr", []>; + NoItinerary, "ldrexh", "\t$Rt, $addr", + [(set GPR:$Rt, (ldrex_2 addr_offset_none:$addr))]>; def LDREX : AIldrex<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr), - NoItinerary, "ldrex", "\t$Rt, $addr", []>; + NoItinerary, "ldrex", "\t$Rt, $addr", + [(set GPR:$Rt, (ldrex_4 addr_offset_none:$addr))]>; let hasExtraDefRegAllocReq = 1 in def LDREXD: AIldrex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr), NoItinerary, "ldrexd", "\t$Rt, $addr", []> { @@ -4395,11 +4420,14 @@ def LDREXD: AIldrex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr), let mayStore = 1, Constraints = "@earlyclobber $Rd" in { def STREXB: AIstrex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr), - NoItinerary, "strexb", "\t$Rd, $Rt, $addr", []>; + NoItinerary, "strexb", "\t$Rd, $Rt, $addr", + [(set GPR:$Rd, (strex_1 GPR:$Rt, addr_offset_none:$addr))]>; def STREXH: AIstrex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, 
addr_offset_none:$addr), - NoItinerary, "strexh", "\t$Rd, $Rt, $addr", []>; + NoItinerary, "strexh", "\t$Rd, $Rt, $addr", + [(set GPR:$Rd, (strex_2 GPR:$Rt, addr_offset_none:$addr))]>; def STREX : AIstrex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr), - NoItinerary, "strex", "\t$Rd, $Rt, $addr", []>; + NoItinerary, "strex", "\t$Rd, $Rt, $addr", + [(set GPR:$Rd, (strex_4 GPR:$Rt, addr_offset_none:$addr))]>; let hasExtraSrcRegAllocReq = 1 in def STREXD : AIstrex<0b01, (outs GPR:$Rd), (ins GPRPairOp:$Rt, addr_offset_none:$addr), @@ -4409,11 +4437,21 @@ def STREXD : AIstrex<0b01, (outs GPR:$Rd), } -def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex", []>, +def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex", + [(int_arm_clrex)]>, Requires<[IsARM, HasV7]> { let Inst{31-0} = 0b11110101011111111111000000011111; } +def : ARMPat<(and (ldrex_1 addr_offset_none:$addr), 0xff), + (LDREXB addr_offset_none:$addr)>; +def : ARMPat<(and (ldrex_2 addr_offset_none:$addr), 0xffff), + (LDREXH addr_offset_none:$addr)>; +def : ARMPat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr), + (STREXB GPR:$Rt, addr_offset_none:$addr)>; +def : ARMPat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr), + (STREXH GPR:$Rt, addr_offset_none:$addr)>; + // SWP/SWPB are deprecated in V6/V7. let mayLoad = 1, mayStore = 1 in { def SWP : AIswp<0, (outs GPRnopc:$Rt), @@ -4447,7 +4485,7 @@ def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1, let Inst{23-20} = opc1; } -def CDP2 : ABXI<0b1110, (outs), (ins pf_imm:$cop, imm0_15:$opc1, +def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1, c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2), NoItinerary, "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2", [(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn, @@ -5187,10 +5225,10 @@ def : MnemonicAlias<"rfeed", "rfeib">; def : MnemonicAlias<"rfe", "rfeia">; // SRS aliases -def : MnemonicAlias<"srsfa", "srsda">; -def : MnemonicAlias<"srsea", "srsdb">; -def : MnemonicAlias<"srsfd", "srsia">; -def : MnemonicAlias<"srsed", "srsib">; +def : MnemonicAlias<"srsfa", "srsib">; +def : MnemonicAlias<"srsea", "srsia">; +def : MnemonicAlias<"srsfd", "srsdb">; +def : MnemonicAlias<"srsed", "srsda">; def : MnemonicAlias<"srs", "srsia">; // QSAX == QSUBADDX diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td index 9d1a8ea..af4f4d1 100644 --- a/lib/Target/ARM/ARMInstrNEON.td +++ b/lib/Target/ARM/ARMInstrNEON.td @@ -656,7 +656,6 @@ multiclass VLD1DWB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u, @@ -664,7 +663,6 @@ multiclass VLD1DWB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } multiclass VLD1QWB<bits<4> op7_4, string Dt> { @@ -675,7 +673,6 @@ multiclass VLD1QWB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. 
let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u, @@ -683,7 +680,6 @@ multiclass VLD1QWB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } @@ -713,7 +709,6 @@ multiclass VLD1D3WB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u, @@ -721,7 +716,6 @@ multiclass VLD1D3WB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } @@ -754,7 +748,6 @@ multiclass VLD1D4WB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<0,0b10,0b0010,op7_4, (outs VecListFourD:$Vd, GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u, @@ -762,7 +755,6 @@ multiclass VLD1D4WB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } @@ -811,7 +803,6 @@ multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt, let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST2Instruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm), itin, @@ -819,7 +810,6 @@ multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt, "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST2Instruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } @@ -1348,7 +1338,6 @@ multiclass VLD1DUPWB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLD1DupInstruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListOneDAllLanes:$Vd, GPR:$wb), @@ -1357,7 +1346,6 @@ multiclass VLD1DUPWB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLD1DupInstruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> { @@ -1369,7 +1357,6 @@ multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLD1DupInstruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListDPairAllLanes:$Vd, GPR:$wb), @@ -1378,7 +1365,6 @@ multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLD1DupInstruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } @@ -1419,7 +1405,6 @@ multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. 
let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLD2DupInstruction"; - let AsmMatchConverter = "cvtVLDwbFixed"; } def _register : NLdSt<1, 0b10, 0b1101, op7_4, (outs VdTy:$Vd, GPR:$wb), @@ -1428,7 +1413,6 @@ multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> { "$Rn.addr = $wb", []> { let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLD2DupInstruction"; - let AsmMatchConverter = "cvtVLDwbRegister"; } } @@ -1609,7 +1593,6 @@ multiclass VST1DWB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbFixed"; } def _register : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm, VecListOneD:$Vd), @@ -1618,7 +1601,6 @@ multiclass VST1DWB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{4} = Rn{4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbRegister"; } } multiclass VST1QWB<bits<4> op7_4, string Dt> { @@ -1629,7 +1611,6 @@ multiclass VST1QWB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbFixed"; } def _register : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm, VecListDPair:$Vd), @@ -1638,7 +1619,6 @@ multiclass VST1QWB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbRegister"; } } @@ -1669,7 +1649,6 @@ multiclass VST1D3WB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbFixed"; } def _register : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm, VecListThreeD:$Vd), @@ -1678,7 +1657,6 @@ multiclass VST1D3WB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbRegister"; } } @@ -1714,7 +1692,6 @@ multiclass VST1D4WB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbFixed"; } def _register : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd), @@ -1723,7 +1700,6 @@ multiclass VST1D4WB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST1Instruction"; - let AsmMatchConverter = "cvtVSTwbRegister"; } } @@ -1773,7 +1749,6 @@ multiclass VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt, let Rm = 0b1101; // NLdSt will assign to the right encoding bits. 
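The _fixed/_register pairs in these VLD/VST writeback multiclasses now match directly on their operand lists instead of going through the removed cvtVLDwb*/cvtVSTwb* converters; Rm == 0b1101 is the reserved encoding meaning "write back by the transfer size, no offset register". The two assembler shapes, sketched with GCC/Clang extended inline asm (assumes a NEON-capable target; the register choices are illustrative):

// Illustrative only: the two post-increment syntaxes the _fixed and _register
// variants model for VLD1 (the VST1 classes mirror this exactly).
static void vld1_writeback_forms(const unsigned char *p, long step) {
  asm volatile("vld1.8 {d0}, [%0]!"            // _fixed: advance by the transfer
               : "+r"(p) : : "d0", "memory");  //   size (encoded as Rm == 0b1101)
  asm volatile("vld1.8 {d0}, [%0], %1"         // _register: advance by Rm
               : "+r"(p) : "r"(step) : "d0", "memory");
}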
let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST2Instruction"; - let AsmMatchConverter = "cvtVSTwbFixed"; } def _register : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm, VdTy:$Vd), IIC_VLD1u, @@ -1781,7 +1756,6 @@ multiclass VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt, "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST2Instruction"; - let AsmMatchConverter = "cvtVSTwbRegister"; } } multiclass VST2QWB<bits<4> op7_4, string Dt> { @@ -1792,7 +1766,6 @@ multiclass VST2QWB<bits<4> op7_4, string Dt> { let Rm = 0b1101; // NLdSt will assign to the right encoding bits. let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST2Instruction"; - let AsmMatchConverter = "cvtVSTwbFixed"; } def _register : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb), (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd), @@ -1801,7 +1774,6 @@ multiclass VST2QWB<bits<4> op7_4, string Dt> { "$Rn.addr = $wb", []> { let Inst{5-4} = Rn{5-4}; let DecoderMethod = "DecodeVLDST2Instruction"; - let AsmMatchConverter = "cvtVSTwbRegister"; } } @@ -2379,6 +2351,21 @@ class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "", [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>; +// Same as above, but not predicated. +class N2VDIntnp<bits<2> op17_16, bits<3> op10_8, bit op7, + InstrItinClass itin, string OpcodeStr, string Dt, + ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp> + : N2Vnp<op17_16, op10_8, op7, 0, (outs DPR:$Vd), (ins DPR:$Vm), + itin, OpcodeStr, Dt, ResTy, OpTy, + [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>; + +class N2VQIntnp<bits<2> op17_16, bits<3> op10_8, bit op7, + InstrItinClass itin, string OpcodeStr, string Dt, + ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp> + : N2Vnp<op17_16, op10_8, op7, 1, (outs QPR:$Vd), (ins QPR:$Vm), + itin, OpcodeStr, Dt, ResTy, OpTy, + [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>; + // Narrow 2-register operations. 
class N2VN<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16, bits<5> op11_7, bit op6, bit op4, @@ -2541,6 +2528,16 @@ class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4, let TwoOperandAliasConstraint = "$Vn = $Vd"; let isCommutable = Commutable; } + +class N3VDIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6, + bit op4, Format f, InstrItinClass itin, string OpcodeStr, + string Dt, ValueType ResTy, ValueType OpTy, + SDPatternOperator IntOp, bit Commutable> + : N3Vnp<op27_23, op21_20, op11_8, op6, op4, + (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), f, itin, OpcodeStr, Dt, + ResTy, OpTy, IntOp, Commutable, + [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>; + class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty, SDPatternOperator IntOp> : N3VLane32<0, 1, op21_20, op11_8, 1, 0, @@ -2552,6 +2549,7 @@ class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin, imm:$lane)))))]> { let isCommutable = 0; } + class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty, SDPatternOperator IntOp> : N3VLane16<0, 1, op21_20, op11_8, 1, 0, @@ -2584,6 +2582,16 @@ class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4, let TwoOperandAliasConstraint = "$Vn = $Vd"; let isCommutable = Commutable; } + +class N3VQIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6, + bit op4, Format f, InstrItinClass itin, string OpcodeStr, + string Dt, ValueType ResTy, ValueType OpTy, + SDPatternOperator IntOp, bit Commutable> + : N3Vnp<op27_23, op21_20, op11_8, op6, op4, + (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin, OpcodeStr, Dt, + ResTy, OpTy, IntOp, Commutable, + [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>; + class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin, string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp> @@ -4659,6 +4667,18 @@ def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBINQ, "vmax", "f32", v4f32, v4f32, int_arm_neon_vmaxs, 1>; +// VMAXNM +let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in { + def VMAXNMND : N3VDIntnp<0b00110, 0b00, 0b1111, 0, 1, + N3RegFrm, NoItinerary, "vmaxnm", "f32", + v2f32, v2f32, int_arm_neon_vmaxnm, 1>, + Requires<[HasV8, HasNEON]>; + def VMAXNMNQ : N3VQIntnp<0b00110, 0b00, 0b1111, 1, 1, + N3RegFrm, NoItinerary, "vmaxnm", "f32", + v4f32, v4f32, int_arm_neon_vmaxnm, 1>, + Requires<[HasV8, HasNEON]>; +} + // VMIN : Vector Minimum defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, N3RegFrm, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q, @@ -4673,6 +4693,18 @@ def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBINQ, "vmin", "f32", v4f32, v4f32, int_arm_neon_vmins, 1>; +// VMINNM +let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in { + def VMINNMND : N3VDIntnp<0b00110, 0b10, 0b1111, 0, 1, + N3RegFrm, NoItinerary, "vminnm", "f32", + v2f32, v2f32, int_arm_neon_vminnm, 1>, + Requires<[HasV8, HasNEON]>; + def VMINNMNQ : N3VQIntnp<0b00110, 0b10, 0b1111, 1, 1, + N3RegFrm, NoItinerary, "vminnm", "f32", + v4f32, v4f32, int_arm_neon_vminnm, 1>, + Requires<[HasV8, HasNEON]>; +} + // Vector Pairwise Operations. 
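An aside on the VMAXNM/VMINNM definitions just above, before the pairwise section: unlike VMAX/VMIN, which produce a NaN whenever either input is NaN, the new v8 instructions implement IEEE 754-2008 maxNum/minNum and return the numeric operand when exactly one input is a quiet NaN. A scalar model of the semantics (a sketch of the behaviour, not the codegen path):

#include <cmath>

// Scalar model of vmaxnm.f32: a single quiet NaN input is suppressed in
// favour of the numeric operand; two NaNs still yield NaN.
static float maxnm(float a, float b) {
  if (std::isnan(a)) return b;
  if (std::isnan(b)) return a;
  return a > b ? a : b;  // ordinary maximum for ordered inputs
}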
// VPADD : Vector Pairwise Add @@ -5386,6 +5418,26 @@ def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32", def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32", v4f32, v4i32, uint_to_fp>; +// VCVT{A, N, P, M} +multiclass VCVT_FPI<string op, bits<3> op10_8, SDPatternOperator IntS, + SDPatternOperator IntU> { + let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in { + def SD : N2VDIntnp<0b11, op10_8, 0, NoItinerary, !strconcat("vcvt", op), + "s32.f32", v2i32, v2f32, IntS>, Requires<[HasV8, HasNEON]>; + def SQ : N2VQIntnp<0b11, op10_8, 0, NoItinerary, !strconcat("vcvt", op), + "s32.f32", v4i32, v4f32, IntS>, Requires<[HasV8, HasNEON]>; + def UD : N2VDIntnp<0b11, op10_8, 1, NoItinerary, !strconcat("vcvt", op), + "u32.f32", v2i32, v2f32, IntU>, Requires<[HasV8, HasNEON]>; + def UQ : N2VQIntnp<0b11, op10_8, 1, NoItinerary, !strconcat("vcvt", op), + "u32.f32", v4i32, v4f32, IntU>, Requires<[HasV8, HasNEON]>; + } +} + +defm VCVTAN : VCVT_FPI<"a", 0b000, int_arm_neon_vcvtas, int_arm_neon_vcvtau>; +defm VCVTNN : VCVT_FPI<"n", 0b001, int_arm_neon_vcvtns, int_arm_neon_vcvtnu>; +defm VCVTPN : VCVT_FPI<"p", 0b010, int_arm_neon_vcvtps, int_arm_neon_vcvtpu>; +defm VCVTMN : VCVT_FPI<"m", 0b011, int_arm_neon_vcvtms, int_arm_neon_vcvtmu>; + // VCVT : Vector Convert Between Floating-Point and Fixed-Point. let DecoderMethod = "DecodeVCVTD" in { def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32", @@ -5658,6 +5710,34 @@ def VTBX4Pseudo IIC_VTBX4, "$orig = $dst", []>; } // DecoderMethod = "DecodeTBLInstruction" +// VRINT : Vector Rounding +multiclass VRINT_FPI<string op, bits<3> op9_7, SDPatternOperator Int> { + let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in { + def D : N2VDIntnp<0b10, 0b100, 0, NoItinerary, + !strconcat("vrint", op), "f32", + v2f32, v2f32, Int>, Requires<[HasV8, HasNEON]> { + let Inst{9-7} = op9_7; + } + def Q : N2VQIntnp<0b10, 0b100, 0, NoItinerary, + !strconcat("vrint", op), "f32", + v4f32, v4f32, Int>, Requires<[HasV8, HasNEON]> { + let Inst{9-7} = op9_7; + } + } + + def : InstAlias<!strconcat("vrint", op, ".f32.f32\t$Dd, $Dm"), + (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm)>; + def : InstAlias<!strconcat("vrint", op, ".f32.f32\t$Qd, $Qm"), + (!cast<Instruction>(NAME#"Q") QPR:$Qd, QPR:$Qm)>; +} + +defm VRINTNN : VRINT_FPI<"n", 0b000, int_arm_neon_vrintn>; +defm VRINTXN : VRINT_FPI<"x", 0b001, int_arm_neon_vrintx>; +defm VRINTAN : VRINT_FPI<"a", 0b010, int_arm_neon_vrinta>; +defm VRINTZN : VRINT_FPI<"z", 0b011, int_arm_neon_vrintz>; +defm VRINTMN : VRINT_FPI<"m", 0b101, int_arm_neon_vrintm>; +defm VRINTPN : VRINT_FPI<"p", 0b111, int_arm_neon_vrintp>; + //===----------------------------------------------------------------------===// // NEON instructions for single-precision FP math //===----------------------------------------------------------------------===// @@ -6698,12 +6778,17 @@ def VST4qWB_register_Asm_32 : (ins VecListFourQ:$list, addrmode6:$addr, rGPR:$Rm, pred:$p)>; -// VMOV takes an optional datatype suffix +// VMOV/VMVN takes an optional datatype suffix defm : NEONDTAnyInstAlias<"vmov${p}", "$Vd, $Vm", (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>; defm : NEONDTAnyInstAlias<"vmov${p}", "$Vd, $Vm", (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>; +defm : NEONDTAnyInstAlias<"vmvn${p}", "$Vd, $Vm", + (VMVNd DPR:$Vd, DPR:$Vm, pred:$p)>; +defm : NEONDTAnyInstAlias<"vmvn${p}", "$Vd, $Vm", + (VMVNq QPR:$Vd, QPR:$Vm, pred:$p)>; + // VCLT (register) is an assembler alias for VCGT 
w/ the operands reversed. // D-register versions. def : NEONInstAlias<"vcle${p}.s8 $Dd, $Dn, $Dm", diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index 1fff41d..e7218c6 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -69,11 +69,6 @@ def thumb_immshifted_shamt : SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(V, MVT::i32); }]>; -// ADR instruction labels. -def t_adrlabel : Operand<i32> { - let EncoderMethod = "getThumbAdrLabelOpValue"; -} - // Scaled 4 immediate. def t_imm0_1020s4_asmoperand: AsmOperandClass { let Name = "Imm0_1020s4"; } def t_imm0_1020s4 : Operand<i32> { @@ -97,12 +92,34 @@ def t_imm0_508s4_neg : Operand<i32> { // Define Thumb specific addressing modes. +// unsigned 8-bit, 2-scaled memory offset +class OperandUnsignedOffset_b8s2 : AsmOperandClass { + let Name = "UnsignedOffset_b8s2"; + let PredicateMethod = "isUnsignedOffset<8, 2>"; +} + +def UnsignedOffset_b8s2 : OperandUnsignedOffset_b8s2; + +// thumb style PC relative operand. signed, 8 bits magnitude, +// two bits shift. can be represented as either [pc, #imm], #imm, +// or relocatable expression... +def ThumbMemPC : AsmOperandClass { + let Name = "ThumbMemPC"; +} + let OperandType = "OPERAND_PCREL" in { def t_brtarget : Operand<OtherVT> { let EncoderMethod = "getThumbBRTargetOpValue"; let DecoderMethod = "DecodeThumbBROperand"; } +// ADR instruction labels. +def t_adrlabel : Operand<i32> { + let EncoderMethod = "getThumbAdrLabelOpValue"; + let PrintMethod = "printAdrLabelOperand<2>"; + let ParserMatchClass = UnsignedOffset_b8s2; +} + def t_bcctarget : Operand<i32> { let EncoderMethod = "getThumbBCCTargetOpValue"; let DecoderMethod = "DecodeThumbBCCTargetOperand"; @@ -122,6 +139,15 @@ def t_blxtarget : Operand<i32> { let EncoderMethod = "getThumbBLXTargetOpValue"; let DecoderMethod = "DecodeThumbBLXOffset"; } + +// t_addrmode_pc := <label> => pc + imm8 * 4 +// +def t_addrmode_pc : Operand<i32> { + let EncoderMethod = "getAddrModePCOpValue"; + let DecoderMethod = "DecodeThumbAddrModePC"; + let PrintMethod = "printThumbLdrLabelOperand"; + let ParserMatchClass = ThumbMemPC; +} } // t_addrmode_rr := reg + reg @@ -218,14 +244,6 @@ def t_addrmode_sp : Operand<i32>, let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm); } -// t_addrmode_pc := <label> => pc + imm8 * 4 -// -def t_addrmode_pc : Operand<i32> { - let EncoderMethod = "getAddrModePCOpValue"; - let DecoderMethod = "DecodeThumbAddrModePC"; - let PrintMethod = "printThumbLdrLabelOperand"; -} - //===----------------------------------------------------------------------===// // Miscellaneous Instructions. // @@ -505,6 +523,7 @@ let isBranch = 1, isTerminator = 1 in let Inst{7-0} = target; } + // Tail calls let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in { // IOS versions. @@ -629,11 +648,9 @@ def tLDRspi : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_sp:$addr), IIC_iLoad_i, let Inst{7-0} = addr; } -// Load tconstpool -// FIXME: Use ldr.n to work around a darwin assembler bug. 
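The t_addrmode_pc operand above ("<label> => pc + imm8 * 4") encodes its label per A6.2/A8.6.59: Thumb reads PC as the instruction address plus 4, the base is then word-aligned, and the immediate is the word offset from that base. A small sketch of the arithmetic (the function name is illustrative):

#include <cstdint>

// Sketch of the t_addrmode_pc / LDR (literal) field computation. Thumb PC
// reads as the instruction address + 4; the base is Align(PC, 4); the field
// is the scaled word offset, which must land in 0..255.
static uint32_t thumbLdrLiteralImm8(uint32_t InsnAddr, uint32_t Target) {
  uint32_t Base = (InsnAddr + 4) & ~UINT32_C(3); // Align(PC, 4)
  return (Target - Base) / 4;                    // imm8; offset is word-aligned
}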
-let canFoldAsLoad = 1, isReMaterializable = 1, isCodeGenOnly = 1 in +let canFoldAsLoad = 1, isReMaterializable = 1 in def tLDRpci : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i, - "ldr", ".n\t$Rt, $addr", + "ldr", "\t$Rt, $addr", [(set tGPR:$Rt, (load (ARMWrapper tconstpool:$addr)))]>, T1Encoding<{0,1,0,0,1,?}> { // A6.2 & A8.6.59 @@ -643,17 +660,8 @@ def tLDRpci : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i, let Inst{7-0} = addr; } -// FIXME: Remove this entry when the above ldr.n workaround is fixed. -// For assembly/disassembly use only. -def tLDRpciASM : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i, - "ldr", "\t$Rt, $addr", []>, - T1Encoding<{0,1,0,0,1,?}> { - // A6.2 & A8.6.59 - bits<3> Rt; - bits<8> addr; - let Inst{10-8} = Rt; - let Inst{7-0} = addr; -} +def : tInstAlias<"ldr${p}.n $Rt, $addr", + (tLDRpci tGPR:$Rt, t_addrmode_pc:$addr, pred:$p), 0>; // A8.6.194 & A8.6.192 defm tSTR : thumb_st_rr_ri_enc<0b000, 0b0110, t_addrmode_rrs4, diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index ff21bf7..84086a5 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -173,14 +173,13 @@ def t2ldr_pcrel_imm12 : Operand<i32> { // ADR instruction labels. def t2adrlabel : Operand<i32> { let EncoderMethod = "getT2AdrLabelOpValue"; - let PrintMethod = "printAdrLabelOperand"; + let PrintMethod = "printAdrLabelOperand<0>"; } - // t2addrmode_posimm8 := reg + imm8 def MemPosImm8OffsetAsmOperand : AsmOperandClass {let Name="MemPosImm8Offset";} def t2addrmode_posimm8 : Operand<i32> { - let PrintMethod = "printT2AddrModeImm8Operand"; + let PrintMethod = "printT2AddrModeImm8Operand<false>"; let EncoderMethod = "getT2AddrModeImm8OpValue"; let DecoderMethod = "DecodeT2AddrModeImm8"; let ParserMatchClass = MemPosImm8OffsetAsmOperand; @@ -191,7 +190,7 @@ def t2addrmode_posimm8 : Operand<i32> { def MemNegImm8OffsetAsmOperand : AsmOperandClass {let Name="MemNegImm8Offset";} def t2addrmode_negimm8 : Operand<i32>, ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> { - let PrintMethod = "printT2AddrModeImm8Operand"; + let PrintMethod = "printT2AddrModeImm8Operand<false>"; let EncoderMethod = "getT2AddrModeImm8OpValue"; let DecoderMethod = "DecodeT2AddrModeImm8"; let ParserMatchClass = MemNegImm8OffsetAsmOperand; @@ -200,15 +199,22 @@ def t2addrmode_negimm8 : Operand<i32>, // t2addrmode_imm8 := reg +/- imm8 def MemImm8OffsetAsmOperand : AsmOperandClass { let Name = "MemImm8Offset"; } -def t2addrmode_imm8 : Operand<i32>, - ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> { - let PrintMethod = "printT2AddrModeImm8Operand"; +class T2AddrMode_Imm8 : Operand<i32>, + ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> { let EncoderMethod = "getT2AddrModeImm8OpValue"; let DecoderMethod = "DecodeT2AddrModeImm8"; let ParserMatchClass = MemImm8OffsetAsmOperand; let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm); } +def t2addrmode_imm8 : T2AddrMode_Imm8 { + let PrintMethod = "printT2AddrModeImm8Operand<false>"; +} + +def t2addrmode_imm8_pre : T2AddrMode_Imm8 { + let PrintMethod = "printT2AddrModeImm8Operand<true>"; +} + def t2am_imm8_offset : Operand<i32>, ComplexPattern<i32, 1, "SelectT2AddrModeImm8Offset", [], [SDNPWantRoot]> { @@ -219,14 +225,21 @@ def t2am_imm8_offset : Operand<i32>, // t2addrmode_imm8s4 := reg +/- (imm8 << 2) def MemImm8s4OffsetAsmOperand : AsmOperandClass {let Name = "MemImm8s4Offset";} -def t2addrmode_imm8s4 : Operand<i32> { - let PrintMethod = "printT2AddrModeImm8s4Operand"; +class 
T2AddrMode_Imm8s4 : Operand<i32> { let EncoderMethod = "getT2AddrModeImm8s4OpValue"; let DecoderMethod = "DecodeT2AddrModeImm8s4"; let ParserMatchClass = MemImm8s4OffsetAsmOperand; let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm); } +def t2addrmode_imm8s4 : T2AddrMode_Imm8s4 { + let PrintMethod = "printT2AddrModeImm8s4Operand<false>"; +} + +def t2addrmode_imm8s4_pre : T2AddrMode_Imm8s4 { + let PrintMethod = "printT2AddrModeImm8s4Operand<true>"; +} + def t2am_imm8s4_offset_asmoperand : AsmOperandClass { let Name = "Imm8s4"; } def t2am_imm8s4_offset : Operand<i32> { let PrintMethod = "printT2AddrModeImm8s4OffsetOperand"; @@ -238,7 +251,8 @@ def t2am_imm8s4_offset : Operand<i32> { def MemImm0_1020s4OffsetAsmOperand : AsmOperandClass { let Name = "MemImm0_1020s4Offset"; } -def t2addrmode_imm0_1020s4 : Operand<i32> { +def t2addrmode_imm0_1020s4 : Operand<i32>, + ComplexPattern<i32, 2, "SelectT2AddrModeExclusive"> { let PrintMethod = "printT2AddrModeImm0_1020s4Operand"; let EncoderMethod = "getT2AddrModeImm0_1020s4OpValue"; let DecoderMethod = "DecodeT2AddrModeImm0_1020s4"; @@ -959,6 +973,8 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc, let Inst{19-16} = addr{16-13}; // Rn let Inst{15-12} = Rt; let Inst{11-0} = addr{11-0}; // imm + + let DecoderMethod = "DecodeT2LoadImm12"; } def i8 : T2Ii8 <(outs target:$Rt), (ins t2addrmode_negimm8:$addr), iii, opc, "\t$Rt, $addr", @@ -979,6 +995,8 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc, let Inst{9} = addr{8}; // U let Inst{8} = 0; // The W bit. let Inst{7-0} = addr{7-0}; // imm + + let DecoderMethod = "DecodeT2LoadImm8"; } def s : T2Iso <(outs target:$Rt), (ins t2addrmode_so_reg:$addr), iis, opc, ".w\t$Rt, $addr", @@ -1011,14 +1029,18 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc, let Inst{31-27} = 0b11111; let Inst{26-25} = 0b00; let Inst{24} = signed; - let Inst{23} = ?; // add = (U == '1') let Inst{22-21} = opcod; let Inst{20} = 1; // load let Inst{19-16} = 0b1111; // Rn + bits<4> Rt; - bits<12> addr; let Inst{15-12} = Rt{3-0}; + + bits<13> addr; + let Inst{23} = addr{12}; // add = (U == '1') let Inst{11-0} = addr{11-0}; + + let DecoderMethod = "DecodeT2LoadLabel"; } } @@ -1228,15 +1250,15 @@ defm t2LDR : T2I_ld<0, 0b10, "ldr", IIC_iLoad_i, IIC_iLoad_si, GPR, // Loads with zero extension defm t2LDRH : T2I_ld<0, 0b01, "ldrh", IIC_iLoad_bh_i, IIC_iLoad_bh_si, - rGPR, UnOpFrag<(zextloadi16 node:$Src)>>; + GPR, UnOpFrag<(zextloadi16 node:$Src)>>; defm t2LDRB : T2I_ld<0, 0b00, "ldrb", IIC_iLoad_bh_i, IIC_iLoad_bh_si, - rGPR, UnOpFrag<(zextloadi8 node:$Src)>>; + GPR, UnOpFrag<(zextloadi8 node:$Src)>>; // Loads with sign extension defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", IIC_iLoad_bh_i, IIC_iLoad_bh_si, - rGPR, UnOpFrag<(sextloadi16 node:$Src)>>; + GPR, UnOpFrag<(sextloadi16 node:$Src)>>; defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", IIC_iLoad_bh_i, IIC_iLoad_bh_si, - rGPR, UnOpFrag<(sextloadi8 node:$Src)>>; + GPR, UnOpFrag<(sextloadi8 node:$Src)>>; let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in { // Load doubleword @@ -1294,12 +1316,9 @@ def : T2Pat<(extloadi16 (ARMWrapper tconstpool:$addr)), let mayLoad = 1, neverHasSideEffects = 1 in { def t2LDR_PRE : T2Ipreldst<0, 0b10, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb), - (ins t2addrmode_imm8:$addr), + (ins t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoad_iu, - "ldr", "\t$Rt, $addr!", "$addr.base = $Rn_wb", - []> { - let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8"; -} + "ldr", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []>; def 
t2LDR_POST : T2Ipostldst<0, 0b10, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb), (ins addr_offset_none:$Rn, t2am_imm8_offset:$offset), @@ -1307,48 +1326,42 @@ def t2LDR_POST : T2Ipostldst<0, 0b10, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb), "ldr", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>; def t2LDRB_PRE : T2Ipreldst<0, 0b00, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb), - (ins t2addrmode_imm8:$addr), + (ins t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu, - "ldrb", "\t$Rt, $addr!", "$addr.base = $Rn_wb", - []> { - let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8"; -} + "ldrb", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []>; + def t2LDRB_POST : T2Ipostldst<0, 0b00, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb), (ins addr_offset_none:$Rn, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu, "ldrb", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>; def t2LDRH_PRE : T2Ipreldst<0, 0b01, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb), - (ins t2addrmode_imm8:$addr), + (ins t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu, - "ldrh", "\t$Rt, $addr!", "$addr.base = $Rn_wb", - []> { - let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8"; -} + "ldrh", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []>; + def t2LDRH_POST : T2Ipostldst<0, 0b01, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb), (ins addr_offset_none:$Rn, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu, "ldrh", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>; def t2LDRSB_PRE : T2Ipreldst<1, 0b00, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb), - (ins t2addrmode_imm8:$addr), + (ins t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu, "ldrsb", "\t$Rt, $addr!", "$addr.base = $Rn_wb", - []> { - let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8"; -} + []>; + def t2LDRSB_POST : T2Ipostldst<1, 0b00, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb), (ins addr_offset_none:$Rn, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu, "ldrsb", "\t$Rt, $Rn$offset", "$Rn = $Rn_wb", []>; def t2LDRSH_PRE : T2Ipreldst<1, 0b01, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb), - (ins t2addrmode_imm8:$addr), + (ins t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu, "ldrsh", "\t$Rt, $addr!", "$addr.base = $Rn_wb", - []> { - let AsmMatchConverter = "cvtLdWriteBackRegT2AddrModeImm8"; -} + []>; + def t2LDRSH_POST : T2Ipostldst<1, 0b01, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb), (ins addr_offset_none:$Rn, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu, @@ -1373,6 +1386,8 @@ class T2IldT<bit signed, bits<2> type, string opc, InstrItinClass ii> let Inst{11} = 1; let Inst{10-8} = 0b110; // PUW. 
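With the dedicated t2addrmode_imm8_pre operand carrying PrintMethod printT2AddrModeImm8Operand<true>, the cvtLdWriteBackRegT2AddrModeImm8 converters removed above become unnecessary: the pre-indexed syntax is now a distinct operand rather than a post-parse rewrite. For reference, the two writeback shapes these loads parse, as a GCC/Clang inline-asm sketch (assumes a Thumb-2 target):

// Illustrative only: pre-indexed vs post-indexed writeback addressing.
static int pre_vs_post(int *p) {
  int a, b;
  asm volatile("ldr %0, [%1, #4]!"  // pre-index: offset applied, base written back
               : "=r"(a), "+r"(p) : : "memory");
  asm volatile("ldr %0, [%1], #4"   // post-index: access first, then base += 4
               : "=r"(b), "+r"(p) : : "memory");
  return a + b;
}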
let Inst{7-0} = addr{7-0}; + + let DecoderMethod = "DecodeT2LoadT"; } def t2LDRT : T2IldT<0, 0b10, "ldrt", IIC_iLoad_i>; @@ -1399,27 +1414,22 @@ def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs), let mayStore = 1, neverHasSideEffects = 1 in { def t2STR_PRE : T2Ipreldst<0, 0b10, 0, 1, (outs GPRnopc:$Rn_wb), - (ins GPRnopc:$Rt, t2addrmode_imm8:$addr), + (ins GPRnopc:$Rt, t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iStore_iu, "str", "\t$Rt, $addr!", - "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> { - let AsmMatchConverter = "cvtStWriteBackRegT2AddrModeImm8"; -} + "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []>; + def t2STRH_PRE : T2Ipreldst<0, 0b01, 0, 1, (outs GPRnopc:$Rn_wb), - (ins rGPR:$Rt, t2addrmode_imm8:$addr), + (ins rGPR:$Rt, t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iStore_iu, "strh", "\t$Rt, $addr!", - "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> { - let AsmMatchConverter = "cvtStWriteBackRegT2AddrModeImm8"; -} + "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []>; def t2STRB_PRE : T2Ipreldst<0, 0b00, 0, 1, (outs GPRnopc:$Rn_wb), - (ins rGPR:$Rt, t2addrmode_imm8:$addr), + (ins rGPR:$Rt, t2addrmode_imm8_pre:$addr), AddrModeT2_i8, IndexModePre, IIC_iStore_bh_iu, "strb", "\t$Rt, $addr!", - "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> { - let AsmMatchConverter = "cvtStWriteBackRegT2AddrModeImm8"; -} + "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []>; } // mayStore = 1, neverHasSideEffects = 1 def t2STR_POST : T2Ipostldst<0, 0b10, 0, 0, (outs GPRnopc:$Rn_wb), @@ -1506,9 +1516,8 @@ def t2STRHT : T2IstT<0b01, "strht", IIC_iStore_bh_i>; // For disassembly only. def t2LDRD_PRE : T2Ii8s4<1, 1, 1, (outs rGPR:$Rt, rGPR:$Rt2, GPR:$wb), - (ins t2addrmode_imm8s4:$addr), IIC_iLoad_d_ru, + (ins t2addrmode_imm8s4_pre:$addr), IIC_iLoad_d_ru, "ldrd", "\t$Rt, $Rt2, $addr!", "$addr.base = $wb", []> { - let AsmMatchConverter = "cvtT2LdrdPre"; let DecoderMethod = "DecodeT2LDRDPreInstruction"; } @@ -1518,10 +1527,9 @@ def t2LDRD_POST : T2Ii8s4post<0, 1, 1, (outs rGPR:$Rt, rGPR:$Rt2, GPR:$wb), "$addr.base = $wb", []>; def t2STRD_PRE : T2Ii8s4<1, 1, 0, (outs GPR:$wb), - (ins rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4:$addr), + (ins rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4_pre:$addr), IIC_iStore_d_ru, "strd", "\t$Rt, $Rt2, $addr!", "$addr.base = $wb", []> { - let AsmMatchConverter = "cvtT2StrdPre"; let DecoderMethod = "DecodeT2STRDPreInstruction"; } @@ -1543,16 +1551,17 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> { Sched<[WritePreLd]> { let Inst{31-25} = 0b1111100; let Inst{24} = instr; + let Inst{23} = 1; let Inst{22} = 0; let Inst{21} = write; let Inst{20} = 1; let Inst{15-12} = 0b1111; bits<17> addr; - let addr{12} = 1; // add = TRUE let Inst{19-16} = addr{16-13}; // Rn - let Inst{23} = addr{12}; // U let Inst{11-0} = addr{11-0}; // imm12 + + let DecoderMethod = "DecodeT2LoadImm12"; } def i8 : T2Ii8<(outs), (ins t2addrmode_negimm8:$addr), IIC_Preload, opc, @@ -1571,6 +1580,8 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> { bits<13> addr; let Inst{19-16} = addr{12-9}; // Rn let Inst{7-0} = addr{7-0}; // imm8 + + let DecoderMethod = "DecodeT2LoadImm8"; } def s : T2Iso<(outs), (ins t2addrmode_so_reg:$addr), IIC_Preload, opc, @@ -1584,7 +1595,7 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> { let Inst{21} = write; let Inst{20} = 1; let Inst{15-12} = 0b1111; - let Inst{11-6} = 0000000; + let Inst{11-6} = 0b000000; bits<10> addr; let Inst{19-16} = addr{9-6}; // Rn @@ -1593,15 +1604,33 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string 
opc> { let DecoderMethod = "DecodeT2LoadShift"; } - // FIXME: We should have a separate 'pci' variant here. As-is we represent - // it via the i12 variant, which it's related to, but that means we can - // represent negative immediates, which aren't legal for anything except - // the 'pci' case (Rn == 15). } -defm t2PLD : T2Ipl<0, 0, "pld">, Requires<[IsThumb2]>; -defm t2PLDW : T2Ipl<1, 0, "pldw">, Requires<[IsThumb2,HasV7,HasMP]>; -defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>; +defm t2PLD : T2Ipl<0, 0, "pld">, Requires<[IsThumb2]>; +defm t2PLDW : T2Ipl<1, 0, "pldw">, Requires<[IsThumb2,HasV7,HasMP]>; +defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>; + +// pci variant is very similar to i12, but supports negative offsets +// from the PC. Only PLD and PLI have pci variants (not PLDW) +class T2Iplpci<bits<1> inst, string opc> : T2Iso<(outs), (ins t2ldrlabel:$addr), + IIC_Preload, opc, "\t$addr", + [(ARMPreload (ARMWrapper tconstpool:$addr), + (i32 0), (i32 inst))]>, Sched<[WritePreLd]> { + let Inst{31-25} = 0b1111100; + let Inst{24} = inst; + let Inst{22-20} = 0b001; + let Inst{19-16} = 0b1111; + let Inst{15-12} = 0b1111; + + bits<13> addr; + let Inst{23} = addr{12}; // add = (U == '1') + let Inst{11-0} = addr{11-0}; // imm12 + + let DecoderMethod = "DecodeT2LoadLabel"; +} + +def t2PLDpci : T2Iplpci<0, "pld">, Requires<[IsThumb2]>; +def t2PLIpci : T2Iplpci<1, "pli">, Requires<[IsThumb2,HasV7]>; //===----------------------------------------------------------------------===// // Load / store multiple Instructions. @@ -2907,7 +2936,12 @@ def t2PKHTB : T2ThreeReg< // Alternate cases for PKHTB where identities eliminate some nodes. Note that // a shift amount of 0 is *not legal* here, it is PKHBT instead. -def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), (srl rGPR:$src2, imm16_31:$sh)), +// We also can not replace a srl (17..31) by an arithmetic shift we would use in +// pkhtb src1, src2, asr (17..31). 
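To unpack the comment above: PKHTB only encodes an arithmetic shift, and the bottom halfword of a logical and an arithmetic right shift agree for every input only while no sign-fill bit reaches bit 15, that is, for shifts up to 16. This is why srl keeps a PKHTB pattern solely at imm16, while the 17..31 range is now matched as sra. A quick demonstration (right-shifting a negative signed value is implementation-defined in principle, but arithmetic on all mainstream compilers):

#include <cstdint>

// For sh == 16 the low halfword of both shifts is x[31:16], so srl may use
// pkhtb's asr form; for sh in 17..31 the sign fill reaches bit 15 and the
// halves differ whenever x is negative.
static bool lowHalvesAgree(int32_t x, unsigned sh) {
  uint32_t lsr = uint32_t(x) >> sh;  // srl
  uint32_t asr = uint32_t(x >> sh);  // sra (arithmetic shift in practice)
  return uint16_t(lsr) == uint16_t(asr);
}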
+def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), (srl rGPR:$src2, imm16:$sh)), + (t2PKHTB rGPR:$src1, rGPR:$src2, imm16:$sh)>, + Requires<[HasT2ExtractPack, IsThumb2]>; +def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), (sra rGPR:$src2, imm16_31:$sh)), (t2PKHTB rGPR:$src1, rGPR:$src2, imm16_31:$sh)>, Requires<[HasT2ExtractPack, IsThumb2]>; def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), @@ -3092,26 +3126,24 @@ def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd), // memory barriers protect the atomic sequences let hasSideEffects = 1 in { -def t2DMB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary, - "dmb", "\t$opt", [(ARMMemBarrier (i32 imm:$opt))]>, - Requires<[IsThumb, HasDB]> { +def t2DMB : T2I<(outs), (ins memb_opt:$opt), NoItinerary, + "dmb", "\t$opt", [(ARMMemBarrier (i32 imm:$opt))]>, + Requires<[HasDB]> { bits<4> opt; let Inst{31-4} = 0xf3bf8f5; let Inst{3-0} = opt; } } -def t2DSB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary, - "dsb", "\t$opt", []>, - Requires<[IsThumb, HasDB]> { +def t2DSB : T2I<(outs), (ins memb_opt:$opt), NoItinerary, + "dsb", "\t$opt", []>, Requires<[HasDB]> { bits<4> opt; let Inst{31-4} = 0xf3bf8f4; let Inst{3-0} = opt; } -def t2ISB : AInoP<(outs), (ins instsyncb_opt:$opt), ThumbFrm, NoItinerary, - "isb", "\t$opt", - []>, Requires<[IsThumb, HasDB]> { +def t2ISB : T2I<(outs), (ins instsyncb_opt:$opt), NoItinerary, + "isb", "\t$opt", []>, Requires<[HasDB]> { bits<4> opt; let Inst{31-4} = 0xf3bf8f6; let Inst{3-0} = opt; @@ -3154,13 +3186,16 @@ class T2I_strex<bits<2> opcod, dag oops, dag iops, AddrMode am, int sz, let mayLoad = 1 in { def t2LDREXB : T2I_ldrex<0b00, (outs rGPR:$Rt), (ins addr_offset_none:$addr), AddrModeNone, 4, NoItinerary, - "ldrexb", "\t$Rt, $addr", "", []>; + "ldrexb", "\t$Rt, $addr", "", + [(set rGPR:$Rt, (ldrex_1 addr_offset_none:$addr))]>; def t2LDREXH : T2I_ldrex<0b01, (outs rGPR:$Rt), (ins addr_offset_none:$addr), AddrModeNone, 4, NoItinerary, - "ldrexh", "\t$Rt, $addr", "", []>; + "ldrexh", "\t$Rt, $addr", "", + [(set rGPR:$Rt, (ldrex_2 addr_offset_none:$addr))]>; def t2LDREX : Thumb2I<(outs rGPR:$Rt), (ins t2addrmode_imm0_1020s4:$addr), AddrModeNone, 4, NoItinerary, - "ldrex", "\t$Rt, $addr", "", []> { + "ldrex", "\t$Rt, $addr", "", + [(set rGPR:$Rt, (ldrex_4 t2addrmode_imm0_1020s4:$addr))]> { bits<4> Rt; bits<12> addr; let Inst{31-27} = 0b11101; @@ -3185,16 +3220,22 @@ let mayStore = 1, Constraints = "@earlyclobber $Rd" in { def t2STREXB : T2I_strex<0b00, (outs rGPR:$Rd), (ins rGPR:$Rt, addr_offset_none:$addr), AddrModeNone, 4, NoItinerary, - "strexb", "\t$Rd, $Rt, $addr", "", []>; + "strexb", "\t$Rd, $Rt, $addr", "", + [(set rGPR:$Rd, (strex_1 rGPR:$Rt, + addr_offset_none:$addr))]>; def t2STREXH : T2I_strex<0b01, (outs rGPR:$Rd), (ins rGPR:$Rt, addr_offset_none:$addr), AddrModeNone, 4, NoItinerary, - "strexh", "\t$Rd, $Rt, $addr", "", []>; + "strexh", "\t$Rd, $Rt, $addr", "", + [(set rGPR:$Rd, (strex_2 rGPR:$Rt, + addr_offset_none:$addr))]>; + def t2STREX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt, t2addrmode_imm0_1020s4:$addr), AddrModeNone, 4, NoItinerary, "strex", "\t$Rd, $Rt, $addr", "", - []> { + [(set rGPR:$Rd, (strex_4 rGPR:$Rt, + t2addrmode_imm0_1020s4:$addr))]> { bits<4> Rd; bits<4> Rt; bits<12> addr; @@ -3216,7 +3257,7 @@ def t2STREXD : T2I_strex<0b11, (outs rGPR:$Rd), } } -def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", []>, +def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", [(int_arm_clrex)]>, Requires<[IsThumb2, HasV7]> { let Inst{31-16} = 0xf3bf; let Inst{15-14} = 0b10; @@ -3227,6 
+3268,15 @@ def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", []>, let Inst{3-0} = 0b1111; } +def : T2Pat<(and (ldrex_1 addr_offset_none:$addr), 0xff), + (t2LDREXB addr_offset_none:$addr)>; +def : T2Pat<(and (ldrex_2 addr_offset_none:$addr), 0xffff), + (t2LDREXH addr_offset_none:$addr)>; +def : T2Pat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr), + (t2STREXB GPR:$Rt, addr_offset_none:$addr)>; +def : T2Pat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr), + (t2STREXH GPR:$Rt, addr_offset_none:$addr)>; + //===----------------------------------------------------------------------===// // SJLJ Exception handling intrinsics // eh_sjlj_setjmp() is an instruction sequence to store the return @@ -3549,6 +3599,16 @@ def t2RFEIA : T2RFE<0b111010011001, (outs), (ins GPR:$Rn), NoItinerary, "rfeia", "\t$Rn", [/* For disassembly only; pattern left blank */]>; +// B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction. +let Defs = [PC], Uses = [LR] in +def t2SUBS_PC_LR : T2I <(outs), (ins imm0_255:$imm), NoItinerary, + "subs", "\tpc, lr, $imm", []>, Requires<[IsThumb2]> { + let Inst{31-8} = 0b111100111101111010001111; + + bits<8> imm; + let Inst{7-0} = imm; +} + //===----------------------------------------------------------------------===// // Non-Instruction Patterns // @@ -3632,7 +3692,7 @@ multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm> { let DecoderMethod = "DecodeCopMemInstruction"; } def _PRE : T2CI<op31_28, - (outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr), + (outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr), asm, "\t$cop, $CRd, $addr!"> { bits<13> addr; bits<4> cop; @@ -3783,8 +3843,7 @@ def t2MSR_M : T2I<(outs), (ins msr_mask:$SYSm, rGPR:$Rn), class t2MovRCopro<bits<4> Op, string opc, bit direction, dag oops, dag iops, list<dag> pattern> - : T2Cop<Op, oops, iops, - !strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"), + : T2Cop<Op, oops, iops, opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2", pattern> { let Inst{27-24} = 0b1110; let Inst{20} = direction; @@ -3809,7 +3868,7 @@ class t2MovRRCopro<bits<4> Op, string opc, bit direction, list<dag> pattern = []> : T2Cop<Op, (outs), (ins p_imm:$cop, imm0_15:$opc1, GPR:$Rt, GPR:$Rt2, c_imm:$CRm), - !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern> { + opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm", pattern> { let Inst{27-24} = 0b1100; let Inst{23-21} = 0b010; let Inst{20} = direction; @@ -3834,32 +3893,32 @@ def t2MCR : t2MovRCopro<0b1110, "mcr", 0, c_imm:$CRm, imm0_7:$opc2), [(int_arm_mcr imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn, imm:$CRm, imm:$opc2)]>; -def : t2InstAlias<"mcr $cop, $opc1, $Rt, $CRn, $CRm", +def : t2InstAlias<"mcr${p} $cop, $opc1, $Rt, $CRn, $CRm", (t2MCR p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn, - c_imm:$CRm, 0)>; + c_imm:$CRm, 0, pred:$p)>; def t2MCR2 : t2MovRCopro<0b1111, "mcr2", 0, (outs), (ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2), [(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn, imm:$CRm, imm:$opc2)]>; -def : t2InstAlias<"mcr2 $cop, $opc1, $Rt, $CRn, $CRm", +def : t2InstAlias<"mcr2${p} $cop, $opc1, $Rt, $CRn, $CRm", (t2MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn, - c_imm:$CRm, 0)>; + c_imm:$CRm, 0, pred:$p)>; /* from coprocessor to ARM core register */ def t2MRC : t2MovRCopro<0b1110, "mrc", 1, - (outs GPR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, + (outs GPRwithAPSR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2), []>; -def : t2InstAlias<"mrc $cop, $opc1, $Rt, $CRn, $CRm", - (t2MRC 
GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, - c_imm:$CRm, 0)>; +def : t2InstAlias<"mrc${p} $cop, $opc1, $Rt, $CRn, $CRm", + (t2MRC GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, + c_imm:$CRm, 0, pred:$p)>; def t2MRC2 : t2MovRCopro<0b1111, "mrc2", 1, - (outs GPR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, + (outs GPRwithAPSR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2), []>; -def : t2InstAlias<"mrc2 $cop, $opc1, $Rt, $CRn, $CRm", - (t2MRC2 GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, - c_imm:$CRm, 0)>; +def : t2InstAlias<"mrc2${p} $cop, $opc1, $Rt, $CRn, $CRm", + (t2MRC2 GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, + c_imm:$CRm, 0, pred:$p)>; def : T2v6Pat<(int_arm_mrc imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2), (t2MRC imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>; @@ -3886,7 +3945,7 @@ def t2MRRC2 : t2MovRRCopro<0b1111, "mrrc2", 1>; def tCDP : T2Cop<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1, c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2), - "cdp\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2", + "cdp", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2", [(int_arm_cdp imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn, imm:$CRm, imm:$opc2)]> { let Inst{27-24} = 0b1110; @@ -3909,7 +3968,7 @@ def tCDP : T2Cop<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1, def t2CDP2 : T2Cop<0b1111, (outs), (ins p_imm:$cop, imm0_15:$opc1, c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2), - "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2", + "cdp2", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2", [(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn, imm:$CRm, imm:$opc2)]> { let Inst{27-24} = 0b1110; @@ -4097,9 +4156,9 @@ def : t2InstAlias<"tst${p} $Rn, $Rm", (t2TSTrr GPRnopc:$Rn, rGPR:$Rm, pred:$p)>; // Memory barriers -def : InstAlias<"dmb", (t2DMB 0xf)>, Requires<[IsThumb, HasDB]>; -def : InstAlias<"dsb", (t2DSB 0xf)>, Requires<[IsThumb, HasDB]>; -def : InstAlias<"isb", (t2ISB 0xf)>, Requires<[IsThumb, HasDB]>; +def : InstAlias<"dmb${p}", (t2DMB 0xf, pred:$p)>, Requires<[IsThumb2, HasDB]>; +def : InstAlias<"dsb${p}", (t2DSB 0xf, pred:$p)>, Requires<[IsThumb2, HasDB]>; +def : InstAlias<"isb${p}", (t2ISB 0xf, pred:$p)>, Requires<[IsThumb2, HasDB]>; // Alias for LDR, LDRB, LDRH, LDRSB, and LDRSH without the ".w" optional // width specifier. @@ -4350,7 +4409,7 @@ def t2LDRSHpcrel : t2AsmPseudo<"ldrsh${p} $Rt, $addr", (ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>; // Version w/ the .w suffix. def : t2InstAlias<"ldr${p}.w $Rt, $addr", - (t2LDRpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>; + (t2LDRpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p), 0>; def : t2InstAlias<"ldrb${p}.w $Rt, $addr", (t2LDRBpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>; def : t2InstAlias<"ldrh${p}.w $Rt, $addr", @@ -4362,3 +4421,10 @@ def : t2InstAlias<"ldrsh${p}.w $Rt, $addr", def : t2InstAlias<"add${p} $Rd, pc, $imm", (t2ADR rGPR:$Rd, imm0_4095:$imm, pred:$p)>; + +// PLD/PLDW/PLI with alternate literal form. 
+def : t2InstAlias<"pld${p} $addr", + (t2PLDpci t2ldr_pcrel_imm12:$addr, pred:$p)>; +def : InstAlias<"pli${p} $addr", + (t2PLIpci t2ldr_pcrel_imm12:$addr, pred:$p)>, + Requires<[IsThumb2,HasV7]>; diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td index 597b74a..f9cfa15 100644 --- a/lib/Target/ARM/ARMInstrVFP.td +++ b/lib/Target/ARM/ARMInstrVFP.td @@ -333,6 +333,42 @@ def VNMULS : ASbI<0b11100, 0b10, 1, 0, let D = VFPNeonA8Domain; } +multiclass vsel_inst<string op, bits<2> opc> { + let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in { + def S : ASbInp<0b11100, opc, 0, + (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm), + NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"), + []>, Requires<[HasV8FP]>; + + def D : ADbInp<0b11100, opc, 0, + (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm), + NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"), + []>, Requires<[HasV8FP]>; + } +} + +defm VSELGT : vsel_inst<"gt", 0b11>; +defm VSELGE : vsel_inst<"ge", 0b10>; +defm VSELEQ : vsel_inst<"eq", 0b00>; +defm VSELVS : vsel_inst<"vs", 0b01>; + +multiclass vmaxmin_inst<string op, bit opc> { + let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in { + def S : ASbInp<0b11101, 0b00, opc, + (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm), + NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"), + []>, Requires<[HasV8FP]>; + + def D : ADbInp<0b11101, 0b00, opc, + (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm), + NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"), + []>, Requires<[HasV8FP]>; + } +} + +defm VMAXNM : vmaxmin_inst<"vmaxnm", 0>; +defm VMINNM : vmaxmin_inst<"vminnm", 1>; + // Match reassociated forms only if not sign dependent rounding. def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)), (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>; @@ -468,7 +504,7 @@ def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm, let Inst{4} = 0; } -// Between half-precision and single-precision. For disassembly only. +// Between half, single and double-precision. For disassembly only. // FIXME: Verify encoding after integrated assembler is working. def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm), @@ -493,6 +529,111 @@ def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm), /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm", [/* For disassembly only; pattern left blank */]>; +def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0, + (outs DPR:$Dd), (ins SPR:$Sm), + NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm", + []>, Requires<[HasV8FP]> { + // Instruction operands. + bits<5> Sm; + + // Encode instruction operands. + let Inst{3-0} = Sm{4-1}; + let Inst{5} = Sm{0}; +} + +def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0, + (outs SPR:$Sd), (ins DPR:$Dm), + NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm", + []>, Requires<[HasV8FP]> { + // Instruction operands. + bits<5> Sd; + bits<5> Dm; + + // Encode instruction operands. + let Inst{3-0} = Dm{3-0}; + let Inst{5} = Dm{4}; + let Inst{15-12} = Sd{4-1}; + let Inst{22} = Sd{0}; +} + +def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0, + (outs DPR:$Dd), (ins SPR:$Sm), + NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm", + []>, Requires<[HasV8FP]> { + // Instruction operands. + bits<5> Sm; + + // Encode instruction operands. + let Inst{3-0} = Sm{4-1}; + let Inst{5} = Sm{0}; +} + +def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0, + (outs SPR:$Sd), (ins DPR:$Dm), + NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm", + []>, Requires<[HasV8FP]> { + // Instruction operands. 
+ bits<5> Sd; + bits<5> Dm; + + // Encode instruction operands. + let Inst{15-12} = Sd{4-1}; + let Inst{22} = Sd{0}; + let Inst{3-0} = Dm{3-0}; + let Inst{5} = Dm{4}; +} + +multiclass vcvt_inst<string opc, bits<2> rm> { + let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in { + def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0, + (outs SPR:$Sd), (ins SPR:$Sm), + NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"), + []>, Requires<[HasV8FP]> { + let Inst{17-16} = rm; + } + + def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0, + (outs SPR:$Sd), (ins SPR:$Sm), + NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"), + []>, Requires<[HasV8FP]> { + let Inst{17-16} = rm; + } + + def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0, + (outs SPR:$Sd), (ins DPR:$Dm), + NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"), + []>, Requires<[HasV8FP]> { + bits<5> Dm; + + let Inst{17-16} = rm; + + // Encode instruction operands + let Inst{3-0} = Dm{3-0}; + let Inst{5} = Dm{4}; + let Inst{8} = 1; + } + + def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0, + (outs SPR:$Sd), (ins DPR:$Dm), + NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"), + []>, Requires<[HasV8FP]> { + bits<5> Dm; + + let Inst{17-16} = rm; + + // Encode instruction operands + let Inst{3-0} = Dm{3-0}; + let Inst{5} = Dm{4}; + let Inst{8} = 1; + } + } +} + +defm VCVTA : vcvt_inst<"a", 0b00>; +defm VCVTN : vcvt_inst<"n", 0b01>; +defm VCVTP : vcvt_inst<"p", 0b10>; +defm VCVTM : vcvt_inst<"m", 0b11>; + def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$Dd), (ins DPR:$Dm), IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm", @@ -507,6 +648,54 @@ def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0, let D = VFPNeonA8Domain; } +multiclass vrint_inst_zrx<string opc, bit op, bit op2> { + def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0, + (outs SPR:$Sd), (ins SPR:$Sm), + NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm", + []>, Requires<[HasV8FP]> { + let Inst{7} = op2; + let Inst{16} = op; + } + def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0, + (outs DPR:$Dd), (ins DPR:$Dm), + NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm", + []>, Requires<[HasV8FP]> { + let Inst{7} = op2; + let Inst{16} = op; + } +} + +defm VRINTZ : vrint_inst_zrx<"z", 0, 1>; +defm VRINTR : vrint_inst_zrx<"r", 0, 0>; +defm VRINTX : vrint_inst_zrx<"x", 1, 0>; + +multiclass vrint_inst_anpm<string opc, bits<2> rm> { + let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in { + def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0, + (outs SPR:$Sd), (ins SPR:$Sm), + NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"), + []>, Requires<[HasV8FP]> { + let Inst{17-16} = rm; + } + def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0, + (outs DPR:$Dd), (ins DPR:$Dm), + NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"), + []>, Requires<[HasV8FP]> { + let Inst{17-16} = rm; + } + } + + def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"), + (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm)>; + def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"), + (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm)>; +} + +defm VRINTA : vrint_inst_anpm<"a", 0b00>; +defm VRINTN : vrint_inst_anpm<"n", 0b01>; +defm VRINTP : vrint_inst_anpm<"p", 0b10>; +defm VRINTM : vrint_inst_anpm<"m", 0b11>; + def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$Dd), (ins DPR:$Dm), IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm", diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index c8ed576..1803a8a 100644 --- 
a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -109,12 +109,12 @@ namespace { unsigned PredReg, unsigned Scratch, DebugLoc dl, - SmallVector<MachineBasicBlock::iterator, 4> &Merges); + SmallVectorImpl<MachineBasicBlock::iterator> &Merges); void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base, int Opcode, unsigned Size, ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch, MemOpQueue &MemOps, - SmallVector<MachineBasicBlock::iterator, 4> &Merges); + SmallVectorImpl<MachineBasicBlock::iterator> &Merges); void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps); bool FixInvalidRegPairOp(MachineBasicBlock &MBB, @@ -371,7 +371,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB, ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch, DebugLoc dl, - SmallVector<MachineBasicBlock::iterator, 4> &Merges) { + SmallVectorImpl<MachineBasicBlock::iterator> &Merges) { // First calculate which of the registers should be killed by the merged // instruction. const unsigned insertPos = memOps[insertAfter].Position; @@ -444,10 +444,10 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB, /// load / store multiple instructions. void ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, - unsigned Base, int Opcode, unsigned Size, - ARMCC::CondCodes Pred, unsigned PredReg, - unsigned Scratch, MemOpQueue &MemOps, - SmallVector<MachineBasicBlock::iterator, 4> &Merges) { + unsigned Base, int Opcode, unsigned Size, + ARMCC::CondCodes Pred, unsigned PredReg, + unsigned Scratch, MemOpQueue &MemOps, + SmallVectorImpl<MachineBasicBlock::iterator> &Merges) { bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode); int Offset = MemOps[SIndex].Offset; int SOffset = Offset; @@ -1484,7 +1484,7 @@ namespace { unsigned &PredReg, ARMCC::CondCodes &Pred, bool &isT2); bool RescheduleOps(MachineBasicBlock *MBB, - SmallVector<MachineInstr*, 4> &Ops, + SmallVectorImpl<MachineInstr *> &Ops, unsigned Base, bool isLd, DenseMap<MachineInstr*, unsigned> &MI2LocMap); bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB); @@ -1602,8 +1602,9 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, return false; // Make sure the base address satisfies i64 ld / st alignment requirement. + // At the moment, we ignore the memoryoperand's value. + // If we want to use AliasAnalysis, we should check it accordingly. 
if (!Op0->hasOneMemOperand() || - !(*Op0->memoperands_begin())->getValue() || (*Op0->memoperands_begin())->isVolatile()) return false; @@ -1655,7 +1656,7 @@ namespace { } bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB, - SmallVector<MachineInstr*, 4> &Ops, + SmallVectorImpl<MachineInstr *> &Ops, unsigned Base, bool isLd, DenseMap<MachineInstr*, unsigned> &MI2LocMap) { bool RetVal = false; @@ -1857,9 +1858,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) { if (!StopHere) BI->second.push_back(MI); } else { - SmallVector<MachineInstr*, 4> MIs; - MIs.push_back(MI); - Base2LdsMap[Base] = MIs; + Base2LdsMap[Base].push_back(MI); LdBases.push_back(Base); } } else { @@ -1875,9 +1874,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) { if (!StopHere) BI->second.push_back(MI); } else { - SmallVector<MachineInstr*, 4> MIs; - MIs.push_back(MI); - Base2StsMap[Base] = MIs; + Base2StsMap[Base].push_back(MI); StBases.push_back(Base); } } @@ -1893,7 +1890,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) { // Re-schedule loads. for (unsigned i = 0, e = LdBases.size(); i != e; ++i) { unsigned Base = LdBases[i]; - SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base]; + SmallVectorImpl<MachineInstr *> &Lds = Base2LdsMap[Base]; if (Lds.size() > 1) RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap); } @@ -1901,7 +1898,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) { // Re-schedule stores. for (unsigned i = 0, e = StBases.size(); i != e; ++i) { unsigned Base = StBases[i]; - SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base]; + SmallVectorImpl<MachineInstr *> &Sts = Base2StsMap[Base]; if (Sts.size() > 1) RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap); } diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td index 0459d64..bb7d358 100644 --- a/lib/Target/ARM/ARMRegisterInfo.td +++ b/lib/Target/ARM/ARMRegisterInfo.td @@ -215,7 +215,7 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> { // GPRs without the PC but with APSR. Some instructions allow accessing the // APSR, while actually encoding PC in the register field. This is usefull // for assembly and disassembly only. -def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add GPR, APSR_NZCV)> { +def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> { let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)]; let AltOrderSelect = [{ return 1 + MF.getTarget().getSubtarget<ARMSubtarget>().isThumb1Only(); diff --git a/lib/Target/ARM/ARMScheduleA9.td b/lib/Target/ARM/ARMScheduleA9.td index d06ad7d..74ee50b 100644 --- a/lib/Target/ARM/ARMScheduleA9.td +++ b/lib/Target/ARM/ARMScheduleA9.td @@ -1883,13 +1883,10 @@ def CortexA9Itineraries : ProcessorItineraries< // Cortex-A9 machine model for scheduling and other instruction cost heuristics. def CortexA9Model : SchedMachineModel { let IssueWidth = 2; // 2 micro-ops are dispatched per cycle. - let MinLatency = 0; // Data dependencies are allowed within dispatch groups. + let MicroOpBufferSize = 56; // Based on available renamed registers. let LoadLatency = 2; // Optimistic load latency assuming bypass. // This is overriden by OperandCycles if the // Itineraries are queried instead. - let ILPWindow = 10; // Don't reschedule small blocks to hide - // latency. Minimum latency requirements are already - // modeled strictly by reserving resources. 
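Two idioms in the ARMLoadStoreOptimizer.cpp cleanups above are worth noting: interfaces now take SmallVectorImpl<T>& so the callee no longer bakes in the caller's inline capacity, and DenseMap::operator[] value-initializes the mapped vector on first access, replacing the build-a-temporary-then-copy sequence. A self-contained sketch (the names are illustrative, not from the patch):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

// Any SmallVector<int, N> binds to SmallVectorImpl<int>&, and Map[Base]
// default-constructs the mapped vector the first time Base is seen.
static void collect(unsigned Base, int Item,
                    DenseMap<unsigned, SmallVector<int, 4>> &Map,
                    SmallVectorImpl<int> &Order) {
  Map[Base].push_back(Item);  // no explicit "insert empty vector" step
  Order.push_back(Item);      // the caller's inline size is invisible here
}

void demo() {
  DenseMap<unsigned, SmallVector<int, 4>> Map;
  SmallVector<int, 16> Order;  // a different N than the callee ever names
  collect(7, 42, Map, Order);
}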
let MispredictPenalty = 8; // Based on estimate of pipeline depth. let Itineraries = CortexA9Itineraries; @@ -1904,7 +1901,7 @@ def A9UnitALU : ProcResource<2>; def A9UnitMul : ProcResource<1> { let Super = A9UnitALU; } def A9UnitAGU : ProcResource<1>; def A9UnitLS : ProcResource<1>; -def A9UnitFP : ProcResource<1> { let Buffered = 0; } +def A9UnitFP : ProcResource<1> { let BufferSize = 0; } def A9UnitB : ProcResource<1>; //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMScheduleSwift.td b/lib/Target/ARM/ARMScheduleSwift.td index b5cf251..2a41616 100644 --- a/lib/Target/ARM/ARMScheduleSwift.td +++ b/lib/Target/ARM/ARMScheduleSwift.td @@ -1076,7 +1076,7 @@ def SwiftItineraries : ProcessorItineraries< // Swift machine model for scheduling and other instruction cost heuristics. def SwiftModel : SchedMachineModel { let IssueWidth = 3; // 3 micro-ops are dispatched per cycle. - let MinLatency = 0; // Data dependencies are allowed within dispatch groups. + let MicroOpBufferSize = 45; // Based on NEON renamed registers. let LoadLatency = 3; let MispredictPenalty = 14; // A branch direction mispredict. diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp index 4d204ce..0a0f30c 100644 --- a/lib/Target/ARM/ARMSubtarget.cpp +++ b/lib/Target/ARM/ARMSubtarget.cpp @@ -77,9 +77,11 @@ void ARMSubtarget::initializeEnvironment() { HasV6Ops = false; HasV6T2Ops = false; HasV7Ops = false; + HasV8Ops = false; HasVFPv2 = false; HasVFPv3 = false; HasVFPv4 = false; + HasV8FP = false; HasNEON = false; UseNEONForSinglePrecisionFP = false; UseMulOps = UseFusedMulOps; diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index bc5af96..ad7f1b3 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -37,7 +37,8 @@ protected: /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others. ARMProcFamilyEnum ARMProcFamily; - /// HasV4TOps, HasV5TOps, HasV5TEOps, HasV6Ops, HasV6T2Ops, HasV7Ops - + /// HasV4TOps, HasV5TOps, HasV5TEOps, + /// HasV6Ops, HasV6T2Ops, HasV7Ops, HasV8Ops - /// Specify whether target support specific ARM ISA variants. bool HasV4TOps; bool HasV5TOps; @@ -45,12 +46,14 @@ protected: bool HasV6Ops; bool HasV6T2Ops; bool HasV7Ops; + bool HasV8Ops; - /// HasVFPv2, HasVFPv3, HasVFPv4, HasNEON - Specify what + /// HasVFPv2, HasVFPv3, HasVFPv4, HasV8FP, HasNEON - Specify what /// floating point ISAs are supported. 
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index 4d204ce..0a0f30c 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -77,9 +77,11 @@ void ARMSubtarget::initializeEnvironment() {
   HasV6Ops = false;
   HasV6T2Ops = false;
   HasV7Ops = false;
+  HasV8Ops = false;
   HasVFPv2 = false;
   HasVFPv3 = false;
   HasVFPv4 = false;
+  HasV8FP = false;
   HasNEON = false;
   UseNEONForSinglePrecisionFP = false;
   UseMulOps = UseFusedMulOps;
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index bc5af96..ad7f1b3 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -37,7 +37,8 @@ protected:
   /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
   ARMProcFamilyEnum ARMProcFamily;

-  /// HasV4TOps, HasV5TOps, HasV5TEOps, HasV6Ops, HasV6T2Ops, HasV7Ops -
+  /// HasV4TOps, HasV5TOps, HasV5TEOps,
+  /// HasV6Ops, HasV6T2Ops, HasV7Ops, HasV8Ops -
   /// Specify whether target support specific ARM ISA variants.
   bool HasV4TOps;
   bool HasV5TOps;
@@ -45,12 +46,14 @@ protected:
   bool HasV6Ops;
   bool HasV6T2Ops;
   bool HasV7Ops;
+  bool HasV8Ops;

-  /// HasVFPv2, HasVFPv3, HasVFPv4, HasNEON - Specify what
+  /// HasVFPv2, HasVFPv3, HasVFPv4, HasV8FP, HasNEON - Specify what
   /// floating point ISAs are supported.
   bool HasVFPv2;
   bool HasVFPv3;
   bool HasVFPv4;
+  bool HasV8FP;
   bool HasNEON;

   /// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
@@ -192,10 +195,6 @@ protected:

 public:
   enum {
-    isELF, isDarwin
-  } TargetType;
-
-  enum {
     ARM_ABI_APCS,
     ARM_ABI_AAPCS // ARM EABI
   } TargetABI;
@@ -231,6 +230,7 @@ public:
   bool hasV6Ops()   const { return HasV6Ops;  }
   bool hasV6T2Ops() const { return HasV6T2Ops; }
   bool hasV7Ops()   const { return HasV7Ops;  }
+  bool hasV8Ops()   const { return HasV8Ops;  }

   bool isCortexA5() const { return ARMProcFamily == CortexA5; }
   bool isCortexA8() const { return ARMProcFamily == CortexA8; }
@@ -246,6 +246,7 @@ public:
   bool hasVFP2() const { return HasVFPv2; }
   bool hasVFP3() const { return HasVFPv3; }
   bool hasVFP4() const { return HasVFPv4; }
+  bool hasV8FP() const { return HasV8FP; }
   bool hasNEON() const { return HasNEON;  }
   bool useNEONForSinglePrecisionFP() const {
     return hasNEON() && UseNEONForSinglePrecisionFP; }
@@ -279,6 +280,14 @@ public:
   bool isTargetNaCl() const { return TargetTriple.getOS() == Triple::NaCl; }
   bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
   bool isTargetELF() const { return !isTargetDarwin(); }
+  // ARM EABI is the bare-metal EABI described in ARM ABI documents and
+  // can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
+  // FIXME: Add a flag for bare-metal for that target and set Triple::EABI
+  // even for GNUEABI, so we can make a distinction here and still conform to
+  // the EABI on GNU (and Android) mode. This requires change in Clang, too.
+  bool isTargetAEABI() const {
+    return TargetTriple.getEnvironment() == Triple::EABI;
+  }

   bool isAPCS_ABI() const { return TargetABI == ARM_ABI_APCS; }
   bool isAAPCS_ABI() const { return TargetABI == ARM_ABI_AAPCS; }
diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index b16ab4b..e6dbcb6 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -60,7 +60,7 @@ void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
   // Add first the target-independent BasicTTI pass, then our ARM pass. This
   // allows the ARM pass to delegate to the target independent layer when
   // appropriate.
-  PM.add(createBasicTargetTransformInfoPass(getTargetLowering()));
+  PM.add(createBasicTargetTransformInfoPass(this));
   PM.add(createARMTargetTransformInfoPass(this));
 }

@@ -150,7 +150,7 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {

 bool ARMPassConfig::addPreISel() {
   if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
-    addPass(createGlobalMergePass(TM->getTargetLowering()));
+    addPass(createGlobalMergePass(TM));

   return false;
 }
@@ -169,7 +169,7 @@ bool ARMPassConfig::addPreRegAlloc() {
   // FIXME: temporarily disabling load / store optimization pass for Thumb1.
   if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
     addPass(createARMLoadStoreOptimizationPass(true));
-  if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isLikeA9())
+  if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
     addPass(createMLxExpansionPass());
   // Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
   // enabled when NEON is available.
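The HasV8Ops/HasV8FP additions above follow the stock subtarget pattern: one bool per ISA feature, cleared in initializeEnvironment() and set by the feature-string parser, with a feature bitmask variant used by the assembler's hasV8Ops() check later in this diff. A generic illustration with made-up bit assignments (not the TableGen-generated ARM values):

#include <cstdint>

// Hypothetical feature bits in the spirit of STI.getFeatureBits();
// the bit positions here are invented for this sketch.
enum : uint64_t {
  HasV7OpsBit = 1ULL << 0,
  HasV8OpsBit = 1ULL << 1,
  HasV8FPBit  = 1ULL << 2
};

struct FeatureQuery {
  uint64_t Bits;
  bool hasV7Ops() const { return (Bits & HasV7OpsBit) != 0; }
  bool hasV8Ops() const { return (Bits & HasV8OpsBit) != 0; }
  bool hasV8FP()  const { return (Bits & HasV8FPBit)  != 0; }
};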
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 53ece66..34576ba 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -124,7 +124,7 @@ public:
   unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                               unsigned Index) const;

-  unsigned getAddressComputationCost(Type *Val) const;
+  unsigned getAddressComputationCost(Type *Val, bool IsComplex) const;

   unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                   OperandValueKind Op1Info = OK_AnyValue,
@@ -411,12 +411,14 @@ unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
     EVT SelCondTy = TLI->getValueType(CondTy);
     EVT SelValTy = TLI->getValueType(ValTy);
-    int Idx = ConvertCostTableLookup<MVT>(NEONVectorSelectTbl,
-                                          array_lengthof(NEONVectorSelectTbl),
-                                          ISD, SelCondTy.getSimpleVT(),
-                                          SelValTy.getSimpleVT());
-    if (Idx != -1)
-      return NEONVectorSelectTbl[Idx].Cost;
+    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
+      int Idx = ConvertCostTableLookup<MVT>(NEONVectorSelectTbl,
+                                            array_lengthof(NEONVectorSelectTbl),
+                                            ISD, SelCondTy.getSimpleVT(),
+                                            SelValTy.getSimpleVT());
+      if (Idx != -1)
+        return NEONVectorSelectTbl[Idx].Cost;
+    }

     std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
     return LT.first;
@@ -425,7 +427,16 @@ unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
   return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
 }

-unsigned ARMTTI::getAddressComputationCost(Type *Ty) const {
+unsigned ARMTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+  // Address computations in vectorized code with non-consecutive addresses will
+  // likely result in more instructions compared to scalar code where the
+  // computation can more often be merged into the index mode. The resulting
+  // extra micro-ops can significantly decrease throughput.
+  unsigned NumVectorInstToHideOverhead = 10;
+
+  if (Ty->isVectorTy() && IsComplex)
+    return NumVectorInstToHideOverhead;
+
   // In many cases the address computation is not merged into the instruction
   // addressing mode.
   return 1;
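The new IsComplex parameter lets ARMTTI charge roughly ten instructions for vector accesses whose addresses are not consecutive, so vectorization only wins when there is enough work to hide that overhead. The decision in miniature (a toy restatement, not the real TTI interface):

// Toy restatement: consecutive vector accesses fold the address update
// into the addressing mode; "complex" (non-consecutive) vector accesses
// pay for per-element address arithmetic, approximated as a flat cost.
static unsigned addressComputationCost(bool IsVectorTy, bool IsComplex) {
  const unsigned NumVectorInstToHideOverhead = 10; // value from the patch
  if (IsVectorTy && IsComplex)
    return NumVectorInstToHideOverhead;
  return 1;
}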
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index c59ca64..80e5c6e 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -161,6 +161,9 @@ class ARMAsmParser : public MCTargetAsmParser {
   bool hasV7Ops() const {
     return STI.getFeatureBits() & ARM::HasV7Ops;
   }
+  bool hasV8Ops() const {
+    return STI.getFeatureBits() & ARM::HasV8Ops;
+  }
   bool hasARM() const {
     return !(STI.getFeatureBits() & ARM::FeatureNoARM);
   }
@@ -216,50 +219,17 @@ class ARMAsmParser : public MCTargetAsmParser {
                         SMLoc &EndLoc);

   // Asm Match Converter Methods
-  void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtStWriteBackRegAddrMode2(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtStWriteBackRegAddrMode3(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtLdExtTWriteBackImm(MCInst &Inst,
-                             const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtLdExtTWriteBackReg(MCInst &Inst,
-                             const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtStExtTWriteBackImm(MCInst &Inst,
-                             const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtStExtTWriteBackReg(MCInst &Inst,
-                             const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
-                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
   void cvtThumbMultiply(MCInst &Inst,
                         const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtVLDwbFixed(MCInst &Inst,
-                     const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtVLDwbRegister(MCInst &Inst,
-                        const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtVSTwbFixed(MCInst &Inst,
-                     const SmallVectorImpl<MCParsedAsmOperand*> &);
-  void cvtVSTwbRegister(MCInst &Inst,
-                        const SmallVectorImpl<MCParsedAsmOperand*> &);
   bool validateInstruction(MCInst &Inst,
                            const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
   bool processInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
   bool shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+  bool shouldOmitPredicateOperand(StringRef Mnemonic,
+                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+  bool isDeprecated(MCInst &Inst, StringRef &Info);

 public:
   enum ARMMatchResultTy {
@@ -277,7 +247,7 @@ public:
     MCAsmParserExtension::Initialize(_Parser);

     // Cache the MCRegisterInfo.
-    MRI = &getContext().getRegisterInfo();
+    MRI = getContext().getRegisterInfo();

     // Initialize the set of available features.
     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
@@ -626,6 +596,40 @@ public:
   bool isITMask() const { return Kind == k_ITCondMask; }
   bool isITCondCode() const { return Kind == k_CondCode; }
   bool isImm() const { return Kind == k_Immediate; }
+  // checks whether this operand is an unsigned offset which fits is a field
+  // of specified width and scaled by a specific number of bits
+  template<unsigned width, unsigned scale>
+  bool isUnsignedOffset() const {
+    if (!isImm()) return false;
+    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) return true;
+    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+      int64_t Val = CE->getValue();
+      int64_t Align = 1LL << scale;
+      int64_t Max = Align * ((1LL << width) - 1);
+      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
+    }
+    return false;
+  }
+  // checks whether this operand is a memory operand computed as an offset
+  // applied to PC. the offset may have 8 bits of magnitude and is represented
+  // with two bits of shift. textually it may be either [pc, #imm], #imm or
+  // relocable expression...
+  bool isThumbMemPC() const {
+    int64_t Val = 0;
+    if (isImm()) {
+      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
+      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
+      if (!CE) return false;
+      Val = CE->getValue();
+    }
+    else if (isMem()) {
+      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
+      if(Memory.BaseRegNum != ARM::PC) return false;
+      Val = Memory.OffsetImm->getValue();
+    }
+    else return false;
+    return ((Val % 4) == 0) && (Val >= -1020) && (Val <= 1020);
+  }
   bool isFPImm() const {
     if (!isImm()) return false;
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -1704,6 +1708,37 @@ public:
     Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
   }

+  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
+    if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
+      Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2));
+      return;
+    }
+
+    const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
+    assert(SR && "Unknown value type!");
+    Inst.addOperand(MCOperand::CreateExpr(SR));
+  }
+
+  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    if (isImm()) {
+      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+      if (CE) {
+        Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+        return;
+      }
+
+      const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
+      assert(SR && "Unknown value type!");
+      Inst.addOperand(MCOperand::CreateExpr(SR));
+      return;
+    }
+
+    assert(isMem() && "Unknown value type!");
+    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
+    Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
+  }
+
   void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     // The operand is actually a so_imm, but we have its bitwise
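The isUnsignedOffset<width, scale> predicate added above accepts an immediate only if it is non-negative, aligned to 1 << scale, and still in range once scaled into a width-bit field. The same check restated standalone:

#include <cstdint>

// Standalone restatement of the operand check: Val must be non-negative,
// a multiple of 1 << scale, and fit in 'width' bits after scaling.
template <unsigned width, unsigned scale>
bool isUnsignedOffset(int64_t Val) {
  int64_t Align = 1LL << scale;
  int64_t Max = Align * ((1LL << width) - 1);
  return (Val % Align) == 0 && Val >= 0 && Val <= Max;
}

// isUnsignedOffset<8, 2>(1020) is true  (255 * 4, the largest encodable),
// isUnsignedOffset<8, 2>(1021) is false (not 4-byte aligned),
// isUnsignedOffset<8, 2>(1024) is false (out of range).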
@@ -2278,21 +2313,24 @@ public:
   }

   static ARMOperand *
-  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
+  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned> > &Regs,
                 SMLoc StartLoc, SMLoc EndLoc) {
+    assert (Regs.size() > 0 && "RegList contains no registers?");
     KindTy Kind = k_RegisterList;

-    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
+    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
       Kind = k_DPRRegisterList;
     else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
-             contains(Regs.front().first))
+             contains(Regs.front().second))
       Kind = k_SPRRegisterList;

+    // Sort based on the register encoding values.
+    array_pod_sort(Regs.begin(), Regs.end());
+
     ARMOperand *Op = new ARMOperand(Kind);
-    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
+    for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
            I = Regs.begin(), E = Regs.end(); I != E; ++I)
-      Op->Registers.push_back(I->first);
-    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
+      Op->Registers.push_back(I->second);
     Op->StartLoc = StartLoc;
     Op->EndLoc = EndLoc;
     return Op;
@@ -2972,12 +3010,14 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

   // The reglist instructions have at most 16 registers, so reserve
   // space for that many.
-  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
+  int EReg = 0;
+  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

   // Allow Q regs and just interpret them as the two D sub-registers.
   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
     Reg = getDRegFromQReg(Reg);
-    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+    EReg = MRI->getEncodingValue(Reg);
+    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
     ++Reg;
   }
   const MCRegisterClass *RC;
@@ -2991,7 +3031,8 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
     return Error(RegLoc, "invalid register in register list");

   // Store the register.
-  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+  EReg = MRI->getEncodingValue(Reg);
+  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));

   // This starts immediately after the first register token in the list,
   // so we can see either a comma or a minus (range separator) as a legal
@@ -3021,7 +3062,8 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
       // Add all the registers in the range to the register list.
       while (Reg != EndReg) {
         Reg = getNextRegister(Reg);
-        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+        EReg = MRI->getEncodingValue(Reg);
+        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
       }
       continue;
     }
@@ -3054,14 +3096,15 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
       continue;
     }
     // VFP register lists must also be contiguous.
-    // It's OK to use the enumeration values directly here rather, as the
-    // VFP register classes have the enum sorted properly.
     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
         Reg != OldReg + 1)
       return Error(RegLoc, "non-contiguous register range");
-    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
-    if (isQReg)
-      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
+    EReg = MRI->getEncodingValue(Reg);
+    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
+    if (isQReg) {
+      EReg = MRI->getEncodingValue(++Reg);
+      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
+    }
   }

   if (Parser.getTok().isNot(AsmToken::RCurly))
@@ -4039,260 +4082,9 @@ parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   return MatchOperand_Success;
 }

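Note the change of representation above: the parser now carries (encoding, register) pairs and sorts the pair list, so register lists come out ordered by hardware encoding even when the register enum order differs from encoding order. Lexicographic pair comparison is what makes the single sort call work; a minimal sketch:

#include <algorithm>
#include <utility>
#include <vector>

// std::pair compares lexicographically, so sorting (encoding, reg) pairs
// orders the list by encoding value; the register number just rides along
// and can be read back from .second afterwards.
static void sortByEncoding(std::vector<std::pair<unsigned, unsigned> > &Regs) {
  std::sort(Regs.begin(), Regs.end());
}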
-/// cvtT2LdrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtT2LdrdPre(MCInst &Inst,
-             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Rt, Rt2
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateReg(0));
-  // addr
-  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtT2StrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtT2StrdPre(MCInst &Inst,
-             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateReg(0));
-  // Rt, Rt2
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
-  // addr
-  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-
-  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegAddrMode2(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-
-  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-
-  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-
-/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegAddrMode2(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStWriteBackRegAddrMode3(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdExtTWriteBackImm(MCInst &Inst,
-                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Rt
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // addr
-  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
-  // offset
-  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdExtTWriteBackReg(MCInst &Inst,
-                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Rt
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // addr
-  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
-  // offset
-  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStExtTWriteBackImm(MCInst &Inst,
-                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // Rt
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  // addr
-  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
-  // offset
-  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStExtTWriteBackReg(MCInst &Inst,
-                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // Rt
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  // addr
-  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
-  // offset
-  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdrdPre(MCInst &Inst,
-           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Rt, Rt2
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // addr
-  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtStrdPre - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtStrdPre(MCInst &Inst,
-           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // Rt, Rt2
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
-  // addr
-  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
-void ARMAsmParser::
-cvtLdWriteBackRegAddrMode3(MCInst &Inst,
-                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-/// cvtThumbMultiply - Convert parsed operands to MCInst.
-/// Needed here because the Asm Gen Matcher can't handle properly tied operands
-/// when they refer multiple MIOperands inside a single one.
+/// Convert parsed operands to MCInst.  Needed here because this instruction
+/// only has two register operands, but multiplication is commutative so
+/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
 void ARMAsmParser::
 cvtThumbMultiply(MCInst &Inst,
                  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -4310,62 +4102,6 @@ cvtThumbMultiply(MCInst &Inst,
   ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
 }

-void ARMAsmParser::
-cvtVLDwbFixed(MCInst &Inst,
-              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Vd
-  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // Vn
-  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-void ARMAsmParser::
-cvtVLDwbRegister(MCInst &Inst,
-                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Vd
-  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // Vn
-  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
-  // Vm
-  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-void ARMAsmParser::
-cvtVSTwbFixed(MCInst &Inst,
-              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // Vn
-  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
-  // Vt
-  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
-void ARMAsmParser::
-cvtVSTwbRegister(MCInst &Inst,
-                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Create a writeback register dummy placeholder.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  // Vn
-  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
-  // Vm
-  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
-  // Vt
-  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
-  // pred
-  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
-}
-
 /// Parse an ARM memory expression, return false if successful else return true
 /// or an error.  The first token must be a '[' when called.
 bool ARMAsmParser::
@@ -4869,7 +4605,10 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
       Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
       Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
-      Mnemonic == "fmuls")
+      Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
+      Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
+      Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
+      Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
     return Mnemonic;

   // First, split out any predication code. Ignore mnemonics we know aren't
@@ -4966,28 +4705,30 @@ getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
   } else
     CanAcceptCarrySet = false;

-  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
-      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
-      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
-      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
-      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
-      (Mnemonic == "clrex" && !isThumb()) ||
-      (Mnemonic == "nop" && isThumbOne()) ||
-      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
-        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
-        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
-      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
-       !isThumb()) ||
-      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
+  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
+      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
+      Mnemonic == "trap" || Mnemonic == "setend" ||
+      Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") ||
+      Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
+      Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
+      Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
+      Mnemonic == "vrintm") {
+    // These mnemonics are never predicable
     CanAcceptPredicationCode = false;
+  } else if (!isThumb()) {
+    // Some instructions are only predicable in Thumb mode
+    CanAcceptPredicationCode
+      = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
+        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
+        Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
+        Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
+        Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
+        Mnemonic != "stc2" && Mnemonic != "stc2l" &&
+        !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
+  } else if (isThumbOne()) {
+    CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
   } else
     CanAcceptPredicationCode = true;
-
-  if (isThumb()) {
-    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
-        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
-      CanAcceptPredicationCode = false;
-  }
 }

 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
@@ -5042,15 +4783,6 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
       static_cast<ARMOperand*>(Operands[5])->isImm()) {
     // Nest conditions rather than one big 'if' statement for readability.
     //
-    // If either register is a high reg, it's either one of the SP
-    // variants (handled above) or a 32-bit encoding, so we just
-    // check against T3. If the second register is the PC, this is an
-    // alternate form of ADR, which uses encoding T4, so check for that too.
-    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
-         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
-        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
-        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
-      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
@@ -5058,6 +4790,11 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;
+    // Check against T3. If the second register is the PC, this is an
+    // alternate form of ADR, which uses encoding T4, so check for that too.
+    if (static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
+        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
+      return false;

     // Otherwise, we use encoding T4, which does not have a cc_out
     // operand.
@@ -5120,6 +4857,34 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
   return false;
 }

+bool ARMAsmParser::shouldOmitPredicateOperand(
+    StringRef Mnemonic, SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+  // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
+  unsigned RegIdx = 3;
+  if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
+      static_cast<ARMOperand *>(Operands[2])->getToken() == ".f32") {
+    if (static_cast<ARMOperand *>(Operands[3])->isToken() &&
+        static_cast<ARMOperand *>(Operands[3])->getToken() == ".f32")
+      RegIdx = 4;
+
+    if (static_cast<ARMOperand *>(Operands[RegIdx])->isReg() &&
+        (ARMMCRegisterClasses[ARM::DPRRegClassID]
+          .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg()) ||
+         ARMMCRegisterClasses[ARM::QPRRegClassID]
+          .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg())))
+      return true;
+  }
+  return false;
+}
+
+bool ARMAsmParser::isDeprecated(MCInst &Inst, StringRef &Info) {
+  if (hasV8Ops() && Inst.getOpcode() == ARM::SETEND) {
+    Info = "armv8";
+    return true;
+  }
+  return false;
+}
+
 static bool isDataTypeToken(StringRef Tok) {
   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
@@ -5266,7 +5031,17 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
       continue;

-    if (ExtraToken != ".n") {
+    // For for ARM mode generate an error if the .n qualifier is used.
+    if (ExtraToken == ".n" && !isThumb()) {
+      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
+      return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
+                   "arm mode");
+    }
+
+    // The .n qualifier is always discarded as that is what the tables
+    // and matcher expect.  In ARM mode the .w qualifier has no effect,
+    // so discard it to avoid errors that can be caused by the matcher.
+    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
     }
@@ -5312,6 +5087,15 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
     delete Op;
   }

+  // Some instructions have the same mnemonic, but don't always
+  // have a predicate. Distinguish them here and delete the
+  // predicate if needed.
+  if (shouldOmitPredicateOperand(Mnemonic, Operands)) {
+    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
+    Operands.erase(Operands.begin() + 1);
+    delete Op;
+  }
+
   // ARM mode 'blx' need special handling, as the register operand version
   // is predicable, but the label operand version is not. So, we can't rely
   // on the Mnemonic based checking to correctly figure out when to put
@@ -5363,6 +5147,26 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
     }
   }

+  // FIXME: As said above, this is all a pretty gross hack. This instruction
+  // does not fit with other "subs" and tblgen.
+  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
+  // so the Mnemonic is the original name "subs" and delete the predicate
+  // operand so it will match the table entry.
+  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
+      static_cast<ARMOperand*>(Operands[3])->isReg() &&
+      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::PC &&
+      static_cast<ARMOperand*>(Operands[4])->isReg() &&
+      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::LR &&
+      static_cast<ARMOperand*>(Operands[5])->isImm()) {
+    ARMOperand *Op0 = static_cast<ARMOperand*>(Operands[0]);
+    Operands.erase(Operands.begin());
+    delete Op0;
+    Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc));
+
+    ARMOperand *Op1 = static_cast<ARMOperand*>(Operands[1]);
+    Operands.erase(Operands.begin() + 1);
+    delete Op1;
+  }
+
   return false;
 }

@@ -5581,6 +5385,10 @@ validateInstruction(MCInst &Inst,
     }
   }

+  StringRef DepInfo;
+  if (isDeprecated(Inst, DepInfo))
+    Warning(Loc, "deprecated on " + DepInfo);
+
   return false;
 }

@@ -5862,7 +5670,9 @@ processInstruction(MCInst &Inst,
   case ARM::t2LDRpcrel:
     // Select the narrow version if the immediate will fit.
     if (Inst.getOperand(1).getImm() > 0 &&
-        Inst.getOperand(1).getImm() <= 0xff)
+        Inst.getOperand(1).getImm() <= 0xff &&
+        !(static_cast<ARMOperand*>(Operands[2])->isToken() &&
+          static_cast<ARMOperand*>(Operands[2])->getToken() == ".w"))
       Inst.setOpcode(ARM::tLDRpci);
     else
       Inst.setOpcode(ARM::t2LDRpci);
@@ -7851,8 +7661,8 @@ bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
 /// parseDirectiveThumbFunc
 ///  ::= .thumbfunc symbol_name
 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
-  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
-  bool isMachO = MAI.hasSubsectionsViaSymbols();
+  const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
+  bool isMachO = MAI->hasSubsectionsViaSymbols();
   StringRef Name;
   bool needFuncName = true;

@@ -8199,11 +8009,19 @@ bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
   if (HandlerDataLoc.isValid())
     return Error(L, ".save or .vsave must precede .handlerdata directive");

+  // RAII object to make sure parsed operands are deleted.
+  struct CleanupObject {
+    SmallVector<MCParsedAsmOperand *, 1> Operands;
+    ~CleanupObject() {
+      for (unsigned I = 0, E = Operands.size(); I != E; ++I)
+        delete Operands[I];
+    }
+  } CO;
+
   // Parse the register list
-  SmallVector<MCParsedAsmOperand*, 1> Operands;
-  if (parseRegisterList(Operands))
+  if (parseRegisterList(CO.Operands))
     return true;
-  ARMOperand *Op = (ARMOperand*)Operands[0];
+  ARMOperand *Op = (ARMOperand*)CO.Operands[0];
   if (!IsVector && !Op->isRegList())
     return Error(L, ".save expects GPR registers");
   if (IsVector && !Op->isDPRRegList())
diff --git a/lib/Target/ARM/CMakeLists.txt b/lib/Target/ARM/CMakeLists.txt
index b832508..f271a93 100644
--- a/lib/Target/ARM/CMakeLists.txt
+++ b/lib/Target/ARM/CMakeLists.txt
@@ -49,7 +49,7 @@ add_llvm_target(ARMCodeGen
   Thumb2SizeReduction.cpp
   )

-add_dependencies(LLVMARMCodeGen intrinsics_gen)
+add_dependencies(LLVMARMCodeGen ARMCommonTableGen intrinsics_gen)

 # workaround for hanging compilation on MSVC9, 10
 if( MSVC_VERSION EQUAL 1600 OR MSVC_VERSION EQUAL 1500 )
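The CleanupObject introduced in parseDirectiveRegSave above is a small RAII guard: parsed operands are heap-allocated, and every early return from the directive parser must still free them. A generic version of the pattern (illustrative; newer code would likely reach for smart pointers instead):

#include <cstddef>
#include <vector>

// Owns raw pointers and deletes them on every exit path, the same job
// CleanupObject does for the parsed operand list above.
template <typename T>
struct OwnedPointerList {
  std::vector<T *> Items;
  ~OwnedPointerList() {
    for (std::size_t I = 0, E = Items.size(); I != E; ++I)
      delete Items[I];
  }
};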
diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index a6eab33..8a06664 100644
--- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -347,6 +347,14 @@ static DecodeStatus DecodeT2AddrModeSOReg(MCInst &Inst, unsigned Val,
                                uint64_t Address, const void *Decoder);
 static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Val,
                                uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeT2LoadImm8(MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void* Decoder);
+static DecodeStatus DecodeT2LoadImm12(MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void* Decoder);
+static DecodeStatus DecodeT2LoadT(MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void* Decoder);
+static DecodeStatus DecodeT2LoadLabel(MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void* Decoder);
 static DecodeStatus DecodeT2Imm8S4(MCInst &Inst, unsigned Val,
                                uint64_t Address, const void *Decoder);
 static DecodeStatus DecodeT2AddrModeImm8s4(MCInst &Inst, unsigned Val,
@@ -448,6 +456,13 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
   }

   MI.clear();
+  result = decodeInstruction(DecoderTableVFPV832, MI, insn, Address, this, STI);
+  if (result != MCDisassembler::Fail) {
+    Size = 4;
+    return result;
+  }
+
+  MI.clear();
   result = decodeInstruction(DecoderTableNEONData32, MI, insn, Address,
                              this, STI);
   if (result != MCDisassembler::Fail) {
@@ -484,7 +499,14 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
   }

   MI.clear();
+  result = decodeInstruction(DecoderTablev8NEON32, MI, insn, Address,
+                             this, STI);
+  if (result != MCDisassembler::Fail) {
+    Size = 4;
+    return result;
+  }

+  MI.clear();
   Size = 0;
   return MCDisassembler::Fail;
 }
@@ -746,23 +768,34 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
     return result;
   }

-  MI.clear();
-  result = decodeInstruction(DecoderTableVFP32, MI, insn32, Address, this, STI);
-  if (result != MCDisassembler::Fail) {
-    Size = 4;
-    UpdateThumbVFPPredicate(MI);
-    return result;
+  if (fieldFromInstruction(insn32, 28, 4) == 0xE) {
+    MI.clear();
+    result = decodeInstruction(DecoderTableVFP32, MI, insn32, Address, this, STI);
+    if (result != MCDisassembler::Fail) {
+      Size = 4;
+      UpdateThumbVFPPredicate(MI);
+      return result;
+    }
   }

   MI.clear();
-  result = decodeInstruction(DecoderTableNEONDup32, MI, insn32, Address,
-                             this, STI);
+  result = decodeInstruction(DecoderTableVFPV832, MI, insn32, Address, this, STI);
   if (result != MCDisassembler::Fail) {
     Size = 4;
-    Check(result, AddThumbPredicate(MI));
     return result;
   }

+  if (fieldFromInstruction(insn32, 28, 4) == 0xE) {
+    MI.clear();
+    result = decodeInstruction(DecoderTableNEONDup32, MI, insn32, Address,
+                               this, STI);
+    if (result != MCDisassembler::Fail) {
+      Size = 4;
+      Check(result, AddThumbPredicate(MI));
+      return result;
+    }
+  }
+
   if (fieldFromInstruction(insn32, 24, 8) == 0xF9) {
     MI.clear();
     uint32_t NEONLdStInsn = insn32;
@@ -792,6 +825,17 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
     }
   }

+  MI.clear();
+  uint32_t NEONv8Insn = insn32;
+  NEONv8Insn &= 0xF3FFFFFF; // Clear bits 27-26
+  result = decodeInstruction(DecoderTablev8NEON32, MI, NEONv8Insn, Address,
+                             this, STI);
+  if (result != MCDisassembler::Fail) {
+    Size = 4;
+    return result;
+  }
+
+  MI.clear();
   Size = 0;
   return MCDisassembler::Fail;
 }
@@ -908,8 +952,11 @@ static DecodeStatus DecodetcGPRRegisterClass(MCInst &Inst, unsigned RegNo,
 static DecodeStatus DecoderGPRRegisterClass(MCInst &Inst, unsigned RegNo,
                                    uint64_t Address, const void *Decoder) {
-  if (RegNo == 13 || RegNo == 15) return MCDisassembler::Fail;
-  return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
+  DecodeStatus S = MCDisassembler::Success;
+  if (RegNo == 13 || RegNo == 15)
+    S = MCDisassembler::SoftFail;
+  Check(S, DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder));
+  return S;
 }

 static const uint16_t SPRDecoderTable[] = {
@@ -2104,7 +2151,7 @@ DecodeT2BInstruction(MCInst &Inst, unsigned Insn,
   unsigned imm10 = fieldFromInstruction(Insn, 16, 10);
   unsigned imm11 = fieldFromInstruction(Insn, 0, 11);
   unsigned tmp = (S << 23) | (I1 << 22) | (I2 << 21) | (imm10 << 11) | imm11;
-  int imm32 = SignExtend32<24>(tmp << 1);
+  int imm32 = SignExtend32<25>(tmp << 1);
   if (!tryAddingSymbolicOperand(Address, Address + imm32 + 4, true, 4, Inst,
                                 Decoder))
     Inst.addOperand(MCOperand::CreateImm(imm32));
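The SignExtend32<24> to SignExtend32<25> change in DecodeT2BInstruction fixes an off-by-one: tmp packs 24 bits (S, I1, I2, imm10, imm11), so after "tmp << 1" the branch offset has 25 significant bits and its sign bit sits at bit 24. A self-contained demonstration, with SignExtend32's semantics restated locally:

#include <cassert>
#include <cstdint>

// Sign-extend the bottom B bits of X, matching llvm::SignExtend32.
template <unsigned B>
int32_t signExtend32(uint32_t X) {
  return int32_t(X << (32 - B)) >> (32 - B);
}

int main() {
  uint32_t tmp = 0x800000u;     // only the S bit of the 24 packed bits set
  uint32_t shifted = tmp << 1;  // 0x1000000: bit 24 is now the sign bit
  assert(signExtend32<25>(shifted) == -(1 << 24)); // correct branch offset
  assert(signExtend32<24>(shifted) == 0);          // the old code lost the sign
  return 0;
}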
@@ -3164,6 +3211,17 @@ static DecodeStatus DecodeT2AddrModeSOReg(MCInst &Inst, unsigned Val,
   unsigned Rm = fieldFromInstruction(Val, 2, 4);
   unsigned imm = fieldFromInstruction(Val, 0, 2);

+  // Thumb stores cannot use PC as dest register.
+  switch (Inst.getOpcode()) {
+  case ARM::t2STRHs:
+  case ARM::t2STRBs:
+  case ARM::t2STRs:
+    if (Rn == 15)
+      return MCDisassembler::Fail;
+  default:
+    break;
+  }
+
   if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
     return MCDisassembler::Fail;
   if (!Check(S, DecoderGPRRegisterClass(Inst, Rm, Address, Decoder)))
@@ -3177,53 +3235,282 @@ static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn,
                                 uint64_t Address, const void *Decoder) {
   DecodeStatus S = MCDisassembler::Success;

+  unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+  unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+
+  if (Rn == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDRBs:
+      Inst.setOpcode(ARM::t2LDRBpci);
+      break;
+    case ARM::t2LDRHs:
+      Inst.setOpcode(ARM::t2LDRHpci);
+      break;
+    case ARM::t2LDRSHs:
+      Inst.setOpcode(ARM::t2LDRSHpci);
+      break;
+    case ARM::t2LDRSBs:
+      Inst.setOpcode(ARM::t2LDRSBpci);
+      break;
+    case ARM::t2LDRs:
+      Inst.setOpcode(ARM::t2LDRpci);
+      break;
+    case ARM::t2PLDs:
+      Inst.setOpcode(ARM::t2PLDpci);
+      break;
+    case ARM::t2PLIs:
+      Inst.setOpcode(ARM::t2PLIpci);
+      break;
+    default:
+      return MCDisassembler::Fail;
+    }
+
+    return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+  }
+
+  if (Rt == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDRSHs:
+      return MCDisassembler::Fail;
+    case ARM::t2LDRHs:
+      // FIXME: this instruction is only available with MP extensions,
+      // this should be checked first but we don't have access to the
+      // feature bits here.
+      Inst.setOpcode(ARM::t2PLDWs);
+      break;
+    default:
+      break;
+    }
+  }
+
   switch (Inst.getOpcode()) {
     case ARM::t2PLDs:
     case ARM::t2PLDWs:
     case ARM::t2PLIs:
       break;
-    default: {
-      unsigned Rt = fieldFromInstruction(Insn, 12, 4);
-      if (!Check(S, DecoderGPRRegisterClass(Inst, Rt, Address, Decoder)))
+    default:
+      if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
         return MCDisassembler::Fail;
   }

+  unsigned addrmode = fieldFromInstruction(Insn, 4, 2);
+  addrmode |= fieldFromInstruction(Insn, 0, 4) << 2;
+  addrmode |= fieldFromInstruction(Insn, 16, 4) << 6;
+  if (!Check(S, DecodeT2AddrModeSOReg(Inst, addrmode, Address, Decoder)))
+    return MCDisassembler::Fail;
+
+  return S;
+}
+
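All the decoders in this region slice the instruction word with fieldFromInstruction(Insn, start, size), which is just shift-and-mask; restated locally for reference (assuming size < 32):

#include <cstdint>

// Extract 'size' bits of 'Insn' starting at bit 'start' (LSB = bit 0),
// in the manner of the disassembler's fieldFromInstruction helper.
static uint32_t extractField(uint32_t Insn, unsigned start, unsigned size) {
  return (Insn >> start) & ((1u << size) - 1u);
}

// e.g. for a Thumb2 load: Rn = extractField(Insn, 16, 4),
//                         Rt = extractField(Insn, 12, 4).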
+static DecodeStatus DecodeT2LoadImm8(MCInst &Inst, unsigned Insn,
+                                     uint64_t Address, const void* Decoder) {
+  DecodeStatus S = MCDisassembler::Success;
+
+  unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+  unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+  unsigned U = fieldFromInstruction(Insn, 9, 1);
+  unsigned imm = fieldFromInstruction(Insn, 0, 8);
+  imm |= (U << 8);
+  imm |= (Rn << 9);
+
+  if (Rn == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDRi8:
+      Inst.setOpcode(ARM::t2LDRpci);
+      break;
+    case ARM::t2LDRBi8:
+      Inst.setOpcode(ARM::t2LDRBpci);
+      break;
+    case ARM::t2LDRSBi8:
+      Inst.setOpcode(ARM::t2LDRSBpci);
+      break;
+    case ARM::t2LDRHi8:
+      Inst.setOpcode(ARM::t2LDRHpci);
+      break;
+    case ARM::t2LDRSHi8:
+      Inst.setOpcode(ARM::t2LDRSHpci);
+      break;
+    case ARM::t2PLDi8:
+      Inst.setOpcode(ARM::t2PLDpci);
+      break;
+    case ARM::t2PLIi8:
+      Inst.setOpcode(ARM::t2PLIpci);
+      break;
+    default:
+      return MCDisassembler::Fail;
+    }
+    return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+  }
+
+  if (Rt == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDRSHi8:
+      return MCDisassembler::Fail;
+    default:
+      break;
+    }
+  }
+
+  switch (Inst.getOpcode()) {
+  case ARM::t2PLDi8:
+  case ARM::t2PLIi8:
+  case ARM::t2PLDWi8:
+    break;
+  default:
+    if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+      return MCDisassembler::Fail;
+  }
+
+  if (!Check(S, DecodeT2AddrModeImm8(Inst, imm, Address, Decoder)))
+    return MCDisassembler::Fail;
+  return S;
+}
+
+static DecodeStatus DecodeT2LoadImm12(MCInst &Inst, unsigned Insn,
+                                      uint64_t Address, const void* Decoder) {
+  DecodeStatus S = MCDisassembler::Success;
+
   unsigned Rn = fieldFromInstruction(Insn, 16, 4);
-  if (Rn == 0xF) {
+  unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+  unsigned imm = fieldFromInstruction(Insn, 0, 12);
+  imm |= (Rn << 13);
+
+  if (Rn == 15) {
     switch (Inst.getOpcode()) {
-    case ARM::t2LDRBs:
-      Inst.setOpcode(ARM::t2LDRBpci);
-      break;
-    case ARM::t2LDRHs:
-      Inst.setOpcode(ARM::t2LDRHpci);
-      break;
-    case ARM::t2LDRSHs:
-      Inst.setOpcode(ARM::t2LDRSHpci);
-      break;
-    case ARM::t2LDRSBs:
-      Inst.setOpcode(ARM::t2LDRSBpci);
+    case ARM::t2LDRi12:
+      Inst.setOpcode(ARM::t2LDRpci);
+      break;
+    case ARM::t2LDRHi12:
+      Inst.setOpcode(ARM::t2LDRHpci);
+      break;
+    case ARM::t2LDRSHi12:
+      Inst.setOpcode(ARM::t2LDRSHpci);
+      break;
+    case ARM::t2LDRBi12:
+      Inst.setOpcode(ARM::t2LDRBpci);
+      break;
+    case ARM::t2LDRSBi12:
+      Inst.setOpcode(ARM::t2LDRSBpci);
+      break;
+    case ARM::t2PLDi12:
+      Inst.setOpcode(ARM::t2PLDpci);
+      break;
+    case ARM::t2PLIi12:
+      Inst.setOpcode(ARM::t2PLIpci);
+      break;
+    default:
+      return MCDisassembler::Fail;
+    }
+    return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+  }
+
+  if (Rt == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDRSHi12:
+      return MCDisassembler::Fail;
+    case ARM::t2LDRHi12:
+      Inst.setOpcode(ARM::t2PLDi12);
+      break;
+    default:
+      break;
+    }
+  }
+
+  switch (Inst.getOpcode()) {
+  case ARM::t2PLDi12:
+  case ARM::t2PLDWi12:
+  case ARM::t2PLIi12:
+    break;
+  default:
+    if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+      return MCDisassembler::Fail;
+  }
+
+  if (!Check(S, DecodeT2AddrModeImm12(Inst, imm, Address, Decoder)))
+    return MCDisassembler::Fail;
+  return S;
+}
+
+static DecodeStatus DecodeT2LoadT(MCInst &Inst, unsigned Insn,
+                                  uint64_t Address, const void* Decoder) {
+  DecodeStatus S = MCDisassembler::Success;
+
+  unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+  unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+  unsigned imm = fieldFromInstruction(Insn, 0, 8);
+  imm |= (Rn << 9);
+
+  if (Rn == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDRT:
+      Inst.setOpcode(ARM::t2LDRpci);
+      break;
+    case ARM::t2LDRBT:
+      Inst.setOpcode(ARM::t2LDRBpci);
+      break;
+    case ARM::t2LDRHT:
+      Inst.setOpcode(ARM::t2LDRHpci);
+      break;
+    case ARM::t2LDRSBT:
+      Inst.setOpcode(ARM::t2LDRSBpci);
+      break;
+    case ARM::t2LDRSHT:
+      Inst.setOpcode(ARM::t2LDRSHpci);
+      break;
+    default:
+      return MCDisassembler::Fail;
+    }
+    return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+  }
+
+  if (!Check(S, DecoderGPRRegisterClass(Inst, Rt, Address, Decoder)))
+    return MCDisassembler::Fail;
+  if (!Check(S, DecodeT2AddrModeImm8(Inst, imm, Address, Decoder)))
+    return MCDisassembler::Fail;
+  return S;
+}
+
+static DecodeStatus DecodeT2LoadLabel(MCInst &Inst, unsigned Insn,
+                                      uint64_t Address, const void* Decoder) {
+  DecodeStatus S = MCDisassembler::Success;
+
+  unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+  unsigned U = fieldFromInstruction(Insn, 23, 1);
+  int imm = fieldFromInstruction(Insn, 0, 12);
+
+  if (Rt == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDRBpci:
+    case ARM::t2LDRHpci:
+      Inst.setOpcode(ARM::t2PLDpci);
       break;
-    case ARM::t2PLDs:
-      Inst.setOpcode(ARM::t2PLDi12);
-      Inst.addOperand(MCOperand::CreateReg(ARM::PC));
+    case ARM::t2LDRSBpci:
+      Inst.setOpcode(ARM::t2PLIpci);
       break;
-    default:
+    case ARM::t2LDRSHpci:
       return MCDisassembler::Fail;
+    default:
+      break;
     }
+  }

-    int imm = fieldFromInstruction(Insn, 0, 12);
-    if (!fieldFromInstruction(Insn, 23, 1)) imm *= -1;
-    Inst.addOperand(MCOperand::CreateImm(imm));
-
-    return S;
+  switch(Inst.getOpcode()) {
+  case ARM::t2PLDpci:
+  case ARM::t2PLIpci:
+    break;
+  default:
+    if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder)))
+      return MCDisassembler::Fail;
   }

-  unsigned addrmode = fieldFromInstruction(Insn, 4, 2);
-  addrmode |= fieldFromInstruction(Insn, 0, 4) << 2;
-  addrmode |= fieldFromInstruction(Insn, 16, 4) << 6;
-  if (!Check(S, DecodeT2AddrModeSOReg(Inst, addrmode, Address, Decoder)))
-    return MCDisassembler::Fail;
+  if (!U) {
+    // Special case for #-0.
+    if (imm == 0)
+      imm = INT32_MIN;
+    else
+      imm = -imm;
+  }
+  Inst.addOperand(MCOperand::CreateImm(imm));

   return S;
 }
@@ -3292,6 +3579,21 @@ static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val,
   unsigned Rn = fieldFromInstruction(Val, 9, 4);
   unsigned imm = fieldFromInstruction(Val, 0, 9);

+  // Thumb stores cannot use PC as dest register.
+  switch (Inst.getOpcode()) {
+  case ARM::t2STRT:
+  case ARM::t2STRBT:
+  case ARM::t2STRHT:
+  case ARM::t2STRi8:
+  case ARM::t2STRHi8:
+  case ARM::t2STRBi8:
+    if (Rn == 15)
+      return MCDisassembler::Fail;
+    break;
+  default:
+    break;
+  }
+
   // Some instructions always use an additive offset.
   switch (Inst.getOpcode()) {
   case ARM::t2LDRT:
@@ -3327,6 +3629,37 @@ static DecodeStatus DecodeT2LdStPre(MCInst &Inst, unsigned Insn,
   addr |= Rn << 9;
   unsigned load = fieldFromInstruction(Insn, 20, 1);

+  if (Rn == 15) {
+    switch (Inst.getOpcode()) {
+    case ARM::t2LDR_PRE:
+    case ARM::t2LDR_POST:
+      Inst.setOpcode(ARM::t2LDRpci);
+      break;
+    case ARM::t2LDRB_PRE:
+    case ARM::t2LDRB_POST:
+      Inst.setOpcode(ARM::t2LDRBpci);
+      break;
+    case ARM::t2LDRH_PRE:
+    case ARM::t2LDRH_POST:
+      Inst.setOpcode(ARM::t2LDRHpci);
+      break;
+    case ARM::t2LDRSB_PRE:
+    case ARM::t2LDRSB_POST:
+      if (Rt == 15)
+        Inst.setOpcode(ARM::t2PLIpci);
+      else
+        Inst.setOpcode(ARM::t2LDRSBpci);
+      break;
+    case ARM::t2LDRSH_PRE:
+    case ARM::t2LDRSH_POST:
+      Inst.setOpcode(ARM::t2LDRSHpci);
+      break;
+    default:
+      return MCDisassembler::Fail;
+    }
+    return DecodeT2LoadLabel(Inst, Insn, Address, Decoder);
+  }
+
   if (!load) {
     if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
       return MCDisassembler::Fail;
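Several of these paths encode the assembly-level "#-0" offset using INT32_MIN as a sentinel, since a plain signed immediate cannot distinguish minus zero from plus zero; the instruction printers later in this diff turn the sentinel back into "#-0". The convention in miniature:

#include <climits>
#include <cstdio>

// INT_MIN (INT32_MIN in the decoders above) stands in for "minus zero",
// letting a subtract-from-base zero offset survive in a signed immediate.
static void printOffset(int OffImm) {
  if (OffImm == INT_MIN)
    std::printf("#-0");
  else if (OffImm < 0)
    std::printf("#-%d", -OffImm);
  else
    std::printf("#%d", OffImm);
}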
@@ -3353,6 +3686,17 @@ static DecodeStatus DecodeT2AddrModeImm12(MCInst &Inst, unsigned Val,
   unsigned Rn = fieldFromInstruction(Val, 13, 4);
   unsigned imm = fieldFromInstruction(Val, 0, 12);

+  // Thumb stores cannot use PC as dest register.
+  switch (Inst.getOpcode()) {
+  case ARM::t2STRi12:
+  case ARM::t2STRBi12:
+  case ARM::t2STRHi12:
+    if (Rn == 15)
+      return MCDisassembler::Fail;
+  default:
+    break;
+  }
+
   if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
     return MCDisassembler::Fail;
   Inst.addOperand(MCOperand::CreateImm(imm));
@@ -4364,10 +4708,8 @@ static DecodeStatus DecodeIT(MCInst &Inst, unsigned Insn,
     S = MCDisassembler::SoftFail;
   }

-  if (mask == 0x0) {
-    mask |= 0x8;
-    S = MCDisassembler::SoftFail;
-  }
+  if (mask == 0x0)
+    return MCDisassembler::Fail;

   Inst.addOperand(MCOperand::CreateImm(pred));
   Inst.addOperand(MCOperand::CreateImm(mask));
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index 7fef795..97da232 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -243,15 +243,6 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
     return;
   }

-  // Thumb1 NOP
-  if (Opcode == ARM::tMOVr && MI->getOperand(0).getReg() == ARM::R8 &&
-      MI->getOperand(1).getReg() == ARM::R8) {
-    O << "\tnop";
-    printPredicateOperand(MI, 2, O);
-    printAnnotation(O, Annot);
-    return;
-  }
-
   // Combine 2 GPRs from disassember into a GPRPair to match with instr def.
   // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
   // a single GPRPair reg operand is used in the .td file to replace the two
@@ -315,15 +306,29 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
 void ARMInstPrinter::printThumbLdrLabelOperand(const MCInst *MI, unsigned OpNum,
                                                raw_ostream &O) {
   const MCOperand &MO1 = MI->getOperand(OpNum);
-  if (MO1.isExpr())
+  if (MO1.isExpr()) {
     O << *MO1.getExpr();
-  else if (MO1.isImm()) {
-    O << markup("<mem:") << "[pc, "
-      << markup("<imm:") << "#" << formatImm(MO1.getImm())
-      << markup(">]>", "]");
+    return;
   }
-  else
-    llvm_unreachable("Unknown LDR label operand?");
+
+  O << markup("<mem:") << "[pc, ";
+
+  int32_t OffImm = (int32_t)MO1.getImm();
+  bool isSub = OffImm < 0;
+
+  // Special value for #-0. All others are normal.
+  if (OffImm == INT32_MIN)
+    OffImm = 0;
+  if (isSub) {
+    O << markup("<imm:")
+      << "#-" << formatImm(-OffImm)
+      << markup(">");
+  } else {
+    O << markup("<imm:")
+      << "#" << formatImm(OffImm)
+      << markup(">");
+  }
+  O << "]" << markup(">");
 }

 // so_reg is a 4-operand unit corresponding to register forms of the A5.1
@@ -895,6 +900,7 @@ void ARMInstPrinter::printPCLabel(const MCInst *MI, unsigned OpNum,
   llvm_unreachable("Unhandled PC-relative pseudo-instruction!");
 }

+template<unsigned scale>
 void ARMInstPrinter::printAdrLabelOperand(const MCInst *MI, unsigned OpNum,
                                           raw_ostream &O) {
   const MCOperand &MO = MI->getOperand(OpNum);
@@ -904,7 +910,7 @@ void ARMInstPrinter::printAdrLabelOperand(const MCInst *MI, unsigned OpNum,
     return;
   }

-  int32_t OffImm = (int32_t)MO.getImm();
+  int32_t OffImm = (int32_t)MO.getImm() << scale;

   O << markup("<imm:");
   if (OffImm == INT32_MIN)
@@ -1065,6 +1071,7 @@ void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
   O << "]" << markup(">");
 }

+template<bool AlwaysPrintImm0>
 void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI,
                                                 unsigned OpNum,
                                                 raw_ostream &O) {
@@ -1075,22 +1082,25 @@ void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI,
   printRegName(O, MO1.getReg());

   int32_t OffImm = (int32_t)MO2.getImm();
+  bool isSub = OffImm < 0;

   // Don't print +0.
- if (OffImm != 0) - O << ", "; - if (OffImm != 0 && UseMarkup) - O << "<imm:"; if (OffImm == INT32_MIN) - O << "#-0"; - else if (OffImm < 0) - O << "#-" << -OffImm; - else if (OffImm > 0) - O << "#" << OffImm; - if (OffImm != 0 && UseMarkup) - O << ">"; + OffImm = 0; + if (isSub) { + O << ", " + << markup("<imm:") + << "#-" << -OffImm + << markup(">"); + } else if (AlwaysPrintImm0 || OffImm > 0) { + O << ", " + << markup("<imm:") + << "#" << OffImm + << markup(">"); + } O << "]" << markup(">"); } +template<bool AlwaysPrintImm0> void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O) { @@ -1106,22 +1116,24 @@ void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI, printRegName(O, MO1.getReg()); int32_t OffImm = (int32_t)MO2.getImm(); + bool isSub = OffImm < 0; assert(((OffImm & 0x3) == 0) && "Not a valid immediate!"); // Don't print +0. - if (OffImm != 0) - O << ", "; - if (OffImm != 0 && UseMarkup) - O << "<imm:"; if (OffImm == INT32_MIN) - O << "#-0"; - else if (OffImm < 0) - O << "#-" << -OffImm; - else if (OffImm > 0) - O << "#" << OffImm; - if (OffImm != 0 && UseMarkup) - O << ">"; + OffImm = 0; + if (isSub) { + O << ", " + << markup("<imm:") + << "#-" << -OffImm + << markup(">"); + } else if (AlwaysPrintImm0 || OffImm > 0) { + O << ", " + << markup("<imm:") + << "#" << OffImm + << markup(">"); + } O << "]" << markup(">"); } @@ -1148,7 +1160,9 @@ void ARMInstPrinter::printT2AddrModeImm8OffsetOperand(const MCInst *MI, const MCOperand &MO1 = MI->getOperand(OpNum); int32_t OffImm = (int32_t)MO1.getImm(); O << ", " << markup("<imm:"); - if (OffImm < 0) + if (OffImm == INT32_MIN) + O << "#-0"; + else if (OffImm < 0) O << "#-" << -OffImm; else O << "#" << OffImm; @@ -1163,19 +1177,14 @@ void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI, assert(((OffImm & 0x3) == 0) && "Not a valid immediate!"); - // Don't print +0. 
- if (OffImm != 0) - O << ", "; - if (OffImm != 0 && UseMarkup) - O << "<imm:"; + O << ", " << markup("<imm:"); if (OffImm == INT32_MIN) O << "#-0"; else if (OffImm < 0) O << "#-" << -OffImm; - else if (OffImm > 0) + else O << "#" << OffImm; - if (OffImm != 0 && UseMarkup) - O << ">"; + O << markup(">"); } void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI, diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h index 5a64348..15ae8d1 100644 --- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h +++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h @@ -76,6 +76,7 @@ public: void printPKHLSLShiftImm(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printPKHASRShiftImm(const MCInst *MI, unsigned OpNum, raw_ostream &O); + template <unsigned scale> void printAdrLabelOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printThumbSRImm(const MCInst *MI, unsigned OpNum, raw_ostream &O); @@ -97,8 +98,10 @@ public: template<bool AlwaysPrintImm0> void printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + template<bool AlwaysPrintImm0> void printT2AddrModeImm8Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + template<bool AlwaysPrintImm0> void printT2AddrModeImm8s4Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printT2AddrModeImm0_1020s4Operand(const MCInst *MI, unsigned OpNum, diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp index 8baa3a6..b1e25d8 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -152,7 +152,7 @@ static unsigned getRelaxedOpcode(unsigned Op) { switch (Op) { default: return Op; case ARM::tBcc: return ARM::t2Bcc; - case ARM::tLDRpciASM: return ARM::t2LDRpci; + case ARM::tLDRpci: return ARM::t2LDRpci; case ARM::tADR: return ARM::t2ADR; case ARM::tB: return ARM::t2B; } diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp index 679d3c4..6b98205 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp @@ -109,18 +109,17 @@ public: /// This is one of the functions used to emit data into an ELF section, so the /// ARM streamer overrides it to add the appropriate mapping symbol ($d) if /// necessary. - virtual void EmitBytes(StringRef Data, unsigned AddrSpace) { + virtual void EmitBytes(StringRef Data) { EmitDataMappingSymbol(); - MCELFStreamer::EmitBytes(Data, AddrSpace); + MCELFStreamer::EmitBytes(Data); } /// This is one of the functions used to emit data into an ELF section, so the /// ARM streamer overrides it to add the appropriate mapping symbol ($d) if /// necessary. 
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size, - unsigned AddrSpace) { + virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) { EmitDataMappingSymbol(); - MCELFStreamer::EmitValueImpl(Value, Size, AddrSpace); + MCELFStreamer::EmitValueImpl(Value, Size); } virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) { @@ -204,7 +203,7 @@ private: void EmitPersonalityFixup(StringRef Name); void FlushPendingOffset(); - void FlushUnwindOpcodes(bool AllowCompactModel0); + void FlushUnwindOpcodes(bool NoHandlerData); void SwitchToEHSection(const char *Prefix, unsigned Type, unsigned Flags, SectionKind Kind, const MCSymbol &Fn); @@ -336,17 +335,17 @@ void ARMELFStreamer::EmitFnEnd() { MCSymbolRefExpr::VK_ARM_PREL31, getContext()); - EmitValue(FnStartRef, 4, 0); + EmitValue(FnStartRef, 4); if (CantUnwind) { - EmitIntValue(EXIDX_CANTUNWIND, 4, 0); + EmitIntValue(EXIDX_CANTUNWIND, 4); } else if (ExTab) { // Emit a reference to the unwind opcodes in the ".ARM.extab" section. const MCSymbolRefExpr *ExTabEntryRef = MCSymbolRefExpr::Create(ExTab, MCSymbolRefExpr::VK_ARM_PREL31, getContext()); - EmitValue(ExTabEntryRef, 4, 0); + EmitValue(ExTabEntryRef, 4); } else { // For the __aeabi_unwind_cpp_pr0, we have to emit the unwind opcodes in // the second word of exception index table entry. The size of the unwind @@ -356,7 +355,7 @@ void ARMELFStreamer::EmitFnEnd() { assert(Opcodes.size() == 4u && "Unwind opcode size for __aeabi_cpp_unwind_pr0 must be equal to 4"); EmitBytes(StringRef(reinterpret_cast<const char*>(Opcodes.data()), - Opcodes.size()), 0); + Opcodes.size())); } // Switch to the section containing FnStart @@ -377,13 +376,13 @@ void ARMELFStreamer::FlushPendingOffset() { } } -void ARMELFStreamer::FlushUnwindOpcodes(bool AllowCompactModel0) { +void ARMELFStreamer::FlushUnwindOpcodes(bool NoHandlerData) { // Emit the unwind opcode to restore $sp. if (UsedFP) { - const MCRegisterInfo &MRI = getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = getContext().getRegisterInfo(); int64_t LastRegSaveSPOffset = SPOffset - PendingOffset; UnwindOpAsm.EmitSPOffset(LastRegSaveSPOffset - FPOffset); - UnwindOpAsm.EmitSetSP(MRI.getEncodingValue(FPReg)); + UnwindOpAsm.EmitSetSP(MRI->getEncodingValue(FPReg)); } else { FlushPendingOffset(); } @@ -394,7 +393,7 @@ void ARMELFStreamer::FlushUnwindOpcodes(bool AllowCompactModel0) { // For compact model 0, we have to emit the unwind opcodes in the .ARM.exidx // section. Thus, we don't have to create an entry in the .ARM.extab // section. - if (AllowCompactModel0 && PersonalityIndex == AEABI_UNWIND_CPP_PR0) + if (NoHandlerData && PersonalityIndex == AEABI_UNWIND_CPP_PR0) return; // Switch to .ARM.extab section. @@ -412,12 +411,22 @@ void ARMELFStreamer::FlushUnwindOpcodes(bool AllowCompactModel0) { MCSymbolRefExpr::VK_ARM_PREL31, getContext()); - EmitValue(PersonalityRef, 4, 0); + EmitValue(PersonalityRef, 4); } // Emit unwind opcodes EmitBytes(StringRef(reinterpret_cast<const char *>(Opcodes.data()), - Opcodes.size()), 0); + Opcodes.size())); + + // According to ARM EHABI section 9.2, if the __aeabi_unwind_cpp_pr1() or + // __aeabi_unwind_cpp_pr2() is used, then the handler data must be emitted + // after the unwind opcodes. The handler data consists of several 32-bit + // words, and should be terminated by zero. + // + // In case that the .handlerdata directive is not specified by the + // programmer, we should emit zero to terminate the handler data. 
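// Aside (editorial, not part of this commit): when a .handlerdata
// directive is present, EmitHandlerData() flushes with NoHandlerData set
// to false and the program's own handler-data words follow instead, so
// the synthesized zero terminator below is emitted only on the path with
// no explicit handler data.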
+ if (NoHandlerData && !Personality) + EmitIntValue(0, 4); } void ARMELFStreamer::EmitHandlerData() { @@ -458,9 +467,9 @@ void ARMELFStreamer::EmitRegSave(const SmallVectorImpl<unsigned> &RegList, // Collect the registers in the register list unsigned Count = 0; uint32_t Mask = 0; - const MCRegisterInfo &MRI = getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = getContext().getRegisterInfo(); for (size_t i = 0; i < RegList.size(); ++i) { - unsigned Reg = MRI.getEncodingValue(RegList[i]); + unsigned Reg = MRI->getEncodingValue(RegList[i]); assert(Reg < (IsVector ? 32U : 16U) && "Register out of range"); unsigned Bit = (1u << Reg); if ((Mask & Bit) == 0) { diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp index 2aa1010..a18d465 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp @@ -315,6 +315,8 @@ public: unsigned EncodedValue) const; unsigned NEONThumb2DupPostEncoder(const MCInst &MI, unsigned EncodedValue) const; + unsigned NEONThumb2V8PostEncoder(const MCInst &MI, + unsigned EncodedValue) const; unsigned VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue) const; @@ -389,6 +391,17 @@ unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI, return EncodedValue; } +/// Post-process encoded NEON v8 instructions, and rewrite them to Thumb2 form +/// if we are in Thumb2. +unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI, + unsigned EncodedValue) const { + if (isThumb2()) { + EncodedValue |= 0xC000000; // Set bits 27-26 + } + + return EncodedValue; +} + /// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite /// them to their Thumb2 form if we are currently in Thumb2 mode. unsigned ARMMCCodeEmitter:: @@ -407,7 +420,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups) const { if (MO.isReg()) { unsigned Reg = MO.getReg(); - unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg); + unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg); // Q registers are encoded as 2x their register number. switch (Reg) { @@ -436,7 +449,7 @@ EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg, const MCOperand &MO = MI.getOperand(OpIdx); const MCOperand &MO1 = MI.getOperand(OpIdx + 1); - Reg = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); int32_t SImm = MO1.getImm(); bool isAdd = true; @@ -724,8 +737,8 @@ getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx, // {2-0} = Rn const MCOperand &MO1 = MI.getOperand(OpIdx); const MCOperand &MO2 = MI.getOperand(OpIdx + 1); - unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO1.getReg()); - unsigned Rm = CTX.getRegisterInfo().getEncodingValue(MO2.getReg()); + unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg()); + unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO2.getReg()); return (Rm << 3) | Rn; } @@ -741,12 +754,12 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx, // If The first operand isn't a register, we have a label reference. const MCOperand &MO = MI.getOperand(OpIdx); if (!MO.isReg()) { - Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC. + Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC. Imm12 = 0; - isAdd = false ; // 'U' bit is set as part of the fixup. 
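// Aside (editorial, not part of this commit): for a label reference the
// final offset is unknown at encode time, so Rn is encoded as PC with a
// zero immediate and a fixup is recorded; the backend later patches in
// both the offset bits and the 'U' (add/subtract) bit once the label's
// address is resolved.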
if (MO.isExpr()) { const MCExpr *Expr = MO.getExpr(); + isAdd = false ; // 'U' bit is set as part of the fixup. MCFixupKind Kind; if (isThumb2()) @@ -821,7 +834,7 @@ getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx, // If The first operand isn't a register, we have a label reference. const MCOperand &MO = MI.getOperand(OpIdx); if (!MO.isReg()) { - Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC. + Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC. Imm8 = 0; isAdd = false ; // 'U' bit is set as part of the fixup. @@ -857,7 +870,7 @@ getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx, // {7-0} = imm8 const MCOperand &MO = MI.getOperand(OpIdx); const MCOperand &MO1 = MI.getOperand(OpIdx + 1); - unsigned Reg = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + unsigned Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); unsigned Imm8 = MO1.getImm(); return (Reg << 8) | Imm8; } @@ -940,8 +953,8 @@ getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, const MCOperand &MO = MI.getOperand(OpIdx); const MCOperand &MO1 = MI.getOperand(OpIdx+1); const MCOperand &MO2 = MI.getOperand(OpIdx+2); - unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); - unsigned Rm = CTX.getRegisterInfo().getEncodingValue(MO1.getReg()); + unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); + unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg()); unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()); bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add; ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm()); @@ -975,7 +988,7 @@ getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx, // {12} isAdd // {11-0} imm12/Rm const MCOperand &MO = MI.getOperand(OpIdx); - unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups); Binary |= Rn << 14; return Binary; @@ -998,7 +1011,7 @@ getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm); Binary <<= 7; // Shift amount is bits [11:7] Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5] - Binary |= CTX.getRegisterInfo().getEncodingValue(MO.getReg()); // Rm is bits [3:0] + Binary |= CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Rm is bits [3:0] } return Binary | (isAdd << 12) | (isReg << 13); } @@ -1011,7 +1024,7 @@ getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx, const MCOperand &MO = MI.getOperand(OpIdx); const MCOperand &MO1 = MI.getOperand(OpIdx+1); bool isAdd = MO1.getImm() != 0; - return CTX.getRegisterInfo().getEncodingValue(MO.getReg()) | (isAdd << 4); + return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()) | (isAdd << 4); } uint32_t ARMMCCodeEmitter:: @@ -1029,7 +1042,7 @@ getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, uint32_t Imm8 = ARM_AM::getAM3Offset(Imm); // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8 if (!isImm) - Imm8 = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); return Imm8 | (isAdd << 8) | (isImm << 9); } @@ -1047,7 +1060,7 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, // If The first operand isn't a register, we have a label reference. if (!MO.isReg()) { - unsigned Rn = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC. 
+ unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC. assert(MO.isExpr() && "Unexpected machine operand type!"); const MCExpr *Expr = MO.getExpr(); @@ -1057,14 +1070,14 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, ++MCNumCPRelocations; return (Rn << 9) | (1 << 13); } - unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); unsigned Imm = MO2.getImm(); bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add; bool isImm = MO1.getReg() == 0; uint32_t Imm8 = ARM_AM::getAM3Offset(Imm); // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8 if (!isImm) - Imm8 = CTX.getRegisterInfo().getEncodingValue(MO1.getReg()); + Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg()); return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13); } @@ -1092,7 +1105,7 @@ getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx, // {2-0} = Rn const MCOperand &MO = MI.getOperand(OpIdx); const MCOperand &MO1 = MI.getOperand(OpIdx + 1); - unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); unsigned Imm5 = MO1.getImm(); return ((Imm5 & 0x1f) << 3) | Rn; } @@ -1119,7 +1132,7 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx, // If The first operand isn't a register, we have a label reference. const MCOperand &MO = MI.getOperand(OpIdx); if (!MO.isReg()) { - Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC. + Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC. Imm8 = 0; isAdd = false; // 'U' bit is handled as part of the fixup. @@ -1165,7 +1178,7 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx, ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm()); // Encode Rm. - unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Encode the shift opcode. unsigned SBits = 0; @@ -1190,7 +1203,7 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx, // Encode the shift operation Rs. // Encode Rs bit[11:8]. assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0); - return Binary | (CTX.getRegisterInfo().getEncodingValue(Rs) << ARMII::RegRsShift); + return Binary | (CTX.getRegisterInfo()->getEncodingValue(Rs) << ARMII::RegRsShift); } unsigned ARMMCCodeEmitter:: @@ -1209,7 +1222,7 @@ getSORegImmOpValue(const MCInst &MI, unsigned OpIdx, ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm()); // Encode Rm. - unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Encode the shift opcode. unsigned SBits = 0; @@ -1248,9 +1261,9 @@ getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum, // Encoded as [Rn, Rm, imm]. // FIXME: Needs fixup support. - unsigned Value = CTX.getRegisterInfo().getEncodingValue(MO1.getReg()); + unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg()); Value <<= 4; - Value |= CTX.getRegisterInfo().getEncodingValue(MO2.getReg()); + Value |= CTX.getRegisterInfo()->getEncodingValue(MO2.getReg()); Value <<= 2; Value |= MO3.getImm(); @@ -1264,7 +1277,7 @@ getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum, const MCOperand &MO2 = MI.getOperand(OpNum+1); // FIXME: Needs fixup support. 
- unsigned Value = CTX.getRegisterInfo().getEncodingValue(MO1.getReg()); + unsigned Value = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg()); // Even though the immediate is 8 bits long, we need 9 bits in order // to represent the (inverse of the) sign bit. @@ -1326,7 +1339,7 @@ getT2SORegOpValue(const MCInst &MI, unsigned OpIdx, ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm()); // Encode Rm. - unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Encode the shift opcode. unsigned SBits = 0; @@ -1382,7 +1395,7 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op, if (SPRRegs || DPRRegs) { // VLDM/VSTM - unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg); + unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg); unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff; Binary |= (RegNo & 0x1f) << 8; if (SPRRegs) @@ -1391,7 +1404,7 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op, Binary |= NumRegs * 2; } else { for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) { - unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(MI.getOperand(I).getReg()); + unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(MI.getOperand(I).getReg()); Binary |= 1 << RegNo; } } @@ -1407,7 +1420,7 @@ getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op, const MCOperand &Reg = MI.getOperand(Op); const MCOperand &Imm = MI.getOperand(Op + 1); - unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg()); + unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg()); unsigned Align = 0; switch (Imm.getImm()) { @@ -1430,7 +1443,7 @@ getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op, const MCOperand &Reg = MI.getOperand(Op); const MCOperand &Imm = MI.getOperand(Op + 1); - unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg()); + unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg()); unsigned Align = 0; switch (Imm.getImm()) { @@ -1456,7 +1469,7 @@ getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op, const MCOperand &Reg = MI.getOperand(Op); const MCOperand &Imm = MI.getOperand(Op + 1); - unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg()); + unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg.getReg()); unsigned Align = 0; switch (Imm.getImm()) { @@ -1475,7 +1488,7 @@ getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { const MCOperand &MO = MI.getOperand(Op); if (MO.getReg() == 0) return 0x0D; - return CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); } unsigned ARMMCCodeEmitter:: diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp index 14fd03f..caa1949 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp @@ -59,7 +59,10 @@ std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) { std::string ARMArchFeature; if (Idx) { unsigned SubVer = TT[Idx]; - if (SubVer >= '7' && SubVer <= '9') { + if (SubVer == '8') { + // FIXME: Parse v8 features + ARMArchFeature = "+v8"; + } else if (SubVer == '7') { if (Len >= Idx+2 && TT[Idx+1] == 'm') { isThumb = true; if (NoCPU) diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp index 3e69098..0ddcad2 100644 --- a/lib/Target/CppBackend/CPPBackend.cpp +++ 
b/lib/Target/CppBackend/CPPBackend.cpp @@ -1832,7 +1832,7 @@ void CppWriter::printInline(const std::string& fname, unsigned arg_count = 1; for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end(); AI != AE; ++AI) { - Out << ", Value* arg_" << arg_count; + Out << ", Value* arg_" << arg_count++; } Out << ") {"; nl(Out); diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt index 57044b2..2b79791 100644 --- a/lib/Target/Hexagon/CMakeLists.txt +++ b/lib/Target/Hexagon/CMakeLists.txt @@ -9,8 +9,6 @@ tablegen(LLVM HexagonGenSubtargetInfo.inc -gen-subtarget) tablegen(LLVM HexagonGenDFAPacketizer.inc -gen-dfa-packetizer) add_public_tablegen_target(HexagonCommonTableGen) -set(LLVM_COMMON_DEPENDS intrinsics_gen) - add_llvm_target(HexagonCodeGen HexagonAsmPrinter.cpp HexagonCallingConvLower.cpp @@ -38,6 +36,8 @@ add_llvm_target(HexagonCodeGen HexagonCopyToCombine.cpp ) +add_dependencies(LLVMHexagonCodeGen HexagonCommonTableGen intrinsics_gen) + add_subdirectory(TargetInfo) add_subdirectory(InstPrinter) add_subdirectory(MCTargetDesc) diff --git a/lib/Target/Hexagon/Hexagon.h b/lib/Target/Hexagon/Hexagon.h index b88637a..5467ee3 100644 --- a/lib/Target/Hexagon/Hexagon.h +++ b/lib/Target/Hexagon/Hexagon.h @@ -29,7 +29,7 @@ namespace llvm { class HexagonTargetMachine; class raw_ostream; - FunctionPass *createHexagonISelDag(const HexagonTargetMachine &TM, + FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM, CodeGenOpt::Level OptLevel); FunctionPass *createHexagonDelaySlotFillerPass(const TargetMachine &TM); FunctionPass *createHexagonFPMoverPass(const TargetMachine &TM); diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/lib/Target/Hexagon/HexagonCallingConvLower.cpp index fc5503a..f5f958c 100644 --- a/lib/Target/Hexagon/HexagonCallingConvLower.cpp +++ b/lib/Target/Hexagon/HexagonCallingConvLower.cpp @@ -25,7 +25,7 @@ using namespace llvm; Hexagon_CCState::Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &tm, - SmallVector<CCValAssign, 16> &locs, + SmallVectorImpl<CCValAssign> &locs, LLVMContext &c) : CallingConv(CC), IsVarArg(isVarArg), TM(tm), Locs(locs), Context(c) { // No stack is used. diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.h b/lib/Target/Hexagon/HexagonCallingConvLower.h index eed99f4..33c8306 100644 --- a/lib/Target/Hexagon/HexagonCallingConvLower.h +++ b/lib/Target/Hexagon/HexagonCallingConvLower.h @@ -48,14 +48,14 @@ class Hexagon_CCState { CallingConv::ID CallingConv; bool IsVarArg; const TargetMachine &TM; - SmallVector<CCValAssign, 16> &Locs; + SmallVectorImpl<CCValAssign> &Locs; LLVMContext &Context; unsigned StackOffset; SmallVector<uint32_t, 16> UsedRegs; public: Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM, - SmallVector<CCValAssign, 16> &locs, LLVMContext &c); + SmallVectorImpl<CCValAssign> &locs, LLVMContext &c); void addLoc(const CCValAssign &V) { Locs.push_back(V); diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index d002788..3c4ca0f 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -134,7 +134,7 @@ namespace { /// has a computable trip count and, if so, return a value that represents /// the trip count expression. CountValue *getLoopTripCount(MachineLoop *L, - SmallVector<MachineInstr*, 2> &OldInsts); + SmallVectorImpl<MachineInstr *> &OldInsts); /// \brief Return the expression that represents the number of times /// a loop iterates. 
The function takes the operands that represent the @@ -164,7 +164,7 @@ namespace { /// \brief Return true if the instruction is now dead. bool isDead(const MachineInstr *MI, - SmallVector<MachineInstr*, 1> &DeadPhis) const; + SmallVectorImpl<MachineInstr *> &DeadPhis) const; /// \brief Remove the instruction if it is now dead. void removeIfDead(MachineInstr *MI); @@ -428,7 +428,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L, /// induction variable patterns that are used in the calculation for /// the number of time the loop is executed. CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, - SmallVector<MachineInstr*, 2> &OldInsts) { + SmallVectorImpl<MachineInstr *> &OldInsts) { MachineBasicBlock *TopMBB = L->getTopBlock(); MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin(); assert(PI != TopMBB->pred_end() && @@ -890,7 +890,7 @@ bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L) const { /// for inline asm, physical registers and instructions with side effects /// removed. bool HexagonHardwareLoops::isDead(const MachineInstr *MI, - SmallVector<MachineInstr*, 1> &DeadPhis) const { + SmallVectorImpl<MachineInstr *> &DeadPhis) const { // Examine each operand. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 22740b7..9e78e51 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -52,7 +52,7 @@ class HexagonDAGToDAGISel : public SelectionDAGISel { const HexagonTargetMachine& TM; DenseMap<const GlobalValue *, unsigned> GlobalAddressUseCountMap; public: - explicit HexagonDAGToDAGISel(const HexagonTargetMachine &targetmachine, + explicit HexagonDAGToDAGISel(HexagonTargetMachine &targetmachine, CodeGenOpt::Level OptLevel) : SelectionDAGISel(targetmachine, OptLevel), Subtarget(targetmachine.getSubtarget<HexagonSubtarget>()), @@ -178,7 +178,7 @@ inline SDValue XformUToUM1Imm(unsigned Imm) { /// createHexagonISelDag - This pass converts a legalized DAG into a /// Hexagon-specific DAG, ready for instruction scheduling. 
/// -FunctionPass *llvm::createHexagonISelDag(const HexagonTargetMachine &TM, +FunctionPass *llvm::createHexagonISelDag(HexagonTargetMachine &TM, CodeGenOpt::Level OptLevel) { return new HexagonDAGToDAGISel(TM, OptLevel); } @@ -394,7 +394,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl) { EVT LoadedVT = LD->getMemoryVT(); int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset(); if (Offset != 0 && OffsetFitsS11(LoadedVT, Offset)) { - MVT PointerTy = TLI->getPointerTy(); + MVT PointerTy = getTargetLowering()->getPointerTy(); const GlobalValue* GV = cast<GlobalAddressSDNode>(Base)->getGlobal(); SDValue TargAddr = @@ -443,10 +443,10 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD, SDValue CPTmpN1_0; SDValue CPTmpN1_1; - const HexagonInstrInfo *TII = - static_cast<const HexagonInstrInfo*>(TM.getInstrInfo()); if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) && N1.getNode()->getValueType(0) == MVT::i32) { + const HexagonInstrInfo *TII = + static_cast<const HexagonInstrInfo*>(TM.getInstrInfo()); if (TII->isValidAutoIncImm(LoadedVT, Val)) { SDValue TargetConst = CurDAG->getTargetConstant(Val, MVT::i32); SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32, @@ -510,10 +510,10 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD, SDValue CPTmpN1_0; SDValue CPTmpN1_1; - const HexagonInstrInfo *TII = - static_cast<const HexagonInstrInfo*>(TM.getInstrInfo()); if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) && N1.getNode()->getValueType(0) == MVT::i32) { + const HexagonInstrInfo *TII = + static_cast<const HexagonInstrInfo*>(TM.getInstrInfo()); if (TII->isValidAutoIncImm(LoadedVT, Val)) { SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32); SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32); @@ -777,7 +777,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST, EVT StoredVT = ST->getMemoryVT(); int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset(); if (Offset != 0 && OffsetFitsS11(StoredVT, Offset)) { - MVT PointerTy = TLI->getPointerTy(); + MVT PointerTy = getTargetLowering()->getPointerTy(); const GlobalValue* GV = cast<GlobalAddressSDNode>(Base)->getGlobal(); SDValue TargAddr = @@ -1215,10 +1215,10 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { // We are concerned with only those intrinsics that have predicate registers // as at least one of the operands. 
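// Aside (editorial, not part of this commit): the hunk below repeats the
// refactor from the two indexed-load hunks above -- the static_cast'd
// TargetInstrInfo lookup is sunk into the branch that actually uses TII,
// so paths that never touch it skip the lookup entirely.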
- const HexagonInstrInfo *TII = - static_cast<const HexagonInstrInfo*>(TM.getInstrInfo()); if (IntrinsicWithPred) { SmallVector<SDValue, 8> Ops; + const HexagonInstrInfo *TII = + static_cast<const HexagonInstrInfo*>(TM.getInstrInfo()); const MCInstrDesc &MCID = TII->get(IntrinsicWithPred); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 2b0fa5e..567faca 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -382,10 +382,10 @@ SDValue HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDLoc &dl = CLI.DL; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -1428,11 +1428,6 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); - setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); if (TM.getSubtargetImpl()->isSubtargetV2()) { @@ -1509,6 +1504,19 @@ bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32)); } +bool +HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { + // Assuming the caller does not have either a signext or zeroext modifier, and + // only one value is accepted, any reasonable truncation is allowed. + if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) + return false; + + // FIXME: in principle up to 64-bit could be made safe, but it would be very + // fragile at the moment: any support for multiple value returns would be + // liable to disallow tail calls involving i64 -> iN truncation in many cases. 
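// Aside (editorial, not part of this commit): Ty1 is evidently the type
// being truncated from, so the check below accepts, say, an i32 result
// tail-returned as i8 or i16, while rejecting any i64 source until the
// multiple-return concern in the FIXME is addressed.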
+ return Ty1->getPrimitiveSizeInBits() <= 32; +} + SDValue HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); @@ -1590,11 +1598,11 @@ const { std::pair<unsigned, const TargetRegisterClass*> HexagonTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { case 'r': // R0-R31 - switch (VT.getSimpleVT().SimpleTy) { + switch (VT.SimpleTy) { default: llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type"); case MVT::i32: diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index 70642e6..4fe0107 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -95,6 +95,8 @@ namespace llvm { virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const; virtual bool isTruncateFree(EVT VT1, EVT VT2) const; + virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const; + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; virtual const char *getTargetNodeName(unsigned Opcode) const; @@ -150,7 +152,7 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; + MVT VT) const; // Intrinsics virtual SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 3218134..5af645c 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -558,16 +558,6 @@ MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, return(0); } -MachineInstr* -HexagonInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const { - MachineInstrBuilder MIB = BuildMI(MF, DL, get(Hexagon::DBG_VALUE)) - .addImm(0).addImm(Offset).addMetadata(MDPtr); - return &*MIB; -} - unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { MachineRegisterInfo &RegInfo = MF->getRegInfo(); diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h index 42ffb48..3c28df4 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.h +++ b/lib/Target/Hexagon/HexagonInstrInfo.h @@ -148,11 +148,6 @@ public: isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumCycles, const BranchProbability &Probability) const; - virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, - uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const; virtual DFAPacketizer* CreateTargetScheduleState(const TargetMachine *TM, const ScheduleDAG *DAG) const; diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp index 6e966ec..10bb3e9 100644 --- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp +++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp @@ -208,6 +208,8 @@ void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) { Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); + delete Top.ResourceModel; + delete Bot.ResourceModel; Top.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel()); Bot.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel()); @@ -222,7 +224,7 @@ void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) { for (SUnit::succ_iterator I = 
SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) { unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; - unsigned MinLatency = I->getMinLatency(); + unsigned MinLatency = I->getLatency(); #ifndef NDEBUG Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency); #endif @@ -241,7 +243,7 @@ void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) { for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); I != E; ++I) { unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; - unsigned MinLatency = I->getMinLatency(); + unsigned MinLatency = I->getLatency(); #ifndef NDEBUG Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency); #endif diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index b113b35..cd96b58 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -126,7 +126,7 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) { } bool HexagonPassConfig::addInstSelector() { - const HexagonTargetMachine &TM = getHexagonTargetMachine(); + HexagonTargetMachine &TM = getHexagonTargetMachine(); bool NoOpt = (getOptLevel() == CodeGenOpt::None); if (!NoOpt) diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp index 36da6df..2ea0d2e 100644 --- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp +++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp @@ -179,7 +179,7 @@ void HexagonInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const { // Branches can take an immediate operand. This is used by the branch // selection pass to print $+8, an eight byte displacement from the PC. - assert("Unknown branch operand."); + llvm_unreachable("Unknown branch operand."); } void HexagonInstPrinter::printCallOperand(const MCInst *MI, unsigned OpNo, @@ -196,15 +196,9 @@ void HexagonInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo, void HexagonInstPrinter::printSymbol(const MCInst *MI, unsigned OpNo, raw_ostream &O, bool hi) const { - const MCOperand& MO = MI->getOperand(OpNo); + assert(MI->getOperand(OpNo).isImm() && "Unknown symbol operand"); - O << '#' << (hi? "HI": "LO") << '('; - if (MO.isImm()) { - O << '#'; - printOperand(MI, OpNo, O); - } else { - assert("Unknown symbol operand"); - printOperand(MI, OpNo, O); - } + O << '#' << (hi ? "HI" : "LO") << "(#"; + printOperand(MI, OpNo, O); O << ')'; } diff --git a/lib/Target/LLVMBuild.txt b/lib/Target/LLVMBuild.txt index 1022ae9..98d26bc 100644 --- a/lib/Target/LLVMBuild.txt +++ b/lib/Target/LLVMBuild.txt @@ -16,7 +16,7 @@ ;===------------------------------------------------------------------------===; [common] -subdirectories = AArch64 ARM CppBackend Hexagon MBlaze MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore +subdirectories = AArch64 ARM CppBackend Hexagon MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore ; This is a special group whose required libraries are extended (by llvm-build) ; with the best execution engine (the native JIT, if available, or the diff --git a/lib/Target/MBlaze/AsmParser/CMakeLists.txt b/lib/Target/MBlaze/AsmParser/CMakeLists.txt deleted file mode 100644 index 4a7d8e8..0000000 --- a/lib/Target/MBlaze/AsmParser/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. - ${CMAKE_CURRENT_SOURCE_DIR}/.. 
) - -add_llvm_library(LLVMMBlazeAsmParser - MBlazeAsmParser.cpp - ) - -add_dependencies(LLVMMBlazeAsmParser MBlazeCommonTableGen) diff --git a/lib/Target/MBlaze/AsmParser/LLVMBuild.txt b/lib/Target/MBlaze/AsmParser/LLVMBuild.txt deleted file mode 100644 index b10189a..0000000 --- a/lib/Target/MBlaze/AsmParser/LLVMBuild.txt +++ /dev/null @@ -1,23 +0,0 @@ -;===- ./lib/Target/MBlaze/AsmParser/LLVMBuild.txt --------------*- Conf -*--===; -; -; The LLVM Compiler Infrastructure -; -; This file is distributed under the University of Illinois Open Source -; License. See LICENSE.TXT for details. -; -;===------------------------------------------------------------------------===; -; -; This is an LLVMBuild description file for the components in this subdirectory. -; -; For more information on the LLVMBuild system, please see: -; -; http://llvm.org/docs/LLVMBuild.html -; -;===------------------------------------------------------------------------===; - -[component_0] -type = Library -name = MBlazeAsmParser -parent = MBlaze -required_libraries = MBlazeInfo MC MCParser Support -add_to_library_groups = MBlaze diff --git a/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp b/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp deleted file mode 100644 index dda6e24..0000000 --- a/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp +++ /dev/null @@ -1,572 +0,0 @@ -//===-- MBlazeAsmParser.cpp - Parse MBlaze asm to MCInst instructions -----===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "MCTargetDesc/MBlazeBaseInfo.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/Twine.h" -#include "llvm/MC/MCExpr.h" -#include "llvm/MC/MCInst.h" -#include "llvm/MC/MCParser/MCAsmLexer.h" -#include "llvm/MC/MCParser/MCAsmParser.h" -#include "llvm/MC/MCParser/MCParsedAsmOperand.h" -#include "llvm/MC/MCStreamer.h" -#include "llvm/MC/MCTargetAsmParser.h" -#include "llvm/Support/SourceMgr.h" -#include "llvm/Support/TargetRegistry.h" -#include "llvm/Support/raw_ostream.h" -using namespace llvm; - -namespace { -struct MBlazeOperand; - -class MBlazeAsmParser : public MCTargetAsmParser { - MCAsmParser &Parser; - - MCAsmParser &getParser() const { return Parser; } - MCAsmLexer &getLexer() const { return Parser.getLexer(); } - - void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } - bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } - - MBlazeOperand *ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands); - MBlazeOperand *ParseRegister(); - MBlazeOperand *ParseRegister(SMLoc &StartLoc, SMLoc &EndLoc); - MBlazeOperand *ParseImmediate(); - MBlazeOperand *ParseFsl(); - MBlazeOperand* ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands); - - virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); - - bool ParseDirectiveWord(unsigned Size, SMLoc L); - - bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, - SmallVectorImpl<MCParsedAsmOperand*> &Operands, - MCStreamer &Out, unsigned &ErrorInfo, - bool MatchingInlineAsm); - - /// @name Auto-generated Match Functions - /// { - -#define GET_ASSEMBLER_HEADER -#include "MBlazeGenAsmMatcher.inc" - - /// } - -public: - MBlazeAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) - : MCTargetAsmParser(), Parser(_Parser) {} - - virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, - SMLoc 
NameLoc, - SmallVectorImpl<MCParsedAsmOperand*> &Operands); - - virtual bool ParseDirective(AsmToken DirectiveID); -}; - -/// MBlazeOperand - Instances of this class represent a parsed MBlaze machine -/// instruction. -struct MBlazeOperand : public MCParsedAsmOperand { - enum KindTy { - Token, - Immediate, - Register, - Memory, - Fsl - } Kind; - - SMLoc StartLoc, EndLoc; - - struct TokOp { - const char *Data; - unsigned Length; - }; - - struct RegOp { - unsigned RegNum; - }; - - struct ImmOp { - const MCExpr *Val; - }; - - struct MemOp { - unsigned Base; - unsigned OffReg; - const MCExpr *Off; - }; - - struct FslImmOp { - const MCExpr *Val; - }; - - union { - struct TokOp Tok; - struct RegOp Reg; - struct ImmOp Imm; - struct MemOp Mem; - struct FslImmOp FslImm; - }; - - MBlazeOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} -public: - MBlazeOperand(const MBlazeOperand &o) : MCParsedAsmOperand() { - Kind = o.Kind; - StartLoc = o.StartLoc; - EndLoc = o.EndLoc; - switch (Kind) { - case Register: - Reg = o.Reg; - break; - case Immediate: - Imm = o.Imm; - break; - case Token: - Tok = o.Tok; - break; - case Memory: - Mem = o.Mem; - break; - case Fsl: - FslImm = o.FslImm; - break; - } - } - - /// getStartLoc - Get the location of the first token of this operand. - SMLoc getStartLoc() const { return StartLoc; } - - /// getEndLoc - Get the location of the last token of this operand. - SMLoc getEndLoc() const { return EndLoc; } - - unsigned getReg() const { - assert(Kind == Register && "Invalid access!"); - return Reg.RegNum; - } - - const MCExpr *getImm() const { - assert(Kind == Immediate && "Invalid access!"); - return Imm.Val; - } - - const MCExpr *getFslImm() const { - assert(Kind == Fsl && "Invalid access!"); - return FslImm.Val; - } - - unsigned getMemBase() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.Base; - } - - const MCExpr* getMemOff() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.Off; - } - - unsigned getMemOffReg() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.OffReg; - } - - bool isToken() const { return Kind == Token; } - bool isImm() const { return Kind == Immediate; } - bool isMem() const { return Kind == Memory; } - bool isFsl() const { return Kind == Fsl; } - bool isReg() const { return Kind == Register; } - - void addExpr(MCInst &Inst, const MCExpr *Expr) const { - // Add as immediates when possible. Null MCExpr = 0. 
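// Aside (editorial, not part of this commit): this dispatch is the
// standard MC pattern for parsed operands -- an expression that folds to
// a compile-time constant becomes an immediate operand, while anything
// symbolic stays an MCExpr operand to be resolved later by a fixup.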
- if (Expr == 0) - Inst.addOperand(MCOperand::CreateImm(0)); - else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) - Inst.addOperand(MCOperand::CreateImm(CE->getValue())); - else - Inst.addOperand(MCOperand::CreateExpr(Expr)); - } - - void addRegOperands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - Inst.addOperand(MCOperand::CreateReg(getReg())); - } - - void addImmOperands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - addExpr(Inst, getImm()); - } - - void addFslOperands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - addExpr(Inst, getFslImm()); - } - - void addMemOperands(MCInst &Inst, unsigned N) const { - assert(N == 2 && "Invalid number of operands!"); - - Inst.addOperand(MCOperand::CreateReg(getMemBase())); - - unsigned RegOff = getMemOffReg(); - if (RegOff) - Inst.addOperand(MCOperand::CreateReg(RegOff)); - else - addExpr(Inst, getMemOff()); - } - - StringRef getToken() const { - assert(Kind == Token && "Invalid access!"); - return StringRef(Tok.Data, Tok.Length); - } - - virtual void print(raw_ostream &OS) const; - - static MBlazeOperand *CreateToken(StringRef Str, SMLoc S) { - MBlazeOperand *Op = new MBlazeOperand(Token); - Op->Tok.Data = Str.data(); - Op->Tok.Length = Str.size(); - Op->StartLoc = S; - Op->EndLoc = S; - return Op; - } - - static MBlazeOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { - MBlazeOperand *Op = new MBlazeOperand(Register); - Op->Reg.RegNum = RegNum; - Op->StartLoc = S; - Op->EndLoc = E; - return Op; - } - - static MBlazeOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { - MBlazeOperand *Op = new MBlazeOperand(Immediate); - Op->Imm.Val = Val; - Op->StartLoc = S; - Op->EndLoc = E; - return Op; - } - - static MBlazeOperand *CreateFslImm(const MCExpr *Val, SMLoc S, SMLoc E) { - MBlazeOperand *Op = new MBlazeOperand(Fsl); - Op->Imm.Val = Val; - Op->StartLoc = S; - Op->EndLoc = E; - return Op; - } - - static MBlazeOperand *CreateMem(unsigned Base, const MCExpr *Off, SMLoc S, - SMLoc E) { - MBlazeOperand *Op = new MBlazeOperand(Memory); - Op->Mem.Base = Base; - Op->Mem.Off = Off; - Op->Mem.OffReg = 0; - Op->StartLoc = S; - Op->EndLoc = E; - return Op; - } - - static MBlazeOperand *CreateMem(unsigned Base, unsigned Off, SMLoc S, - SMLoc E) { - MBlazeOperand *Op = new MBlazeOperand(Memory); - Op->Mem.Base = Base; - Op->Mem.OffReg = Off; - Op->Mem.Off = 0; - Op->StartLoc = S; - Op->EndLoc = E; - return Op; - } -}; - -} // end anonymous namespace. 
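// Aside (editorial, not part of this commit): the MBlazeOperand class
// deleted above follows the usual MCParsedAsmOperand layout -- a Kind tag
// discriminating a union of per-kind payloads, asserting accessors, and
// static factory functions. A stripped-down, self-contained sketch of
// that shape, with hypothetical names:
//
//   #include <cassert>
//   struct ParsedOp {
//     enum KindTy { Register, Immediate } Kind;
//     struct RegOp { unsigned RegNum; };
//     struct ImmOp { long long Val; };
//     union { RegOp Reg; ImmOp Imm; };  // only the Kind member is live
//
//     unsigned getReg() const {
//       assert(Kind == Register && "Invalid access!");
//       return Reg.RegNum;
//     }
//
//     static ParsedOp createReg(unsigned RegNum) {
//       ParsedOp Op;
//       Op.Kind = Register;
//       Op.Reg.RegNum = RegNum;  // activates Reg in the union
//       return Op;
//     }
//   };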
- -void MBlazeOperand::print(raw_ostream &OS) const { - switch (Kind) { - case Immediate: - getImm()->print(OS); - break; - case Register: - OS << "<register R"; - OS << getMBlazeRegisterNumbering(getReg()) << ">"; - break; - case Token: - OS << "'" << getToken() << "'"; - break; - case Memory: { - OS << "<memory R"; - OS << getMBlazeRegisterNumbering(getMemBase()); - OS << ", "; - - unsigned RegOff = getMemOffReg(); - if (RegOff) - OS << "R" << getMBlazeRegisterNumbering(RegOff); - else - OS << getMemOff(); - OS << ">"; - } - break; - case Fsl: - getFslImm()->print(OS); - break; - } -} - -/// @name Auto-generated Match Functions -/// { - -static unsigned MatchRegisterName(StringRef Name); - -/// } -// -bool MBlazeAsmParser:: -MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, - SmallVectorImpl<MCParsedAsmOperand*> &Operands, - MCStreamer &Out, unsigned &ErrorInfo, - bool MatchingInlineAsm) { - MCInst Inst; - switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, - MatchingInlineAsm)) { - default: break; - case Match_Success: - Out.EmitInstruction(Inst); - return false; - case Match_MissingFeature: - return Error(IDLoc, "instruction use requires an option to be enabled"); - case Match_MnemonicFail: - return Error(IDLoc, "unrecognized instruction mnemonic"); - case Match_InvalidOperand: { - SMLoc ErrorLoc = IDLoc; - if (ErrorInfo != ~0U) { - if (ErrorInfo >= Operands.size()) - return Error(IDLoc, "too few operands for instruction"); - - ErrorLoc = ((MBlazeOperand*)Operands[ErrorInfo])->getStartLoc(); - if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; - } - - return Error(ErrorLoc, "invalid operand for instruction"); - } - } - - llvm_unreachable("Implement any new match types added!"); -} - -MBlazeOperand *MBlazeAsmParser:: -ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - if (Operands.size() != 4) - return 0; - - MBlazeOperand &Base = *(MBlazeOperand*)Operands[2]; - MBlazeOperand &Offset = *(MBlazeOperand*)Operands[3]; - - SMLoc S = Base.getStartLoc(); - SMLoc O = Offset.getStartLoc(); - SMLoc E = Offset.getEndLoc(); - - if (!Base.isReg()) { - Error(S, "base address must be a register"); - return 0; - } - - if (!Offset.isReg() && !Offset.isImm()) { - Error(O, "offset must be a register or immediate"); - return 0; - } - - MBlazeOperand *Op; - if (Offset.isReg()) - Op = MBlazeOperand::CreateMem(Base.getReg(), Offset.getReg(), S, E); - else - Op = MBlazeOperand::CreateMem(Base.getReg(), Offset.getImm(), S, E); - - delete Operands.pop_back_val(); - delete Operands.pop_back_val(); - Operands.push_back(Op); - - return Op; -} - -bool MBlazeAsmParser::ParseRegister(unsigned &RegNo, - SMLoc &StartLoc, SMLoc &EndLoc) { - MBlazeOperand *Reg = ParseRegister(StartLoc, EndLoc); - if (!Reg) - return true; - RegNo = Reg->getReg(); - return false; -} - -MBlazeOperand *MBlazeAsmParser::ParseRegister() { - SMLoc S, E; - return ParseRegister(S, E); -} - -MBlazeOperand *MBlazeAsmParser::ParseRegister(SMLoc &StartLoc, SMLoc &EndLoc) { - StartLoc = Parser.getTok().getLoc(); - EndLoc = Parser.getTok().getEndLoc(); - - if (getLexer().getKind() != AsmToken::Identifier) - return 0; - - unsigned RegNo = MatchRegisterName(getLexer().getTok().getIdentifier()); - if (RegNo == 0) - return 0; - - getLexer().Lex(); - return MBlazeOperand::CreateReg(RegNo, StartLoc, EndLoc); -} - -static unsigned MatchFslRegister(StringRef String) { - if (!String.startswith("rfsl")) - return -1; - - unsigned regNum; - if (String.substr(4).getAsInteger(10,regNum)) - return -1; - - return regNum; -} - -MBlazeOperand 
*MBlazeAsmParser::ParseFsl() { - SMLoc S = Parser.getTok().getLoc(); - SMLoc E = Parser.getTok().getEndLoc(); - - switch (getLexer().getKind()) { - default: return 0; - case AsmToken::Identifier: - unsigned reg = MatchFslRegister(getLexer().getTok().getIdentifier()); - if (reg >= 16) - return 0; - - getLexer().Lex(); - const MCExpr *EVal = MCConstantExpr::Create(reg,getContext()); - return MBlazeOperand::CreateFslImm(EVal,S,E); - } -} - -MBlazeOperand *MBlazeAsmParser::ParseImmediate() { - SMLoc S = Parser.getTok().getLoc(); - SMLoc E = Parser.getTok().getEndLoc(); - - const MCExpr *EVal; - switch (getLexer().getKind()) { - default: return 0; - case AsmToken::LParen: - case AsmToken::Plus: - case AsmToken::Minus: - case AsmToken::Integer: - case AsmToken::Identifier: - if (getParser().parseExpression(EVal)) - return 0; - - return MBlazeOperand::CreateImm(EVal, S, E); - } -} - -MBlazeOperand *MBlazeAsmParser:: -ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - MBlazeOperand *Op; - - // Attempt to parse the next token as a register name - Op = ParseRegister(); - - // Attempt to parse the next token as an FSL immediate - if (!Op) - Op = ParseFsl(); - - // Attempt to parse the next token as an immediate - if (!Op) - Op = ParseImmediate(); - - // If the token could not be parsed then fail - if (!Op) { - Error(Parser.getTok().getLoc(), "unknown operand"); - return 0; - } - - // Push the parsed operand into the list of operands - Operands.push_back(Op); - return Op; -} - -/// Parse an mblaze instruction mnemonic followed by its operands. -bool MBlazeAsmParser:: -ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, - SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - // The first operands is the token for the instruction name - size_t dotLoc = Name.find('.'); - Operands.push_back(MBlazeOperand::CreateToken(Name.substr(0,dotLoc),NameLoc)); - if (dotLoc < Name.size()) - Operands.push_back(MBlazeOperand::CreateToken(Name.substr(dotLoc),NameLoc)); - - // If there are no more operands then finish - if (getLexer().is(AsmToken::EndOfStatement)) - return false; - - // Parse the first operand - if (!ParseOperand(Operands)) - return true; - - while (getLexer().isNot(AsmToken::EndOfStatement) && - getLexer().is(AsmToken::Comma)) { - // Consume the comma token - getLexer().Lex(); - - // Parse the next operand - if (!ParseOperand(Operands)) - return true; - } - - // If the instruction requires a memory operand then we need to - // replace the last two operands (base+offset) with a single - // memory operand. - if (Name.startswith("lw") || Name.startswith("sw") || - Name.startswith("lh") || Name.startswith("sh") || - Name.startswith("lb") || Name.startswith("sb")) - return (ParseMemory(Operands) == NULL); - - return false; -} - -/// ParseDirective parses the MBlaze specific directives -bool MBlazeAsmParser::ParseDirective(AsmToken DirectiveID) { - StringRef IDVal = DirectiveID.getIdentifier(); - if (IDVal == ".word") - return ParseDirectiveWord(2, DirectiveID.getLoc()); - return true; -} - -/// ParseDirectiveWord -/// ::= .word [ expression (, expression)* ] -bool MBlazeAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) { - if (getLexer().isNot(AsmToken::EndOfStatement)) { - for (;;) { - const MCExpr *Value; - if (getParser().parseExpression(Value)) - return true; - - getParser().getStreamer().EmitValue(Value, Size); - - if (getLexer().is(AsmToken::EndOfStatement)) - break; - - // FIXME: Improve diagnostic. 
- if (getLexer().isNot(AsmToken::Comma)) - return Error(L, "unexpected token in directive"); - Parser.Lex(); - } - } - - Parser.Lex(); - return false; -} - -/// Force static initialization. -extern "C" void LLVMInitializeMBlazeAsmParser() { - RegisterMCAsmParser<MBlazeAsmParser> X(TheMBlazeTarget); -} - -#define GET_REGISTER_MATCHER -#define GET_MATCHER_IMPLEMENTATION -#include "MBlazeGenAsmMatcher.inc" diff --git a/lib/Target/MBlaze/AsmParser/Makefile b/lib/Target/MBlaze/AsmParser/Makefile deleted file mode 100644 index 1e68766..0000000 --- a/lib/Target/MBlaze/AsmParser/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -##===- lib/Target/MBlaze/AsmParser/Makefile ----------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## -LEVEL = ../../../.. -LIBRARYNAME = LLVMMBlazeAsmParser - -# Hack: we need to include 'main' MBlaze target directory for private headers -CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. - -include $(LEVEL)/Makefile.common diff --git a/lib/Target/MBlaze/CMakeLists.txt b/lib/Target/MBlaze/CMakeLists.txt deleted file mode 100644 index 91a41f3..0000000 --- a/lib/Target/MBlaze/CMakeLists.txt +++ /dev/null @@ -1,37 +0,0 @@ -set(LLVM_TARGET_DEFINITIONS MBlaze.td) - -tablegen(LLVM MBlazeGenRegisterInfo.inc -gen-register-info) -tablegen(LLVM MBlazeGenInstrInfo.inc -gen-instr-info) -tablegen(LLVM MBlazeGenCodeEmitter.inc -gen-emitter) -tablegen(LLVM MBlazeGenAsmWriter.inc -gen-asm-writer) -tablegen(LLVM MBlazeGenAsmMatcher.inc -gen-asm-matcher) -tablegen(LLVM MBlazeGenDAGISel.inc -gen-dag-isel) -tablegen(LLVM MBlazeGenCallingConv.inc -gen-callingconv) -tablegen(LLVM MBlazeGenSubtargetInfo.inc -gen-subtarget) -tablegen(LLVM MBlazeGenIntrinsics.inc -gen-tgt-intrinsic) -add_public_tablegen_target(MBlazeCommonTableGen) - -add_llvm_target(MBlazeCodeGen - MBlazeDelaySlotFiller.cpp - MBlazeInstrInfo.cpp - MBlazeISelDAGToDAG.cpp - MBlazeISelLowering.cpp - MBlazeFrameLowering.cpp - MBlazeMachineFunction.cpp - MBlazeRegisterInfo.cpp - MBlazeSubtarget.cpp - MBlazeTargetMachine.cpp - MBlazeTargetObjectFile.cpp - MBlazeIntrinsicInfo.cpp - MBlazeSelectionDAGInfo.cpp - MBlazeAsmPrinter.cpp - MBlazeMCInstLower.cpp - ) - -add_dependencies(LLVMMBlazeCodeGen intrinsics_gen) - -add_subdirectory(AsmParser) -add_subdirectory(Disassembler) -add_subdirectory(InstPrinter) -add_subdirectory(TargetInfo) -add_subdirectory(MCTargetDesc) diff --git a/lib/Target/MBlaze/Disassembler/CMakeLists.txt b/lib/Target/MBlaze/Disassembler/CMakeLists.txt deleted file mode 100644 index be2dce1..0000000 --- a/lib/Target/MBlaze/Disassembler/CMakeLists.txt +++ /dev/null @@ -1,16 +0,0 @@ -include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. - ${CMAKE_CURRENT_SOURCE_DIR}/.. 
) - -add_llvm_library(LLVMMBlazeDisassembler - MBlazeDisassembler.cpp - ) - -# workaround for hanging compilation on MSVC9 and 10 -if( MSVC_VERSION EQUAL 1500 OR MSVC_VERSION EQUAL 1600 ) -set_property( - SOURCE MBlazeDisassembler.cpp - PROPERTY COMPILE_FLAGS "/Od" - ) -endif() - -add_dependencies(LLVMMBlazeDisassembler MBlazeCommonTableGen) diff --git a/lib/Target/MBlaze/Disassembler/LLVMBuild.txt b/lib/Target/MBlaze/Disassembler/LLVMBuild.txt deleted file mode 100644 index 28dd9dc..0000000 --- a/lib/Target/MBlaze/Disassembler/LLVMBuild.txt +++ /dev/null @@ -1,23 +0,0 @@ -;===- ./lib/Target/MBlaze/Disassembler/LLVMBuild.txt -----------*- Conf -*--===; -; -; The LLVM Compiler Infrastructure -; -; This file is distributed under the University of Illinois Open Source -; License. See LICENSE.TXT for details. -; -;===------------------------------------------------------------------------===; -; -; This is an LLVMBuild description file for the components in this subdirectory. -; -; For more information on the LLVMBuild system, please see: -; -; http://llvm.org/docs/LLVMBuild.html -; -;===------------------------------------------------------------------------===; - -[component_0] -type = Library -name = MBlazeDisassembler -parent = MBlaze -required_libraries = MBlazeDesc MBlazeInfo MC Support -add_to_library_groups = MBlaze diff --git a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp b/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp deleted file mode 100644 index 0acfb3e..0000000 --- a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp +++ /dev/null @@ -1,718 +0,0 @@ -//===-- MBlazeDisassembler.cpp - Disassembler for MicroBlaze -------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is part of the MBlaze Disassembler. It contains code to translate -// the data produced by the decoder into MCInsts. 
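-// The decoding scheme is hand-written and table-driven rather than
-// TableGen-generated: the top six bits of each 32-bit word index the
-// major-opcode table below, and overloaded encodings are resolved by the
-// decode* helpers.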
-// -//===----------------------------------------------------------------------===// - -#include "MBlazeDisassembler.h" -#include "MBlaze.h" -#include "llvm/MC/MCDisassembler.h" -#include "llvm/MC/MCInst.h" -#include "llvm/MC/MCInstrDesc.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/MemoryObject.h" -#include "llvm/Support/TargetRegistry.h" -#include "llvm/Support/raw_ostream.h" - -// #include "MBlazeGenDecoderTables.inc" -// #include "MBlazeGenRegisterNames.inc" - -namespace llvm { -extern const MCInstrDesc MBlazeInsts[]; -} - -using namespace llvm; - -const uint16_t UNSUPPORTED = -1; - -static const uint16_t mblazeBinary2Opcode[] = { - MBlaze::ADD, MBlaze::RSUB, MBlaze::ADDC, MBlaze::RSUBC, //00,01,02,03 - MBlaze::ADDK, MBlaze::RSUBK, MBlaze::ADDKC, MBlaze::RSUBKC, //04,05,06,07 - MBlaze::ADDI, MBlaze::RSUBI, MBlaze::ADDIC, MBlaze::RSUBIC, //08,09,0A,0B - MBlaze::ADDIK, MBlaze::RSUBIK, MBlaze::ADDIKC, MBlaze::RSUBIKC, //0C,0D,0E,0F - - MBlaze::MUL, MBlaze::BSRL, MBlaze::IDIV, MBlaze::GETD, //10,11,12,13 - UNSUPPORTED, UNSUPPORTED, MBlaze::FADD, UNSUPPORTED, //14,15,16,17 - MBlaze::MULI, MBlaze::BSRLI, UNSUPPORTED, MBlaze::GET, //18,19,1A,1B - UNSUPPORTED, UNSUPPORTED, UNSUPPORTED, UNSUPPORTED, //1C,1D,1E,1F - - MBlaze::OR, MBlaze::AND, MBlaze::XOR, MBlaze::ANDN, //20,21,22,23 - MBlaze::SEXT8, MBlaze::MFS, MBlaze::BR, MBlaze::BEQ, //24,25,26,27 - MBlaze::ORI, MBlaze::ANDI, MBlaze::XORI, MBlaze::ANDNI, //28,29,2A,2B - MBlaze::IMM, MBlaze::RTSD, MBlaze::BRI, MBlaze::BEQI, //2C,2D,2E,2F - - MBlaze::LBU, MBlaze::LHU, MBlaze::LW, UNSUPPORTED, //30,31,32,33 - MBlaze::SB, MBlaze::SH, MBlaze::SW, UNSUPPORTED, //34,35,36,37 - MBlaze::LBUI, MBlaze::LHUI, MBlaze::LWI, UNSUPPORTED, //38,39,3A,3B - MBlaze::SBI, MBlaze::SHI, MBlaze::SWI, UNSUPPORTED, //3C,3D,3E,3F -}; - -static unsigned getRD(uint32_t insn) { - if (!isMBlazeRegister((insn>>21)&0x1F)) - return UNSUPPORTED; - return getMBlazeRegisterFromNumbering((insn>>21)&0x1F); -} - -static unsigned getRA(uint32_t insn) { - if (!getMBlazeRegisterFromNumbering((insn>>16)&0x1F)) - return UNSUPPORTED; - return getMBlazeRegisterFromNumbering((insn>>16)&0x1F); -} - -static unsigned getRB(uint32_t insn) { - if (!getMBlazeRegisterFromNumbering((insn>>11)&0x1F)) - return UNSUPPORTED; - return getMBlazeRegisterFromNumbering((insn>>11)&0x1F); -} - -static int64_t getRS(uint32_t insn) { - if (!isSpecialMBlazeRegister(insn&0x3FFF)) - return UNSUPPORTED; - return getSpecialMBlazeRegisterFromNumbering(insn&0x3FFF); -} - -static int64_t getIMM(uint32_t insn) { - int16_t val = (insn & 0xFFFF); - return val; -} - -static int64_t getSHT(uint32_t insn) { - int16_t val = (insn & 0x1F); - return val; -} - -static unsigned getFLAGS(int32_t insn) { - return (insn & 0x7FF); -} - -static int64_t getFSL(uint32_t insn) { - int16_t val = (insn & 0xF); - return val; -} - -static unsigned decodeMUL(uint32_t insn) { - switch (getFLAGS(insn)) { - default: return UNSUPPORTED; - case 0: return MBlaze::MUL; - case 1: return MBlaze::MULH; - case 2: return MBlaze::MULHSU; - case 3: return MBlaze::MULHU; - } -} - -static unsigned decodeSEXT(uint32_t insn) { - switch (insn&0x7FF) { - default: return UNSUPPORTED; - case 0x60: return MBlaze::SEXT8; - case 0x68: return MBlaze::WIC; - case 0x64: return MBlaze::WDC; - case 0x66: return MBlaze::WDCC; - case 0x74: return MBlaze::WDCF; - case 0x61: return MBlaze::SEXT16; - case 0x41: return MBlaze::SRL; - case 0x21: return MBlaze::SRC; - case 0x01: return MBlaze::SRA; - case 0xE0: return MBlaze::CLZ; - } -} - -static unsigned 
decodeBEQ(uint32_t insn) { - switch ((insn>>21)&0x1F) { - default: return UNSUPPORTED; - case 0x00: return MBlaze::BEQ; - case 0x10: return MBlaze::BEQD; - case 0x05: return MBlaze::BGE; - case 0x15: return MBlaze::BGED; - case 0x04: return MBlaze::BGT; - case 0x14: return MBlaze::BGTD; - case 0x03: return MBlaze::BLE; - case 0x13: return MBlaze::BLED; - case 0x02: return MBlaze::BLT; - case 0x12: return MBlaze::BLTD; - case 0x01: return MBlaze::BNE; - case 0x11: return MBlaze::BNED; - } -} - -static unsigned decodeBEQI(uint32_t insn) { - switch ((insn>>21)&0x1F) { - default: return UNSUPPORTED; - case 0x00: return MBlaze::BEQI; - case 0x10: return MBlaze::BEQID; - case 0x05: return MBlaze::BGEI; - case 0x15: return MBlaze::BGEID; - case 0x04: return MBlaze::BGTI; - case 0x14: return MBlaze::BGTID; - case 0x03: return MBlaze::BLEI; - case 0x13: return MBlaze::BLEID; - case 0x02: return MBlaze::BLTI; - case 0x12: return MBlaze::BLTID; - case 0x01: return MBlaze::BNEI; - case 0x11: return MBlaze::BNEID; - } -} - -static unsigned decodeBR(uint32_t insn) { - switch ((insn>>16)&0x1F) { - default: return UNSUPPORTED; - case 0x00: return MBlaze::BR; - case 0x08: return MBlaze::BRA; - case 0x0C: return MBlaze::BRK; - case 0x10: return MBlaze::BRD; - case 0x14: return MBlaze::BRLD; - case 0x18: return MBlaze::BRAD; - case 0x1C: return MBlaze::BRALD; - } -} - -static unsigned decodeBRI(uint32_t insn) { - switch (insn&0x3FFFFFF) { - default: break; - case 0x0020004: return MBlaze::IDMEMBAR; - case 0x0220004: return MBlaze::DMEMBAR; - case 0x0420004: return MBlaze::IMEMBAR; - } - - switch ((insn>>16)&0x1F) { - default: return UNSUPPORTED; - case 0x00: return MBlaze::BRI; - case 0x08: return MBlaze::BRAI; - case 0x0C: return MBlaze::BRKI; - case 0x10: return MBlaze::BRID; - case 0x14: return MBlaze::BRLID; - case 0x18: return MBlaze::BRAID; - case 0x1C: return MBlaze::BRALID; - } -} - -static unsigned decodeBSRL(uint32_t insn) { - switch ((insn>>9)&0x3) { - default: return UNSUPPORTED; - case 0x2: return MBlaze::BSLL; - case 0x1: return MBlaze::BSRA; - case 0x0: return MBlaze::BSRL; - } -} - -static unsigned decodeBSRLI(uint32_t insn) { - switch ((insn>>9)&0x3) { - default: return UNSUPPORTED; - case 0x2: return MBlaze::BSLLI; - case 0x1: return MBlaze::BSRAI; - case 0x0: return MBlaze::BSRLI; - } -} - -static unsigned decodeRSUBK(uint32_t insn) { - switch (getFLAGS(insn)) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::RSUBK; - case 0x1: return MBlaze::CMP; - case 0x3: return MBlaze::CMPU; - } -} - -static unsigned decodeFADD(uint32_t insn) { - switch (getFLAGS(insn)) { - default: return UNSUPPORTED; - case 0x000: return MBlaze::FADD; - case 0x080: return MBlaze::FRSUB; - case 0x100: return MBlaze::FMUL; - case 0x180: return MBlaze::FDIV; - case 0x200: return MBlaze::FCMP_UN; - case 0x210: return MBlaze::FCMP_LT; - case 0x220: return MBlaze::FCMP_EQ; - case 0x230: return MBlaze::FCMP_LE; - case 0x240: return MBlaze::FCMP_GT; - case 0x250: return MBlaze::FCMP_NE; - case 0x260: return MBlaze::FCMP_GE; - case 0x280: return MBlaze::FLT; - case 0x300: return MBlaze::FINT; - case 0x380: return MBlaze::FSQRT; - } -} - -static unsigned decodeGET(uint32_t insn) { - switch ((insn>>10)&0x3F) { - default: return UNSUPPORTED; - case 0x00: return MBlaze::GET; - case 0x01: return MBlaze::EGET; - case 0x02: return MBlaze::AGET; - case 0x03: return MBlaze::EAGET; - case 0x04: return MBlaze::TGET; - case 0x05: return MBlaze::TEGET; - case 0x06: return MBlaze::TAGET; - case 0x07: return MBlaze::TEAGET; - 
case 0x08: return MBlaze::CGET; - case 0x09: return MBlaze::ECGET; - case 0x0A: return MBlaze::CAGET; - case 0x0B: return MBlaze::ECAGET; - case 0x0C: return MBlaze::TCGET; - case 0x0D: return MBlaze::TECGET; - case 0x0E: return MBlaze::TCAGET; - case 0x0F: return MBlaze::TECAGET; - case 0x10: return MBlaze::NGET; - case 0x11: return MBlaze::NEGET; - case 0x12: return MBlaze::NAGET; - case 0x13: return MBlaze::NEAGET; - case 0x14: return MBlaze::TNGET; - case 0x15: return MBlaze::TNEGET; - case 0x16: return MBlaze::TNAGET; - case 0x17: return MBlaze::TNEAGET; - case 0x18: return MBlaze::NCGET; - case 0x19: return MBlaze::NECGET; - case 0x1A: return MBlaze::NCAGET; - case 0x1B: return MBlaze::NECAGET; - case 0x1C: return MBlaze::TNCGET; - case 0x1D: return MBlaze::TNECGET; - case 0x1E: return MBlaze::TNCAGET; - case 0x1F: return MBlaze::TNECAGET; - case 0x20: return MBlaze::PUT; - case 0x22: return MBlaze::APUT; - case 0x24: return MBlaze::TPUT; - case 0x26: return MBlaze::TAPUT; - case 0x28: return MBlaze::CPUT; - case 0x2A: return MBlaze::CAPUT; - case 0x2C: return MBlaze::TCPUT; - case 0x2E: return MBlaze::TCAPUT; - case 0x30: return MBlaze::NPUT; - case 0x32: return MBlaze::NAPUT; - case 0x34: return MBlaze::TNPUT; - case 0x36: return MBlaze::TNAPUT; - case 0x38: return MBlaze::NCPUT; - case 0x3A: return MBlaze::NCAPUT; - case 0x3C: return MBlaze::TNCPUT; - case 0x3E: return MBlaze::TNCAPUT; - } -} - -static unsigned decodeGETD(uint32_t insn) { - switch ((insn>>5)&0x3F) { - default: return UNSUPPORTED; - case 0x00: return MBlaze::GETD; - case 0x01: return MBlaze::EGETD; - case 0x02: return MBlaze::AGETD; - case 0x03: return MBlaze::EAGETD; - case 0x04: return MBlaze::TGETD; - case 0x05: return MBlaze::TEGETD; - case 0x06: return MBlaze::TAGETD; - case 0x07: return MBlaze::TEAGETD; - case 0x08: return MBlaze::CGETD; - case 0x09: return MBlaze::ECGETD; - case 0x0A: return MBlaze::CAGETD; - case 0x0B: return MBlaze::ECAGETD; - case 0x0C: return MBlaze::TCGETD; - case 0x0D: return MBlaze::TECGETD; - case 0x0E: return MBlaze::TCAGETD; - case 0x0F: return MBlaze::TECAGETD; - case 0x10: return MBlaze::NGETD; - case 0x11: return MBlaze::NEGETD; - case 0x12: return MBlaze::NAGETD; - case 0x13: return MBlaze::NEAGETD; - case 0x14: return MBlaze::TNGETD; - case 0x15: return MBlaze::TNEGETD; - case 0x16: return MBlaze::TNAGETD; - case 0x17: return MBlaze::TNEAGETD; - case 0x18: return MBlaze::NCGETD; - case 0x19: return MBlaze::NECGETD; - case 0x1A: return MBlaze::NCAGETD; - case 0x1B: return MBlaze::NECAGETD; - case 0x1C: return MBlaze::TNCGETD; - case 0x1D: return MBlaze::TNECGETD; - case 0x1E: return MBlaze::TNCAGETD; - case 0x1F: return MBlaze::TNECAGETD; - case 0x20: return MBlaze::PUTD; - case 0x22: return MBlaze::APUTD; - case 0x24: return MBlaze::TPUTD; - case 0x26: return MBlaze::TAPUTD; - case 0x28: return MBlaze::CPUTD; - case 0x2A: return MBlaze::CAPUTD; - case 0x2C: return MBlaze::TCPUTD; - case 0x2E: return MBlaze::TCAPUTD; - case 0x30: return MBlaze::NPUTD; - case 0x32: return MBlaze::NAPUTD; - case 0x34: return MBlaze::TNPUTD; - case 0x36: return MBlaze::TNAPUTD; - case 0x38: return MBlaze::NCPUTD; - case 0x3A: return MBlaze::NCAPUTD; - case 0x3C: return MBlaze::TNCPUTD; - case 0x3E: return MBlaze::TNCAPUTD; - } -} - -static unsigned decodeIDIV(uint32_t insn) { - switch (insn&0x3) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::IDIV; - case 0x2: return MBlaze::IDIVU; - } -} - -static unsigned decodeLBU(uint32_t insn) { - switch ((insn>>9)&0x1) { - default: return 
UNSUPPORTED; - case 0x0: return MBlaze::LBU; - case 0x1: return MBlaze::LBUR; - } -} - -static unsigned decodeLHU(uint32_t insn) { - switch ((insn>>9)&0x1) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::LHU; - case 0x1: return MBlaze::LHUR; - } -} - -static unsigned decodeLW(uint32_t insn) { - switch ((insn>>9)&0x3) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::LW; - case 0x1: return MBlaze::LWR; - case 0x2: return MBlaze::LWX; - } -} - -static unsigned decodeSB(uint32_t insn) { - switch ((insn>>9)&0x1) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::SB; - case 0x1: return MBlaze::SBR; - } -} - -static unsigned decodeSH(uint32_t insn) { - switch ((insn>>9)&0x1) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::SH; - case 0x1: return MBlaze::SHR; - } -} - -static unsigned decodeSW(uint32_t insn) { - switch ((insn>>9)&0x3) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::SW; - case 0x1: return MBlaze::SWR; - case 0x2: return MBlaze::SWX; - } -} - -static unsigned decodeMFS(uint32_t insn) { - switch ((insn>>15)&0x1) { - default: return UNSUPPORTED; - case 0x0: - switch ((insn>>16)&0x1) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::MSRSET; - case 0x1: return MBlaze::MSRCLR; - } - case 0x1: - switch ((insn>>14)&0x1) { - default: return UNSUPPORTED; - case 0x0: return MBlaze::MFS; - case 0x1: return MBlaze::MTS; - } - } -} - -static unsigned decodeOR(uint32_t insn) { - switch (getFLAGS(insn)) { - default: return UNSUPPORTED; - case 0x000: return MBlaze::OR; - case 0x400: return MBlaze::PCMPBF; - } -} - -static unsigned decodeXOR(uint32_t insn) { - switch (getFLAGS(insn)) { - default: return UNSUPPORTED; - case 0x000: return MBlaze::XOR; - case 0x400: return MBlaze::PCMPEQ; - } -} - -static unsigned decodeANDN(uint32_t insn) { - switch (getFLAGS(insn)) { - default: return UNSUPPORTED; - case 0x000: return MBlaze::ANDN; - case 0x400: return MBlaze::PCMPNE; - } -} - -static unsigned decodeRTSD(uint32_t insn) { - switch ((insn>>21)&0x1F) { - default: return UNSUPPORTED; - case 0x10: return MBlaze::RTSD; - case 0x11: return MBlaze::RTID; - case 0x12: return MBlaze::RTBD; - case 0x14: return MBlaze::RTED; - } -} - -static unsigned getOPCODE(uint32_t insn) { - unsigned opcode = mblazeBinary2Opcode[ (insn>>26)&0x3F ]; - switch (opcode) { - case MBlaze::MUL: return decodeMUL(insn); - case MBlaze::SEXT8: return decodeSEXT(insn); - case MBlaze::BEQ: return decodeBEQ(insn); - case MBlaze::BEQI: return decodeBEQI(insn); - case MBlaze::BR: return decodeBR(insn); - case MBlaze::BRI: return decodeBRI(insn); - case MBlaze::BSRL: return decodeBSRL(insn); - case MBlaze::BSRLI: return decodeBSRLI(insn); - case MBlaze::RSUBK: return decodeRSUBK(insn); - case MBlaze::FADD: return decodeFADD(insn); - case MBlaze::GET: return decodeGET(insn); - case MBlaze::GETD: return decodeGETD(insn); - case MBlaze::IDIV: return decodeIDIV(insn); - case MBlaze::LBU: return decodeLBU(insn); - case MBlaze::LHU: return decodeLHU(insn); - case MBlaze::LW: return decodeLW(insn); - case MBlaze::SB: return decodeSB(insn); - case MBlaze::SH: return decodeSH(insn); - case MBlaze::SW: return decodeSW(insn); - case MBlaze::MFS: return decodeMFS(insn); - case MBlaze::OR: return decodeOR(insn); - case MBlaze::XOR: return decodeXOR(insn); - case MBlaze::ANDN: return decodeANDN(insn); - case MBlaze::RTSD: return decodeRTSD(insn); - default: return opcode; - } -} - -// -// Public interface for the disassembler -// - -MCDisassembler::DecodeStatus 
MBlazeDisassembler::getInstruction(MCInst &instr, - uint64_t &size, - const MemoryObject ®ion, - uint64_t address, - raw_ostream &vStream, - raw_ostream &cStream) const { - // The machine instruction. - uint32_t insn; - uint8_t bytes[4]; - - // By default we consume 1 byte on failure - size = 1; - - // We want to read exactly 4 bytes of data. - if (region.readBytes(address, 4, bytes) == -1) - return Fail; - - // Encoded as a big-endian 32-bit word in the stream. - insn = (bytes[0]<<24) | (bytes[1]<<16) | (bytes[2]<< 8) | (bytes[3]<<0); - - // Get the MCInst opcode from the binary instruction and make sure - // that it is a valid instruction. - unsigned opcode = getOPCODE(insn); - if (opcode == UNSUPPORTED) - return Fail; - - instr.setOpcode(opcode); - - unsigned RD = getRD(insn); - unsigned RA = getRA(insn); - unsigned RB = getRB(insn); - unsigned RS = getRS(insn); - - uint64_t tsFlags = MBlazeInsts[opcode].TSFlags; - switch ((tsFlags & MBlazeII::FormMask)) { - default: - return Fail; - - case MBlazeII::FC: - break; - - case MBlazeII::FRRRR: - if (RD == UNSUPPORTED || RA == UNSUPPORTED || RB == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateReg(RB)); - instr.addOperand(MCOperand::CreateReg(RA)); - break; - - case MBlazeII::FRRR: - if (RD == UNSUPPORTED || RA == UNSUPPORTED || RB == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateReg(RA)); - instr.addOperand(MCOperand::CreateReg(RB)); - break; - - case MBlazeII::FRR: - if (RD == UNSUPPORTED || RA == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateReg(RA)); - break; - - case MBlazeII::FRI: - switch (opcode) { - default: - return Fail; - case MBlaze::MFS: - if (RD == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateImm(insn&0x3FFF)); - break; - case MBlaze::MTS: - if (RA == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateImm(insn&0x3FFF)); - instr.addOperand(MCOperand::CreateReg(RA)); - break; - case MBlaze::MSRSET: - case MBlaze::MSRCLR: - if (RD == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateImm(insn&0x7FFF)); - break; - } - break; - - case MBlazeII::FRRI: - if (RD == UNSUPPORTED || RA == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateReg(RA)); - switch (opcode) { - default: - instr.addOperand(MCOperand::CreateImm(getIMM(insn))); - break; - case MBlaze::BSRLI: - case MBlaze::BSRAI: - case MBlaze::BSLLI: - instr.addOperand(MCOperand::CreateImm(insn&0x1F)); - break; - } - break; - - case MBlazeII::FCRR: - if (RA == UNSUPPORTED || RB == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RA)); - instr.addOperand(MCOperand::CreateReg(RB)); - break; - - case MBlazeII::FCRI: - if (RA == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RA)); - instr.addOperand(MCOperand::CreateImm(getIMM(insn))); - break; - - case MBlazeII::FRCR: - if (RD == UNSUPPORTED || RB == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateReg(RB)); - break; - - case MBlazeII::FRCI: - if (RD == UNSUPPORTED) - return Fail; - instr.addOperand(MCOperand::CreateReg(RD)); - instr.addOperand(MCOperand::CreateImm(getIMM(insn))); - break; - - case MBlazeII::FCCR: - if (RB == UNSUPPORTED) - return Fail; - 
instr.addOperand(MCOperand::CreateReg(RB));
-    break;
-
-  case MBlazeII::FCCI:
-    instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
-    break;
-
-  case MBlazeII::FRRCI:
-    if (RD == UNSUPPORTED || RA == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RD));
-    instr.addOperand(MCOperand::CreateReg(RA));
-    instr.addOperand(MCOperand::CreateImm(getSHT(insn)));
-    break;
-
-  case MBlazeII::FRRC:
-    if (RD == UNSUPPORTED || RA == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RD));
-    instr.addOperand(MCOperand::CreateReg(RA));
-    break;
-
-  case MBlazeII::FRCX:
-    if (RD == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RD));
-    instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
-    break;
-
-  case MBlazeII::FRCS:
-    if (RD == UNSUPPORTED || RS == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RD));
-    instr.addOperand(MCOperand::CreateReg(RS));
-    break;
-
-  case MBlazeII::FCRCS:
-    if (RS == UNSUPPORTED || RA == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RS));
-    instr.addOperand(MCOperand::CreateReg(RA));
-    break;
-
-  case MBlazeII::FCRCX:
-    if (RA == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RA));
-    instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
-    break;
-
-  case MBlazeII::FCX:
-    instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
-    break;
-
-  case MBlazeII::FCR:
-    if (RB == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RB));
-    break;
-
-  case MBlazeII::FRIR:
-    if (RD == UNSUPPORTED || RA == UNSUPPORTED)
-      return Fail;
-    instr.addOperand(MCOperand::CreateReg(RD));
-    instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
-    instr.addOperand(MCOperand::CreateReg(RA));
-    break;
-  }
-
-  // We always consume 4 bytes of data on success
-  size = 4;
-
-  return Success;
-}
-
-static MCDisassembler *createMBlazeDisassembler(const Target &T,
-                                                const MCSubtargetInfo &STI) {
-  return new MBlazeDisassembler(STI);
-}
-
-extern "C" void LLVMInitializeMBlazeDisassembler() {
-  // Register the disassembler.
-  TargetRegistry::RegisterMCDisassembler(TheMBlazeTarget,
-                                         createMBlazeDisassembler);
-}
diff --git a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h b/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
deleted file mode 100644
index b8ff8f6..0000000
--- a/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===-- MBlazeDisassembler.h - Disassembler for MicroBlaze -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is part of the MBlaze Disassembler. It is the header for
-// MBlazeDisassembler, a subclass of MCDisassembler.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MBLAZEDISASSEMBLER_H
-#define MBLAZEDISASSEMBLER_H
-
-#include "llvm/MC/MCDisassembler.h"
-
-namespace llvm {
-
-class MCInst;
-class MemoryObject;
-class raw_ostream;
-
-/// MBlazeDisassembler - Disassembler for all MBlaze platforms.
-class MBlazeDisassembler : public MCDisassembler {
-public:
-  /// Constructor - Initializes the disassembler.
-  ///
-  MBlazeDisassembler(const MCSubtargetInfo &STI) :
-    MCDisassembler(STI) {
-  }
-
-  ~MBlazeDisassembler() {
-  }
-
-  /// getInstruction - See MCDisassembler.
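-  /// Reads one 32-bit big-endian word from the memory object; on success
-  /// size is set to 4, on any failure it is left at 1 so the caller can
-  /// resynchronize.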
- MCDisassembler::DecodeStatus getInstruction(MCInst &instr, - uint64_t &size, - const MemoryObject ®ion, - uint64_t address, - raw_ostream &vStream, - raw_ostream &cStream) const; -}; - -} // namespace llvm - -#endif diff --git a/lib/Target/MBlaze/Disassembler/Makefile b/lib/Target/MBlaze/Disassembler/Makefile deleted file mode 100644 index 0530b32..0000000 --- a/lib/Target/MBlaze/Disassembler/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -##===- lib/Target/MBlaze/Disassembler/Makefile -------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## - -LEVEL = ../../../.. -LIBRARYNAME = LLVMMBlazeDisassembler - -# Hack: we need to include 'main' MBlaze target directory to grab headers -CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. - -include $(LEVEL)/Makefile.common diff --git a/lib/Target/MBlaze/InstPrinter/CMakeLists.txt b/lib/Target/MBlaze/InstPrinter/CMakeLists.txt deleted file mode 100644 index 586e2d3..0000000 --- a/lib/Target/MBlaze/InstPrinter/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. - ${CMAKE_CURRENT_SOURCE_DIR}/.. ) - -add_llvm_library(LLVMMBlazeAsmPrinter - MBlazeInstPrinter.cpp - ) - -add_dependencies(LLVMMBlazeAsmPrinter MBlazeCommonTableGen) diff --git a/lib/Target/MBlaze/InstPrinter/LLVMBuild.txt b/lib/Target/MBlaze/InstPrinter/LLVMBuild.txt deleted file mode 100644 index 3a21a05..0000000 --- a/lib/Target/MBlaze/InstPrinter/LLVMBuild.txt +++ /dev/null @@ -1,23 +0,0 @@ -;===- ./lib/Target/MBlaze/InstPrinter/LLVMBuild.txt ------------*- Conf -*--===; -; -; The LLVM Compiler Infrastructure -; -; This file is distributed under the University of Illinois Open Source -; License. See LICENSE.TXT for details. -; -;===------------------------------------------------------------------------===; -; -; This is an LLVMBuild description file for the components in this subdirectory. -; -; For more information on the LLVMBuild system, please see: -; -; http://llvm.org/docs/LLVMBuild.html -; -;===------------------------------------------------------------------------===; - -[component_0] -type = Library -name = MBlazeAsmPrinter -parent = MBlaze -required_libraries = MC Support -add_to_library_groups = MBlaze diff --git a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp b/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp deleted file mode 100644 index fc2b3d5..0000000 --- a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp +++ /dev/null @@ -1,71 +0,0 @@ -//===-- MBlazeInstPrinter.cpp - Convert MBlaze MCInst to assembly syntax --===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This class prints an MBlaze MCInst to a .s file. -// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "asm-printer" -#include "MBlazeInstPrinter.h" -#include "MBlaze.h" -#include "llvm/MC/MCAsmInfo.h" -#include "llvm/MC/MCExpr.h" -#include "llvm/MC/MCInst.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/FormattedStream.h" -using namespace llvm; - - -// Include the auto-generated portion of the assembly writer. 
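-// (MBlazeGenAsmWriter.inc is produced by the "tablegen ... -gen-asm-writer"
-// rule in the CMakeLists.txt removed earlier in this change.)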
-#include "MBlazeGenAsmWriter.inc" - -void MBlazeInstPrinter::printInst(const MCInst *MI, raw_ostream &O, - StringRef Annot) { - printInstruction(MI, O); - printAnnotation(O, Annot); -} - -void MBlazeInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O, const char *Modifier) { - assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported"); - const MCOperand &Op = MI->getOperand(OpNo); - if (Op.isReg()) { - O << getRegisterName(Op.getReg()); - } else if (Op.isImm()) { - O << (int32_t)Op.getImm(); - } else { - assert(Op.isExpr() && "unknown operand kind in printOperand"); - O << *Op.getExpr(); - } -} - -void MBlazeInstPrinter::printFSLImm(const MCInst *MI, int OpNo, - raw_ostream &O) { - const MCOperand &MO = MI->getOperand(OpNo); - if (MO.isImm()) - O << "rfsl" << MO.getImm(); - else - printOperand(MI, OpNo, O, NULL); -} - -void MBlazeInstPrinter::printUnsignedImm(const MCInst *MI, int OpNo, - raw_ostream &O) { - const MCOperand &MO = MI->getOperand(OpNo); - if (MO.isImm()) - O << (uint32_t)MO.getImm(); - else - printOperand(MI, OpNo, O, NULL); -} - -void MBlazeInstPrinter::printMemOperand(const MCInst *MI, int OpNo, - raw_ostream &O, const char *Modifier) { - printOperand(MI, OpNo, O, NULL); - O << ", "; - printOperand(MI, OpNo+1, O, NULL); -} diff --git a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h b/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h deleted file mode 100644 index 51ba7c3..0000000 --- a/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h +++ /dev/null @@ -1,43 +0,0 @@ -//= MBlazeInstPrinter.h - Convert MBlaze MCInst to assembly syntax -*- C++ -*-// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This class prints a MBlaze MCInst to a .s file. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZEINSTPRINTER_H -#define MBLAZEINSTPRINTER_H - -#include "llvm/MC/MCInstPrinter.h" - -namespace llvm { - class MCOperand; - - class MBlazeInstPrinter : public MCInstPrinter { - public: - MBlazeInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, - const MCRegisterInfo &MRI) - : MCInstPrinter(MAI, MII, MRI) {} - - virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot); - - // Autogenerated by tblgen. - void printInstruction(const MCInst *MI, raw_ostream &O); - static const char *getRegisterName(unsigned RegNo); - - void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O, - const char *Modifier = 0); - void printFSLImm(const MCInst *MI, int OpNo, raw_ostream &O); - void printUnsignedImm(const MCInst *MI, int OpNo, raw_ostream &O); - void printMemOperand(const MCInst *MI, int OpNo,raw_ostream &O, - const char *Modifier = 0); - }; -} - -#endif diff --git a/lib/Target/MBlaze/InstPrinter/Makefile b/lib/Target/MBlaze/InstPrinter/Makefile deleted file mode 100644 index 9fb6e86..0000000 --- a/lib/Target/MBlaze/InstPrinter/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -##===- lib/Target/MBlaze/AsmPrinter/Makefile ---------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## -LEVEL = ../../../.. 
-LIBRARYNAME = LLVMMBlazeAsmPrinter - -# Hack: we need to include 'main' MBlaze target directory to grab -# private headers -CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. - -include $(LEVEL)/Makefile.common diff --git a/lib/Target/MBlaze/LLVMBuild.txt b/lib/Target/MBlaze/LLVMBuild.txt deleted file mode 100644 index 0b29007..0000000 --- a/lib/Target/MBlaze/LLVMBuild.txt +++ /dev/null @@ -1,34 +0,0 @@ -;===- ./lib/Target/MBlaze/LLVMBuild.txt ------------------------*- Conf -*--===; -; -; The LLVM Compiler Infrastructure -; -; This file is distributed under the University of Illinois Open Source -; License. See LICENSE.TXT for details. -; -;===------------------------------------------------------------------------===; -; -; This is an LLVMBuild description file for the components in this subdirectory. -; -; For more information on the LLVMBuild system, please see: -; -; http://llvm.org/docs/LLVMBuild.html -; -;===------------------------------------------------------------------------===; - -[common] -subdirectories = AsmParser Disassembler InstPrinter MCTargetDesc TargetInfo - -[component_0] -type = TargetGroup -name = MBlaze -parent = Target -has_asmparser = 1 -has_asmprinter = 1 -has_disassembler = 1 - -[component_1] -type = Library -name = MBlazeCodeGen -parent = MBlaze -required_libraries = AsmPrinter CodeGen Core MBlazeAsmPrinter MBlazeDesc MBlazeInfo MC SelectionDAG Support Target -add_to_library_groups = MBlaze diff --git a/lib/Target/MBlaze/MBlaze.h b/lib/Target/MBlaze/MBlaze.h deleted file mode 100644 index 1399b85..0000000 --- a/lib/Target/MBlaze/MBlaze.h +++ /dev/null @@ -1,32 +0,0 @@ -//===-- MBlaze.h - Top-level interface for MBlaze ---------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the entry points for global functions defined in -// the LLVM MBlaze back-end. -// -//===----------------------------------------------------------------------===// - -#ifndef TARGET_MBLAZE_H -#define TARGET_MBLAZE_H - -#include "MCTargetDesc/MBlazeBaseInfo.h" -#include "MCTargetDesc/MBlazeMCTargetDesc.h" -#include "llvm/Target/TargetMachine.h" - -namespace llvm { - class MBlazeTargetMachine; - class FunctionPass; - class MachineCodeEmitter; - - FunctionPass *createMBlazeISelDag(MBlazeTargetMachine &TM); - FunctionPass *createMBlazeDelaySlotFillerPass(MBlazeTargetMachine &TM); - -} // end namespace llvm; - -#endif diff --git a/lib/Target/MBlaze/MBlaze.td b/lib/Target/MBlaze/MBlaze.td deleted file mode 100644 index c288855..0000000 --- a/lib/Target/MBlaze/MBlaze.td +++ /dev/null @@ -1,73 +0,0 @@ -//===-- MBlaze.td - Describe the MBlaze Target Machine -----*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// This is the top level entry point for the MBlaze target. 
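-// It pulls in the register, scheduling, intrinsic, instruction and calling
-// convention descriptions and defines the subtarget features and processor
-// models published by the backend.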
-//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Target-independent interfaces -//===----------------------------------------------------------------------===// - -include "llvm/Target/Target.td" - -//===----------------------------------------------------------------------===// -// Register File, Calling Conv, Instruction Descriptions -//===----------------------------------------------------------------------===// - -include "MBlazeRegisterInfo.td" -include "MBlazeSchedule.td" -include "MBlazeIntrinsics.td" -include "MBlazeInstrInfo.td" -include "MBlazeCallingConv.td" - -def MBlazeInstrInfo : InstrInfo; - -//===----------------------------------------------------------------------===// -// Microblaze Subtarget features // -//===----------------------------------------------------------------------===// - -def FeatureBarrel : SubtargetFeature<"barrel", "HasBarrel", "true", - "Implements barrel shifter">; -def FeatureDiv : SubtargetFeature<"div", "HasDiv", "true", - "Implements hardware divider">; -def FeatureMul : SubtargetFeature<"mul", "HasMul", "true", - "Implements hardware multiplier">; -def FeaturePatCmp : SubtargetFeature<"patcmp", "HasPatCmp", "true", - "Implements pattern compare instruction">; -def FeatureFPU : SubtargetFeature<"fpu", "HasFPU", "true", - "Implements floating point unit">; -def FeatureMul64 : SubtargetFeature<"mul64", "HasMul64", "true", - "Implements multiplier with 64-bit result">; -def FeatureSqrt : SubtargetFeature<"sqrt", "HasSqrt", "true", - "Implements sqrt and floating point convert">; - -//===----------------------------------------------------------------------===// -// MBlaze processors supported. -//===----------------------------------------------------------------------===// - -def : Processor<"mblaze", NoItineraries, []>; -def : Processor<"mblaze3", MBlazePipe3Itineraries, []>; -def : Processor<"mblaze5", MBlazePipe5Itineraries, []>; - -//===----------------------------------------------------------------------===// -// Instruction Descriptions -//===----------------------------------------------------------------------===// - -def MBlazeAsmWriter : AsmWriter { - string AsmWriterClassName = "InstPrinter"; - bit isMCAsmWriter = 1; -} - -//===----------------------------------------------------------------------===// -// Target Declaration -//===----------------------------------------------------------------------===// - -def MBlaze : Target { - let InstructionSet = MBlazeInstrInfo; - let AssemblyWriters = [MBlazeAsmWriter]; -} diff --git a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp deleted file mode 100644 index 7dafaef..0000000 --- a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp +++ /dev/null @@ -1,326 +0,0 @@ -//===-- MBlazeAsmPrinter.cpp - MBlaze LLVM assembly writer ----------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains a printer that converts from our internal representation -// of machine-dependent LLVM code to GAS-format MBlaze assembly language. 
-//
-//===----------------------------------------------------------------------===//

-#define DEBUG_TYPE "mblaze-asm-printer"
-
-#include "MBlaze.h"
-#include "InstPrinter/MBlazeInstPrinter.h"
-#include "MBlazeInstrInfo.h"
-#include "MBlazeMCInstLower.h"
-#include "MBlazeMachineFunction.h"
-#include "MBlazeSubtarget.h"
-#include "MBlazeTargetMachine.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Module.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include <cctype>
-
-using namespace llvm;
-
-namespace {
-  class MBlazeAsmPrinter : public AsmPrinter {
-    const MBlazeSubtarget *Subtarget;
-  public:
-    explicit MBlazeAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
-      : AsmPrinter(TM, Streamer) {
-      Subtarget = &TM.getSubtarget<MBlazeSubtarget>();
-    }
-
-    virtual const char *getPassName() const {
-      return "MBlaze Assembly Printer";
-    }
-
-    void printSavedRegsBitmask();
-    void emitFrameDirective();
-    virtual void EmitFunctionBodyStart();
-    virtual void EmitFunctionBodyEnd();
-    virtual void EmitFunctionEntryLabel();
-
-    virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB)
-        const;
-
-    bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
-                         unsigned AsmVariant, const char *ExtraCode,
-                         raw_ostream &O);
-    void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
-    void printUnsignedImm(const MachineInstr *MI, int opNum, raw_ostream &O);
-    void printFSLImm(const MachineInstr *MI, int opNum, raw_ostream &O);
-    void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
-                         const char *Modifier = 0);
-
-    void EmitInstruction(const MachineInstr *MI);
-  };
-} // end of anonymous namespace
-
-// #include "MBlazeGenAsmWriter.inc"
-
-//===----------------------------------------------------------------------===//
-//
-//  MBlaze Asm Directives
-//
-//  -- Frame directive "frame Stackpointer, Stacksize, RARegister"
-//  Describe the stack frame.
-//
-//  -- Mask directives "mask  bitmask, offset"
-//  Tells the assembler which registers are saved and where.
-//  bitmask - contains a little endian bitset indicating which registers are
-//            saved on function prologue (e.g. with a 0x80000000 mask, the
-//            assembler knows the register 31 (RA) is saved at prologue).
-//  offset  - the position before stack pointer subtraction indicating where
-//            the first saved register on prologue is located (e.g. with a
-//            -8 offset the first register is stored 8 bytes below the
-//            pre-call stack pointer, as the example below shows).
-//
-// Consider the following function prologue:
-//
-//    .frame  R19,48,R15
-//    .mask   0xc0000000,-8
-//    addiu   R1, R1, -48
-//    sw      R15, 40(R1)
-//    sw      R19, 36(R1)
-//
-// With a 0xc0000000 mask, the assembler knows the register 15 (R15) and
-// 19 (R19) are saved at prologue. As the save order on prologue is from
-// left to right, R15 is saved first. A -8 offset means that after the
-// stack pointer subtraction, the first register in the mask (R15) will be
-// saved at address 48-8=40.
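The mask encoding described above is plain bit arithmetic. A minimal,
standalone sketch (the register numbering is the one used by
getMBlazeRegisterNumbering below; this is an illustration, not the shipped
implementation):

    #include <cstdint>
    #include <initializer_list>

    // Build a .mask-style bitmask: bit N is set when register N is saved
    // by the prologue. Saving register 31 (RA) yields 0x80000000, matching
    // the example above.
    static uint32_t computeSaveMask(std::initializer_list<unsigned> RegNums) {
      uint32_t Mask = 0;
      for (unsigned R : RegNums)
        Mask |= (uint32_t(1) << R);
      return Mask; // computeSaveMask({31}) == 0x80000000
    }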
-//
-//===----------------------------------------------------------------------===//

-// Print a 32 bit hex number, including leading zeros.
-static void printHex32(unsigned int Value, raw_ostream &O) {
-  O << "0x";
-  for (int i = 7; i >= 0; i--)
-    O.write_hex((Value & (0xF << (i*4))) >> (i*4));
-}
-
-// Create a bitmask with all callee saved registers for CPU or Floating Point
-// registers. For CPU registers consider RA, GP and FP for saving if necessary.
-void MBlazeAsmPrinter::printSavedRegsBitmask() {
-  const TargetFrameLowering *TFI = TM.getFrameLowering();
-  const TargetRegisterInfo &RI = *TM.getRegisterInfo();
-
-  // CPU Saved Registers Bitmasks
-  unsigned int CPUBitmask = 0;
-
-  // Set the CPU Bitmasks
-  const MachineFrameInfo *MFI = MF->getFrameInfo();
-  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
-  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
-    unsigned Reg = CSI[i].getReg();
-    unsigned RegNum = getMBlazeRegisterNumbering(Reg);
-    if (MBlaze::GPRRegClass.contains(Reg))
-      CPUBitmask |= (1 << RegNum);
-  }
-
-  // Return Address and Frame registers must also be set in CPUBitmask.
-  if (TFI->hasFP(*MF))
-    CPUBitmask |= (1 << getMBlazeRegisterNumbering(RI.getFrameRegister(*MF)));
-
-  if (MFI->adjustsStack())
-    CPUBitmask |= (1 << getMBlazeRegisterNumbering(RI.getRARegister()));
-
-  // Print CPUBitmask
-  OutStreamer.EmitRawText("\t.mask\t0x" + Twine::utohexstr(CPUBitmask));
-}
-
-/// Frame Directive
-void MBlazeAsmPrinter::emitFrameDirective() {
-  if (!OutStreamer.hasRawTextSupport())
-    return;
-
-  const TargetRegisterInfo &RI = *TM.getRegisterInfo();
-  unsigned stkReg = RI.getFrameRegister(*MF);
-  unsigned retReg = RI.getRARegister();
-  unsigned stkSze = MF->getFrameInfo()->getStackSize();
-
-  OutStreamer.EmitRawText("\t.frame\t" +
-                          Twine(MBlazeInstPrinter::getRegisterName(stkReg)) +
-                          "," + Twine(stkSze) + "," +
-                          Twine(MBlazeInstPrinter::getRegisterName(retReg)));
-}
-
-void MBlazeAsmPrinter::EmitFunctionEntryLabel() {
-  if (OutStreamer.hasRawTextSupport())
-    OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
-  AsmPrinter::EmitFunctionEntryLabel();
-}
-
-void MBlazeAsmPrinter::EmitFunctionBodyStart() {
-  if (!OutStreamer.hasRawTextSupport())
-    return;
-
-  emitFrameDirective();
-  printSavedRegsBitmask();
-}
-
-void MBlazeAsmPrinter::EmitFunctionBodyEnd() {
-  if (OutStreamer.hasRawTextSupport())
-    OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
-}
-
-//===----------------------------------------------------------------------===//
-void MBlazeAsmPrinter::EmitInstruction(const MachineInstr *MI) {
-  MBlazeMCInstLower MCInstLowering(OutContext, *this);
-
-  MCInst TmpInst;
-  MCInstLowering.Lower(MI, TmpInst);
-  OutStreamer.EmitInstruction(TmpInst);
-}
-
-// Print out an operand for an inline asm expression.
-bool MBlazeAsmPrinter::
-PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
-                unsigned AsmVariant,const char *ExtraCode, raw_ostream &O) {
-  // Does this asm operand have a single letter operand modifier?
-  if (ExtraCode && ExtraCode[0]) {
-    if (ExtraCode[1] != 0) return true; // Unknown modifier.
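-    // Only the generic operand modifiers are supported; anything the switch
-    // below does not special-case is forwarded to the base AsmPrinter.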
-
-    switch (ExtraCode[0]) {
-    default:
-      // See if this is a generic print operand
-      return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
-    }
-  }
-
-  printOperand(MI, OpNo, O);
-  return false;
-}
-
-void MBlazeAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
-                                    raw_ostream &O) {
-  const MachineOperand &MO = MI->getOperand(opNum);
-
-  switch (MO.getType()) {
-  case MachineOperand::MO_Register:
-    O << MBlazeInstPrinter::getRegisterName(MO.getReg());
-    break;
-
-  case MachineOperand::MO_Immediate:
-    O << (int32_t)MO.getImm();
-    break;
-
-  case MachineOperand::MO_FPImmediate: {
-    const ConstantFP *fp = MO.getFPImm();
-    printHex32(fp->getValueAPF().bitcastToAPInt().getZExtValue(), O);
-    O << ";\t# immediate = " << *fp;
-    break;
-  }
-
-  case MachineOperand::MO_MachineBasicBlock:
-    O << *MO.getMBB()->getSymbol();
-    return;
-
-  case MachineOperand::MO_GlobalAddress:
-    O << *Mang->getSymbol(MO.getGlobal());
-    break;
-
-  case MachineOperand::MO_ExternalSymbol:
-    O << *GetExternalSymbolSymbol(MO.getSymbolName());
-    break;
-
-  case MachineOperand::MO_JumpTableIndex:
-    O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
-      << '_' << MO.getIndex();
-    break;
-
-  case MachineOperand::MO_ConstantPoolIndex:
-    O << MAI->getPrivateGlobalPrefix() << "CPI"
-      << getFunctionNumber() << "_" << MO.getIndex();
-    if (MO.getOffset())
-      O << "+" << MO.getOffset();
-    break;
-
-  default:
-    llvm_unreachable("<unknown operand type>");
-  }
-}
-
-void MBlazeAsmPrinter::printUnsignedImm(const MachineInstr *MI, int opNum,
-                                        raw_ostream &O) {
-  const MachineOperand &MO = MI->getOperand(opNum);
-  if (MO.isImm())
-    O << (uint32_t)MO.getImm();
-  else
-    printOperand(MI, opNum, O);
-}
-
-void MBlazeAsmPrinter::printFSLImm(const MachineInstr *MI, int opNum,
-                                   raw_ostream &O) {
-  const MachineOperand &MO = MI->getOperand(opNum);
-  if (MO.isImm())
-    O << "rfsl" << (unsigned int)MO.getImm();
-  else
-    printOperand(MI, opNum, O);
-}
-
-void MBlazeAsmPrinter::
-printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
-                const char *Modifier) {
-  printOperand(MI, opNum, O);
-  O << ", ";
-  printOperand(MI, opNum+1, O);
-}
-
-/// isBlockOnlyReachableByFallthrough - Return true if the basic block has
-/// exactly one predecessor and the control transfer mechanism between
-/// the predecessor and this block is a fall-through.
-bool MBlazeAsmPrinter::
-isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
-  // If this is a landing pad, it isn't a fall through.  If it has no preds,
-  // then nothing falls through to it.
-  if (MBB->isLandingPad() || MBB->pred_empty())
-    return false;
-
-  // If there isn't exactly one predecessor, it can't be a fall through.
-  MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
-  ++PI2;
-  if (PI2 != MBB->pred_end())
-    return false;
-
-  // The predecessor has to be immediately before this block.
-  const MachineBasicBlock *Pred = *PI;
-
-  if (!Pred->isLayoutSuccessor(MBB))
-    return false;
-
-  // If the block is completely empty, then it definitely does fall through.
-  if (Pred->empty())
-    return true;
-
-  // Check if the last terminator is an unconditional branch.
-  MachineBasicBlock::const_iterator I = Pred->end();
-  while (I != Pred->begin() && !(--I)->isTerminator())
-    ; // Noop
-  return I == Pred->end() || !I->isBarrier();
-}
-
-// Force static initialization.
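-// RegisterAsmPrinter's constructor hooks MBlazeAsmPrinter into the
-// TargetRegistry entry for TheMBlazeTarget, so tools that link this library
-// pick it up automatically.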
-extern "C" void LLVMInitializeMBlazeAsmPrinter() { - RegisterAsmPrinter<MBlazeAsmPrinter> X(TheMBlazeTarget); -} diff --git a/lib/Target/MBlaze/MBlazeCallingConv.td b/lib/Target/MBlaze/MBlazeCallingConv.td deleted file mode 100644 index 00a4219..0000000 --- a/lib/Target/MBlaze/MBlazeCallingConv.td +++ /dev/null @@ -1,24 +0,0 @@ -//===- MBlazeCallingConv.td - Calling Conventions for MBlaze -*- tablegen -*-=// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// This describes the calling conventions for MBlaze architecture. -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// MBlaze ABI Calling Convention -//===----------------------------------------------------------------------===// - -def RetCC_MBlaze : CallingConv<[ - // i32 are returned in registers R3, R4 - CCIfType<[i32,f32], CCAssignToReg<[R3, R4]>> -]>; - -def CC_MBlaze : CallingConv<[ - CCIfType<[i32,f32], CCCustom<"CC_MBlaze_AssignReg">>, - CCIfType<[i32,f32], CCAssignToStack<4, 4>> -]>; diff --git a/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp b/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp deleted file mode 100644 index 1d18cc4..0000000 --- a/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp +++ /dev/null @@ -1,252 +0,0 @@ -//===-- DelaySlotFiller.cpp - MBlaze delay slot filler --------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// A pass that attempts to fill instructions with delay slots. If no -// instructions can be moved into the delay slot then a NOP is placed there. -// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "delay-slot-filler" - -#include "MBlaze.h" -#include "MBlazeTargetMachine.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetInstrInfo.h" - -using namespace llvm; - -STATISTIC(FilledSlots, "Number of delay slots filled"); - -static cl::opt<bool> MBDisableDelaySlotFiller( - "disable-mblaze-delay-filler", - cl::init(false), - cl::desc("Disable the MBlaze delay slot filter."), - cl::Hidden); - -namespace { - struct Filler : public MachineFunctionPass { - TargetMachine &TM; - - static char ID; - Filler(TargetMachine &tm) - : MachineFunctionPass(ID), TM(tm) { } - - virtual const char *getPassName() const { - return "MBlaze Delay Slot Filler"; - } - - bool runOnMachineBasicBlock(MachineBasicBlock &MBB); - bool runOnMachineFunction(MachineFunction &F) { - bool Changed = false; - for (MachineFunction::iterator FI = F.begin(), FE = F.end(); - FI != FE; ++FI) - Changed |= runOnMachineBasicBlock(*FI); - return Changed; - } - - }; - char Filler::ID = 0; -} // end of anonymous namespace - -static bool hasImmInstruction(MachineBasicBlock::iterator &candidate) { - // Any instruction with an immediate mode operand greater than - // 16-bits requires an implicit IMM instruction. 
-  unsigned numOper = candidate->getNumOperands();
-  for (unsigned op = 0; op < numOper; ++op) {
-    MachineOperand &mop = candidate->getOperand(op);
-
-    // The operand requires more than 16-bits to represent.
-    if (mop.isImm() && (mop.getImm() < -0x8000 || mop.getImm() > 0x7fff))
-      return true;
-
-    // We must assume that unknown immediate values require more than
-    // 16-bits to represent.
-    if (mop.isGlobal() || mop.isSymbol() || mop.isJTI() || mop.isCPI())
-      return true;
-
-    // FIXME: we could probably check to see if the FP value happens
-    //        to not need an IMM instruction. For now we just always
-    //        assume that FP values do.
-    if (mop.isFPImm())
-      return true;
-  }
-
-  return false;
-}
-
-static unsigned getLastRealOperand(MachineBasicBlock::iterator &instr) {
-  switch (instr->getOpcode()) {
-  default: return instr->getNumOperands();
-
-  // These instructions have a variable number of operands but the first two
-  // are the "real" operands that we care about during hazard detection.
-  case MBlaze::BRLID:
-  case MBlaze::BRALID:
-  case MBlaze::BRLD:
-  case MBlaze::BRALD:
-    return 2;
-  }
-}
-
-static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
-                           MachineBasicBlock::iterator &slot) {
-  // Hazard check
-  MachineBasicBlock::iterator a = candidate;
-  MachineBasicBlock::iterator b = slot;
-
-  // MBB layout:-
-  //    candidate := a0 = operation(a1, a2)
-  //    ...middle bit...
-  //    slot := b0 = operation(b1, b2)
-  //
-  // Possible hazards:-
-  //    1. a1 or a2 was written during the middle bit
-  //    2. a0 was read or written during the middle bit
-  //    3. a0 is one or more of {b0, b1, b2}
-  //    4. b0 is one or more of {a1, a2}
-  //    5. a accesses memory, and the middle bit
-  //       contains a store operation.
-  bool a_is_memory = candidate->mayLoad() || candidate->mayStore();
-
-  // Determine the number of operands in the slot instruction and in the
-  // candidate instruction.
- const unsigned aend = getLastRealOperand(a); - const unsigned bend = getLastRealOperand(b); - - // Check hazards type 1, 2 and 5 by scanning the middle bit - MachineBasicBlock::iterator m = a; - for (++m; m != b; ++m) { - for (unsigned aop = 0; aop<aend; ++aop) { - bool aop_is_reg = a->getOperand(aop).isReg(); - if (!aop_is_reg) continue; - - bool aop_is_def = a->getOperand(aop).isDef(); - unsigned aop_reg = a->getOperand(aop).getReg(); - - const unsigned mend = getLastRealOperand(m); - for (unsigned mop = 0; mop<mend; ++mop) { - bool mop_is_reg = m->getOperand(mop).isReg(); - if (!mop_is_reg) continue; - - bool mop_is_def = m->getOperand(mop).isDef(); - unsigned mop_reg = m->getOperand(mop).getReg(); - - if (aop_is_def && (mop_reg == aop_reg)) - return true; // Hazard type 2, because aop = a0 - else if (mop_is_def && (mop_reg == aop_reg)) - return true; // Hazard type 1, because aop in {a1, a2} - } - } - - // Check hazard type 5 - if (a_is_memory && m->mayStore()) - return true; - } - - // Check hazard type 3 & 4 - for (unsigned aop = 0; aop<aend; ++aop) { - if (a->getOperand(aop).isReg()) { - unsigned aop_reg = a->getOperand(aop).getReg(); - - for (unsigned bop = 0; bop<bend; ++bop) { - if (b->getOperand(bop).isReg() && !b->getOperand(bop).isImplicit()) { - unsigned bop_reg = b->getOperand(bop).getReg(); - if (aop_reg == bop_reg) - return true; - } - } - } - } - - return false; -} - -static bool isDelayFiller(MachineBasicBlock &MBB, - MachineBasicBlock::iterator candidate) { - if (candidate == MBB.begin()) - return false; - - --candidate; - return (candidate->hasDelaySlot()); -} - -static bool hasUnknownSideEffects(MachineBasicBlock::iterator &I) { - if (!I->hasUnmodeledSideEffects()) - return false; - - unsigned op = I->getOpcode(); - if (op == MBlaze::ADDK || op == MBlaze::ADDIK || - op == MBlaze::ADDC || op == MBlaze::ADDIC || - op == MBlaze::ADDKC || op == MBlaze::ADDIKC || - op == MBlaze::RSUBK || op == MBlaze::RSUBIK || - op == MBlaze::RSUBC || op == MBlaze::RSUBIC || - op == MBlaze::RSUBKC || op == MBlaze::RSUBIKC) - return false; - - return true; -} - -static MachineBasicBlock::iterator -findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) { - MachineBasicBlock::iterator I = slot; - while (true) { - if (I == MBB.begin()) - break; - - --I; - if (I->hasDelaySlot() || I->isBranch() || isDelayFiller(MBB,I) || - I->isCall() || I->isReturn() || I->isBarrier() || - hasUnknownSideEffects(I)) - break; - - if (hasImmInstruction(I) || delayHasHazard(I,slot)) - continue; - - return I; - } - - return MBB.end(); -} - -/// runOnMachineBasicBlock - Fill in delay slots for the given basic block. -/// Currently, we fill delay slots with NOPs. We assume there is only one -/// delay slot per delayed instruction. 
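-/// For example (illustrative only), given
-///    add   r5, r6, r7
-///    brlid r15, callee
-/// the add can usually be moved into the slot after the branch, and only
-/// when no safe candidate exists is a NOP emitted instead.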
-bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) { - bool Changed = false; - for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) - if (I->hasDelaySlot()) { - MachineBasicBlock::iterator D = MBB.end(); - MachineBasicBlock::iterator J = I; - - if (!MBDisableDelaySlotFiller) - D = findDelayInstr(MBB,I); - - ++FilledSlots; - Changed = true; - - if (D == MBB.end()) - BuildMI(MBB, ++J, I->getDebugLoc(),TM.getInstrInfo()->get(MBlaze::NOP)); - else - MBB.splice(++J, &MBB, D); - } - return Changed; -} - -/// createMBlazeDelaySlotFillerPass - Returns a pass that fills in delay -/// slots in MBlaze MachineFunctions -FunctionPass *llvm::createMBlazeDelaySlotFillerPass(MBlazeTargetMachine &tm) { - return new Filler(tm); -} - diff --git a/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/lib/Target/MBlaze/MBlazeFrameLowering.cpp deleted file mode 100644 index 172304b..0000000 --- a/lib/Target/MBlaze/MBlazeFrameLowering.cpp +++ /dev/null @@ -1,488 +0,0 @@ -//===-- MBlazeFrameLowering.cpp - MBlaze Frame Information ---------------====// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the MBlaze implementation of TargetFrameLowering class. -// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "mblaze-frame-lowering" - -#include "MBlazeFrameLowering.h" -#include "InstPrinter/MBlazeInstPrinter.h" -#include "MBlazeInstrInfo.h" -#include "MBlazeMachineFunction.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineModuleInfo.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/IR/DataLayout.h" -#include "llvm/IR/Function.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetOptions.h" - -using namespace llvm; - -static cl::opt<bool> MBDisableStackAdjust( - "disable-mblaze-stack-adjust", - cl::init(false), - cl::desc("Disable MBlaze stack layout adjustment."), - cl::Hidden); - -static void replaceFrameIndexes(MachineFunction &MF, - SmallVector<std::pair<int,int64_t>, 16> &FR) { - MachineFrameInfo *MFI = MF.getFrameInfo(); - MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>(); - const SmallVector<std::pair<int,int64_t>, 16>::iterator FRB = FR.begin(); - const SmallVector<std::pair<int,int64_t>, 16>::iterator FRE = FR.end(); - - SmallVector<std::pair<int,int64_t>, 16>::iterator FRI = FRB; - for (; FRI != FRE; ++FRI) { - MFI->RemoveStackObject(FRI->first); - int NFI = MFI->CreateFixedObject(4, FRI->second, true); - MBlazeFI->recordReplacement(FRI->first, NFI); - - for (MachineFunction::iterator MB=MF.begin(), ME=MF.end(); MB!=ME; ++MB) { - MachineBasicBlock::iterator MBB = MB->begin(); - const MachineBasicBlock::iterator MBE = MB->end(); - - for (; MBB != MBE; ++MBB) { - MachineInstr::mop_iterator MIB = MBB->operands_begin(); - const MachineInstr::mop_iterator MIE = MBB->operands_end(); - - for (MachineInstr::mop_iterator MII = MIB; MII != MIE; ++MII) { - if (!MII->isFI() || MII->getIndex() != FRI->first) continue; - DEBUG(dbgs() << "FOUND FI#" << MII->getIndex() << "\n"); - MII->setIndex(NFI); - } - } - } - } -} - 
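A caller-side sketch of the replaceFrameIndexes helper above (the frame-index
numbers and offsets here are hypothetical; the real call site is
analyzeFrameIndexes below):

    // Relocate FI#3 to caller-frame offset -4 and FI#7 to -8; every machine
    // operand referencing the old indexes is rewritten to the new fixed
    // objects recorded by MBlazeFunctionInfo.
    SmallVector<std::pair<int, int64_t>, 16> Relocations;
    Relocations.push_back(std::make_pair(3, -4));  // hypothetical index/offset
    Relocations.push_back(std::make_pair(7, -8));
    replaceFrameIndexes(MF, Relocations);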
-//===----------------------------------------------------------------------===//
-//
-// Stack Frame Processing methods
-// +----------------------------+
-//
-// The stack is allocated by decrementing the stack pointer on
-// the first instruction of a function prologue. Once decremented,
-// all stack references are done through a positive offset
-// from the stack/frame pointer, so the stack is considered
-// to grow up.
-//
-//===----------------------------------------------------------------------===//
-
-static void analyzeFrameIndexes(MachineFunction &MF) {
-  if (MBDisableStackAdjust) return;
-
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
-  const MachineRegisterInfo &MRI = MF.getRegInfo();
-
-  MachineRegisterInfo::livein_iterator LII = MRI.livein_begin();
-  MachineRegisterInfo::livein_iterator LIE = MRI.livein_end();
-  const SmallVector<int, 16> &LiveInFI = MBlazeFI->getLiveIn();
-  SmallVector<MachineInstr*, 16> EraseInstr;
-  SmallVector<std::pair<int,int64_t>, 16> FrameRelocate;
-
-  MachineBasicBlock *MBB = MF.getBlockNumbered(0);
-  MachineBasicBlock::iterator MIB = MBB->begin();
-  MachineBasicBlock::iterator MIE = MBB->end();
-
-  int StackAdjust = 0;
-  int StackOffset = -28;
-
-  // In this loop we are searching for frame indexes that correspond to
-  // incoming arguments that are already in the stack. We look for
-  // instruction sequences like the following:
-  //
-  //    LWI REG, FI1, 0
-  //    ...
-  //    SWI REG, FI2, 0
-  //
-  // As long as there are no defs of REG in the ... part, we can eliminate
-  // the SWI instruction because the value has already been stored to the
-  // stack by the caller. All we need to do is locate FI at the correct
-  // stack location according to the calling conventions.
-  //
-  // Additionally, if the SWI operation kills the def of REG then we don't
-  // need the LWI operation so we can erase it as well.
-  for (unsigned i = 0, e = LiveInFI.size(); i < e; ++i) {
-    for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
-      if (I->getOpcode() != MBlaze::LWI || I->getNumOperands() != 3 ||
-          !I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
-          I->getOperand(1).getIndex() != LiveInFI[i]) continue;
-
-      unsigned FIReg = I->getOperand(0).getReg();
-      MachineBasicBlock::iterator SI = I;
-      for (SI++; SI != MIE; ++SI) {
-        if (!SI->getOperand(0).isReg() ||
-            !SI->getOperand(1).isFI() ||
-            SI->getOpcode() != MBlaze::SWI) continue;
-
-        int FI = SI->getOperand(1).getIndex();
-        if (SI->getOperand(0).getReg() != FIReg ||
-            MFI->isFixedObjectIndex(FI) ||
-            MFI->getObjectSize(FI) != 4) continue;
-
-        if (SI->getOperand(0).isDef()) break;
-
-        if (SI->getOperand(0).isKill()) {
-          DEBUG(dbgs() << "LWI for FI#" << I->getOperand(1).getIndex()
-                       << " removed\n");
-          EraseInstr.push_back(I);
-        }
-
-        EraseInstr.push_back(SI);
-        DEBUG(dbgs() << "SWI for FI#" << FI << " removed\n");
-
-        FrameRelocate.push_back(std::make_pair(FI,StackOffset));
-        DEBUG(dbgs() << "FI#" << FI << " relocated to " << StackOffset << "\n");
-
-        StackOffset -= 4;
-        StackAdjust += 4;
-        break;
-      }
-    }
-  }
-
-  // In this loop we are searching for frame indexes that correspond to
-  // incoming arguments that are in registers. We look for instruction
-  // sequences like the following:
-  //
-  //    ...  SWI REG, FI, 0
-  //
-  // As long as the ... part does not define REG and if REG is an incoming
-  // parameter register then we know that, according to ABI conventions, the
-  // caller has allocated stack space for it already. Instead of allocating
-  // stack space on our frame, we record the correct location in the caller's
-  // frame.
-  for (MachineRegisterInfo::livein_iterator LI = LII; LI != LIE; ++LI) {
-    for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
-      if (I->definesRegister(LI->first))
-        break;
-
-      if (I->getOpcode() != MBlaze::SWI || I->getNumOperands() != 3 ||
-          !I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
-          I->getOperand(1).getIndex() < 0) continue;
-
-      if (I->getOperand(0).getReg() == LI->first) {
-        int FI = I->getOperand(1).getIndex();
-        MBlazeFI->recordLiveIn(FI);
-
-        int FILoc = 0;
-        switch (LI->first) {
-        default: llvm_unreachable("invalid incoming parameter!");
-        case MBlaze::R5:  FILoc = -4; break;
-        case MBlaze::R6:  FILoc = -8; break;
-        case MBlaze::R7:  FILoc = -12; break;
-        case MBlaze::R8:  FILoc = -16; break;
-        case MBlaze::R9:  FILoc = -20; break;
-        case MBlaze::R10: FILoc = -24; break;
-        }
-
-        StackAdjust += 4;
-        FrameRelocate.push_back(std::make_pair(FI,FILoc));
-        DEBUG(dbgs() << "FI#" << FI << " relocated to " << FILoc << "\n");
-        break;
-      }
-    }
-  }
-
-  // Go ahead and erase all of the instructions that we determined were
-  // no longer needed.
-  for (int i = 0, e = EraseInstr.size(); i < e; ++i)
-    MBB->erase(EraseInstr[i]);
-
-  // Replace all of the frame indexes that we have relocated with new
-  // fixed object frame indexes.
-  replaceFrameIndexes(MF, FrameRelocate);
-}
-
-static void interruptFrameLayout(MachineFunction &MF) {
-  const Function *F = MF.getFunction();
-  CallingConv::ID CallConv = F->getCallingConv();
-
-  // If this function is not using either the interrupt_handler
-  // calling convention or the save_volatiles calling convention
-  // then we don't need to do any additional frame layout.
-  if (CallConv != CallingConv::MBLAZE_INTR &&
-      CallConv != CallingConv::MBLAZE_SVOL)
-    return;
-
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  const MachineRegisterInfo &MRI = MF.getRegInfo();
-  const MBlazeInstrInfo &TII =
-    *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
-
-  // Determine if the calling convention is the interrupt_handler
-  // calling convention. Some pieces of the prologue and epilogue
-  // only need to be emitted if we are lowering an interrupt handler.
-  bool isIntr = CallConv == CallingConv::MBLAZE_INTR;
-
-  // Determine where to put prologue and epilogue additions
-  MachineBasicBlock &MENT   = MF.front();
-  MachineBasicBlock &MEXT   = MF.back();
-
-  MachineBasicBlock::iterator MENTI = MENT.begin();
-  MachineBasicBlock::iterator MEXTI = prior(MEXT.end());
-
-  DebugLoc ENTDL = MENTI != MENT.end() ? MENTI->getDebugLoc() : DebugLoc();
-  DebugLoc EXTDL = MEXTI != MEXT.end() ? MEXTI->getDebugLoc() : DebugLoc();
-
-  // Store the frame indexes generated during prologue additions for use
-  // when we are generating the epilogue additions.
-  SmallVector<int, 10> VFI;
-
-  // Build the prologue SWI for R3 - R12 if needed. Note that R11 must
-  // always have a SWI because it is used when processing RMSR.
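-  // For illustration, an interrupt_handler prologue/epilogue produced by
-  // the code below looks roughly like this (a sketch; the frame-index
-  // offsets are hypothetical):
-  //
-  //   swi  r3,  r1, <fi>    ; one SWI per used register in R3..R12
-  //   ...
-  //   swi  r17, r1, <fi>
-  //   swi  r18, r1, <fi>
-  //   mfs  r11, rmsr        ; save the machine status register
-  //   swi  r11, r1, <fi>
-  //   ... function body ...
-  //   lwi  r11, r1, <fi>    ; restore RMSR through R11
-  //   mts  rmsr, r11
-  //   lwi  r18, r1, <fi>
-  //   lwi  r17, r1, <fi>
-  //   ...                   ; one LWI per saved register in R12..R3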
-  for (unsigned r = MBlaze::R3; r <= MBlaze::R12; ++r) {
-    if (!MRI.isPhysRegUsed(r) && !(isIntr && r == MBlaze::R11)) continue;
-
-    int FI = MFI->CreateStackObject(4,4,false,false);
-    VFI.push_back(FI);
-
-    BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), r)
-      .addFrameIndex(FI).addImm(0);
-  }
-
-  // Build the prologue SWI for R17, R18
-  int R17FI = MFI->CreateStackObject(4,4,false,false);
-  int R18FI = MFI->CreateStackObject(4,4,false,false);
-
-  BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R17)
-    .addFrameIndex(R17FI).addImm(0);
-
-  BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R18)
-    .addFrameIndex(R18FI).addImm(0);
-
-  // Build the prologue SWI and the epilogue LWI for RMSR if needed
-  if (isIntr) {
-    int MSRFI = MFI->CreateStackObject(4,4,false,false);
-    BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::MFS), MBlaze::R11)
-      .addReg(MBlaze::RMSR);
-    BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R11)
-      .addFrameIndex(MSRFI).addImm(0);
-
-    BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R11)
-      .addFrameIndex(MSRFI).addImm(0);
-    BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::MTS), MBlaze::RMSR)
-      .addReg(MBlaze::R11);
-  }
-
-  // Build the epilogue LWI for R17, R18
-  BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R18)
-    .addFrameIndex(R18FI).addImm(0);
-
-  BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R17)
-    .addFrameIndex(R17FI).addImm(0);
-
-  // Build the epilogue LWI for R3 - R12 if needed
-  for (unsigned r = MBlaze::R12, i = VFI.size(); r >= MBlaze::R3; --r) {
-    if (!MRI.isPhysRegUsed(r)) continue;
-    BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), r)
-      .addFrameIndex(VFI[--i]).addImm(0);
-  }
-}
-
-static void determineFrameLayout(MachineFunction &MF) {
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
-
-  // Replace the dummy '0' SPOffset by the negative offsets, as explained in
-  // LowerFORMAL_ARGUMENTS. Leaving '0' for now is necessary to avoid
-  // the approach done by calculateFrameObjectOffsets to the stack frame.
-  MBlazeFI->adjustLoadArgsFI(MFI);
-  MBlazeFI->adjustStoreVarArgsFI(MFI);
-
-  // Get the number of bytes to allocate from the FrameInfo
-  unsigned FrameSize = MFI->getStackSize();
-  DEBUG(dbgs() << "Original Frame Size: " << FrameSize << "\n" );
-
-  // Get the alignments provided by the target, and the maximum alignment
-  // (if any) of the fixed frame objects.
-  // unsigned MaxAlign = MFI->getMaxAlignment();
-  unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
-  unsigned AlignMask = TargetAlign - 1;
-
-  // Make sure the frame is aligned, e.g. with a 4-byte alignment a frame
-  // size of 22 is rounded up to 24: (22 + 3) & ~3 == 24.
-  FrameSize = (FrameSize + AlignMask) & ~AlignMask;
-  MFI->setStackSize(FrameSize);
-  DEBUG(dbgs() << "Aligned Frame Size: " << FrameSize << "\n" );
-}
-
-int MBlazeFrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI)
-  const {
-  const MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
-  if (MBlazeFI->hasReplacement(FI))
-    FI = MBlazeFI->getReplacement(FI);
-  return TargetFrameLowering::getFrameIndexOffset(MF,FI);
-}
-
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-bool MBlazeFrameLowering::hasFP(const MachineFunction &MF) const { - const MachineFrameInfo *MFI = MF.getFrameInfo(); - return MF.getTarget().Options.DisableFramePointerElim(MF) || - MFI->hasVarSizedObjects(); -} - -void MBlazeFrameLowering::emitPrologue(MachineFunction &MF) const { - MachineBasicBlock &MBB = MF.front(); - MachineFrameInfo *MFI = MF.getFrameInfo(); - const MBlazeInstrInfo &TII = - *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo()); - MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>(); - MachineBasicBlock::iterator MBBI = MBB.begin(); - DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); - - CallingConv::ID CallConv = MF.getFunction()->getCallingConv(); - bool requiresRA = CallConv == CallingConv::MBLAZE_INTR; - - // Determine the correct frame layout - determineFrameLayout(MF); - - // Get the number of bytes to allocate from the FrameInfo. - unsigned StackSize = MFI->getStackSize(); - - // No need to allocate space on the stack. - if (StackSize == 0 && !MFI->adjustsStack() && !requiresRA) return; - - int FPOffset = MBlazeFI->getFPStackOffset(); - int RAOffset = MBlazeFI->getRAStackOffset(); - - // Adjust stack : addi R1, R1, -imm - BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADDIK), MBlaze::R1) - .addReg(MBlaze::R1).addImm(-StackSize); - - // swi R15, R1, stack_loc - if (MFI->adjustsStack() || requiresRA) { - BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI)) - .addReg(MBlaze::R15).addReg(MBlaze::R1).addImm(RAOffset); - } - - if (hasFP(MF)) { - // swi R19, R1, stack_loc - BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI)) - .addReg(MBlaze::R19).addReg(MBlaze::R1).addImm(FPOffset); - - // add R19, R1, R0 - BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADD), MBlaze::R19) - .addReg(MBlaze::R1).addReg(MBlaze::R0); - } -} - -void MBlazeFrameLowering::emitEpilogue(MachineFunction &MF, - MachineBasicBlock &MBB) const { - MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); - MachineFrameInfo *MFI = MF.getFrameInfo(); - MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>(); - const MBlazeInstrInfo &TII = - *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo()); - - DebugLoc dl = MBBI->getDebugLoc(); - - CallingConv::ID CallConv = MF.getFunction()->getCallingConv(); - bool requiresRA = CallConv == CallingConv::MBLAZE_INTR; - - // Get the FI's where RA and FP are saved. 
- int FPOffset = MBlazeFI->getFPStackOffset(); - int RAOffset = MBlazeFI->getRAStackOffset(); - - if (hasFP(MF)) { - // add R1, R19, R0 - BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADD), MBlaze::R1) - .addReg(MBlaze::R19).addReg(MBlaze::R0); - - // lwi R19, R1, stack_loc - BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R19) - .addReg(MBlaze::R1).addImm(FPOffset); - } - - // lwi R15, R1, stack_loc - if (MFI->adjustsStack() || requiresRA) { - BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R15) - .addReg(MBlaze::R1).addImm(RAOffset); - } - - // Get the number of bytes from FrameInfo - int StackSize = (int) MFI->getStackSize(); - - // addi R1, R1, imm - if (StackSize) { - BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADDIK), MBlaze::R1) - .addReg(MBlaze::R1).addImm(StackSize); - } -} - -// Eliminate ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudo instructions -void MBlazeFrameLowering:: -eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, - MachineBasicBlock::iterator I) const { - const MBlazeInstrInfo &TII = - *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo()); - if (!hasReservedCallFrame(MF)) { - // If we have a frame pointer, turn the adjcallstackup instruction into a - // 'addi r1, r1, -<amt>' and the adjcallstackdown instruction into - // 'addi r1, r1, <amt>' - MachineInstr *Old = I; - int Amount = Old->getOperand(0).getImm() + 4; - if (Amount != 0) { - // We need to keep the stack aligned properly. To do this, we round the - // amount of space needed for the outgoing arguments up to the next - // alignment boundary. - unsigned Align = getStackAlignment(); - Amount = (Amount+Align-1)/Align*Align; - - MachineInstr *New; - if (Old->getOpcode() == MBlaze::ADJCALLSTACKDOWN) { - New = BuildMI(MF,Old->getDebugLoc(), TII.get(MBlaze::ADDIK),MBlaze::R1) - .addReg(MBlaze::R1).addImm(-Amount); - } else { - assert(Old->getOpcode() == MBlaze::ADJCALLSTACKUP); - New = BuildMI(MF,Old->getDebugLoc(), TII.get(MBlaze::ADDIK),MBlaze::R1) - .addReg(MBlaze::R1).addImm(Amount); - } - - // Replace the pseudo instruction with a new instruction... - MBB.insert(I, New); - } - } - - // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions. - MBB.erase(I); -} - - -void MBlazeFrameLowering:: -processFunctionBeforeCalleeSavedScan(MachineFunction &MF, - RegScavenger *RS) const { - MachineFrameInfo *MFI = MF.getFrameInfo(); - MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>(); - CallingConv::ID CallConv = MF.getFunction()->getCallingConv(); - bool requiresRA = CallConv == CallingConv::MBLAZE_INTR; - - if (MFI->adjustsStack() || requiresRA) { - MBlazeFI->setRAStackOffset(0); - MFI->CreateFixedObject(4,0,true); - } - - if (hasFP(MF)) { - MBlazeFI->setFPStackOffset(4); - MFI->CreateFixedObject(4,4,true); - } - - interruptFrameLayout(MF); - analyzeFrameIndexes(MF); -} diff --git a/lib/Target/MBlaze/MBlazeFrameLowering.h b/lib/Target/MBlaze/MBlazeFrameLowering.h deleted file mode 100644 index f4228c5..0000000 --- a/lib/Target/MBlaze/MBlazeFrameLowering.h +++ /dev/null @@ -1,56 +0,0 @@ -//=- MBlazeFrameLowering.h - Define frame lowering for MicroBlaze -*- C++ -*-=// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// -// -// -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZE_FRAMEINFO_H -#define MBLAZE_FRAMEINFO_H - -#include "MBlaze.h" -#include "llvm/Target/TargetFrameLowering.h" - -namespace llvm { -class MBlazeSubtarget; - -class MBlazeFrameLowering : public TargetFrameLowering { -protected: - const MBlazeSubtarget &STI; - -public: - explicit MBlazeFrameLowering(const MBlazeSubtarget &sti) - : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 4, 0), STI(sti) { - } - - /// targetHandlesStackFrameRounding - Returns true if the target is - /// responsible for rounding up the stack frame (probably at emitPrologue - /// time). - bool targetHandlesStackFrameRounding() const { return true; } - - /// emitProlog/emitEpilog - These methods insert prolog and epilog code into - /// the function. - void emitPrologue(MachineFunction &MF) const; - void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const; - - void eliminateCallFramePseudoInstr(MachineFunction &MF, - MachineBasicBlock &MBB, - MachineBasicBlock::iterator I) const; - - bool hasFP(const MachineFunction &MF) const; - - int getFrameIndexOffset(const MachineFunction &MF, int FI) const; - - virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, - RegScavenger *RS) const; -}; - -} // End llvm namespace - -#endif diff --git a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp b/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp deleted file mode 100644 index 9d6dfe6..0000000 --- a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp +++ /dev/null @@ -1,277 +0,0 @@ -//===-- MBlazeISelDAGToDAG.cpp - A dag to dag inst selector for MBlaze ----===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines an instruction selector for the MBlaze target. -// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "mblaze-isel" -#include "MBlaze.h" -#include "MBlazeMachineFunction.h" -#include "MBlazeRegisterInfo.h" -#include "MBlazeSubtarget.h" -#include "MBlazeTargetMachine.h" -#include "llvm/CodeGen/MachineConstantPool.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/SelectionDAGISel.h" -#include "llvm/IR/GlobalValue.h" -#include "llvm/IR/Instructions.h" -#include "llvm/IR/Intrinsics.h" -#include "llvm/IR/Type.h" -#include "llvm/Support/CFG.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetMachine.h" -using namespace llvm; - -//===----------------------------------------------------------------------===// -// Instruction Selector Implementation -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// MBlazeDAGToDAGISel - MBlaze specific code to select MBlaze machine -// instructions for SelectionDAG operations. 
-//===----------------------------------------------------------------------===// -namespace { - -class MBlazeDAGToDAGISel : public SelectionDAGISel { - - /// TM - Keep a reference to MBlazeTargetMachine. - MBlazeTargetMachine &TM; - - /// Subtarget - Keep a pointer to the MBlazeSubtarget around so that we can - /// make the right decision when generating code for different targets. - const MBlazeSubtarget &Subtarget; - -public: - explicit MBlazeDAGToDAGISel(MBlazeTargetMachine &tm) : - SelectionDAGISel(tm), - TM(tm), Subtarget(tm.getSubtarget<MBlazeSubtarget>()) {} - - // Pass Name - virtual const char *getPassName() const { - return "MBlaze DAG->DAG Pattern Instruction Selection"; - } -private: - // Include the pieces autogenerated from the target description. - #include "MBlazeGenDAGISel.inc" - - /// getTargetMachine - Return a reference to the TargetMachine, casted - /// to the target-specific type. - const MBlazeTargetMachine &getTargetMachine() { - return static_cast<const MBlazeTargetMachine &>(TM); - } - - /// getInstrInfo - Return a reference to the TargetInstrInfo, casted - /// to the target-specific type. - const MBlazeInstrInfo *getInstrInfo() { - return getTargetMachine().getInstrInfo(); - } - - SDNode *getGlobalBaseReg(); - SDNode *Select(SDNode *N); - - // Address Selection - bool SelectAddrRegReg(SDValue N, SDValue &Base, SDValue &Index); - bool SelectAddrRegImm(SDValue N, SDValue &Disp, SDValue &Base); - - // getI32Imm - Return a target constant with the specified value, of type i32. - inline SDValue getI32Imm(unsigned Imm) { - return CurDAG->getTargetConstant(Imm, MVT::i32); - } -}; - -} - -/// isIntS32Immediate - This method tests to see if the node is either a 32-bit -/// or 64-bit immediate, and if the value can be accurately represented as a -/// sign extension from a 32-bit value. If so, this returns true and the -/// immediate. -static bool isIntS32Immediate(SDNode *N, int32_t &Imm) { - unsigned Opc = N->getOpcode(); - if (Opc != ISD::Constant) - return false; - - Imm = (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); - if (N->getValueType(0) == MVT::i32) - return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); - else - return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); -} - -static bool isIntS32Immediate(SDValue Op, int32_t &Imm) { - return isIntS32Immediate(Op.getNode(), Imm); -} - - -/// SelectAddressRegReg - Given the specified addressed, check to see if it -/// can be represented as an indexed [r+r] operation. Returns false if it -/// can be more efficiently represented with [r+imm]. -bool MBlazeDAGToDAGISel:: -SelectAddrRegReg(SDValue N, SDValue &Base, SDValue &Index) { - if (N.getOpcode() == ISD::FrameIndex) return false; - if (N.getOpcode() == ISD::TargetExternalSymbol || - N.getOpcode() == ISD::TargetGlobalAddress) - return false; // direct calls. - - int32_t imm = 0; - if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) { - if (isIntS32Immediate(N.getOperand(1), imm)) - return false; // r+i - - if (N.getOperand(0).getOpcode() == ISD::TargetJumpTable || - N.getOperand(1).getOpcode() == ISD::TargetJumpTable) - return false; // jump tables. - - Base = N.getOperand(0); - Index = N.getOperand(1); - return true; - } - - return false; -} - -/// Returns true if the address N can be represented by a base register plus -/// a signed 32-bit displacement [r+imm], and if it is not better -/// represented as reg+reg. 
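-/// For illustration (a sketch; the registers, indexes, and constants are
-/// hypothetical):
-///
-///   (add r3, 16)    -> Base = r3, Disp = 16          ; [r+imm]
-///   (add r3, r4)    -> rejected here; handled by SelectAddrRegReg  ; [r+r]
-///   FrameIndex #2   -> Base = TargetFrameIndex(2), Disp = 0
-///   Constant 0x1000 -> Base = R0, Disp = 0x1000      ; absolute address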
-bool MBlazeDAGToDAGISel:: -SelectAddrRegImm(SDValue N, SDValue &Base, SDValue &Disp) { - // If this can be more profitably realized as r+r, fail. - if (SelectAddrRegReg(N, Base, Disp)) - return false; - - if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) { - int32_t imm = 0; - if (isIntS32Immediate(N.getOperand(1), imm)) { - Disp = CurDAG->getTargetConstant(imm, MVT::i32); - if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { - Base = CurDAG->getTargetFrameIndex(FI->getIndex(), N.getValueType()); - } else { - Base = N.getOperand(0); - } - return true; // [r+i] - } - } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { - // Loading from a constant address. - uint32_t Imm = CN->getZExtValue(); - Disp = CurDAG->getTargetConstant(Imm, CN->getValueType(0)); - Base = CurDAG->getRegister(MBlaze::R0, CN->getValueType(0)); - return true; - } - - Disp = CurDAG->getTargetConstant(0, TM.getTargetLowering()->getPointerTy()); - if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) - Base = CurDAG->getTargetFrameIndex(FI->getIndex(), N.getValueType()); - else - Base = N; - return true; // [r+0] -} - -/// getGlobalBaseReg - Output the instructions required to put the -/// GOT address into a register. -SDNode *MBlazeDAGToDAGISel::getGlobalBaseReg() { - unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); - return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode(); -} - -/// Select instructions not customized! Used for -/// expanded, promoted and normal instructions -SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) { - unsigned Opcode = Node->getOpcode(); - SDLoc dl(Node); - - // If we have a custom node, we already have selected! - if (Node->isMachineOpcode()) - return NULL; - - /// - // Instruction Selection not handled by the auto-generated - // tablegen selection should be handled here. - /// - switch (Opcode) { - default: break; - - // Get target GOT address. - case ISD::GLOBAL_OFFSET_TABLE: - return getGlobalBaseReg(); - - case ISD::FrameIndex: { - SDValue imm = CurDAG->getTargetConstant(0, MVT::i32); - int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex(); - EVT VT = Node->getValueType(0); - SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT); - unsigned Opc = MBlaze::ADDIK; - if (Node->hasOneUse()) - return CurDAG->SelectNodeTo(Node, Opc, VT, TFI, imm); - return CurDAG->getMachineNode(Opc, dl, VT, TFI, imm); - } - - - /// Handle direct and indirect calls when using PIC. On PIC, when - /// GOT is smaller than about 64k (small code) the GA target is - /// loaded with only one instruction. Otherwise GA's target must - /// be loaded with 3 instructions. 
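-  /// For a small-code-model PIC direct call, the nodes built below
-  /// correspond roughly to this sequence (a sketch, not exact assembly):
-  ///
-  ///   lw    r20, r15, <GOT slot of callee>   ; load the target from the GOT
-  ///   brlid r15, r20                         ; branch and link via R20
-  ///
-  /// For an indirect call the GOT load is skipped and the callee value is
-  /// copied into R20 directly.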
- case MBlazeISD::JmpLink: { - if (TM.getRelocationModel() == Reloc::PIC_) { - SDValue Chain = Node->getOperand(0); - SDValue Callee = Node->getOperand(1); - SDValue R20Reg = CurDAG->getRegister(MBlaze::R20, MVT::i32); - SDValue InFlag(0, 0); - - if ((isa<GlobalAddressSDNode>(Callee)) || - (isa<ExternalSymbolSDNode>(Callee))) - { - /// Direct call for global addresses and external symbols - SDValue GPReg = CurDAG->getRegister(MBlaze::R15, MVT::i32); - - // Use load to get GOT target - SDValue Ops[] = { Callee, GPReg, Chain }; - SDValue Load = SDValue(CurDAG->getMachineNode(MBlaze::LW, dl, - MVT::i32, MVT::Other, Ops), 0); - Chain = Load.getValue(1); - - // Call target must be on T9 - Chain = CurDAG->getCopyToReg(Chain, dl, R20Reg, Load, InFlag); - } else - /// Indirect call - Chain = CurDAG->getCopyToReg(Chain, dl, R20Reg, Callee, InFlag); - - // Emit Jump and Link Register - SDNode *ResNode = CurDAG->getMachineNode(MBlaze::BRLID, dl, MVT::Other, - MVT::Glue, R20Reg, Chain); - Chain = SDValue(ResNode, 0); - InFlag = SDValue(ResNode, 1); - ReplaceUses(SDValue(Node, 0), Chain); - ReplaceUses(SDValue(Node, 1), InFlag); - return ResNode; - } - } - } - - // Select the default instruction - SDNode *ResNode = SelectCode(Node); - - DEBUG(errs() << "=> "); - if (ResNode == NULL || ResNode == Node) - DEBUG(Node->dump(CurDAG)); - else - DEBUG(ResNode->dump(CurDAG)); - DEBUG(errs() << "\n"); - return ResNode; -} - -/// createMBlazeISelDag - This pass converts a legalized DAG into a -/// MBlaze-specific DAG, ready for instruction scheduling. -FunctionPass *llvm::createMBlazeISelDag(MBlazeTargetMachine &TM) { - return new MBlazeDAGToDAGISel(TM); -} diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp deleted file mode 100644 index e07ceec..0000000 --- a/lib/Target/MBlaze/MBlazeISelLowering.cpp +++ /dev/null @@ -1,1155 +0,0 @@ -//===-- MBlazeISelLowering.cpp - MBlaze DAG Lowering Implementation -------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the interfaces that MBlaze uses to lower LLVM code into a -// selection DAG. 
-// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "mblaze-lower" -#include "MBlazeISelLowering.h" -#include "MBlazeMachineFunction.h" -#include "MBlazeSubtarget.h" -#include "MBlazeTargetMachine.h" -#include "MBlazeTargetObjectFile.h" -#include "llvm/CodeGen/CallingConvLower.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/SelectionDAGISel.h" -#include "llvm/CodeGen/ValueTypes.h" -#include "llvm/IR/CallingConv.h" -#include "llvm/IR/DerivedTypes.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/GlobalVariable.h" -#include "llvm/IR/Intrinsics.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -using namespace llvm; - -static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT, - CCValAssign::LocInfo &LocInfo, - ISD::ArgFlagsTy &ArgFlags, - CCState &State); - -const char *MBlazeTargetLowering::getTargetNodeName(unsigned Opcode) const { - switch (Opcode) { - case MBlazeISD::JmpLink : return "MBlazeISD::JmpLink"; - case MBlazeISD::GPRel : return "MBlazeISD::GPRel"; - case MBlazeISD::Wrap : return "MBlazeISD::Wrap"; - case MBlazeISD::ICmp : return "MBlazeISD::ICmp"; - case MBlazeISD::Ret : return "MBlazeISD::Ret"; - case MBlazeISD::Select_CC : return "MBlazeISD::Select_CC"; - default : return NULL; - } -} - -MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM) - : TargetLowering(TM, new MBlazeTargetObjectFile()) { - Subtarget = &TM.getSubtarget<MBlazeSubtarget>(); - - // MBlaze does not have i1 type, so use i32 for - // setcc operations results (slt, sgt, ...). - setBooleanContents(ZeroOrOneBooleanContent); - setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? 
-
-  // Set up the register classes
-  addRegisterClass(MVT::i32, &MBlaze::GPRRegClass);
-  if (Subtarget->hasFPU()) {
-    addRegisterClass(MVT::f32, &MBlaze::GPRRegClass);
-    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
-  }
-
-  // Floating point operations which are not supported
-  setOperationAction(ISD::FREM,       MVT::f32, Expand);
-  setOperationAction(ISD::FMA,        MVT::f32, Expand);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Expand);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Expand);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
-  setOperationAction(ISD::FP_ROUND,   MVT::f32, Expand);
-  setOperationAction(ISD::FP_ROUND,   MVT::f64, Expand);
-  setOperationAction(ISD::FCOPYSIGN,  MVT::f32, Expand);
-  setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
-  setOperationAction(ISD::FSIN,       MVT::f32, Expand);
-  setOperationAction(ISD::FCOS,       MVT::f32, Expand);
-  setOperationAction(ISD::FSINCOS,    MVT::f32, Expand);
-  setOperationAction(ISD::FPOWI,      MVT::f32, Expand);
-  setOperationAction(ISD::FPOW,       MVT::f32, Expand);
-  setOperationAction(ISD::FLOG,       MVT::f32, Expand);
-  setOperationAction(ISD::FLOG2,      MVT::f32, Expand);
-  setOperationAction(ISD::FLOG10,     MVT::f32, Expand);
-  setOperationAction(ISD::FEXP,       MVT::f32, Expand);
-
-  // Load extended operations for i1 types must be promoted
-  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-
-  // Sign extended loads must be expanded
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i8,  Expand);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
-
-  // MBlaze has no REM or DIVREM operations.
-  setOperationAction(ISD::UREM,    MVT::i32, Expand);
-  setOperationAction(ISD::SREM,    MVT::i32, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
-
-  // If the processor doesn't support multiply then expand it
-  if (!Subtarget->hasMul()) {
-    setOperationAction(ISD::MUL, MVT::i32, Expand);
-  }
-
-  // If the processor doesn't support 64-bit multiply then expand
-  if (!Subtarget->hasMul() || !Subtarget->hasMul64()) {
-    setOperationAction(ISD::MULHS, MVT::i32, Expand);
-    setOperationAction(ISD::MULHS, MVT::i64, Expand);
-    setOperationAction(ISD::MULHU, MVT::i32, Expand);
-    setOperationAction(ISD::MULHU, MVT::i64, Expand);
-  }
-
-  // If the processor doesn't support division then expand
-  if (!Subtarget->hasDiv()) {
-    setOperationAction(ISD::UDIV, MVT::i32, Expand);
-    setOperationAction(ISD::SDIV, MVT::i32, Expand);
-  }
-
-  // Expand unsupported conversions
-  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
-  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
-
-  // Expand SELECT_CC
-  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
-
-  // MBlaze doesn't have MUL_LOHI
-  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
-  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-
-  // Used by legalize types to correctly generate the setcc result.
-  // Without this, every float setcc comes with an AND/OR with the result,
-  // which we don't want, since the fpcmp result goes to a flag register,
-  // which is used implicitly by brcond and select operations.
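-  // For illustration: after these promotions a boolean produced by a
-  // comparison lives in a full i32 register, e.g. (a sketch)
-  //
-  //   %c:i1 = setcc %a, %b, setlt   -->   %c:i32 = setcc %a, %b, setlt
-  //
-  // so brcond/select can consume the i32 flag value directly instead of
-  // wrapping the i1 result in AND/OR masking.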
- AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); - AddPromotedToType(ISD::SELECT, MVT::i1, MVT::i32); - AddPromotedToType(ISD::SELECT_CC, MVT::i1, MVT::i32); - - // MBlaze Custom Operations - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); - - // Variable Argument support - setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - - - // Operations not directly supported by MBlaze. - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BR_CC, MVT::f32, Expand); - setOperationAction(ISD::BR_CC, MVT::i32, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - setOperationAction(ISD::ROTL, MVT::i32, Expand); - setOperationAction(ISD::ROTR, MVT::i32, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::CTLZ, MVT::i32, Expand); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); - setOperationAction(ISD::CTTZ, MVT::i32, Expand); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); - setOperationAction(ISD::CTPOP, MVT::i32, Expand); - setOperationAction(ISD::BSWAP, MVT::i32, Expand); - - // We don't have line number support yet. - setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); - - // Use the default for now - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); - - // MBlaze doesn't have extending float->double load/store - setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); - setTruncStoreAction(MVT::f64, MVT::f32, Expand); - - setMinFunctionAlignment(2); - - setStackPointerRegisterToSaveRestore(MBlaze::R1); - computeRegisterProperties(); -} - -EVT MBlazeTargetLowering::getSetCCResultType(LLVMContext &, EVT) const { - return MVT::i32; -} - -SDValue MBlazeTargetLowering::LowerOperation(SDValue Op, - SelectionDAG &DAG) const { - switch (Op.getOpcode()) - { - case ISD::ConstantPool: return LowerConstantPool(Op, DAG); - case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); - case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); - case ISD::JumpTable: return LowerJumpTable(Op, DAG); - case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); - case ISD::VASTART: return LowerVASTART(Op, DAG); - } - return SDValue(); -} - -//===----------------------------------------------------------------------===// -// Lower helper functions -//===----------------------------------------------------------------------===// -MachineBasicBlock* -MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, - MachineBasicBlock *MBB) - const { - switch (MI->getOpcode()) { - default: llvm_unreachable("Unexpected instr type to insert"); - - case MBlaze::ShiftRL: - case MBlaze::ShiftRA: - case MBlaze::ShiftL: - return EmitCustomShift(MI, MBB); - - case MBlaze::Select_FCC: - case MBlaze::Select_CC: - return EmitCustomSelect(MI, MBB); - - case MBlaze::CAS32: - case MBlaze::SWP32: - case MBlaze::LAA32: - case MBlaze::LAS32: - case MBlaze::LAD32: - case MBlaze::LAO32: - case MBlaze::LAX32: - case MBlaze::LAN32: - 
return EmitCustomAtomic(MI, MBB); - - case MBlaze::MEMBARRIER: - // The Microblaze does not need memory barriers. Just delete the pseudo - // instruction and finish. - MI->eraseFromParent(); - return MBB; - } -} - -MachineBasicBlock* -MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI, - MachineBasicBlock *MBB) const { - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); - DebugLoc dl = MI->getDebugLoc(); - - // To "insert" a shift left instruction, we actually have to insert a - // simple loop. The incoming instruction knows the destination vreg to - // set, the source vreg to operate over and the shift amount. - const BasicBlock *LLVM_BB = MBB->getBasicBlock(); - MachineFunction::iterator It = MBB; - ++It; - - // start: - // andi samt, samt, 31 - // beqid samt, finish - // add dst, src, r0 - // loop: - // addik samt, samt, -1 - // sra dst, dst - // bneid samt, loop - // nop - // finish: - MachineFunction *F = MBB->getParent(); - MachineRegisterInfo &R = F->getRegInfo(); - MachineBasicBlock *loop = F->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *finish = F->CreateMachineBasicBlock(LLVM_BB); - F->insert(It, loop); - F->insert(It, finish); - - // Update machine-CFG edges by transferring adding all successors and - // remaining instructions from the current block to the new block which - // will contain the Phi node for the select. - finish->splice(finish->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), - MBB->end()); - finish->transferSuccessorsAndUpdatePHIs(MBB); - - // Add the true and fallthrough blocks as its successors. - MBB->addSuccessor(loop); - MBB->addSuccessor(finish); - - // Next, add the finish block as a successor of the loop block - loop->addSuccessor(finish); - loop->addSuccessor(loop); - - unsigned IAMT = R.createVirtualRegister(&MBlaze::GPRRegClass); - BuildMI(MBB, dl, TII->get(MBlaze::ANDI), IAMT) - .addReg(MI->getOperand(2).getReg()) - .addImm(31); - - unsigned IVAL = R.createVirtualRegister(&MBlaze::GPRRegClass); - BuildMI(MBB, dl, TII->get(MBlaze::ADDIK), IVAL) - .addReg(MI->getOperand(1).getReg()) - .addImm(0); - - BuildMI(MBB, dl, TII->get(MBlaze::BEQID)) - .addReg(IAMT) - .addMBB(finish); - - unsigned DST = R.createVirtualRegister(&MBlaze::GPRRegClass); - unsigned NDST = R.createVirtualRegister(&MBlaze::GPRRegClass); - BuildMI(loop, dl, TII->get(MBlaze::PHI), DST) - .addReg(IVAL).addMBB(MBB) - .addReg(NDST).addMBB(loop); - - unsigned SAMT = R.createVirtualRegister(&MBlaze::GPRRegClass); - unsigned NAMT = R.createVirtualRegister(&MBlaze::GPRRegClass); - BuildMI(loop, dl, TII->get(MBlaze::PHI), SAMT) - .addReg(IAMT).addMBB(MBB) - .addReg(NAMT).addMBB(loop); - - if (MI->getOpcode() == MBlaze::ShiftL) - BuildMI(loop, dl, TII->get(MBlaze::ADD), NDST).addReg(DST).addReg(DST); - else if (MI->getOpcode() == MBlaze::ShiftRA) - BuildMI(loop, dl, TII->get(MBlaze::SRA), NDST).addReg(DST); - else if (MI->getOpcode() == MBlaze::ShiftRL) - BuildMI(loop, dl, TII->get(MBlaze::SRL), NDST).addReg(DST); - else - llvm_unreachable("Cannot lower unknown shift instruction"); - - BuildMI(loop, dl, TII->get(MBlaze::ADDIK), NAMT) - .addReg(SAMT) - .addImm(-1); - - BuildMI(loop, dl, TII->get(MBlaze::BNEID)) - .addReg(NAMT) - .addMBB(loop); - - BuildMI(*finish, finish->begin(), dl, - TII->get(MBlaze::PHI), MI->getOperand(0).getReg()) - .addReg(IVAL).addMBB(MBB) - .addReg(NDST).addMBB(loop); - - // The pseudo instruction is no longer needed so remove it - MI->eraseFromParent(); - return finish; -} - -MachineBasicBlock* 
-MBlazeTargetLowering::EmitCustomSelect(MachineInstr *MI,
-                                       MachineBasicBlock *MBB) const {
-  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-  DebugLoc dl = MI->getDebugLoc();
-
-  // To "insert" a SELECT_CC instruction, we actually have to insert the
-  // diamond control-flow pattern.  The incoming instruction knows the
-  // destination vreg to set, the condition code register to branch on, the
-  // true/false values to select between, and a branch opcode to use.
-  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
-  MachineFunction::iterator It = MBB;
-  ++It;
-
-  //  thisMBB:
-  //  ...
-  //   TrueVal = ...
-  //   setcc r1, r2, r3
-  //   bNE   r1, r0, copy1MBB
-  //   fallthrough --> copy0MBB
-  MachineFunction *F = MBB->getParent();
-  MachineBasicBlock *flsBB = F->CreateMachineBasicBlock(LLVM_BB);
-  MachineBasicBlock *dneBB = F->CreateMachineBasicBlock(LLVM_BB);
-
-  unsigned Opc;
-  switch (MI->getOperand(4).getImm()) {
-  default: llvm_unreachable("Unknown branch condition");
-  case MBlazeCC::EQ: Opc = MBlaze::BEQID; break;
-  case MBlazeCC::NE: Opc = MBlaze::BNEID; break;
-  case MBlazeCC::GT: Opc = MBlaze::BGTID; break;
-  case MBlazeCC::LT: Opc = MBlaze::BLTID; break;
-  case MBlazeCC::GE: Opc = MBlaze::BGEID; break;
-  case MBlazeCC::LE: Opc = MBlaze::BLEID; break;
-  }
-
-  F->insert(It, flsBB);
-  F->insert(It, dneBB);
-
-  // Transfer the remainder of MBB and its successor edges to dneBB.
-  dneBB->splice(dneBB->begin(), MBB,
-                llvm::next(MachineBasicBlock::iterator(MI)),
-                MBB->end());
-  dneBB->transferSuccessorsAndUpdatePHIs(MBB);
-
-  MBB->addSuccessor(flsBB);
-  MBB->addSuccessor(dneBB);
-  flsBB->addSuccessor(dneBB);
-
-  BuildMI(MBB, dl, TII->get(Opc))
-    .addReg(MI->getOperand(3).getReg())
-    .addMBB(dneBB);
-
-  //  sinkMBB:
-  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
-  //  ...
-  //BuildMI(dneBB, dl, TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
-  //  .addReg(MI->getOperand(1).getReg()).addMBB(flsBB)
-  //  .addReg(MI->getOperand(2).getReg()).addMBB(BB);
-
-  BuildMI(*dneBB, dneBB->begin(), dl,
-          TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
-    .addReg(MI->getOperand(2).getReg()).addMBB(flsBB)
-    .addReg(MI->getOperand(1).getReg()).addMBB(MBB);
-
-  MI->eraseFromParent();   // The pseudo instruction is gone now.
-  return dneBB;
-}
-
-MachineBasicBlock*
-MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
-                                       MachineBasicBlock *MBB) const {
-  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-  DebugLoc dl = MI->getDebugLoc();
-
-  // All atomic instructions on the Microblaze are implemented using the
-  // load-linked / store-conditional style atomic instruction sequences.
-  // Thus, all operations will look something like the following:
-  //
-  //  start:
-  //    lwx     RV, RP, 0
-  //    <do stuff>
-  //    swx     RV, RP, 0
-  //    addic   RC, R0, 0
-  //    bneid   RC, start
-  //
-  //  exit:
-  //
-  // To "insert" such a retry loop, we create the start and exit blocks and
-  // branch back to start whenever the store-conditional fails. The incoming
-  // instruction knows the destination vreg to set, the address vreg to
-  // operate on and, where applicable, the operand value.
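-  // In C-like terms the generated loop implements roughly the following
-  // (a sketch; load_linked/store_conditional are hypothetical helpers
-  // standing in for lwx and swx/addic):
-  //
-  //   int32_t old, upd;
-  //   do {
-  //     old = load_linked(p);               // lwx
-  //     upd = op(old, v);                   // <do stuff>, e.g. old + v
-  //   } while (!store_conditional(p, upd)); // swx, addic, bneid
-  //   return old;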
-  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
-  MachineFunction::iterator It = MBB;
-  ++It;
-
-  MachineFunction *F = MBB->getParent();
-  MachineRegisterInfo &R = F->getRegInfo();
-
-  // Create the start and exit basic blocks for the atomic operation
-  MachineBasicBlock *start = F->CreateMachineBasicBlock(LLVM_BB);
-  MachineBasicBlock *exit = F->CreateMachineBasicBlock(LLVM_BB);
-  F->insert(It, start);
-  F->insert(It, exit);
-
-  // Update machine-CFG edges by transferring all successors and the
-  // remaining instructions from the current block to the new block which
-  // will contain the Phi node for the result.
-  exit->splice(exit->begin(), MBB, llvm::next(MachineBasicBlock::iterator(MI)),
-               MBB->end());
-  exit->transferSuccessorsAndUpdatePHIs(MBB);
-
-  // Add the start block as a successor of the current block.
-  MBB->addSuccessor(start);
-
-  BuildMI(start, dl, TII->get(MBlaze::LWX), MI->getOperand(0).getReg())
-    .addReg(MI->getOperand(1).getReg())
-    .addReg(MBlaze::R0);
-
-  MachineBasicBlock *final = start;
-  unsigned finalReg = 0;
-
-  switch (MI->getOpcode()) {
-  default: llvm_unreachable("Cannot lower unknown atomic instruction!");
-
-  case MBlaze::SWP32:
-    finalReg = MI->getOperand(2).getReg();
-    start->addSuccessor(exit);
-    start->addSuccessor(start);
-    break;
-
-  case MBlaze::LAN32:
-  case MBlaze::LAX32:
-  case MBlaze::LAO32:
-  case MBlaze::LAD32:
-  case MBlaze::LAS32:
-  case MBlaze::LAA32: {
-    unsigned opcode = 0;
-    switch (MI->getOpcode()) {
-    default: llvm_unreachable("Cannot lower unknown atomic load!");
-    case MBlaze::LAA32: opcode = MBlaze::ADDIK; break;
-    case MBlaze::LAS32: opcode = MBlaze::RSUBIK; break;
-    case MBlaze::LAD32: opcode = MBlaze::AND; break;
-    case MBlaze::LAO32: opcode = MBlaze::OR; break;
-    case MBlaze::LAX32: opcode = MBlaze::XOR; break;
-    case MBlaze::LAN32: opcode = MBlaze::AND; break;
-    }
-
-    finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass);
-    start->addSuccessor(exit);
-    start->addSuccessor(start);
-
-    BuildMI(start, dl, TII->get(opcode), finalReg)
-      .addReg(MI->getOperand(0).getReg())
-      .addReg(MI->getOperand(2).getReg());
-
-    if (MI->getOpcode() == MBlaze::LAN32) {
-      unsigned tmp = finalReg;
-      finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass);
-      BuildMI(start, dl, TII->get(MBlaze::XORI), finalReg)
-        .addReg(tmp)
-        .addImm(-1);
-    }
-    break;
-  }
-
-  case MBlaze::CAS32: {
-    finalReg = MI->getOperand(3).getReg();
-    final = F->CreateMachineBasicBlock(LLVM_BB);
-
-    F->insert(It, final);
-    start->addSuccessor(exit);
-    start->addSuccessor(final);
-    final->addSuccessor(exit);
-    final->addSuccessor(start);
-
-    unsigned CMP = R.createVirtualRegister(&MBlaze::GPRRegClass);
-    BuildMI(start, dl, TII->get(MBlaze::CMP), CMP)
-      .addReg(MI->getOperand(0).getReg())
-      .addReg(MI->getOperand(2).getReg());
-
-    BuildMI(start, dl, TII->get(MBlaze::BNEID))
-      .addReg(CMP)
-      .addMBB(exit);
-
-    final->moveAfter(start);
-    exit->moveAfter(final);
-    break;
-  }
-  }
-
-  unsigned CHK = R.createVirtualRegister(&MBlaze::GPRRegClass);
-  BuildMI(final, dl, TII->get(MBlaze::SWX))
-    .addReg(finalReg)
-    .addReg(MI->getOperand(1).getReg())
-    .addReg(MBlaze::R0);
-
-  BuildMI(final, dl, TII->get(MBlaze::ADDIC), CHK)
-    .addReg(MBlaze::R0)
-    .addImm(0);
-
-  BuildMI(final, dl, TII->get(MBlaze::BNEID))
-    .addReg(CHK)
-    .addMBB(start);
-
-  // The pseudo instruction is no longer needed so remove it
MI->eraseFromParent(); - return exit; -} - -//===----------------------------------------------------------------------===// -// Misc Lower Operation implementation -//===----------------------------------------------------------------------===// -// - -SDValue MBlazeTargetLowering::LowerSELECT_CC(SDValue Op, - SelectionDAG &DAG) const { - SDValue LHS = Op.getOperand(0); - SDValue RHS = Op.getOperand(1); - SDValue TrueVal = Op.getOperand(2); - SDValue FalseVal = Op.getOperand(3); - SDLoc dl(Op); - unsigned Opc; - - SDValue CompareFlag; - if (LHS.getValueType() == MVT::i32) { - Opc = MBlazeISD::Select_CC; - CompareFlag = DAG.getNode(MBlazeISD::ICmp, dl, MVT::i32, LHS, RHS) - .getValue(1); - } else { - llvm_unreachable("Cannot lower select_cc with unknown type"); - } - - return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal, - CompareFlag); -} - -SDValue MBlazeTargetLowering:: -LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { - // FIXME there isn't actually debug info here - SDLoc dl(Op); - const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); - SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32); - - return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, GA); -} - -SDValue MBlazeTargetLowering:: -LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { - llvm_unreachable("TLS not implemented for MicroBlaze."); -} - -SDValue MBlazeTargetLowering:: -LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { - SDValue ResNode; - SDValue HiPart; - // FIXME there isn't actually debug info here - SDLoc dl(Op); - - EVT PtrVT = Op.getValueType(); - JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - - SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 0); - return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, JTI); -} - -SDValue MBlazeTargetLowering:: -LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { - SDValue ResNode; - ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); - const Constant *C = N->getConstVal(); - SDLoc dl(Op); - - SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(), - N->getOffset(), 0); - return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, CP); -} - -SDValue MBlazeTargetLowering::LowerVASTART(SDValue Op, - SelectionDAG &DAG) const { - MachineFunction &MF = DAG.getMachineFunction(); - MBlazeFunctionInfo *FuncInfo = MF.getInfo<MBlazeFunctionInfo>(); - - SDLoc dl(Op); - SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), - getPointerTy()); - - // vastart just stores the address of the VarArgsFrameIndex slot into the - // memory location argument. 
- const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); - return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1), - MachinePointerInfo(SV), - false, false, 0); -} - -//===----------------------------------------------------------------------===// -// Calling Convention Implementation -//===----------------------------------------------------------------------===// - -#include "MBlazeGenCallingConv.inc" - -static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT, - CCValAssign::LocInfo &LocInfo, - ISD::ArgFlagsTy &ArgFlags, - CCState &State) { - static const uint16_t ArgRegs[] = { - MBlaze::R5, MBlaze::R6, MBlaze::R7, - MBlaze::R8, MBlaze::R9, MBlaze::R10 - }; - - const unsigned NumArgRegs = array_lengthof(ArgRegs); - unsigned Reg = State.AllocateReg(ArgRegs, NumArgRegs); - if (!Reg) return false; - - unsigned SizeInBytes = ValVT.getSizeInBits() >> 3; - State.AllocateStack(SizeInBytes, SizeInBytes); - State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); - - return true; -} - -//===----------------------------------------------------------------------===// -// Call Calling Convention Implementation -//===----------------------------------------------------------------------===// - -/// LowerCall - functions arguments are copied from virtual regs to -/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. -/// TODO: isVarArg, isTailCall. -SDValue MBlazeTargetLowering:: -LowerCall(TargetLowering::CallLoweringInfo &CLI, - SmallVectorImpl<SDValue> &InVals) const { - SelectionDAG &DAG = CLI.DAG; - SDLoc dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; - SDValue Chain = CLI.Chain; - SDValue Callee = CLI.Callee; - bool &isTailCall = CLI.IsTailCall; - CallingConv::ID CallConv = CLI.CallConv; - bool isVarArg = CLI.IsVarArg; - - // MBlaze does not yet support tail call optimization - isTailCall = false; - - // The MBlaze requires stack slots for arguments passed to var arg - // functions even if they are passed in registers. - bool needsRegArgSlots = isVarArg; - - MachineFunction &MF = DAG.getMachineFunction(); - MachineFrameInfo *MFI = MF.getFrameInfo(); - const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering(); - - // Analyze operands of the call, assigning locations to each operand. - SmallVector<CCValAssign, 16> ArgLocs; - CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); - CCInfo.AnalyzeCallOperands(Outs, CC_MBlaze); - - // Get a count of how many bytes are to be pushed on the stack. - unsigned NumBytes = CCInfo.getNextStackOffset(); - - // Variable argument function calls require a minimum of 24-bytes of stack - if (isVarArg && NumBytes < 24) NumBytes = 24; - - Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), - dl); - - SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; - SmallVector<SDValue, 8> MemOpChains; - - // Walk the register/memloc assignments, inserting copies/loads. - for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { - CCValAssign &VA = ArgLocs[i]; - MVT RegVT = VA.getLocVT(); - SDValue Arg = OutVals[i]; - - // Promote the value if needed. 
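-    // e.g. (a sketch) an i8 argument carrying the signext attribute becomes
-    //   Arg = (sign_extend i8 %x to i32)
-    // so that it occupies a full 32-bit location (see the cases below).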
-    switch (VA.getLocInfo()) {
-    default: llvm_unreachable("Unknown loc info!");
-    case CCValAssign::Full: break;
-    case CCValAssign::SExt:
-      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
-      break;
-    case CCValAssign::ZExt:
-      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
-      break;
-    case CCValAssign::AExt:
-      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
-      break;
-    }
-
-    // Arguments that can be passed in a register are kept in the
-    // RegsToPass vector
-    if (VA.isRegLoc()) {
-      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
-    } else {
-      // Register can't get to this point...
-      assert(VA.isMemLoc());
-
-      // Since we are already passing values on the stack we don't
-      // need to worry about creating additional slots for the
-      // values passed via registers.
-      needsRegArgSlots = false;
-
-      // Create the frame index object for this incoming parameter
-      unsigned ArgSize = VA.getValVT().getSizeInBits()/8;
-      unsigned StackLoc = VA.getLocMemOffset() + 4;
-      int FI = MFI->CreateFixedObject(ArgSize, StackLoc, true);
-
-      SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
-
-      // emit ISD::STORE which stores the
-      // parameter value to a stack location
-      MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
-                                         MachinePointerInfo(),
-                                         false, false, 0));
-    }
-  }
-
-  // If we need to reserve stack space for the arguments passed via registers
-  // then create a fixed stack object at the beginning of the stack.
-  if (needsRegArgSlots && TFI.hasReservedCallFrame(MF))
-    MFI->CreateFixedObject(28,0,true);
-
-  // Transform all store nodes into one single node because all store
-  // nodes are independent of each other.
-  if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
-                        &MemOpChains[0], MemOpChains.size());
-
-  // Build a sequence of copy-to-reg nodes chained together with token
-  // chain and flag operands which copy the outgoing args into registers.
-  // The InFlag is necessary since all emitted instructions must be
-  // stuck together.
-  SDValue InFlag;
-  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
-    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
-                             RegsToPass[i].second, InFlag);
-    InFlag = Chain.getValue(1);
-  }
-
-  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
-  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
-  // node so that legalize doesn't hack it.
-  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
-    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
-                                        getPointerTy(), 0, 0);
-  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
-    Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
-                                         getPointerTy(), 0);
-
-  // MBlazeJmpLink = #chain, #target_address, #opt_in_flags...
-  //             = Chain, Callee, Reg#1, Reg#2, ...
-  //
-  // Returns a chain & a flag for retval copy to use.
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
-  SmallVector<SDValue, 8> Ops;
-  Ops.push_back(Chain);
-  Ops.push_back(Callee);
-
-  // Add argument registers to the end of the list so that they are
-  // known live into the call.
-  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
-    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
-                                  RegsToPass[i].second.getValueType()));
-  }
-
-  if (InFlag.getNode())
-    Ops.push_back(InFlag);
-
-  Chain = DAG.getNode(MBlazeISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size());
-  InFlag = Chain.getValue(1);
-
-  // Create the CALLSEQ_END node.
-  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
-                             DAG.getIntPtrConstant(0, true), InFlag, dl);
-  if (!Ins.empty())
-    InFlag = Chain.getValue(1);
-
-  // Handle result values, copying them out of physregs into vregs that we
-  // return.
-  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
-                         Ins, dl, DAG, InVals);
-}
-
-/// LowerCallResult - Lower the result values of a call into the
-/// appropriate copies out of appropriate physical registers.
-SDValue MBlazeTargetLowering::
-LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv,
-                bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins,
-                SDLoc dl, SelectionDAG &DAG,
-                SmallVectorImpl<SDValue> &InVals) const {
-  // Assign locations to each value returned by this call.
-  SmallVector<CCValAssign, 16> RVLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
-
-  CCInfo.AnalyzeCallResult(Ins, RetCC_MBlaze);
-
-  // Copy all of the result registers out of their specified physreg.
-  for (unsigned i = 0; i != RVLocs.size(); ++i) {
-    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
-                               RVLocs[i].getValVT(), InFlag).getValue(1);
-    InFlag = Chain.getValue(2);
-    InVals.push_back(Chain.getValue(0));
-  }
-
-  return Chain;
-}
-
-//===----------------------------------------------------------------------===//
-//             Formal Arguments Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-/// LowerFormalArguments - transform physical registers into
-/// virtual registers and generate load operations for
-/// arguments placed on the stack.
-SDValue MBlazeTargetLowering::
-LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
-                     const SmallVectorImpl<ISD::InputArg> &Ins,
-                     SDLoc dl, SelectionDAG &DAG,
-                     SmallVectorImpl<SDValue> &InVals) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
-
-  unsigned StackReg = MF.getTarget().getRegisterInfo()->getFrameRegister(MF);
-  MBlazeFI->setVarArgsFrameIndex(0);
-
-  // Used with varargs to accumulate store chains.
-  std::vector<SDValue> OutChains;
-
-  // Keep track of the last register used for arguments
-  unsigned ArgRegEnd = 0;
-
-  // Assign locations to all of the incoming arguments.
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
-
-  CCInfo.AnalyzeFormalArguments(Ins, CC_MBlaze);
-  SDValue StackPtr;
-
-  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
-    CCValAssign &VA = ArgLocs[i];
-
-    // Arguments stored in registers
-    if (VA.isRegLoc()) {
-      MVT RegVT = VA.getLocVT();
-      ArgRegEnd = VA.getLocReg();
-      const TargetRegisterClass *RC;
-
-      if (RegVT == MVT::i32)
-        RC = &MBlaze::GPRRegClass;
-      else if (RegVT == MVT::f32)
-        RC = &MBlaze::GPRRegClass;
-      else
-        llvm_unreachable("RegVT not supported by LowerFormalArguments");
-
-      // Transform the arguments stored in
-      // physical registers into virtual ones
-      unsigned Reg = MF.addLiveIn(ArgRegEnd, RC);
-      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
-
-      // If this is an 8 or 16-bit value, it has been passed promoted
-      // to 32 bits. Insert an assert[sz]ext to capture this, then
-      // truncate to the right size. If it is a floating point value
-      // then convert to the correct type.
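-      // For illustration (a sketch): an i16 argument passed signext in an
-      // i32 register is recovered as
-      //   %v32 = CopyFromReg ...
-      //   %v32 = AssertSext %v32, i16
-      //   %v16 = truncate %v32 to i16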
- if (VA.getLocInfo() != CCValAssign::Full) { - unsigned Opcode = 0; - if (VA.getLocInfo() == CCValAssign::SExt) - Opcode = ISD::AssertSext; - else if (VA.getLocInfo() == CCValAssign::ZExt) - Opcode = ISD::AssertZext; - if (Opcode) - ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue, - DAG.getValueType(VA.getValVT())); - ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); - } - - InVals.push_back(ArgValue); - } else { // VA.isRegLoc() - // sanity check - assert(VA.isMemLoc()); - - // The last argument is not a register - ArgRegEnd = 0; - - // The stack pointer offset is relative to the caller stack frame. - // Since the real stack size is unknown here, a negative SPOffset - // is used so there's a way to adjust these offsets when the stack - // size gets known (in EliminateFrameIndex). A dummy SPOffset is - // used instead of a direct negative address (which is recorded for - // use in emitPrologue) to avoid miscalculating the first stack - // offset in PEI::calculateFrameObjectOffsets. - // Arguments are always 32-bit. - unsigned ArgSize = VA.getLocVT().getSizeInBits()/8; - unsigned StackLoc = VA.getLocMemOffset() + 4; - int FI = MFI->CreateFixedObject(ArgSize, 0, true); - MBlazeFI->recordLoadArgsFI(FI, -StackLoc); - MBlazeFI->recordLiveIn(FI); - - // Create load nodes to retrieve arguments from the stack - SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); - InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, - MachinePointerInfo::getFixedStack(FI), - false, false, false, 0)); - } - } - - // To meet the ABI, when VARARGS are passed in registers, the registers - // must have their values written to the caller stack frame. If the last - // argument was placed on the stack, there's no need to save any register. - if ((isVarArg) && ArgRegEnd) { - if (StackPtr.getNode() == 0) - StackPtr = DAG.getRegister(StackReg, getPointerTy()); - - // The last register argument that must be saved is MBlaze::R10 - const TargetRegisterClass *RC = &MBlaze::GPRRegClass; - - unsigned Begin = getMBlazeRegisterNumbering(MBlaze::R5); - unsigned Start = getMBlazeRegisterNumbering(ArgRegEnd+1); - unsigned End = getMBlazeRegisterNumbering(MBlaze::R10); - unsigned StackLoc = Start - Begin + 1; - - for (; Start <= End; ++Start, ++StackLoc) { - unsigned Reg = getMBlazeRegisterFromNumbering(Start); - unsigned LiveReg = MF.addLiveIn(Reg, RC); - SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, LiveReg, MVT::i32); - - int FI = MFI->CreateFixedObject(4, 0, true); - MBlazeFI->recordStoreVarArgsFI(FI, -(StackLoc*4)); - SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy()); - OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff, - MachinePointerInfo(), - false, false, 0)); - - // Record the frame index of the first variable argument, - // which is needed by VASTART. - if (!MBlazeFI->getVarArgsFrameIndex()) - MBlazeFI->setVarArgsFrameIndex(FI); - } - } - - // All stores are grouped in one node to allow the matching between - // the sizes of Ins and InVals.
This only happens with vararg functions. - if (!OutChains.empty()) { - OutChains.push_back(Chain); - Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &OutChains[0], OutChains.size()); - } - - return Chain; -} - -//===----------------------------------------------------------------------===// -// Return Value Calling Convention Implementation -//===----------------------------------------------------------------------===// - -SDValue MBlazeTargetLowering:: -LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - SDLoc dl, SelectionDAG &DAG) const { - // CCValAssign - represents the assignment of - // the return value to a location - SmallVector<CCValAssign, 16> RVLocs; - - // CCState - Info about the registers and stack slot. - CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); - - // Analyze return values. - CCInfo.AnalyzeReturn(Outs, RetCC_MBlaze); - - SDValue Flag; - SmallVector<SDValue, 4> RetOps(1, Chain); - - // If this function is using the interrupt_handler calling convention, - // then use "rtid r14, 0"; otherwise use "rtsd r15, 8" - unsigned Ret = (CallConv == CallingConv::MBLAZE_INTR) ? MBlazeISD::IRet - : MBlazeISD::Ret; - unsigned Reg = (CallConv == CallingConv::MBLAZE_INTR) ? MBlaze::R14 - : MBlaze::R15; - RetOps.push_back(DAG.getRegister(Reg, MVT::i32)); - - - // Copy the result values into the output registers. - for (unsigned i = 0; i != RVLocs.size(); ++i) { - CCValAssign &VA = RVLocs[i]; - assert(VA.isRegLoc() && "Can only return in registers!"); - - Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), - OutVals[i], Flag); - - // Guarantee that all emitted copies are - // glued together, so they cannot be scheduled apart - Flag = Chain.getValue(1); - RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); - } - - RetOps[0] = Chain; // Update chain. - - // Add the flag if we have it. - if (Flag.getNode()) - RetOps.push_back(Flag); - - return DAG.getNode(Ret, dl, MVT::Other, &RetOps[0], RetOps.size()); -} - -//===----------------------------------------------------------------------===// -// MBlaze Inline Assembly Support -//===----------------------------------------------------------------------===// - -/// getConstraintType - Given a constraint letter, return the type of -/// constraint it is for this target. -MBlazeTargetLowering::ConstraintType MBlazeTargetLowering:: -getConstraintType(const std::string &Constraint) const -{ - // MBlaze-specific constraints - // - // 'd' : An address register. Equivalent to r. - // 'y' : Equivalent to r; retained for - // backwards compatibility. - // 'f' : Floating Point registers. - if (Constraint.size() == 1) { - switch (Constraint[0]) { - default : break; - case 'd': - case 'y': - case 'f': - return C_RegisterClass; - } - } - return TargetLowering::getConstraintType(Constraint); -} - -/// Examine constraint type and operand type and determine a weight value. -/// This object must already have been set up with the operand type -/// and the current alternative constraint selected. -TargetLowering::ConstraintWeight -MBlazeTargetLowering::getSingleConstraintMatchWeight( - AsmOperandInfo &info, const char *constraint) const { - ConstraintWeight weight = CW_Invalid; - Value *CallOperandVal = info.CallOperandVal; - // If we don't have a value, we can't do a match, - // but allow it at the lowest weight.
- if (CallOperandVal == NULL) - return CW_Default; - Type *type = CallOperandVal->getType(); - // Look at the constraint type. - switch (*constraint) { - default: - weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); - break; - case 'd': - case 'y': - if (type->isIntegerTy()) - weight = CW_Register; - break; - case 'f': - if (type->isFloatTy()) - weight = CW_Register; - break; - } - return weight; -} - -/// Given a register class constraint, like 'r', if this corresponds directly -/// to an LLVM register class, return a register of 0 and the register class -/// pointer. -std::pair<unsigned, const TargetRegisterClass*> MBlazeTargetLowering:: -getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const { - if (Constraint.size() == 1) { - switch (Constraint[0]) { - case 'r': - return std::make_pair(0U, &MBlaze::GPRRegClass); - // TODO: These can't possibly be right, but match what was in - // getRegClassForInlineAsmConstraint. - case 'd': - case 'y': - case 'f': - if (VT == MVT::f32) - return std::make_pair(0U, &MBlaze::GPRRegClass); - } - } - return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); -} - -bool MBlazeTargetLowering:: -isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { - // The MBlaze target isn't yet aware of offsets. - return false; -} - -bool MBlazeTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { - return VT != MVT::f32; -} diff --git a/lib/Target/MBlaze/MBlazeISelLowering.h b/lib/Target/MBlaze/MBlazeISelLowering.h deleted file mode 100644 index f874113..0000000 --- a/lib/Target/MBlaze/MBlazeISelLowering.h +++ /dev/null @@ -1,179 +0,0 @@ -//===-- MBlazeISelLowering.h - MBlaze DAG Lowering Interface ----*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the interfaces that MBlaze uses to lower LLVM code into a -// selection DAG. -// -//===----------------------------------------------------------------------===// - -#ifndef MBlazeISELLOWERING_H -#define MBlazeISELLOWERING_H - -#include "MBlaze.h" -#include "MBlazeSubtarget.h" -#include "llvm/CodeGen/SelectionDAG.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Target/TargetLowering.h" - -namespace llvm { - namespace MBlazeCC { - enum CC { - FIRST = 0, - EQ, - NE, - GT, - LT, - GE, - LE - }; - - inline static CC getOppositeCondition(CC cc) { - switch (cc) { - default: llvm_unreachable("Unknown condition code"); - case EQ: return NE; - case NE: return EQ; - case GT: return LE; - case LT: return GE; - case GE: return LT; - case LE: return GT; - } - } - - inline static const char *MBlazeCCToString(CC cc) { - switch (cc) { - default: llvm_unreachable("Unknown condition code"); - case EQ: return "eq"; - case NE: return "ne"; - case GT: return "gt"; - case LT: return "lt"; - case GE: return "ge"; - case LE: return "le"; - } - } - } - - namespace MBlazeISD { - enum NodeType { - // Start the numbering from where ISD NodeType finishes. - FIRST_NUMBER = ISD::BUILTIN_OP_END, - - // Jump and link (call) - JmpLink, - - // Handle gp_rel (small data/bss sections) relocation.
- GPRel, - - // Select CC Pseudo Instruction - Select_CC, - - // Wrap up multiple types of instructions - Wrap, - - // Integer Compare - ICmp, - - // Return from subroutine - Ret, - - // Return from interrupt - IRet - }; - } - - //===--------------------------------------------------------------------===// - // TargetLowering Implementation - //===--------------------------------------------------------------------===// - - class MBlazeTargetLowering : public TargetLowering { - public: - explicit MBlazeTargetLowering(MBlazeTargetMachine &TM); - - /// LowerOperation - Provide custom lowering hooks for some operations. - virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; - - /// getTargetNodeName - This method returns the name of a target specific - // DAG node. - virtual const char *getTargetNodeName(unsigned Opcode) const; - - /// getSetCCResultType - get the ISD::SETCC result ValueType - EVT getSetCCResultType(LLVMContext &Context, EVT VT) const; - - private: - // Subtarget Info - const MBlazeSubtarget *Subtarget; - - - // Lower Operand helpers - SDValue LowerCallResult(SDValue Chain, SDValue InFlag, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, - SDLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const; - - // Lower Operand specifics - SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; - - virtual SDValue - LowerFormalArguments(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, - SDLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const; - - virtual SDValue - LowerCall(TargetLowering::CallLoweringInfo &CLI, - SmallVectorImpl<SDValue> &InVals) const; - - virtual SDValue - LowerReturn(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - SDLoc dl, SelectionDAG &DAG) const; - - virtual MachineBasicBlock* - EmitCustomShift(MachineInstr *MI, MachineBasicBlock *MBB) const; - - virtual MachineBasicBlock* - EmitCustomSelect(MachineInstr *MI, MachineBasicBlock *MBB) const; - - virtual MachineBasicBlock* - EmitCustomAtomic(MachineInstr *MI, MachineBasicBlock *MBB) const; - - virtual MachineBasicBlock * - EmitInstrWithCustomInserter(MachineInstr *MI, - MachineBasicBlock *MBB) const; - - // Inline asm support - ConstraintType getConstraintType(const std::string &Constraint) const; - - /// Examine constraint string and operand type and determine a weight value. - /// The operand object must already have been set up with the operand type. - ConstraintWeight getSingleConstraintMatchWeight( - AsmOperandInfo &info, const char *constraint) const; - - std::pair<unsigned, const TargetRegisterClass*> - getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; - - virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; - - /// isFPImmLegal - Returns true if the target can instruction select the - /// specified FP immediate natively. If false, the legalizer will - /// materialize the FP immediate as a load from a constant pool. 
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; - }; -} - -#endif // MBlazeISELLOWERING_H diff --git a/lib/Target/MBlaze/MBlazeInstrFPU.td b/lib/Target/MBlaze/MBlazeInstrFPU.td deleted file mode 100644 index 3f14593..0000000 --- a/lib/Target/MBlaze/MBlazeInstrFPU.td +++ /dev/null @@ -1,219 +0,0 @@ -//===-- MBlazeInstrFPU.td - MBlaze FPU Instruction defs ----*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// MBlaze profiles and nodes -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// MBlaze Operand, Complex Patterns and Transformations Definitions. -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Memory Access Instructions -//===----------------------------------------------------------------------===// -class LoadFM<bits<6> op, string instr_asm, PatFrag OpNode> : - TA<op, 0x000, (outs GPR:$dst), (ins memrr:$addr), - !strconcat(instr_asm, " $dst, $addr"), - [(set (f32 GPR:$dst), (OpNode xaddr:$addr))], IIC_MEMl>; - -class LoadFMI<bits<6> op, string instr_asm, PatFrag OpNode> : - TB<op, (outs GPR:$dst), (ins memri:$addr), - !strconcat(instr_asm, " $dst, $addr"), - [(set (f32 GPR:$dst), (OpNode iaddr:$addr))], IIC_MEMl>; - -class StoreFM<bits<6> op, string instr_asm, PatFrag OpNode> : - TA<op, 0x000, (outs), (ins GPR:$dst, memrr:$addr), - !strconcat(instr_asm, " $dst, $addr"), - [(OpNode (f32 GPR:$dst), xaddr:$addr)], IIC_MEMs>; - -class StoreFMI<bits<6> op, string instr_asm, PatFrag OpNode> : - TB<op, (outs), (ins GPR:$dst, memrr:$addr), - !strconcat(instr_asm, " $dst, $addr"), - [(OpNode (f32 GPR:$dst), iaddr:$addr)], IIC_MEMs>; - -class ArithF<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode, - InstrItinClass itin> : - TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c), - !strconcat(instr_asm, " $dst, $b, $c"), - [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>; - -class CmpFN<bits<6> op, bits<11> flags, string instr_asm, - InstrItinClass itin> : - TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c), - !strconcat(instr_asm, " $dst, $b, $c"), - [], itin>; - -class ArithFR<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode, - InstrItinClass itin> : - TAR<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c), - !strconcat(instr_asm, " $dst, $c, $b"), - [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>; - -class LogicFI<bits<6> op, string instr_asm> : - TB<op, (outs GPR:$dst), (ins GPR:$b, fimm:$c), - !strconcat(instr_asm, " $dst, $b, $c"), - [], IIC_ALU>; - -let rb=0 in { - class ArithF2<bits<6> op, bits<11> flags, string instr_asm, - InstrItinClass itin> : - TA<op, flags, (outs GPR:$dst), (ins GPR:$b), - !strconcat(instr_asm, " $dst, $b"), - [], itin>; - - class ArithIF<bits<6> op, bits<11> flags, string instr_asm, - InstrItinClass itin> : - TA<op, flags, (outs GPR:$dst), (ins GPR:$b), - !strconcat(instr_asm, " $dst, $b"), - [], itin>; - - class ArithFI<bits<6> op, bits<11> flags, string instr_asm, - InstrItinClass itin> : - TA<op, flags, (outs GPR:$dst), (ins GPR:$b), - !strconcat(instr_asm, " $dst, $b"), - [], itin>; -} - 
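The load, store, and arithmetic classes above all instantiate the TA and TB instruction formats defined in MBlazeInstrFormats.td further down in this diff. As a rough standalone illustration of what those field assignments amount to (not part of the commit; the helper name encodeTypeA is invented here), the following C++ sketch packs a Type-A word, assuming MicroBlaze's MSB-first bit numbering, i.e. Inst{0-5}=opcode, Inst{6-10}=rd, Inst{11-15}=ra, Inst{16-20}=rb, Inst{21-31}=flags:

#include <cstdint>
#include <cstdio>

// Pack a Type-A (register-register) MicroBlaze instruction word.
// Bit 0 is the most significant bit, so the 6-bit opcode lands at the
// top of the 32-bit word and the 11-bit flags field at the bottom.
static uint32_t encodeTypeA(uint32_t opcode, uint32_t rd, uint32_t ra,
                            uint32_t rb, uint32_t flags) {
  return (opcode & 0x3F) << 26 | (rd & 0x1F) << 21 | (ra & 0x1F) << 16 |
         (rb & 0x1F) << 11 | (flags & 0x7FF);
}

int main() {
  // ArithF<0x16, 0x000, "fadd ", ...> below supplies opcode 0x16 and
  // flags 0x000, so "fadd r3, r5, r6" packs to 0x58653000.
  std::printf("fadd r3, r5, r6 -> 0x%08X\n", encodeTypeA(0x16, 3, 5, 6, 0x000));
  return 0;
}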
-//===----------------------------------------------------------------------===// -// Pseudo instructions -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// FPU Arithmetic Instructions -//===----------------------------------------------------------------------===// -let Predicates=[HasFPU] in { - def FORI : LogicFI<0x28, "ori ">; - def FADD : ArithF<0x16, 0x000, "fadd ", fadd, IIC_FPU>; - def FRSUB : ArithFR<0x16, 0x080, "frsub ", fsub, IIC_FPU>; - def FMUL : ArithF<0x16, 0x100, "fmul ", fmul, IIC_FPU>; - def FDIV : ArithF<0x16, 0x180, "fdiv ", fdiv, IIC_FPUd>; -} - -let Predicates=[HasFPU], isCodeGenOnly=1 in { - def LWF : LoadFM<0x32, "lw ", load>; - def LWFI : LoadFMI<0x3A, "lwi ", load>; - - def SWF : StoreFM<0x36, "sw ", store>; - def SWFI : StoreFMI<0x3E, "swi ", store>; -} - -let Predicates=[HasFPU,HasSqrt] in { - def FLT : ArithIF<0x16, 0x280, "flt ", IIC_FPUf>; - def FINT : ArithFI<0x16, 0x300, "fint ", IIC_FPUi>; - def FSQRT : ArithF2<0x16, 0x380, "fsqrt ", IIC_FPUs>; -} - -let isAsCheapAsAMove = 1 in { - def FCMP_UN : CmpFN<0x16, 0x200, "fcmp.un", IIC_FPUc>; - def FCMP_LT : CmpFN<0x16, 0x210, "fcmp.lt", IIC_FPUc>; - def FCMP_EQ : CmpFN<0x16, 0x220, "fcmp.eq", IIC_FPUc>; - def FCMP_LE : CmpFN<0x16, 0x230, "fcmp.le", IIC_FPUc>; - def FCMP_GT : CmpFN<0x16, 0x240, "fcmp.gt", IIC_FPUc>; - def FCMP_NE : CmpFN<0x16, 0x250, "fcmp.ne", IIC_FPUc>; - def FCMP_GE : CmpFN<0x16, 0x260, "fcmp.ge", IIC_FPUc>; -} - - -let usesCustomInserter = 1 in { - def Select_FCC : MBlazePseudo<(outs GPR:$dst), - (ins GPR:$T, GPR:$F, GPR:$CMP, i32imm:$CC), - "; SELECT_FCC PSEUDO!", - []>; -} - -// Floating point conversions -let Predicates=[HasFPU] in { - def : Pat<(sint_to_fp GPR:$V), (FLT GPR:$V)>; - def : Pat<(fp_to_sint GPR:$V), (FINT GPR:$V)>; - def : Pat<(fsqrt GPR:$V), (FSQRT GPR:$V)>; -} - -// SET_CC operations -let Predicates=[HasFPU] in { - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETEQ), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_EQ GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETNE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_EQ GPR:$L, GPR:$R), 1)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOEQ), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_EQ GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETONE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (XOR (FCMP_UN GPR:$L, GPR:$R), - (FCMP_EQ GPR:$L, GPR:$R)), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETONE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (OR (FCMP_UN GPR:$L, GPR:$R), - (FCMP_EQ GPR:$L, GPR:$R)), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_GT GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETLT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_LT GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_GE GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETLE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_LE GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_GT GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOLT), - (Select_CC 
(ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_LT GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_GE GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOLE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_LE GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUEQ), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (OR (FCMP_UN GPR:$L, GPR:$R), - (FCMP_EQ GPR:$L, GPR:$R)), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUNE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_NE GPR:$L, GPR:$R), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (OR (FCMP_UN GPR:$L, GPR:$R), - (FCMP_GT GPR:$L, GPR:$R)), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETULT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (OR (FCMP_UN GPR:$L, GPR:$R), - (FCMP_LT GPR:$L, GPR:$R)), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (OR (FCMP_UN GPR:$L, GPR:$R), - (FCMP_GE GPR:$L, GPR:$R)), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETULE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (OR (FCMP_UN GPR:$L, GPR:$R), - (FCMP_LE GPR:$L, GPR:$R)), 2)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETO), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_UN GPR:$L, GPR:$R), 1)>; - def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUO), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (FCMP_UN GPR:$L, GPR:$R), 2)>; -} - -// SELECT operations -def : Pat<(select (i32 GPR:$C), (f32 GPR:$T), (f32 GPR:$F)), - (Select_FCC GPR:$T, GPR:$F, GPR:$C, 2)>; - -//===----------------------------------------------------------------------===// -// Patterns for Floating Point Instructions -//===----------------------------------------------------------------------===// -def : Pat<(f32 fpimm:$imm), (FORI (i32 R0), fpimm:$imm)>; diff --git a/lib/Target/MBlaze/MBlazeInstrFSL.td b/lib/Target/MBlaze/MBlazeInstrFSL.td deleted file mode 100644 index 91b69de..0000000 --- a/lib/Target/MBlaze/MBlazeInstrFSL.td +++ /dev/null @@ -1,229 +0,0 @@ -//===-- MBlazeInstrFSL.td - MBlaze FSL Instruction defs ----*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// FSL Instruction Formats -//===----------------------------------------------------------------------===// -class FSLGet<bits<6> op, bits<5> flags, string instr_asm, Intrinsic OpNode> : - MBlazeInst<op, FRCX, (outs GPR:$dst), (ins fslimm:$b), - !strconcat(instr_asm, " $dst, $b"), - [(set GPR:$dst, (OpNode immZExt4:$b))],IIC_FSLg> -{ - bits<5> rd; - bits<4> fslno; - - let Inst{6-10} = rd; - let Inst{11-15} = 0x0; - let Inst{16} = 0x0; - let Inst{17-21} = flags; // NCTAE - let Inst{22-27} = 0x0; - let Inst{28-31} = fslno; -} - -class FSLGetD<bits<6> op, bits<5> flags, string instr_asm, Intrinsic OpNode> : - MBlazeInst<op, FRCR, (outs GPR:$dst), (ins GPR:$b), - !strconcat(instr_asm, " $dst, $b"), - [(set GPR:$dst, (OpNode GPR:$b))], IIC_FSLg> -{ - bits<5> rd; - bits<5> rb; - - let Inst{6-10} = rd; - let Inst{11-15} = 0x0; - let Inst{16-20} = rb; - let Inst{21} = 0x0; - let Inst{22-26} = flags; // NCTAE - let Inst{27-31} = 0x0; -} - -class FSLPut<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> : - MBlazeInst<op, FCRCX, (outs), (ins GPR:$v, fslimm:$b), - !strconcat(instr_asm, " $v, $b"), - [(OpNode GPR:$v, immZExt4:$b)], IIC_FSLp> -{ - bits<5> ra; - bits<4> fslno; - - let Inst{6-10} = 0x0; - let Inst{11-15} = ra; - let Inst{16} = 0x1; - let Inst{17-20} = flags; // NCTA - let Inst{21-27} = 0x0; - let Inst{28-31} = fslno; -} - -class FSLPutD<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> : - MBlazeInst<op, FCRR, (outs), (ins GPR:$v, GPR:$b), - !strconcat(instr_asm, " $v, $b"), - [(OpNode GPR:$v, GPR:$b)], IIC_FSLp> -{ - bits<5> ra; - bits<5> rb; - - let Inst{6-10} = 0x0; - let Inst{11-15} = ra; - let Inst{16-20} = rb; - let Inst{21} = 0x1; - let Inst{22-25} = flags; // NCTA - let Inst{26-31} = 0x0; -} - -class FSLPutT<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> : - MBlazeInst<op, FCX, (outs), (ins fslimm:$b), - !strconcat(instr_asm, " $b"), - [(OpNode immZExt4:$b)], IIC_FSLp> -{ - bits<4> fslno; - - let Inst{6-10} = 0x0; - let Inst{11-15} = 0x0; - let Inst{16} = 0x1; - let Inst{17-20} = flags; // NCTA - let Inst{21-27} = 0x0; - let Inst{28-31} = fslno; -} - -class FSLPutTD<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> : - MBlazeInst<op, FCR, (outs), (ins GPR:$b), - !strconcat(instr_asm, " $b"), - [(OpNode GPR:$b)], IIC_FSLp> -{ - bits<5> rb; - - let Inst{6-10} = 0x0; - let Inst{11-15} = 0x0; - let Inst{16-20} = rb; - let Inst{21} = 0x1; - let Inst{22-25} = flags; // NCTA - let Inst{26-31} = 0x0; -} - -//===----------------------------------------------------------------------===// -// FSL Get Instructions -//===----------------------------------------------------------------------===// -def GET : FSLGet<0x1B, 0x00, "get ", int_mblaze_fsl_get>; -def AGET : FSLGet<0x1B, 0x02, "aget ", int_mblaze_fsl_aget>; -def CGET : FSLGet<0x1B, 0x08, "cget ", int_mblaze_fsl_cget>; -def CAGET : FSLGet<0x1B, 0x0A, "caget ", int_mblaze_fsl_caget>; -def EGET : FSLGet<0x1B, 0x01, "eget ", int_mblaze_fsl_eget>; -def EAGET : FSLGet<0x1B, 0x03, "eaget ", int_mblaze_fsl_eaget>; -def ECGET : FSLGet<0x1B, 0x09, "ecget ", int_mblaze_fsl_ecget>; -def ECAGET : FSLGet<0x1B, 0x0B, "ecaget ", int_mblaze_fsl_ecaget>; -def TGET : FSLGet<0x1B, 0x04, "tget ", int_mblaze_fsl_tget>; -def TAGET : FSLGet<0x1B, 0x06, "taget ", int_mblaze_fsl_taget>; -def TCGET : FSLGet<0x1B, 0x0C, "tcget ", 
int_mblaze_fsl_tcget>; -def TCAGET : FSLGet<0x1B, 0x0E, "tcaget ", int_mblaze_fsl_tcaget>; -def TEGET : FSLGet<0x1B, 0x05, "teget ", int_mblaze_fsl_teget>; -def TEAGET : FSLGet<0x1B, 0x07, "teaget ", int_mblaze_fsl_teaget>; -def TECGET : FSLGet<0x1B, 0x0D, "tecget ", int_mblaze_fsl_tecget>; -def TECAGET : FSLGet<0x1B, 0x0F, "tecaget ", int_mblaze_fsl_tecaget>; - -let Defs = [CARRY] in { - def NGET : FSLGet<0x1B, 0x10, "nget ", int_mblaze_fsl_nget>; - def NAGET : FSLGet<0x1B, 0x12, "naget ", int_mblaze_fsl_naget>; - def NCGET : FSLGet<0x1B, 0x18, "ncget ", int_mblaze_fsl_ncget>; - def NCAGET : FSLGet<0x1B, 0x1A, "ncaget ", int_mblaze_fsl_ncaget>; - def NEGET : FSLGet<0x1B, 0x11, "neget ", int_mblaze_fsl_neget>; - def NEAGET : FSLGet<0x1B, 0x13, "neaget ", int_mblaze_fsl_neaget>; - def NECGET : FSLGet<0x1B, 0x19, "necget ", int_mblaze_fsl_necget>; - def NECAGET : FSLGet<0x1B, 0x1B, "necaget ", int_mblaze_fsl_necaget>; - def TNGET : FSLGet<0x1B, 0x14, "tnget ", int_mblaze_fsl_tnget>; - def TNAGET : FSLGet<0x1B, 0x16, "tnaget ", int_mblaze_fsl_tnaget>; - def TNCGET : FSLGet<0x1B, 0x1C, "tncget ", int_mblaze_fsl_tncget>; - def TNCAGET : FSLGet<0x1B, 0x1E, "tncaget ", int_mblaze_fsl_tncaget>; - def TNEGET : FSLGet<0x1B, 0x15, "tneget ", int_mblaze_fsl_tneget>; - def TNEAGET : FSLGet<0x1B, 0x17, "tneaget ", int_mblaze_fsl_tneaget>; - def TNECGET : FSLGet<0x1B, 0x1D, "tnecget ", int_mblaze_fsl_tnecget>; - def TNECAGET : FSLGet<0x1B, 0x1F, "tnecaget ", int_mblaze_fsl_tnecaget>; -} - -//===----------------------------------------------------------------------===// -// FSL Dynamic Get Instructions -//===----------------------------------------------------------------------===// -def GETD : FSLGetD<0x13, 0x00, "getd ", int_mblaze_fsl_get>; -def AGETD : FSLGetD<0x13, 0x02, "agetd ", int_mblaze_fsl_aget>; -def CGETD : FSLGetD<0x13, 0x08, "cgetd ", int_mblaze_fsl_cget>; -def CAGETD : FSLGetD<0x13, 0x0A, "cagetd ", int_mblaze_fsl_caget>; -def EGETD : FSLGetD<0x13, 0x01, "egetd ", int_mblaze_fsl_eget>; -def EAGETD : FSLGetD<0x13, 0x03, "eagetd ", int_mblaze_fsl_eaget>; -def ECGETD : FSLGetD<0x13, 0x09, "ecgetd ", int_mblaze_fsl_ecget>; -def ECAGETD : FSLGetD<0x13, 0x0B, "ecagetd ", int_mblaze_fsl_ecaget>; -def TGETD : FSLGetD<0x13, 0x04, "tgetd ", int_mblaze_fsl_tget>; -def TAGETD : FSLGetD<0x13, 0x06, "tagetd ", int_mblaze_fsl_taget>; -def TCGETD : FSLGetD<0x13, 0x0C, "tcgetd ", int_mblaze_fsl_tcget>; -def TCAGETD : FSLGetD<0x13, 0x0E, "tcagetd ", int_mblaze_fsl_tcaget>; -def TEGETD : FSLGetD<0x13, 0x05, "tegetd ", int_mblaze_fsl_teget>; -def TEAGETD : FSLGetD<0x13, 0x07, "teagetd ", int_mblaze_fsl_teaget>; -def TECGETD : FSLGetD<0x13, 0x0D, "tecgetd ", int_mblaze_fsl_tecget>; -def TECAGETD : FSLGetD<0x13, 0x0F, "tecagetd ", int_mblaze_fsl_tecaget>; - -let Defs = [CARRY] in { - def NGETD : FSLGetD<0x13, 0x10, "ngetd ", int_mblaze_fsl_nget>; - def NAGETD : FSLGetD<0x13, 0x12, "nagetd ", int_mblaze_fsl_naget>; - def NCGETD : FSLGetD<0x13, 0x18, "ncgetd ", int_mblaze_fsl_ncget>; - def NCAGETD : FSLGetD<0x13, 0x1A, "ncagetd ", int_mblaze_fsl_ncaget>; - def NEGETD : FSLGetD<0x13, 0x11, "negetd ", int_mblaze_fsl_neget>; - def NEAGETD : FSLGetD<0x13, 0x13, "neagetd ", int_mblaze_fsl_neaget>; - def NECGETD : FSLGetD<0x13, 0x19, "necgetd ", int_mblaze_fsl_necget>; - def NECAGETD : FSLGetD<0x13, 0x1B, "necagetd ", int_mblaze_fsl_necaget>; - def TNGETD : FSLGetD<0x13, 0x14, "tngetd ", int_mblaze_fsl_tnget>; - def TNAGETD : FSLGetD<0x13, 0x16, "tnagetd ", int_mblaze_fsl_tnaget>; - def TNCGETD : FSLGetD<0x13, 0x1C, 
"tncgetd ", int_mblaze_fsl_tncget>; - def TNCAGETD : FSLGetD<0x13, 0x1E, "tncagetd ", int_mblaze_fsl_tncaget>; - def TNEGETD : FSLGetD<0x13, 0x15, "tnegetd ", int_mblaze_fsl_tneget>; - def TNEAGETD : FSLGetD<0x13, 0x17, "tneagetd ", int_mblaze_fsl_tneaget>; - def TNECGETD : FSLGetD<0x13, 0x1D, "tnecgetd ", int_mblaze_fsl_tnecget>; - def TNECAGETD : FSLGetD<0x13, 0x1F, "tnecagetd", int_mblaze_fsl_tnecaget>; -} - -//===----------------------------------------------------------------------===// -// FSL Put Instructions -//===----------------------------------------------------------------------===// -def PUT : FSLPut<0x1B, 0x0, "put ", int_mblaze_fsl_put>; -def APUT : FSLPut<0x1B, 0x1, "aput ", int_mblaze_fsl_aput>; -def CPUT : FSLPut<0x1B, 0x4, "cput ", int_mblaze_fsl_cput>; -def CAPUT : FSLPut<0x1B, 0x5, "caput ", int_mblaze_fsl_caput>; -def TPUT : FSLPutT<0x1B, 0x2, "tput ", int_mblaze_fsl_tput>; -def TAPUT : FSLPutT<0x1B, 0x3, "taput ", int_mblaze_fsl_taput>; -def TCPUT : FSLPutT<0x1B, 0x6, "tcput ", int_mblaze_fsl_tcput>; -def TCAPUT : FSLPutT<0x1B, 0x7, "tcaput ", int_mblaze_fsl_tcaput>; - -let Defs = [CARRY] in { - def NPUT : FSLPut<0x1B, 0x8, "nput ", int_mblaze_fsl_nput>; - def NAPUT : FSLPut<0x1B, 0x9, "naput ", int_mblaze_fsl_naput>; - def NCPUT : FSLPut<0x1B, 0xC, "ncput ", int_mblaze_fsl_ncput>; - def NCAPUT : FSLPut<0x1B, 0xD, "ncaput ", int_mblaze_fsl_ncaput>; - def TNPUT : FSLPutT<0x1B, 0xA, "tnput ", int_mblaze_fsl_tnput>; - def TNAPUT : FSLPutT<0x1B, 0xB, "tnaput ", int_mblaze_fsl_tnaput>; - def TNCPUT : FSLPutT<0x1B, 0xE, "tncput ", int_mblaze_fsl_tncput>; - def TNCAPUT : FSLPutT<0x1B, 0xF, "tncaput ", int_mblaze_fsl_tncaput>; -} - -//===----------------------------------------------------------------------===// -// FSL Dynamic Put Instructions -//===----------------------------------------------------------------------===// -def PUTD : FSLPutD<0x13, 0x0, "putd ", int_mblaze_fsl_put>; -def APUTD : FSLPutD<0x13, 0x1, "aputd ", int_mblaze_fsl_aput>; -def CPUTD : FSLPutD<0x13, 0x4, "cputd ", int_mblaze_fsl_cput>; -def CAPUTD : FSLPutD<0x13, 0x5, "caputd ", int_mblaze_fsl_caput>; -def TPUTD : FSLPutTD<0x13, 0x2, "tputd ", int_mblaze_fsl_tput>; -def TAPUTD : FSLPutTD<0x13, 0x3, "taputd ", int_mblaze_fsl_taput>; -def TCPUTD : FSLPutTD<0x13, 0x6, "tcputd ", int_mblaze_fsl_tcput>; -def TCAPUTD : FSLPutTD<0x13, 0x7, "tcaputd ", int_mblaze_fsl_tcaput>; - -let Defs = [CARRY] in { - def NPUTD : FSLPutD<0x13, 0x8, "nputd ", int_mblaze_fsl_nput>; - def NAPUTD : FSLPutD<0x13, 0x9, "naputd ", int_mblaze_fsl_naput>; - def NCPUTD : FSLPutD<0x13, 0xC, "ncputd ", int_mblaze_fsl_ncput>; - def NCAPUTD : FSLPutD<0x13, 0xD, "ncaputd ", int_mblaze_fsl_ncaput>; - def TNPUTD : FSLPutTD<0x13, 0xA, "tnputd ", int_mblaze_fsl_tnput>; - def TNAPUTD : FSLPutTD<0x13, 0xB, "tnaputd ", int_mblaze_fsl_tnaput>; - def TNCPUTD : FSLPutTD<0x13, 0xE, "tncputd ", int_mblaze_fsl_tncput>; - def TNCAPUTD : FSLPutTD<0x13, 0xF, "tncaputd ", int_mblaze_fsl_tncaput>; -} diff --git a/lib/Target/MBlaze/MBlazeInstrFormats.td b/lib/Target/MBlaze/MBlazeInstrFormats.td deleted file mode 100644 index e40432a..0000000 --- a/lib/Target/MBlaze/MBlazeInstrFormats.td +++ /dev/null @@ -1,228 +0,0 @@ -//===-- MBlazeInstrFormats.td - MB Instruction defs --------*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -// Format specifies the encoding used by the instruction. This is part of the -// ad-hoc solution used to emit machine instruction encodings by our machine -// code emitter. -class Format<bits<6> val> { - bits<6> Value = val; -} - -def FPseudo : Format<0>; -def FRRR : Format<1>; // ADD, OR, etc. -def FRRI : Format<2>; // ADDI, ORI, etc. -def FCRR : Format<3>; // PUTD, WDC, WIC, BEQ, BNE, BGE, etc. -def FCRI : Format<4>; // RTID, RTED, RTSD, BEQI, BNEI, BGEI, etc. -def FRCR : Format<5>; // BRLD, BRALD, GETD -def FRCI : Format<6>; // BRLID, BRALID, MSRCLR, MSRSET -def FCCR : Format<7>; // BR, BRA, BRD, etc. -def FCCI : Format<8>; // IMM, BRI, BRAI, BRID, etc. -def FRRCI : Format<9>; // BSRLI, BSRAI, BSLLI -def FRRC : Format<10>; // SEXT8, SEXT16, SRA, SRC, SRL, FLT, FINT, FSQRT -def FRCX : Format<11>; // GET -def FRCS : Format<12>; // MFS -def FCRCS : Format<13>; // MTS -def FCRCX : Format<14>; // PUT -def FCX : Format<15>; // TPUT -def FCR : Format<16>; // TPUTD -def FRIR : Format<17>; // RSUBI -def FRRRR : Format<18>; // RSUB, FRSUB -def FRI : Format<19>; // RSUB, FRSUB -def FC : Format<20>; // NOP -def FRR : Format<21>; // CLZ - -//===----------------------------------------------------------------------===// -// Describe MBlaze instruction formats -// -// CPU INSTRUCTION FORMATS -// -// opcode - operation code. -// rd - dst reg. -// ra - first src. reg. -// rb - second src. reg. -// imm16 - 16-bit immediate value. -// -//===----------------------------------------------------------------------===// - -// Generic MBlaze Format -class MBlazeInst<bits<6> op, Format form, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : Instruction { - let Namespace = "MBlaze"; - field bits<32> Inst; - - bits<6> opcode = op; - Format Form = form; - bits<6> FormBits = Form.Value; - - // Top 6 bits are the 'opcode' field - let Inst{0-5} = opcode; - - // If the instruction is marked as a pseudo, set isCodeGenOnly so that the - // assembler and disassembler ignore it. - let isCodeGenOnly = !eq(!cast<string>(form), "FPseudo"); - - dag OutOperandList = outs; - dag InOperandList = ins; - - let AsmString = asmstr; - let Pattern = pattern; - let Itinerary = itin; - - // TSFlags layout should be kept in sync with MBlazeInstrInfo.h.
- let TSFlags{5-0} = FormBits; -} - -//===----------------------------------------------------------------------===// -// Pseudo instruction class -//===----------------------------------------------------------------------===// -class MBlazePseudo<dag outs, dag ins, string asmstr, list<dag> pattern>: - MBlazeInst<0x0, FPseudo, outs, ins, asmstr, pattern, IIC_Pseudo>; - -//===----------------------------------------------------------------------===// -// Type A instruction class in MBlaze : <|opcode|rd|ra|rb|flags|> -//===----------------------------------------------------------------------===// - -class TA<bits<6> op, bits<11> flags, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : - MBlazeInst<op,FRRR,outs, ins, asmstr, pattern, itin> -{ - bits<5> rd; - bits<5> ra; - bits<5> rb; - - let Inst{6-10} = rd; - let Inst{11-15} = ra; - let Inst{16-20} = rb; - let Inst{21-31} = flags; -} - -//===----------------------------------------------------------------------===// -// Type B instruction class in MBlaze : <|opcode|rd|ra|immediate|> -//===----------------------------------------------------------------------===// - -class TB<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern, - InstrItinClass itin> : - MBlazeInst<op, FRRI, outs, ins, asmstr, pattern, itin> -{ - bits<5> rd; - bits<5> ra; - bits<16> imm16; - - let Inst{6-10} = rd; - let Inst{11-15} = ra; - let Inst{16-31} = imm16; -} - -//===----------------------------------------------------------------------===// -// Type A instruction class in MBlaze but with the operands reversed -// in the LLVM DAG : <|opcode|rd|ra|rb|flags|> -//===----------------------------------------------------------------------===// - -class TAR<bits<6> op, bits<11> flags, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : - TA<op, flags, outs, ins, asmstr, pattern, itin> -{ - bits<5> rrd; - bits<5> rrb; - bits<5> rra; - - let Form = FRRRR; - - let rd = rrd; - let ra = rra; - let rb = rrb; -} - -//===----------------------------------------------------------------------===// -// Type B instruction class in MBlaze but with the operands reversed in -// the LLVM DAG : <|opcode|rd|ra|immediate|> -//===----------------------------------------------------------------------===// -class TBR<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern, - InstrItinClass itin> : - TB<op, outs, ins, asmstr, pattern, itin> { - bits<5> rrd; - bits<16> rimm16; - bits<5> rra; - - let Form = FRIR; - - let rd = rrd; - let ra = rra; - let imm16 = rimm16; -} - -//===----------------------------------------------------------------------===// -// Shift immediate instruction class in MBlaze : <|opcode|rd|ra|immediate|> -//===----------------------------------------------------------------------===// -class SHT<bits<6> op, bits<2> flags, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : - MBlazeInst<op, FRRI, outs, ins, asmstr, pattern, itin> { - bits<5> rd; - bits<5> ra; - bits<5> imm5; - - let Inst{6-10} = rd; - let Inst{11-15} = ra; - let Inst{16-20} = 0x0; - let Inst{21-22} = flags; - let Inst{23-26} = 0x0; - let Inst{27-31} = imm5; -} - -//===----------------------------------------------------------------------===// -// Special instruction class in MBlaze : <|opcode|rd|imm14|> -//===----------------------------------------------------------------------===// -class SPC<bits<6> op, bits<2> flags, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : - 
MBlazeInst<op, FRI, outs, ins, asmstr, pattern, itin> { - bits<5> rd; - bits<14> imm14; - - let Inst{6-10} = rd; - let Inst{11-15} = 0x0; - let Inst{16-17} = flags; - let Inst{18-31} = imm14; -} - -//===----------------------------------------------------------------------===// -// MSR instruction class in MBlaze : <|opcode|rd|imm15|> -//===----------------------------------------------------------------------===// -class MSR<bits<6> op, bits<6> flags, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : - MBlazeInst<op, FRI, outs, ins, asmstr, pattern, itin> { - bits<5> rd; - bits<15> imm15; - - let Inst{6-10} = rd; - let Inst{11-16} = flags; - let Inst{17-31} = imm15; -} - -//===----------------------------------------------------------------------===// -// TCLZ instruction class in MBlaze : <|opcode|rd|imm15|> -//===----------------------------------------------------------------------===// -class TCLZ<bits<6> op, bits<16> flags, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : - MBlazeInst<op, FRR, outs, ins, asmstr, pattern, itin> { - bits<5> rd; - bits<5> ra; - - let Inst{6-10} = rd; - let Inst{11-15} = ra; - let Inst{16-31} = flags; -} - -//===----------------------------------------------------------------------===// -// MBAR instruction class in MBlaze : <|opcode|rd|imm15|> -//===----------------------------------------------------------------------===// -class MBAR<bits<6> op, bits<26> flags, dag outs, dag ins, string asmstr, - list<dag> pattern, InstrItinClass itin> : - MBlazeInst<op, FC, outs, ins, asmstr, pattern, itin> { - let Inst{6-31} = flags; -} diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.cpp b/lib/Target/MBlaze/MBlazeInstrInfo.cpp deleted file mode 100644 index ab069e6..0000000 --- a/lib/Target/MBlaze/MBlazeInstrInfo.cpp +++ /dev/null @@ -1,297 +0,0 @@ -//===-- MBlazeInstrInfo.cpp - MBlaze Instruction Information --------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the MBlaze implementation of the TargetInstrInfo class. -// -//===----------------------------------------------------------------------===// - -#include "MBlazeInstrInfo.h" -#include "MBlazeMachineFunction.h" -#include "MBlazeTargetMachine.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/ScoreboardHazardRecognizer.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/TargetRegistry.h" - -#define GET_INSTRINFO_CTOR -#include "MBlazeGenInstrInfo.inc" - -using namespace llvm; - -MBlazeInstrInfo::MBlazeInstrInfo(MBlazeTargetMachine &tm) - : MBlazeGenInstrInfo(MBlaze::ADJCALLSTACKDOWN, MBlaze::ADJCALLSTACKUP), - TM(tm), RI(*TM.getSubtargetImpl()) {} - -static bool isZeroImm(const MachineOperand &op) { - return op.isImm() && op.getImm() == 0; -} - -/// isLoadFromStackSlot - If the specified machine instruction is a direct -/// load from a stack slot, return the virtual or physical register number of -/// the destination along with the FrameIndex of the loaded stack slot. If -/// not, return 0. This predicate must return 0 if the instruction has -/// any side effects other than loading from the stack slot. 
-unsigned MBlazeInstrInfo:: -isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const { - if (MI->getOpcode() == MBlaze::LWI) { - if ((MI->getOperand(1).isFI()) && // is a stack slot - (MI->getOperand(2).isImm()) && // has an immediate - (isZeroImm(MI->getOperand(2)))) { // which is zero - FrameIndex = MI->getOperand(1).getIndex(); - return MI->getOperand(0).getReg(); - } - } - - return 0; -} - -/// isStoreToStackSlot - If the specified machine instruction is a direct -/// store to a stack slot, return the virtual or physical register number of -/// the source reg along with the FrameIndex of the loaded stack slot. If -/// not, return 0. This predicate must return 0 if the instruction has -/// any side effects other than storing to the stack slot. -unsigned MBlazeInstrInfo:: -isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const { - if (MI->getOpcode() == MBlaze::SWI) { - if ((MI->getOperand(1).isFI()) && // is a stack slot - (MI->getOperand(2).isImm()) && // has an immediate - (isZeroImm(MI->getOperand(2)))) { // which is zero - FrameIndex = MI->getOperand(1).getIndex(); - return MI->getOperand(0).getReg(); - } - } - return 0; -} - -/// insertNoop - If a data hazard condition is found, insert the target nop -/// instruction. -void MBlazeInstrInfo:: -insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const { - DebugLoc DL; - BuildMI(MBB, MI, DL, get(MBlaze::NOP)); -} - -void MBlazeInstrInfo:: -copyPhysReg(MachineBasicBlock &MBB, - MachineBasicBlock::iterator I, DebugLoc DL, - unsigned DestReg, unsigned SrcReg, - bool KillSrc) const { - llvm::BuildMI(MBB, I, DL, get(MBlaze::ADDK), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)).addReg(MBlaze::R0); -} - -void MBlazeInstrInfo:: -storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - unsigned SrcReg, bool isKill, int FI, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const { - DebugLoc DL; - BuildMI(MBB, I, DL, get(MBlaze::SWI)).addReg(SrcReg,getKillRegState(isKill)) - .addFrameIndex(FI).addImm(0); -} - -void MBlazeInstrInfo:: -loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - unsigned DestReg, int FI, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const { - DebugLoc DL; - BuildMI(MBB, I, DL, get(MBlaze::LWI), DestReg) - .addFrameIndex(FI).addImm(0); -} - -//===----------------------------------------------------------------------===// -// Branch Analysis -//===----------------------------------------------------------------------===// -bool MBlazeInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, - MachineBasicBlock *&TBB, - MachineBasicBlock *&FBB, - SmallVectorImpl<MachineOperand> &Cond, - bool AllowModify) const { - // If the block has no terminators, it just falls into the block after it. - MachineBasicBlock::iterator I = MBB.end(); - if (I == MBB.begin()) - return false; - --I; - while (I->isDebugValue()) { - if (I == MBB.begin()) - return false; - --I; - } - if (!isUnpredicatedTerminator(I)) - return false; - - // Get the last instruction in the block. - MachineInstr *LastInst = I; - - // If there is only one terminator instruction, process it. - unsigned LastOpc = LastInst->getOpcode(); - if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { - if (MBlaze::isUncondBranchOpcode(LastOpc)) { - TBB = LastInst->getOperand(0).getMBB(); - return false; - } - if (MBlaze::isCondBranchOpcode(LastOpc)) { - // Block ends with fall-through condbranch.
- TBB = LastInst->getOperand(1).getMBB(); - Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode())); - Cond.push_back(LastInst->getOperand(0)); - return false; - } - // Otherwise, don't know what this is. - return true; - } - - // Get the instruction before it if it's a terminator. - MachineInstr *SecondLastInst = I; - - // If there are three terminators, we don't know what sort of block this is. - if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I)) - return true; - - // If the block ends with something like BEQID then BRID, handle it. - if (MBlaze::isCondBranchOpcode(SecondLastInst->getOpcode()) && - MBlaze::isUncondBranchOpcode(LastInst->getOpcode())) { - TBB = SecondLastInst->getOperand(1).getMBB(); - Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode())); - Cond.push_back(SecondLastInst->getOperand(0)); - FBB = LastInst->getOperand(0).getMBB(); - return false; - } - - // If the block ends with two unconditional branches, handle it. - // The second one is not executed, so remove it. - if (MBlaze::isUncondBranchOpcode(SecondLastInst->getOpcode()) && - MBlaze::isUncondBranchOpcode(LastInst->getOpcode())) { - TBB = SecondLastInst->getOperand(0).getMBB(); - I = LastInst; - if (AllowModify) - I->eraseFromParent(); - return false; - } - - // Otherwise, can't handle this. - return true; -} - -unsigned MBlazeInstrInfo:: -InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, - MachineBasicBlock *FBB, - const SmallVectorImpl<MachineOperand> &Cond, - DebugLoc DL) const { - // Shouldn't be a fall through. - assert(TBB && "InsertBranch must not be told to insert a fallthrough"); - assert((Cond.size() == 2 || Cond.size() == 0) && - "MBlaze branch conditions have two components!"); - - unsigned Opc = MBlaze::BRID; - if (!Cond.empty()) - Opc = (unsigned)Cond[0].getImm(); - - if (FBB == 0) { - if (Cond.empty()) // Unconditional branch - BuildMI(&MBB, DL, get(Opc)).addMBB(TBB); - else // Conditional branch - BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg()).addMBB(TBB); - return 1; - } - - BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg()).addMBB(TBB); - BuildMI(&MBB, DL, get(MBlaze::BRID)).addMBB(FBB); - return 2; -} - -unsigned MBlazeInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { - MachineBasicBlock::iterator I = MBB.end(); - if (I == MBB.begin()) return 0; - --I; - while (I->isDebugValue()) { - if (I == MBB.begin()) - return 0; - --I; - } - - if (!MBlaze::isUncondBranchOpcode(I->getOpcode()) && - !MBlaze::isCondBranchOpcode(I->getOpcode())) - return 0; - - // Remove the branch. - I->eraseFromParent(); - - I = MBB.end(); - - if (I == MBB.begin()) return 1; - --I; - if (!MBlaze::isCondBranchOpcode(I->getOpcode())) - return 1; - - // Remove the branch. 
- I->eraseFromParent(); - return 2; -} - -bool MBlazeInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> - &Cond) const { - assert(Cond.size() == 2 && "Invalid MBlaze branch opcode!"); - switch (Cond[0].getImm()) { - default: return true; - case MBlaze::BEQ: Cond[0].setImm(MBlaze::BNE); return false; - case MBlaze::BNE: Cond[0].setImm(MBlaze::BEQ); return false; - case MBlaze::BGT: Cond[0].setImm(MBlaze::BLE); return false; - case MBlaze::BGE: Cond[0].setImm(MBlaze::BLT); return false; - case MBlaze::BLT: Cond[0].setImm(MBlaze::BGE); return false; - case MBlaze::BLE: Cond[0].setImm(MBlaze::BGT); return false; - case MBlaze::BEQI: Cond[0].setImm(MBlaze::BNEI); return false; - case MBlaze::BNEI: Cond[0].setImm(MBlaze::BEQI); return false; - case MBlaze::BGTI: Cond[0].setImm(MBlaze::BLEI); return false; - case MBlaze::BGEI: Cond[0].setImm(MBlaze::BLTI); return false; - case MBlaze::BLTI: Cond[0].setImm(MBlaze::BGEI); return false; - case MBlaze::BLEI: Cond[0].setImm(MBlaze::BGTI); return false; - case MBlaze::BEQD: Cond[0].setImm(MBlaze::BNED); return false; - case MBlaze::BNED: Cond[0].setImm(MBlaze::BEQD); return false; - case MBlaze::BGTD: Cond[0].setImm(MBlaze::BLED); return false; - case MBlaze::BGED: Cond[0].setImm(MBlaze::BLTD); return false; - case MBlaze::BLTD: Cond[0].setImm(MBlaze::BGED); return false; - case MBlaze::BLED: Cond[0].setImm(MBlaze::BGTD); return false; - case MBlaze::BEQID: Cond[0].setImm(MBlaze::BNEID); return false; - case MBlaze::BNEID: Cond[0].setImm(MBlaze::BEQID); return false; - case MBlaze::BGTID: Cond[0].setImm(MBlaze::BLEID); return false; - case MBlaze::BGEID: Cond[0].setImm(MBlaze::BLTID); return false; - case MBlaze::BLTID: Cond[0].setImm(MBlaze::BGEID); return false; - case MBlaze::BLEID: Cond[0].setImm(MBlaze::BGTID); return false; - } -} - -/// getGlobalBaseReg - Return a virtual register initialized with the -/// global base register value. Output instructions required to -/// initialize the register in the function entry block, if necessary. -/// -unsigned MBlazeInstrInfo::getGlobalBaseReg(MachineFunction *MF) const { - MBlazeFunctionInfo *MBlazeFI = MF->getInfo<MBlazeFunctionInfo>(); - unsigned GlobalBaseReg = MBlazeFI->getGlobalBaseReg(); - if (GlobalBaseReg != 0) - return GlobalBaseReg; - - // Insert the set of GlobalBaseReg into the first MBB of the function - MachineBasicBlock &FirstMBB = MF->front(); - MachineBasicBlock::iterator MBBI = FirstMBB.begin(); - MachineRegisterInfo &RegInfo = MF->getRegInfo(); - const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); - - GlobalBaseReg = RegInfo.createVirtualRegister(&MBlaze::GPRRegClass); - BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), - GlobalBaseReg).addReg(MBlaze::R20); - RegInfo.addLiveIn(MBlaze::R20); - - MBlazeFI->setGlobalBaseReg(GlobalBaseReg); - return GlobalBaseReg; -} diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.h b/lib/Target/MBlaze/MBlazeInstrInfo.h deleted file mode 100644 index 5252147..0000000 --- a/lib/Target/MBlaze/MBlazeInstrInfo.h +++ /dev/null @@ -1,240 +0,0 @@ -//===-- MBlazeInstrInfo.h - MBlaze Instruction Information ------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the MBlaze implementation of the TargetInstrInfo class.
-// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZEINSTRUCTIONINFO_H -#define MBLAZEINSTRUCTIONINFO_H - -#include "MBlaze.h" -#include "MBlazeRegisterInfo.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Target/TargetInstrInfo.h" - -#define GET_INSTRINFO_HEADER -#include "MBlazeGenInstrInfo.inc" - -namespace llvm { - -namespace MBlaze { - - // MBlaze Branch Codes - enum FPBranchCode { - BRANCH_F, - BRANCH_T, - BRANCH_FL, - BRANCH_TL, - BRANCH_INVALID - }; - - // MBlaze Condition Codes - enum CondCode { - // To be used with float branch True - FCOND_F, - FCOND_UN, - FCOND_EQ, - FCOND_UEQ, - FCOND_OLT, - FCOND_ULT, - FCOND_OLE, - FCOND_ULE, - FCOND_SF, - FCOND_NGLE, - FCOND_SEQ, - FCOND_NGL, - FCOND_LT, - FCOND_NGE, - FCOND_LE, - FCOND_NGT, - - // To be used with float branch False - // These conditions have the same mnemonics as the - // ones above, but are used with a branch False; - FCOND_T, - FCOND_OR, - FCOND_NEQ, - FCOND_OGL, - FCOND_UGE, - FCOND_OGE, - FCOND_UGT, - FCOND_OGT, - FCOND_ST, - FCOND_GLE, - FCOND_SNE, - FCOND_GL, - FCOND_NLT, - FCOND_GE, - FCOND_NLE, - FCOND_GT, - - // Only integer conditions - COND_EQ, - COND_GT, - COND_GE, - COND_LT, - COND_LE, - COND_NE, - COND_INVALID - }; - - // Turn condition code into conditional branch opcode. - inline static unsigned GetCondBranchFromCond(CondCode CC) { - switch (CC) { - default: llvm_unreachable("Unknown condition code"); - case COND_EQ: return MBlaze::BEQID; - case COND_NE: return MBlaze::BNEID; - case COND_GT: return MBlaze::BGTID; - case COND_GE: return MBlaze::BGEID; - case COND_LT: return MBlaze::BLTID; - case COND_LE: return MBlaze::BLEID; - } - } - - /// GetOppositeBranchCondition - Return the inverse of the specified cond, - /// e.g. turning COND_EQ to COND_NE.
- // CondCode GetOppositeBranchCondition(MBlaze::CondCode CC); - - /// MBlazeFCCToString - Map each FP condition code to its string - inline static const char *MBlazeFCCToString(MBlaze::CondCode CC) { - switch (CC) { - default: llvm_unreachable("Unknown condition code"); - case FCOND_F: - case FCOND_T: return "f"; - case FCOND_UN: - case FCOND_OR: return "un"; - case FCOND_EQ: - case FCOND_NEQ: return "eq"; - case FCOND_UEQ: - case FCOND_OGL: return "ueq"; - case FCOND_OLT: - case FCOND_UGE: return "olt"; - case FCOND_ULT: - case FCOND_OGE: return "ult"; - case FCOND_OLE: - case FCOND_UGT: return "ole"; - case FCOND_ULE: - case FCOND_OGT: return "ule"; - case FCOND_SF: - case FCOND_ST: return "sf"; - case FCOND_NGLE: - case FCOND_GLE: return "ngle"; - case FCOND_SEQ: - case FCOND_SNE: return "seq"; - case FCOND_NGL: - case FCOND_GL: return "ngl"; - case FCOND_LT: - case FCOND_NLT: return "lt"; - case FCOND_NGE: - case FCOND_GE: return "ge"; - case FCOND_LE: - case FCOND_NLE: return "nle"; - case FCOND_NGT: - case FCOND_GT: return "gt"; - } - } - - inline static bool isUncondBranchOpcode(int Opc) { - switch (Opc) { - default: return false; - case MBlaze::BRI: - case MBlaze::BRAI: - case MBlaze::BRID: - case MBlaze::BRAID: - return true; - } - } - - inline static bool isCondBranchOpcode(int Opc) { - switch (Opc) { - default: return false; - case MBlaze::BEQI: case MBlaze::BEQID: - case MBlaze::BNEI: case MBlaze::BNEID: - case MBlaze::BGTI: case MBlaze::BGTID: - case MBlaze::BGEI: case MBlaze::BGEID: - case MBlaze::BLTI: case MBlaze::BLTID: - case MBlaze::BLEI: case MBlaze::BLEID: - return true; - } - } -} - -class MBlazeInstrInfo : public MBlazeGenInstrInfo { - MBlazeTargetMachine &TM; - const MBlazeRegisterInfo RI; -public: - explicit MBlazeInstrInfo(MBlazeTargetMachine &TM); - - /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As - /// such, whenever a client has an instance of instruction info, it should - /// always be able to get register info as well (through this method). - /// - virtual const MBlazeRegisterInfo &getRegisterInfo() const { return RI; } - - /// isLoadFromStackSlot - If the specified machine instruction is a direct - /// load from a stack slot, return the virtual or physical register number of - /// the destination along with the FrameIndex of the loaded stack slot. If - /// not, return 0. This predicate must return 0 if the instruction has - /// any side effects other than loading from the stack slot. - virtual unsigned isLoadFromStackSlot(const MachineInstr *MI, - int &FrameIndex) const; - - /// isStoreToStackSlot - If the specified machine instruction is a direct - /// store to a stack slot, return the virtual or physical register number of - /// the source reg along with the FrameIndex of the loaded stack slot. If - /// not, return 0. This predicate must return 0 if the instruction has - /// any side effects other than storing to the stack slot.
-  /// isStoreToStackSlot - If the specified machine instruction is a direct
-  /// store to a stack slot, return the virtual or physical register number of
-  /// the source reg along with the FrameIndex of the stored stack slot. If
-  /// not, return 0. This predicate must return 0 if the instruction has
-  /// any side effects other than storing to the stack slot.
-  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
-                                      int &FrameIndex) const;
-
-  /// Branch Analysis
-  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
-                             MachineBasicBlock *&FBB,
-                             SmallVectorImpl<MachineOperand> &Cond,
-                             bool AllowModify) const;
-  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
-                                MachineBasicBlock *FBB,
-                                const SmallVectorImpl<MachineOperand> &Cond,
-                                DebugLoc DL) const;
-  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
-  virtual bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
-    const;
-
-  virtual void copyPhysReg(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator I, DebugLoc DL,
-                           unsigned DestReg, unsigned SrcReg,
-                           bool KillSrc) const;
-  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
-                                   MachineBasicBlock::iterator MBBI,
-                                   unsigned SrcReg, bool isKill, int FrameIndex,
-                                   const TargetRegisterClass *RC,
-                                   const TargetRegisterInfo *TRI) const;
-
-  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator MBBI,
-                                    unsigned DestReg, int FrameIndex,
-                                    const TargetRegisterClass *RC,
-                                    const TargetRegisterInfo *TRI) const;
-
-  /// Insert a nop instruction when a hazard condition is found.
-  virtual void insertNoop(MachineBasicBlock &MBB,
-                          MachineBasicBlock::iterator MI) const;
-
-  /// getGlobalBaseReg - Return a virtual register initialized with the
-  /// global base register value. Output instructions required to
-  /// initialize the register in the function entry block, if necessary.
-  ///
-  unsigned getGlobalBaseReg(MachineFunction *MF) const;
-};
-
-}
-
-#endif
diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.td b/lib/Target/MBlaze/MBlazeInstrInfo.td
deleted file mode 100644
index d27cd39..0000000
--- a/lib/Target/MBlaze/MBlazeInstrInfo.td
+++ /dev/null
@@ -1,1051 +0,0 @@
-//===-- MBlazeInstrInfo.td - MBlaze Instruction defs -------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Instruction format superclass -//===----------------------------------------------------------------------===// -include "MBlazeInstrFormats.td" - -//===----------------------------------------------------------------------===// -// MBlaze type profiles -//===----------------------------------------------------------------------===// - -// def SDTMBlazeSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>]>; -def SDT_MBlazeRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>; -def SDT_MBlazeIRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>; -def SDT_MBlazeJmpLink : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>; -def SDT_MBCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>; -def SDT_MBCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; - -//===----------------------------------------------------------------------===// -// MBlaze specific nodes -//===----------------------------------------------------------------------===// - -def MBlazeRet : SDNode<"MBlazeISD::Ret", SDT_MBlazeRet, - [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; -def MBlazeIRet : SDNode<"MBlazeISD::IRet", SDT_MBlazeIRet, - [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; - -def MBlazeJmpLink : SDNode<"MBlazeISD::JmpLink",SDT_MBlazeJmpLink, - [SDNPHasChain,SDNPOptInGlue,SDNPOutGlue, - SDNPVariadic]>; - -def MBWrapper : SDNode<"MBlazeISD::Wrap", SDTIntUnaryOp>; - -def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MBCallSeqStart, - [SDNPHasChain, SDNPOutGlue]>; - -def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MBCallSeqEnd, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; - -//===----------------------------------------------------------------------===// -// MBlaze Instruction Predicate Definitions. -//===----------------------------------------------------------------------===// -// def HasPipe3 : Predicate<"Subtarget.hasPipe3()">; -def HasBarrel : Predicate<"Subtarget.hasBarrel()">; -// def NoBarrel : Predicate<"!Subtarget.hasBarrel()">; -def HasDiv : Predicate<"Subtarget.hasDiv()">; -def HasMul : Predicate<"Subtarget.hasMul()">; -// def HasFSL : Predicate<"Subtarget.hasFSL()">; -// def HasEFSL : Predicate<"Subtarget.hasEFSL()">; -// def HasMSRSet : Predicate<"Subtarget.hasMSRSet()">; -// def HasException : Predicate<"Subtarget.hasException()">; -def HasPatCmp : Predicate<"Subtarget.hasPatCmp()">; -def HasFPU : Predicate<"Subtarget.hasFPU()">; -// def HasESR : Predicate<"Subtarget.hasESR()">; -// def HasPVR : Predicate<"Subtarget.hasPVR()">; -def HasMul64 : Predicate<"Subtarget.hasMul64()">; -def HasSqrt : Predicate<"Subtarget.hasSqrt()">; -// def HasMMU : Predicate<"Subtarget.hasMMU()">; - -//===----------------------------------------------------------------------===// -// MBlaze Operand, Complex Patterns and Transformations Definitions. 
-//===----------------------------------------------------------------------===//
-
-def MBlazeMemAsmOperand : AsmOperandClass {
-  let Name = "Mem";
-  let SuperClasses = [];
-}
-
-def MBlazeFslAsmOperand : AsmOperandClass {
-  let Name = "Fsl";
-  let SuperClasses = [];
-}
-
-// Instruction operand types
-def brtarget   : Operand<OtherVT>;
-def calltarget : Operand<i32>;
-def simm16     : Operand<i32>;
-def uimm5      : Operand<i32>;
-def uimm15     : Operand<i32>;
-def fimm       : Operand<f32>;
-
-// Unsigned Operand
-def uimm16 : Operand<i32> {
-  let PrintMethod = "printUnsignedImm";
-}
-
-// FSL Operand
-def fslimm : Operand<i32> {
-  let PrintMethod = "printFSLImm";
-  let ParserMatchClass = MBlazeFslAsmOperand;
-}
-
-// Address operand
-def memri : Operand<i32> {
-  let PrintMethod = "printMemOperand";
-  let MIOperandInfo = (ops GPR, simm16);
-  let ParserMatchClass = MBlazeMemAsmOperand;
-}
-
-def memrr : Operand<i32> {
-  let PrintMethod = "printMemOperand";
-  let MIOperandInfo = (ops GPR, GPR);
-  let ParserMatchClass = MBlazeMemAsmOperand;
-}
-
-// Node immediate fits as a 16-bit sign-extended value on the target.
-def immSExt16 : PatLeaf<(imm), [{
-  return (N->getZExtValue() >> 16) == 0;
-}]>;
-
-// Node immediate fits as a 16-bit zero-extended value on the target;
-// only the lower 16 bits of the node immediate are used.
-def immZExt16 : PatLeaf<(imm), [{
-  return (N->getZExtValue() >> 16) == 0;
-}]>;
-
-// FSL immediate field must fit in 4 bits.
-def immZExt4 : PatLeaf<(imm), [{
-  return N->getZExtValue() == ((N->getZExtValue()) & 0xf);
-}]>;
-
-// shamt field must fit in 5 bits.
-def immZExt5 : PatLeaf<(imm), [{
-  return N->getZExtValue() == ((N->getZExtValue()) & 0x1f);
-}]>;
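// A hedged sketch, not part of the original file: a PatLeaf couples an
// immediate operand with a C++ predicate over the selected DAG node, so a
// hypothetical wider variant would follow the same shape as immZExt4/5:
//
//   def immZExt8 : PatLeaf<(imm), [{
//     return (N->getZExtValue() >> 8) == 0;  // value fits in 8 bits
//   }]>;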
-// MBlaze Address Mode: the SDNode frameindex can possibly be matched here,
-// since load and store instructions use it for stack accesses.
-def iaddr : ComplexPattern<i32, 2, "SelectAddrRegImm", [frameindex], []>;
-def xaddr : ComplexPattern<i32, 2, "SelectAddrRegReg", [], []>;
-
-//===----------------------------------------------------------------------===//
-// Pseudo instructions
-//===----------------------------------------------------------------------===//
-
-// As stack adjustment is always done with addik, we need a 16-bit immediate.
-let Defs = [R1], Uses = [R1] in {
-def ADJCALLSTACKDOWN : MBlazePseudo<(outs), (ins simm16:$amt),
-                                    "#ADJCALLSTACKDOWN $amt",
-                                    [(callseq_start timm:$amt)]>;
-def ADJCALLSTACKUP   : MBlazePseudo<(outs),
-                                    (ins uimm16:$amt1, simm16:$amt2),
-                                    "#ADJCALLSTACKUP $amt1",
-                                    [(callseq_end timm:$amt1, timm:$amt2)]>;
-}
-
-//===----------------------------------------------------------------------===//
-// Instruction-specific formats
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions
-//===----------------------------------------------------------------------===//
-class Arith<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
-            InstrItinClass itin> :
-  TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
-
-class ArithI<bits<6> op, string instr_asm, SDNode OpNode,
-             Operand Od, PatLeaf imm_type> :
-  TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [(set GPR:$dst, (OpNode GPR:$b, imm_type:$c))], IIC_ALU>;
-
-class ArithI32<bits<6> op, string instr_asm, Operand Od, PatLeaf imm_type> :
-  TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [], IIC_ALU>;
-
-class ShiftI<bits<6> op, bits<2> flags, string instr_asm, SDNode OpNode,
-             Operand Od, PatLeaf imm_type> :
-  SHT<op, flags, (outs GPR:$dst), (ins GPR:$b, Od:$c),
-      !strconcat(instr_asm, " $dst, $b, $c"),
-      [(set GPR:$dst, (OpNode GPR:$b, imm_type:$c))], IIC_SHT>;
-
-class ArithR<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
-             InstrItinClass itin> :
-  TAR<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
-      !strconcat(instr_asm, " $dst, $c, $b"),
-      [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
-
-class ArithRI<bits<6> op, string instr_asm, SDNode OpNode,
-              Operand Od, PatLeaf imm_type> :
-  TBR<op, (outs GPR:$dst), (ins Od:$b, GPR:$c),
-      !strconcat(instr_asm, " $dst, $c, $b"),
-      [(set GPR:$dst, (OpNode imm_type:$b, GPR:$c))], IIC_ALU>;
-
-class ArithN<bits<6> op, bits<11> flags, string instr_asm,
-             InstrItinClass itin> :
-  TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [], itin>;
-
-class ArithNI<bits<6> op, string instr_asm, Operand Od, PatLeaf imm_type> :
-  TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [], IIC_ALU>;
-
-class ArithRN<bits<6> op, bits<11> flags, string instr_asm,
-              InstrItinClass itin> :
-  TAR<op, flags, (outs GPR:$dst), (ins GPR:$c, GPR:$b),
-      !strconcat(instr_asm, " $dst, $b, $c"),
-      [], itin>;
-
-class ArithRNI<bits<6> op, string instr_asm, Operand Od, PatLeaf imm_type> :
-  TBR<op, (outs GPR:$dst), (ins Od:$c, GPR:$b),
-      !strconcat(instr_asm, " $dst, $b, $c"),
-      [], IIC_ALU>;
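// For orientation (a sketch, not original content): each class above is a
// parameterized template that later defs instantiate. For example the
//   def ADDK : Arith<0x04, 0x000, "addk ", add, IIC_ALU>;
// definition that appears further down expands to a TA-format instruction
// printed as "addk $dst, $b, $c", whose pattern
// (set GPR:$dst, (add GPR:$b, GPR:$c)) is what instruction selection matches.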
-//===----------------------------------------------------------------------===//
-// Misc Arithmetic Instructions
-//===----------------------------------------------------------------------===//
-
-class Logic<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode> :
-  TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], IIC_ALU>;
-
-class LogicI<bits<6> op, string instr_asm, SDNode OpNode> :
-  TB<op, (outs GPR:$dst), (ins GPR:$b, uimm16:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [(set GPR:$dst, (OpNode GPR:$b, immZExt16:$c))],
-     IIC_ALU>;
-
-class LogicI32<bits<6> op, string instr_asm> :
-  TB<op, (outs GPR:$dst), (ins GPR:$b, uimm16:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [], IIC_ALU>;
-
-class PatCmp<bits<6> op, bits<11> flags, string instr_asm> :
-  TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
-     !strconcat(instr_asm, " $dst, $b, $c"),
-     [], IIC_ALU>;
-
-//===----------------------------------------------------------------------===//
-// Memory Access Instructions
-//===----------------------------------------------------------------------===//
-
-let mayLoad = 1 in {
-class LoadM<bits<6> op, bits<11> flags, string instr_asm> :
-  TA<op, flags, (outs GPR:$dst), (ins memrr:$addr),
-     !strconcat(instr_asm, " $dst, $addr"),
-     [], IIC_MEMl>;
-}
-
-class LoadMI<bits<6> op, string instr_asm, PatFrag OpNode> :
-  TB<op, (outs GPR:$dst), (ins memri:$addr),
-     !strconcat(instr_asm, " $dst, $addr"),
-     [(set (i32 GPR:$dst), (OpNode iaddr:$addr))], IIC_MEMl>;
-
-let mayStore = 1 in {
-class StoreM<bits<6> op, bits<11> flags, string instr_asm> :
-  TA<op, flags, (outs), (ins GPR:$dst, memrr:$addr),
-     !strconcat(instr_asm, " $dst, $addr"),
-     [], IIC_MEMs>;
-}
-
-class StoreMI<bits<6> op, string instr_asm, PatFrag OpNode> :
-  TB<op, (outs), (ins GPR:$dst, memri:$addr),
-     !strconcat(instr_asm, " $dst, $addr"),
-     [(OpNode (i32 GPR:$dst), iaddr:$addr)], IIC_MEMs>;
-
-//===----------------------------------------------------------------------===//
-// Branch Instructions
-//===----------------------------------------------------------------------===//
-class Branch<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
-  TA<op, flags, (outs), (ins GPR:$target),
-     !strconcat(instr_asm, " $target"),
-     [], IIC_BR> {
-  let rd = 0x0;
-  let ra = br;
-  let Form = FCCR;
-}
-
-class BranchI<bits<6> op, bits<5> br, string instr_asm> :
-  TB<op, (outs), (ins brtarget:$target),
-     !strconcat(instr_asm, " $target"),
-     [], IIC_BR> {
-  let rd = 0;
-  let ra = br;
-  let Form = FCCI;
-}
-
-//===----------------------------------------------------------------------===//
-// Branch and Link Instructions
-//===----------------------------------------------------------------------===//
-class BranchL<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
-  TA<op, flags, (outs), (ins GPR:$link, GPR:$target),
-     !strconcat(instr_asm, " $link, $target"),
-     [], IIC_BRl> {
-  let ra = br;
-  let Form = FRCR;
-}
-
-class BranchLI<bits<6> op, bits<5> br, string instr_asm> :
-  TB<op, (outs), (ins GPR:$link, calltarget:$target),
-     !strconcat(instr_asm, " $link, $target"),
-     [], IIC_BRl> {
-  let ra = br;
-  let Form = FRCI;
-}
-
-//===----------------------------------------------------------------------===//
-// Conditional Branch Instructions
-//===----------------------------------------------------------------------===//
-class BranchC<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
-  TA<op, flags, (outs),
-     (ins GPR:$a, GPR:$b),
-     !strconcat(instr_asm, " $a, $b"),
-     [], IIC_BRc> {
-  let rd = br;
-  let Form = FCRR;
-}
-
-class BranchCI<bits<6> op, bits<5> br, string instr_asm> :
-  TB<op, (outs), (ins GPR:$a, brtarget:$offset),
-
!strconcat(instr_asm, " $a, $offset"), - [], IIC_BRc> { - let rd = br; - let Form = FCRI; -} - -//===----------------------------------------------------------------------===// -// MBlaze arithmetic instructions -//===----------------------------------------------------------------------===// - -let isCommutable = 1, isAsCheapAsAMove = 1 in { - def ADDK : Arith<0x04, 0x000, "addk ", add, IIC_ALU>; - def AND : Logic<0x21, 0x000, "and ", and>; - def OR : Logic<0x20, 0x000, "or ", or>; - def XOR : Logic<0x22, 0x000, "xor ", xor>; - - let Predicates=[HasPatCmp] in { - def PCMPBF : PatCmp<0x20, 0x400, "pcmpbf ">; - def PCMPEQ : PatCmp<0x22, 0x400, "pcmpeq ">; - def PCMPNE : PatCmp<0x23, 0x400, "pcmpne ">; - } - - let Defs = [CARRY] in { - def ADD : Arith<0x00, 0x000, "add ", addc, IIC_ALU>; - - let Uses = [CARRY] in { - def ADDC : Arith<0x02, 0x000, "addc ", adde, IIC_ALU>; - } - } - - let Uses = [CARRY] in { - def ADDKC : ArithN<0x06, 0x000, "addkc ", IIC_ALU>; - } -} - -let isAsCheapAsAMove = 1 in { - def ANDN : ArithN<0x23, 0x000, "andn ", IIC_ALU>; - def CMP : ArithN<0x05, 0x001, "cmp ", IIC_ALU>; - def CMPU : ArithN<0x05, 0x003, "cmpu ", IIC_ALU>; - def RSUBK : ArithR<0x05, 0x000, "rsubk ", sub, IIC_ALU>; - - let Defs = [CARRY] in { - def RSUB : ArithR<0x01, 0x000, "rsub ", subc, IIC_ALU>; - - let Uses = [CARRY] in { - def RSUBC : ArithR<0x03, 0x000, "rsubc ", sube, IIC_ALU>; - } - } - - let Uses = [CARRY] in { - def RSUBKC : ArithRN<0x07, 0x000, "rsubkc ", IIC_ALU>; - } -} - -let isCommutable = 1, Predicates=[HasMul] in { - def MUL : Arith<0x10, 0x000, "mul ", mul, IIC_ALUm>; -} - -let isCommutable = 1, Predicates=[HasMul,HasMul64] in { - def MULH : Arith<0x10, 0x001, "mulh ", mulhs, IIC_ALUm>; - def MULHU : Arith<0x10, 0x003, "mulhu ", mulhu, IIC_ALUm>; -} - -let Predicates=[HasMul,HasMul64] in { - def MULHSU : ArithN<0x10, 0x002, "mulhsu ", IIC_ALUm>; -} - -let Predicates=[HasBarrel] in { - def BSRL : Arith<0x11, 0x000, "bsrl ", srl, IIC_SHT>; - def BSRA : Arith<0x11, 0x200, "bsra ", sra, IIC_SHT>; - def BSLL : Arith<0x11, 0x400, "bsll ", shl, IIC_SHT>; - def BSRLI : ShiftI<0x19, 0x0, "bsrli ", srl, uimm5, immZExt5>; - def BSRAI : ShiftI<0x19, 0x1, "bsrai ", sra, uimm5, immZExt5>; - def BSLLI : ShiftI<0x19, 0x2, "bslli ", shl, uimm5, immZExt5>; -} - -let Predicates=[HasDiv] in { - def IDIV : ArithR<0x12, 0x000, "idiv ", sdiv, IIC_ALUd>; - def IDIVU : ArithR<0x12, 0x002, "idivu ", udiv, IIC_ALUd>; -} - -//===----------------------------------------------------------------------===// -// MBlaze immediate mode arithmetic instructions -//===----------------------------------------------------------------------===// - -let isAsCheapAsAMove = 1 in { - def ADDIK : ArithI<0x0C, "addik ", add, simm16, immSExt16>; - def RSUBIK : ArithRI<0x0D, "rsubik ", sub, simm16, immSExt16>; - def ANDNI : ArithNI<0x2B, "andni ", uimm16, immZExt16>; - def ANDI : LogicI<0x29, "andi ", and>; - def ORI : LogicI<0x28, "ori ", or>; - def XORI : LogicI<0x2A, "xori ", xor>; - - let Defs = [CARRY] in { - def ADDI : ArithI<0x08, "addi ", addc, simm16, immSExt16>; - def RSUBI : ArithRI<0x09, "rsubi ", subc, simm16, immSExt16>; - - let Uses = [CARRY] in { - def ADDIC : ArithI<0x0A, "addic ", adde, simm16, immSExt16>; - def RSUBIC : ArithRI<0x0B, "rsubic ", sube, simm16, immSExt16>; - } - } - - let Uses = [CARRY] in { - def ADDIKC : ArithNI<0x0E, "addikc ", simm16, immSExt16>; - def RSUBIKC : ArithRNI<0x0F, "rsubikc", simm16, immSExt16>; - } -} - -let Predicates=[HasMul] in { - def MULI : ArithI<0x18, "muli ", mul, simm16, 
immSExt16>; -} - -//===----------------------------------------------------------------------===// -// MBlaze memory access instructions -//===----------------------------------------------------------------------===// - -let canFoldAsLoad = 1, isReMaterializable = 1 in { - let neverHasSideEffects = 1 in { - def LBU : LoadM<0x30, 0x000, "lbu ">; - def LBUR : LoadM<0x30, 0x200, "lbur ">; - - def LHU : LoadM<0x31, 0x000, "lhu ">; - def LHUR : LoadM<0x31, 0x200, "lhur ">; - - def LW : LoadM<0x32, 0x000, "lw ">; - def LWR : LoadM<0x32, 0x200, "lwr ">; - - let Defs = [CARRY] in { - def LWX : LoadM<0x32, 0x400, "lwx ">; - } - } - - def LBUI : LoadMI<0x38, "lbui ", zextloadi8>; - def LHUI : LoadMI<0x39, "lhui ", zextloadi16>; - def LWI : LoadMI<0x3A, "lwi ", load>; -} - -def SB : StoreM<0x34, 0x000, "sb ">; -def SBR : StoreM<0x34, 0x200, "sbr ">; - -def SH : StoreM<0x35, 0x000, "sh ">; -def SHR : StoreM<0x35, 0x200, "shr ">; - -def SW : StoreM<0x36, 0x000, "sw ">; -def SWR : StoreM<0x36, 0x200, "swr ">; - -let Defs = [CARRY] in { - def SWX : StoreM<0x36, 0x400, "swx ">; -} - -def SBI : StoreMI<0x3C, "sbi ", truncstorei8>; -def SHI : StoreMI<0x3D, "shi ", truncstorei16>; -def SWI : StoreMI<0x3E, "swi ", store>; - -//===----------------------------------------------------------------------===// -// MBlaze branch instructions -//===----------------------------------------------------------------------===// - -let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in { - def BRI : BranchI<0x2E, 0x00, "bri ">; - def BRAI : BranchI<0x2E, 0x08, "brai ">; -} - -let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in { - def BEQI : BranchCI<0x2F, 0x00, "beqi ">; - def BNEI : BranchCI<0x2F, 0x01, "bnei ">; - def BLTI : BranchCI<0x2F, 0x02, "blti ">; - def BLEI : BranchCI<0x2F, 0x03, "blei ">; - def BGTI : BranchCI<0x2F, 0x04, "bgti ">; - def BGEI : BranchCI<0x2F, 0x05, "bgei ">; -} - -let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, hasCtrlDep = 1, - isBarrier = 1 in { - def BR : Branch<0x26, 0x00, 0x000, "br ">; - def BRA : Branch<0x26, 0x08, 0x000, "bra ">; -} - -let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, hasCtrlDep = 1 in { - def BEQ : BranchC<0x27, 0x00, 0x000, "beq ">; - def BNE : BranchC<0x27, 0x01, 0x000, "bne ">; - def BLT : BranchC<0x27, 0x02, 0x000, "blt ">; - def BLE : BranchC<0x27, 0x03, 0x000, "ble ">; - def BGT : BranchC<0x27, 0x04, 0x000, "bgt ">; - def BGE : BranchC<0x27, 0x05, 0x000, "bge ">; -} - -let isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasCtrlDep = 1, - isBarrier = 1 in { - def BRID : BranchI<0x2E, 0x10, "brid ">; - def BRAID : BranchI<0x2E, 0x18, "braid ">; -} - -let isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasCtrlDep = 1 in { - def BEQID : BranchCI<0x2F, 0x10, "beqid ">; - def BNEID : BranchCI<0x2F, 0x11, "bneid ">; - def BLTID : BranchCI<0x2F, 0x12, "bltid ">; - def BLEID : BranchCI<0x2F, 0x13, "bleid ">; - def BGTID : BranchCI<0x2F, 0x14, "bgtid ">; - def BGEID : BranchCI<0x2F, 0x15, "bgeid ">; -} - -let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, - hasDelaySlot = 1, hasCtrlDep = 1, isBarrier = 1 in { - def BRD : Branch<0x26, 0x10, 0x000, "brd ">; - def BRAD : Branch<0x26, 0x18, 0x000, "brad ">; -} - -let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, - hasDelaySlot = 1, hasCtrlDep = 1 in { - def BEQD : BranchC<0x27, 0x10, 0x000, "beqd ">; - def BNED : BranchC<0x27, 0x11, 0x000, "bned ">; - def BLTD : BranchC<0x27, 0x12, 0x000, "bltd ">; - def BLED : BranchC<0x27, 0x13, 0x000, "bled ">; - def BGTD : 
BranchC<0x27, 0x14, 0x000, "bgtd ">; - def BGED : BranchC<0x27, 0x15, 0x000, "bged ">; -} - -let isCall =1, hasDelaySlot = 1, - Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,CARRY], - Uses = [R1] in { - def BRLID : BranchLI<0x2E, 0x14, "brlid ">; - def BRALID : BranchLI<0x2E, 0x1C, "bralid ">; -} - -let isCall = 1, hasDelaySlot = 1, - Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,CARRY], - Uses = [R1] in { - def BRLD : BranchL<0x26, 0x14, 0x000, "brld ">; - def BRALD : BranchL<0x26, 0x1C, 0x000, "brald ">; -} - -let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, - rd=0x10, Form=FCRI in { - def RTSD : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm), - "rtsd $target, $imm", - [], - IIC_BR>; -} - -let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, - rd=0x11, Form=FCRI in { - def RTID : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm), - "rtid $target, $imm", - [], - IIC_BR>; -} - -let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, - rd=0x12, Form=FCRI in { - def RTBD : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm), - "rtbd $target, $imm", - [], - IIC_BR>; -} - -let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, - rd=0x14, Form=FCRI in { - def RTED : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm), - "rted $target, $imm", - [], - IIC_BR>; -} - -//===----------------------------------------------------------------------===// -// MBlaze misc instructions -//===----------------------------------------------------------------------===// - -let neverHasSideEffects = 1 in { - def NOP : MBlazeInst<0x20, FC, (outs), (ins), "nop ", [], IIC_ALU>; -} - -let Predicates=[HasPatCmp] in { - def CLZ : TCLZ<0x24, 0x00E0, (outs GPR:$dst), (ins GPR:$src), - "clz $dst, $src", [], IIC_ALU>; -} - -def IMEMBAR : MBAR<0x2E, 0x0420004, (outs), (ins), "mbar 2", [], IIC_ALU>; -def DMEMBAR : MBAR<0x2E, 0x0220004, (outs), (ins), "mbar 1", [], IIC_ALU>; -def IDMEMBAR : MBAR<0x2E, 0x0020004, (outs), (ins), "mbar 0", [], IIC_ALU>; - -let usesCustomInserter = 1 in { - def Select_CC : MBlazePseudo<(outs GPR:$dst), - (ins GPR:$T, GPR:$F, GPR:$CMP, i32imm:$CC), // F T reversed - "; SELECT_CC PSEUDO!", - []>; - - def ShiftL : MBlazePseudo<(outs GPR:$dst), - (ins GPR:$L, GPR:$R), - "; ShiftL PSEUDO!", - []>; - - def ShiftRA : MBlazePseudo<(outs GPR:$dst), - (ins GPR:$L, GPR:$R), - "; ShiftRA PSEUDO!", - []>; - - def ShiftRL : MBlazePseudo<(outs GPR:$dst), - (ins GPR:$L, GPR:$R), - "; ShiftRL PSEUDO!", - []>; -} - -let rb = 0 in { - def SEXT16 : TA<0x24, 0x061, (outs GPR:$dst), (ins GPR:$src), - "sext16 $dst, $src", [], IIC_ALU>; - def SEXT8 : TA<0x24, 0x060, (outs GPR:$dst), (ins GPR:$src), - "sext8 $dst, $src", [], IIC_ALU>; - let Defs = [CARRY] in { - def SRL : TA<0x24, 0x041, (outs GPR:$dst), (ins GPR:$src), - "srl $dst, $src", [], IIC_ALU>; - def SRA : TA<0x24, 0x001, (outs GPR:$dst), (ins GPR:$src), - "sra $dst, $src", [], IIC_ALU>; - let Uses = [CARRY] in { - def SRC : TA<0x24, 0x021, (outs GPR:$dst), (ins GPR:$src), - "src $dst, $src", [], IIC_ALU>; - } - } -} - -let isCodeGenOnly=1 in { - def ADDIK32 : ArithI32<0x08, "addik ", simm16, immSExt16>; - def ORI32 : LogicI32<0x28, "ori ">; - def BRLID32 : BranchLI<0x2E, 0x14, "brlid ">; -} - -//===----------------------------------------------------------------------===// -// Misc. 
instructions -//===----------------------------------------------------------------------===// -let Form=FRCS in { - def MFS : SPC<0x25, 0x2, (outs GPR:$dst), (ins SPR:$src), - "mfs $dst, $src", [], IIC_ALU>; -} - -let Form=FCRCS in { - def MTS : SPC<0x25, 0x3, (outs SPR:$dst), (ins GPR:$src), - "mts $dst, $src", [], IIC_ALU>; -} - -def MSRSET : MSR<0x25, 0x20, (outs GPR:$dst), (ins uimm15:$set), - "msrset $dst, $set", [], IIC_ALU>; - -def MSRCLR : MSR<0x25, 0x22, (outs GPR:$dst), (ins uimm15:$clr), - "msrclr $dst, $clr", [], IIC_ALU>; - -let rd=0x0, Form=FCRR in { - def WDC : TA<0x24, 0x64, (outs), (ins GPR:$a, GPR:$b), - "wdc $a, $b", [], IIC_WDC>; - def WDCF : TA<0x24, 0x74, (outs), (ins GPR:$a, GPR:$b), - "wdc.flush $a, $b", [], IIC_WDC>; - def WDCC : TA<0x24, 0x66, (outs), (ins GPR:$a, GPR:$b), - "wdc.clear $a, $b", [], IIC_WDC>; - def WIC : TA<0x24, 0x68, (outs), (ins GPR:$a, GPR:$b), - "wic $a, $b", [], IIC_WDC>; -} - -def BRK : BranchL<0x26, 0x0C, 0x000, "brk ">; -def BRKI : BranchLI<0x2E, 0x0C, "brki ">; - -def IMM : MBlazeInst<0x2C, FCCI, (outs), (ins simm16:$imm), - "imm $imm", [], IIC_ALU>; - -//===----------------------------------------------------------------------===// -// Pseudo instructions for atomic operations -//===----------------------------------------------------------------------===// -let usesCustomInserter=1 in { - def CAS32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$cmp, GPR:$swp), - "# atomic compare and swap", - [(set GPR:$dst, (atomic_cmp_swap_32 GPR:$ptr, GPR:$cmp, GPR:$swp))]>; - - def SWP32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$swp), - "# atomic swap", - [(set GPR:$dst, (atomic_swap_32 GPR:$ptr, GPR:$swp))]>; - - def LAA32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), - "# atomic load and add", - [(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$val))]>; - - def LAS32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), - "# atomic load and sub", - [(set GPR:$dst, (atomic_load_sub_32 GPR:$ptr, GPR:$val))]>; - - def LAD32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), - "# atomic load and and", - [(set GPR:$dst, (atomic_load_and_32 GPR:$ptr, GPR:$val))]>; - - def LAO32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), - "# atomic load and or", - [(set GPR:$dst, (atomic_load_or_32 GPR:$ptr, GPR:$val))]>; - - def LAX32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), - "# atomic load and xor", - [(set GPR:$dst, (atomic_load_xor_32 GPR:$ptr, GPR:$val))]>; - - def LAN32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), - "# atomic load and nand", - [(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$val))]>; - - def MEMBARRIER : MBlazePseudo<(outs), (ins), - "# memory barrier", []>; -} - -//===----------------------------------------------------------------------===// -// Arbitrary patterns that map to one or more instructions -//===----------------------------------------------------------------------===// - -// Small immediates -def : Pat<(i32 0), (ADDK (i32 R0), (i32 R0))>; -def : Pat<(i32 immSExt16:$imm), (ADDIK (i32 R0), imm:$imm)>; -def : Pat<(i32 immZExt16:$imm), (ORI (i32 R0), imm:$imm)>; - -// Arbitrary immediates -def : Pat<(i32 imm:$imm), (ADDIK (i32 R0), imm:$imm)>; - -// In register sign extension -def : Pat<(sext_inreg GPR:$src, i16), (SEXT16 GPR:$src)>; -def : Pat<(sext_inreg GPR:$src, i8), (SEXT8 GPR:$src)>; - -// Call -def : Pat<(MBlazeJmpLink (i32 tglobaladdr:$dst)), - (BRLID (i32 R15), tglobaladdr:$dst)>; - -def : Pat<(MBlazeJmpLink (i32 texternalsym:$dst)), - 
(BRLID (i32 R15), texternalsym:$dst)>; - -def : Pat<(MBlazeJmpLink GPR:$dst), - (BRALD (i32 R15), GPR:$dst)>; - -// Shift Instructions -def : Pat<(shl GPR:$L, GPR:$R), (ShiftL GPR:$L, GPR:$R)>; -def : Pat<(sra GPR:$L, GPR:$R), (ShiftRA GPR:$L, GPR:$R)>; -def : Pat<(srl GPR:$L, GPR:$R), (ShiftRL GPR:$L, GPR:$R)>; - -// SET_CC operations -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETEQ), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 1)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETNE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 2)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 3)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETLT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 4)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 5)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETLE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 6)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETUGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU (i32 R0), GPR:$L), 3)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETULT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU (i32 R0), GPR:$L), 4)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETUGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU (i32 R0), GPR:$L), 5)>; -def : Pat<(setcc (i32 GPR:$L), (i32 0), SETULE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU (i32 R0), GPR:$L), 6)>; - -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETEQ), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 1)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETNE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 2)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 3)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETLT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 4)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 5)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETLE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 6)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETUGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, (i32 R0)), 3)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETULT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, (i32 R0)), 4)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETUGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, (i32 R0)), 5)>; -def : Pat<(setcc (i32 0), (i32 GPR:$R), SETULE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, (i32 R0)), 6)>; - -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMP GPR:$R, GPR:$L), 1)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETNE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMP GPR:$R, GPR:$L), 2)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMP GPR:$R, GPR:$L), 3)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETLT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMP GPR:$R, GPR:$L), 4)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMP GPR:$R, GPR:$L), 5)>; -def : Pat<(setcc (i32 
GPR:$L), (i32 GPR:$R), SETLE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMP GPR:$R, GPR:$L), 6)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETUGT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, GPR:$L), 3)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETULT), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, GPR:$L), 4)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETUGE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, GPR:$L), 5)>; -def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETULE), - (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), - (CMPU GPR:$R, GPR:$L), 6)>; - -// SELECT operations -def : Pat<(select (i32 GPR:$C), (i32 GPR:$T), (i32 GPR:$F)), - (Select_CC GPR:$T, GPR:$F, GPR:$C, 2)>; - -// SELECT_CC -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETEQ), - (Select_CC GPR:$T, GPR:$F, GPR:$L, 1)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETNE), - (Select_CC GPR:$T, GPR:$F, GPR:$L, 2)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETGT), - (Select_CC GPR:$T, GPR:$F, GPR:$L, 3)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETLT), - (Select_CC GPR:$T, GPR:$F, GPR:$L, 4)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETGE), - (Select_CC GPR:$T, GPR:$F, GPR:$L, 5)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETLE), - (Select_CC GPR:$T, GPR:$F, GPR:$L, 6)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETUGT), - (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 3)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETULT), - (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 4)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETUGE), - (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 5)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 0), - (i32 GPR:$T), (i32 GPR:$F), SETULE), - (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 6)>; - -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETEQ), - (Select_CC GPR:$T, GPR:$F, GPR:$R, 1)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETNE), - (Select_CC GPR:$T, GPR:$F, GPR:$R, 2)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETGT), - (Select_CC GPR:$T, GPR:$F, GPR:$R, 3)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETLT), - (Select_CC GPR:$T, GPR:$F, GPR:$R, 4)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETGE), - (Select_CC GPR:$T, GPR:$F, GPR:$R, 5)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETLE), - (Select_CC GPR:$T, GPR:$F, GPR:$R, 6)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETUGT), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 3)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETULT), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 4)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETUGE), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 5)>; -def : Pat<(selectcc (i32 0), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETULE), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 6)>; - -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETEQ), - (Select_CC GPR:$T, 
GPR:$F, (CMP GPR:$R, GPR:$L), 1)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETNE), - (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 2)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETGT), - (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 3)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETLT), - (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 4)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETGE), - (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 5)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETLE), - (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 6)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETUGT), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 3)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETULT), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 4)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETUGE), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 5)>; -def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R), - (i32 GPR:$T), (i32 GPR:$F), SETULE), - (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 6)>; - -// Ret instructions -def : Pat<(MBlazeRet GPR:$target), (RTSD GPR:$target, 0x8)>; -def : Pat<(MBlazeIRet GPR:$target), (RTID GPR:$target, 0x0)>; - -// BR instructions -def : Pat<(br bb:$T), (BRID bb:$T)>; -def : Pat<(brind GPR:$T), (BRAD GPR:$T)>; - -// BRCOND instructions -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETEQ), bb:$T), - (BEQID GPR:$L, bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETNE), bb:$T), - (BNEID GPR:$L, bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETGT), bb:$T), - (BGTID GPR:$L, bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETLT), bb:$T), - (BLTID GPR:$L, bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETGE), bb:$T), - (BGEID GPR:$L, bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETLE), bb:$T), - (BLEID GPR:$L, bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETUGT), bb:$T), - (BGTID (CMPU (i32 R0), GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETULT), bb:$T), - (BLTID (CMPU (i32 R0), GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETUGE), bb:$T), - (BGEID (CMPU (i32 R0), GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETULE), bb:$T), - (BLEID (CMPU (i32 R0), GPR:$L), bb:$T)>; - -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETEQ), bb:$T), - (BEQID GPR:$R, bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETNE), bb:$T), - (BNEID GPR:$R, bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETGT), bb:$T), - (BGTID GPR:$R, bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETLT), bb:$T), - (BLTID GPR:$R, bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETGE), bb:$T), - (BGEID GPR:$R, bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETLE), bb:$T), - (BLEID GPR:$R, bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETUGT), bb:$T), - (BGTID (CMPU GPR:$R, (i32 R0)), bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETULT), bb:$T), - (BLTID (CMPU GPR:$R, (i32 R0)), bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETUGE), bb:$T), - (BGEID (CMPU GPR:$R, (i32 R0)), bb:$T)>; -def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETULE), bb:$T), - (BLEID (CMPU GPR:$R, 
(i32 R0)), bb:$T)>; - -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ), bb:$T), - (BEQID (CMP GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETNE), bb:$T), - (BNEID (CMP GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETGT), bb:$T), - (BGTID (CMP GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETLT), bb:$T), - (BLTID (CMP GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETGE), bb:$T), - (BGEID (CMP GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETLE), bb:$T), - (BLEID (CMP GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETUGT), bb:$T), - (BGTID (CMPU GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETULT), bb:$T), - (BLTID (CMPU GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETUGE), bb:$T), - (BGEID (CMPU GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETULE), bb:$T), - (BLEID (CMPU GPR:$R, GPR:$L), bb:$T)>; -def : Pat<(brcond (i32 GPR:$C), bb:$T), - (BNEID GPR:$C, bb:$T)>; - -// Jump tables, global addresses, and constant pools -def : Pat<(MBWrapper tglobaladdr:$in), (ORI (i32 R0), tglobaladdr:$in)>; -def : Pat<(MBWrapper tjumptable:$in), (ORI (i32 R0), tjumptable:$in)>; -def : Pat<(MBWrapper tconstpool:$in), (ORI (i32 R0), tconstpool:$in)>; - -// Misc instructions -def : Pat<(and (i32 GPR:$lh), (not (i32 GPR:$rh))),(ANDN GPR:$lh, GPR:$rh)>; - -// Convert any extend loads into zero extend loads -def : Pat<(extloadi8 iaddr:$src), (i32 (LBUI iaddr:$src))>; -def : Pat<(extloadi16 iaddr:$src), (i32 (LHUI iaddr:$src))>; -def : Pat<(extloadi8 xaddr:$src), (i32 (LBU xaddr:$src))>; -def : Pat<(extloadi16 xaddr:$src), (i32 (LHU xaddr:$src))>; - -// 32-bit load and store -def : Pat<(store (i32 GPR:$dst), xaddr:$addr), (SW GPR:$dst, xaddr:$addr)>; -def : Pat<(load xaddr:$addr), (i32 (LW xaddr:$addr))>; - -// 16-bit load and store -def : Pat<(truncstorei16 (i32 GPR:$dst), xaddr:$ad), (SH GPR:$dst, xaddr:$ad)>; -def : Pat<(zextloadi16 xaddr:$addr), (i32 (LHU xaddr:$addr))>; - -// 8-bit load and store -def : Pat<(truncstorei8 (i32 GPR:$dst), xaddr:$ad), (SB GPR:$dst, xaddr:$ad)>; -def : Pat<(zextloadi8 xaddr:$addr), (i32 (LBU xaddr:$addr))>; - -// Peepholes -def : Pat<(store (i32 0), iaddr:$dst), (SWI (i32 R0), iaddr:$dst)>; - -// Atomic fence -def : Pat<(atomic_fence (imm), (imm)), (MEMBARRIER)>; - -//===----------------------------------------------------------------------===// -// Floating Point Support -//===----------------------------------------------------------------------===// -include "MBlazeInstrFSL.td" -include "MBlazeInstrFPU.td" diff --git a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp b/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp deleted file mode 100644 index 0d3f7d8..0000000 --- a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp +++ /dev/null @@ -1,111 +0,0 @@ -//===-- MBlazeIntrinsicInfo.cpp - Intrinsic Information -------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the MBlaze implementation of TargetIntrinsicInfo. 
-// -//===----------------------------------------------------------------------===// - -#include "MBlazeIntrinsicInfo.h" -#include "llvm/IR/DerivedTypes.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/Intrinsics.h" -#include "llvm/IR/Module.h" -#include "llvm/IR/Type.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include <cstring> - -using namespace llvm; - -namespace mblazeIntrinsic { - - enum ID { - last_non_mblaze_intrinsic = Intrinsic::num_intrinsics-1, -#define GET_INTRINSIC_ENUM_VALUES -#include "MBlazeGenIntrinsics.inc" -#undef GET_INTRINSIC_ENUM_VALUES - , num_mblaze_intrinsics - }; - -#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN -#include "MBlazeGenIntrinsics.inc" -#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN -} - -std::string MBlazeIntrinsicInfo::getName(unsigned IntrID, Type **Tys, - unsigned numTys) const { - static const char *const names[] = { -#define GET_INTRINSIC_NAME_TABLE -#include "MBlazeGenIntrinsics.inc" -#undef GET_INTRINSIC_NAME_TABLE - }; - - assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded"); - if (IntrID < Intrinsic::num_intrinsics) - return 0; - assert(IntrID < mblazeIntrinsic::num_mblaze_intrinsics && - "Invalid intrinsic ID"); - - std::string Result(names[IntrID - Intrinsic::num_intrinsics]); - return Result; -} - -unsigned MBlazeIntrinsicInfo:: -lookupName(const char *Name, unsigned Len) const { - if (!StringRef(Name, Len).startswith("llvm.")) - return 0; // All intrinsics start with 'llvm.' - -#define GET_FUNCTION_RECOGNIZER -#include "MBlazeGenIntrinsics.inc" -#undef GET_FUNCTION_RECOGNIZER - return 0; -} - -unsigned MBlazeIntrinsicInfo:: -lookupGCCName(const char *Name) const { - return mblazeIntrinsic::getIntrinsicForGCCBuiltin("mblaze",Name); -} - -bool MBlazeIntrinsicInfo::isOverloaded(unsigned IntrID) const { - if (IntrID == 0) - return false; - - unsigned id = IntrID - Intrinsic::num_intrinsics + 1; -#define GET_INTRINSIC_OVERLOAD_TABLE -#include "MBlazeGenIntrinsics.inc" -#undef GET_INTRINSIC_OVERLOAD_TABLE -} - -/// This defines the "getAttributes(LLVMContext &C, ID id)" method. -#define GET_INTRINSIC_ATTRIBUTES -#include "MBlazeGenIntrinsics.inc" -#undef GET_INTRINSIC_ATTRIBUTES - -static FunctionType *getType(LLVMContext &Context, unsigned id) { - Type *ResultTy = NULL; - SmallVector<Type*, 8> ArgTys; - bool IsVarArg = false; - -#define GET_INTRINSIC_GENERATOR -#include "MBlazeGenIntrinsics.inc" -#undef GET_INTRINSIC_GENERATOR - - return FunctionType::get(ResultTy, ArgTys, IsVarArg); -} - -Function *MBlazeIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID, - Type **Tys, - unsigned numTy) const { - assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded"); - AttributeSet AList = getAttributes(M->getContext(), - (mblazeIntrinsic::ID) IntrID); - return cast<Function>(M->getOrInsertFunction(getName(IntrID), - getType(M->getContext(), IntrID), - AList)); -} diff --git a/lib/Target/MBlaze/MBlazeIntrinsicInfo.h b/lib/Target/MBlaze/MBlazeIntrinsicInfo.h deleted file mode 100644 index 34f3792..0000000 --- a/lib/Target/MBlaze/MBlazeIntrinsicInfo.h +++ /dev/null @@ -1,33 +0,0 @@ -//===-- MBlazeIntrinsicInfo.h - MBlaze Intrinsic Information ----*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the MBlaze implementation of TargetIntrinsicInfo. 
-// -//===----------------------------------------------------------------------===// -#ifndef MBLAZEINTRINSICS_H -#define MBLAZEINTRINSICS_H - -#include "llvm/Target/TargetIntrinsicInfo.h" - -namespace llvm { - - class MBlazeIntrinsicInfo : public TargetIntrinsicInfo { - public: - std::string getName(unsigned IntrID, Type **Tys = 0, - unsigned numTys = 0) const; - unsigned lookupName(const char *Name, unsigned Len) const; - unsigned lookupGCCName(const char *Name) const; - bool isOverloaded(unsigned IID) const; - Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0, - unsigned numTys = 0) const; - }; - -} - -#endif diff --git a/lib/Target/MBlaze/MBlazeIntrinsics.td b/lib/Target/MBlaze/MBlazeIntrinsics.td deleted file mode 100644 index b5dc595..0000000 --- a/lib/Target/MBlaze/MBlazeIntrinsics.td +++ /dev/null @@ -1,131 +0,0 @@ -//===-- IntrinsicsMBlaze.td - Defines MBlaze intrinsics ----*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines all of the MicroBlaze-specific intrinsics. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Definitions for all MBlaze intrinsics. -// - -// MBlaze intrinsic classes. -let TargetPrefix = "mblaze", isTarget = 1 in { - class MBFSL_Get_Intrinsic : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>; - - class MBFSL_Put_Intrinsic : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; - - class MBFSL_PutT_Intrinsic : Intrinsic<[], [llvm_i32_ty], []>; -} - -//===----------------------------------------------------------------------===// -// MicroBlaze FSL Get Intrinsic Definitions. 
-// - -def int_mblaze_fsl_get : GCCBuiltin<"__builtin_mblaze_fsl_get">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_aget : GCCBuiltin<"__builtin_mblaze_fsl_aget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_cget : GCCBuiltin<"__builtin_mblaze_fsl_cget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_caget : GCCBuiltin<"__builtin_mblaze_fsl_caget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_eget : GCCBuiltin<"__builtin_mblaze_fsl_eget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_eaget : GCCBuiltin<"__builtin_mblaze_fsl_eaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_ecget : GCCBuiltin<"__builtin_mblaze_fsl_ecget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_ecaget : GCCBuiltin<"__builtin_mblaze_fsl_ecaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_nget : GCCBuiltin<"__builtin_mblaze_fsl_nget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_naget : GCCBuiltin<"__builtin_mblaze_fsl_naget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_ncget : GCCBuiltin<"__builtin_mblaze_fsl_ncget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_ncaget : GCCBuiltin<"__builtin_mblaze_fsl_ncaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_neget : GCCBuiltin<"__builtin_mblaze_fsl_neget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_neaget : GCCBuiltin<"__builtin_mblaze_fsl_neaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_necget : GCCBuiltin<"__builtin_mblaze_fsl_necget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_necaget : GCCBuiltin<"__builtin_mblaze_fsl_necaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tget : GCCBuiltin<"__builtin_mblaze_fsl_tget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_taget : GCCBuiltin<"__builtin_mblaze_fsl_taget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tcget : GCCBuiltin<"__builtin_mblaze_fsl_tcget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tcaget : GCCBuiltin<"__builtin_mblaze_fsl_tcaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_teget : GCCBuiltin<"__builtin_mblaze_fsl_teget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_teaget : GCCBuiltin<"__builtin_mblaze_fsl_teaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tecget : GCCBuiltin<"__builtin_mblaze_fsl_tecget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tecaget : GCCBuiltin<"__builtin_mblaze_fsl_tecaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tnget : GCCBuiltin<"__builtin_mblaze_fsl_tnget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tnaget : GCCBuiltin<"__builtin_mblaze_fsl_tnaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tncget : GCCBuiltin<"__builtin_mblaze_fsl_tncget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tncaget : GCCBuiltin<"__builtin_mblaze_fsl_tncaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tneget : GCCBuiltin<"__builtin_mblaze_fsl_tneget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tneaget : GCCBuiltin<"__builtin_mblaze_fsl_tneaget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tnecget : GCCBuiltin<"__builtin_mblaze_fsl_tnecget">, - MBFSL_Get_Intrinsic; -def int_mblaze_fsl_tnecaget : GCCBuiltin<"__builtin_mblaze_fsl_tnecaget">, - MBFSL_Get_Intrinsic; - -//===----------------------------------------------------------------------===// -// MicroBlaze FSL Put Intrinsic Definitions. 
-// - -def int_mblaze_fsl_put : GCCBuiltin<"__builtin_mblaze_fsl_put">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_aput : GCCBuiltin<"__builtin_mblaze_fsl_aput">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_cput : GCCBuiltin<"__builtin_mblaze_fsl_cput">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_caput : GCCBuiltin<"__builtin_mblaze_fsl_caput">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_nput : GCCBuiltin<"__builtin_mblaze_fsl_nput">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_naput : GCCBuiltin<"__builtin_mblaze_fsl_naput">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_ncput : GCCBuiltin<"__builtin_mblaze_fsl_ncput">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_ncaput : GCCBuiltin<"__builtin_mblaze_fsl_ncaput">, - MBFSL_Put_Intrinsic; -def int_mblaze_fsl_tput : GCCBuiltin<"__builtin_mblaze_fsl_tput">, - MBFSL_PutT_Intrinsic; -def int_mblaze_fsl_taput : GCCBuiltin<"__builtin_mblaze_fsl_taput">, - MBFSL_PutT_Intrinsic; -def int_mblaze_fsl_tcput : GCCBuiltin<"__builtin_mblaze_fsl_tcput">, - MBFSL_PutT_Intrinsic; -def int_mblaze_fsl_tcaput : GCCBuiltin<"__builtin_mblaze_fsl_tcaput">, - MBFSL_PutT_Intrinsic; -def int_mblaze_fsl_tnput : GCCBuiltin<"__builtin_mblaze_fsl_tnput">, - MBFSL_PutT_Intrinsic; -def int_mblaze_fsl_tnaput : GCCBuiltin<"__builtin_mblaze_fsl_tnaput">, - MBFSL_PutT_Intrinsic; -def int_mblaze_fsl_tncput : GCCBuiltin<"__builtin_mblaze_fsl_tncput">, - MBFSL_PutT_Intrinsic; -def int_mblaze_fsl_tncaput : GCCBuiltin<"__builtin_mblaze_fsl_tncaput">, - MBFSL_PutT_Intrinsic; diff --git a/lib/Target/MBlaze/MBlazeMCInstLower.cpp b/lib/Target/MBlaze/MBlazeMCInstLower.cpp deleted file mode 100644 index ad414ac..0000000 --- a/lib/Target/MBlaze/MBlazeMCInstLower.cpp +++ /dev/null @@ -1,167 +0,0 @@ -//===-- MBlazeMCInstLower.cpp - Convert MBlaze MachineInstr to an MCInst---===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains code to lower MBlaze MachineInstrs to their corresponding -// MCInst records. 
-// -//===----------------------------------------------------------------------===// - -#include "MBlazeMCInstLower.h" -#include "MBlazeInstrInfo.h" -#include "llvm/ADT/SmallString.h" -#include "llvm/CodeGen/AsmPrinter.h" -#include "llvm/CodeGen/MachineBasicBlock.h" -#include "llvm/CodeGen/MachineInstr.h" -#include "llvm/IR/Constants.h" -#include "llvm/MC/MCAsmInfo.h" -#include "llvm/MC/MCContext.h" -#include "llvm/MC/MCExpr.h" -#include "llvm/MC/MCInst.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Target/Mangler.h" -using namespace llvm; - -MCSymbol *MBlazeMCInstLower:: -GetGlobalAddressSymbol(const MachineOperand &MO) const { - switch (MO.getTargetFlags()) { - default: llvm_unreachable("Unknown target flag on GV operand"); - case 0: break; - } - - return Printer.Mang->getSymbol(MO.getGlobal()); -} - -MCSymbol *MBlazeMCInstLower:: -GetExternalSymbolSymbol(const MachineOperand &MO) const { - switch (MO.getTargetFlags()) { - default: llvm_unreachable("Unknown target flag on GV operand"); - case 0: break; - } - - return Printer.GetExternalSymbolSymbol(MO.getSymbolName()); -} - -MCSymbol *MBlazeMCInstLower:: -GetJumpTableSymbol(const MachineOperand &MO) const { - SmallString<256> Name; - raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "JTI" - << Printer.getFunctionNumber() << '_' - << MO.getIndex(); - switch (MO.getTargetFlags()) { - default: llvm_unreachable("Unknown target flag on GV operand"); - case 0: break; - } - - // Create a symbol for the name. - return Ctx.GetOrCreateSymbol(Name.str()); -} - -MCSymbol *MBlazeMCInstLower:: -GetConstantPoolIndexSymbol(const MachineOperand &MO) const { - SmallString<256> Name; - raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "CPI" - << Printer.getFunctionNumber() << '_' - << MO.getIndex(); - - switch (MO.getTargetFlags()) { - default: - llvm_unreachable("Unknown target flag on GV operand"); - - case 0: break; - } - - // Create a symbol for the name. - return Ctx.GetOrCreateSymbol(Name.str()); -} - -MCSymbol *MBlazeMCInstLower:: -GetBlockAddressSymbol(const MachineOperand &MO) const { - switch (MO.getTargetFlags()) { - default: llvm_unreachable("Unknown target flag on GV operand"); - case 0: break; - } - - return Printer.GetBlockAddressSymbol(MO.getBlockAddress()); -} - -MCOperand MBlazeMCInstLower:: -LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const { - // FIXME: We would like an efficient form for this, so we don't have to do a - // lot of extra uniquing. - const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx); - - switch (MO.getTargetFlags()) { - default: - llvm_unreachable("Unknown target flag on GV operand"); - - case 0: break; - } - - if (!MO.isJTI() && MO.getOffset()) - Expr = MCBinaryExpr::CreateAdd(Expr, - MCConstantExpr::Create(MO.getOffset(), Ctx), - Ctx); - return MCOperand::CreateExpr(Expr); -} - -void MBlazeMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { - OutMI.setOpcode(MI->getOpcode()); - - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); - - MCOperand MCOp; - switch (MO.getType()) { - default: llvm_unreachable("unknown operand type"); - case MachineOperand::MO_Register: - // Ignore all implicit register operands. 
- if (MO.isImplicit()) continue; - MCOp = MCOperand::CreateReg(MO.getReg()); - break; - case MachineOperand::MO_Immediate: - MCOp = MCOperand::CreateImm(MO.getImm()); - break; - case MachineOperand::MO_MachineBasicBlock: - MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create( - MO.getMBB()->getSymbol(), Ctx)); - break; - case MachineOperand::MO_GlobalAddress: - MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO)); - break; - case MachineOperand::MO_ExternalSymbol: - MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO)); - break; - case MachineOperand::MO_JumpTableIndex: - MCOp = LowerSymbolOperand(MO, GetJumpTableSymbol(MO)); - break; - case MachineOperand::MO_ConstantPoolIndex: - MCOp = LowerSymbolOperand(MO, GetConstantPoolIndexSymbol(MO)); - break; - case MachineOperand::MO_BlockAddress: - MCOp = LowerSymbolOperand(MO, GetBlockAddressSymbol(MO)); - break; - case MachineOperand::MO_FPImmediate: { - bool ignored; - APFloat FVal = MO.getFPImm()->getValueAPF(); - FVal.convert(APFloat::IEEEsingle, APFloat::rmTowardZero, &ignored); - - APInt IVal = FVal.bitcastToAPInt(); - uint64_t Val = *IVal.getRawData(); - MCOp = MCOperand::CreateImm(Val); - break; - } - case MachineOperand::MO_RegisterMask: - continue; - } - - OutMI.addOperand(MCOp); - } -} diff --git a/lib/Target/MBlaze/MBlazeMCInstLower.h b/lib/Target/MBlaze/MBlazeMCInstLower.h deleted file mode 100644 index 8ab2c9a..0000000 --- a/lib/Target/MBlaze/MBlazeMCInstLower.h +++ /dev/null @@ -1,47 +0,0 @@ -//===-- MBlazeMCInstLower.h - Lower MachineInstr to MCInst ------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZE_MCINSTLOWER_H -#define MBLAZE_MCINSTLOWER_H - -#include "llvm/Support/Compiler.h" - -namespace llvm { - class AsmPrinter; - class MCContext; - class MCInst; - class MCOperand; - class MCSymbol; - class MachineInstr; - class MachineModuleInfoMachO; - class MachineOperand; - - /// MBlazeMCInstLower - This class is used to lower an MachineInstr - /// into an MCInst. -class LLVM_LIBRARY_VISIBILITY MBlazeMCInstLower { - MCContext &Ctx; - - AsmPrinter &Printer; -public: - MBlazeMCInstLower(MCContext &ctx, AsmPrinter &printer) - : Ctx(ctx), Printer(printer) {} - void Lower(const MachineInstr *MI, MCInst &OutMI) const; - - MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; - - MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const; - MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const; - MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const; - MCSymbol *GetConstantPoolIndexSymbol(const MachineOperand &MO) const; - MCSymbol *GetBlockAddressSymbol(const MachineOperand &MO) const; -}; - -} - -#endif diff --git a/lib/Target/MBlaze/MBlazeMachineFunction.cpp b/lib/Target/MBlaze/MBlazeMachineFunction.cpp deleted file mode 100644 index 2217b54..0000000 --- a/lib/Target/MBlaze/MBlazeMachineFunction.cpp +++ /dev/null @@ -1,14 +0,0 @@ -//===-- MBlazeMachineFunctionInfo.cpp - Private data ----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -#include "MBlazeMachineFunction.h" - -using namespace llvm; - -void MBlazeFunctionInfo::anchor() { } diff --git a/lib/Target/MBlaze/MBlazeMachineFunction.h b/lib/Target/MBlaze/MBlazeMachineFunction.h deleted file mode 100644 index 10d507f..0000000 --- a/lib/Target/MBlaze/MBlazeMachineFunction.h +++ /dev/null @@ -1,169 +0,0 @@ -//===-- MBlazeMachineFunctionInfo.h - Private data --------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file declares the MBlaze specific subclass of MachineFunctionInfo. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZE_MACHINE_FUNCTION_INFO_H -#define MBLAZE_MACHINE_FUNCTION_INFO_H - -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" - -namespace llvm { - -/// MBlazeFunctionInfo - This class is derived from MachineFunctionInfo and -/// holds private MBlaze target-specific information for each MachineFunction. -class MBlazeFunctionInfo : public MachineFunctionInfo { - virtual void anchor(); - - /// Holds the stack offset at which the Frame Pointer must be saved for - /// each function. This is used in the prologue and epilogue to emit FP - /// save/restore. - int FPStackOffset; - - /// Holds the stack offset at which the Return Address must be saved for - /// each function. This is used in the prologue and epilogue to emit RA - /// save/restore. - int RAStackOffset; - - /// MBlazeFIHolder - Holds a FrameIndex and its Stack Pointer Offset - struct MBlazeFIHolder { - - int FI; - int SPOffset; - - MBlazeFIHolder(int FrameIndex, int StackPointerOffset) - : FI(FrameIndex), SPOffset(StackPointerOffset) {} - }; - - /// When PIC is used the GP must be saved on the stack in the function - /// prologue and must be reloaded from this stack location after every - /// call. A reference to its stack location and frame index must be kept - /// to be used in emitPrologue and processFunctionBeforeFrameFinalized. - MBlazeFIHolder GPHolder; - - /// In LowerFormalArguments the stack size is unknown, so the Stack - /// Pointer Offset calculation of "not in register arguments" must be - /// postponed to emitPrologue. - SmallVector<MBlazeFIHolder, 16> FnLoadArgs; - bool HasLoadArgs; - - // For VarArgs, we must write registers back to the caller's stack, - // preserving the in-register arguments. Since the stack size is unknown - // in LowerFormalArguments, the Stack Pointer Offset calculation must be - // postponed to emitPrologue. - SmallVector<MBlazeFIHolder, 4> FnStoreVarArgs; - bool HasStoreVarArgs; - - // When determining the final stack layout some of the frame indexes may - // be replaced by new frame indexes that reside in the caller's stack - // frame. The replacements are recorded in this structure. - DenseMap<int,int> FIReplacements; - - /// SRetReturnReg - Some subtargets require that sret lowering includes - /// returning the value of the returned struct in a register. This field - /// holds the virtual register into which the sret argument is passed. - unsigned SRetReturnReg; - - /// GlobalBaseReg - keeps track of the virtual register initialized for - /// use as the global base register. This is used in some PIC - /// relocation models.
- unsigned GlobalBaseReg; - - // VarArgsFrameIndex - FrameIndex for start of varargs area. - int VarArgsFrameIndex; - - /// LiveInFI - keeps track of the frame indexes in a caller's stack - /// frame that are live into a function. - SmallVector<int, 16> LiveInFI; - -public: - MBlazeFunctionInfo(MachineFunction& MF) - : FPStackOffset(0), RAStackOffset(0), GPHolder(-1,-1), HasLoadArgs(false), - HasStoreVarArgs(false), SRetReturnReg(0), GlobalBaseReg(0), - VarArgsFrameIndex(0), LiveInFI() - {} - - int getFPStackOffset() const { return FPStackOffset; } - void setFPStackOffset(int Off) { FPStackOffset = Off; } - - int getRAStackOffset() const { return RAStackOffset; } - void setRAStackOffset(int Off) { RAStackOffset = Off; } - - int getGPStackOffset() const { return GPHolder.SPOffset; } - int getGPFI() const { return GPHolder.FI; } - void setGPStackOffset(int Off) { GPHolder.SPOffset = Off; } - void setGPFI(int FI) { GPHolder.FI = FI; } - bool needGPSaveRestore() const { return GPHolder.SPOffset != -1; } - - bool hasLoadArgs() const { return HasLoadArgs; } - bool hasStoreVarArgs() const { return HasStoreVarArgs; } - - void recordLiveIn(int FI) { - LiveInFI.push_back(FI); - } - - bool isLiveIn(int FI) { - for (unsigned i = 0, e = LiveInFI.size(); i < e; ++i) - if (FI == LiveInFI[i]) return true; - - return false; - } - - const SmallVector<int, 16>& getLiveIn() const { return LiveInFI; } - - void recordReplacement(int OFI, int NFI) { - FIReplacements.insert(std::make_pair(OFI,NFI)); - } - - bool hasReplacement(int OFI) const { - return FIReplacements.find(OFI) != FIReplacements.end(); - } - - int getReplacement(int OFI) const { - return FIReplacements.lookup(OFI); - } - - void recordLoadArgsFI(int FI, int SPOffset) { - if (!HasLoadArgs) HasLoadArgs=true; - FnLoadArgs.push_back(MBlazeFIHolder(FI, SPOffset)); - } - - void recordStoreVarArgsFI(int FI, int SPOffset) { - if (!HasStoreVarArgs) HasStoreVarArgs=true; - FnStoreVarArgs.push_back(MBlazeFIHolder(FI, SPOffset)); - } - - void adjustLoadArgsFI(MachineFrameInfo *MFI) const { - if (!hasLoadArgs()) return; - for (unsigned i = 0, e = FnLoadArgs.size(); i != e; ++i) - MFI->setObjectOffset(FnLoadArgs[i].FI, FnLoadArgs[i].SPOffset); - } - - void adjustStoreVarArgsFI(MachineFrameInfo *MFI) const { - if (!hasStoreVarArgs()) return; - for (unsigned i = 0, e = FnStoreVarArgs.size(); i != e; ++i) - MFI->setObjectOffset(FnStoreVarArgs[i].FI, FnStoreVarArgs[i].SPOffset); - } - - unsigned getSRetReturnReg() const { return SRetReturnReg; } - void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } - - unsigned getGlobalBaseReg() const { return GlobalBaseReg; } - void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; } - - int getVarArgsFrameIndex() const { return VarArgsFrameIndex; } - void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; } -}; - -} // end of namespace llvm - -#endif // MBLAZE_MACHINE_FUNCTION_INFO_H diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.cpp b/lib/Target/MBlaze/MBlazeRegisterInfo.cpp deleted file mode 100644 index 72fb8c6..0000000 --- a/lib/Target/MBlaze/MBlazeRegisterInfo.cpp +++ /dev/null @@ -1,145 +0,0 @@ -//===-- MBlazeRegisterInfo.cpp - MBlaze Register Information --------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details.
-// -//===----------------------------------------------------------------------===// -// -// This file contains the MBlaze implementation of the TargetRegisterInfo -// class. -// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "mblaze-frame-info" - -#include "MBlazeRegisterInfo.h" -#include "MBlaze.h" -#include "MBlazeMachineFunction.h" -#include "MBlazeSubtarget.h" -#include "llvm/ADT/BitVector.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/ValueTypes.h" -#include "llvm/IR/Constants.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/Type.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetFrameLowering.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetOptions.h" - -#define GET_REGINFO_TARGET_DESC -#include "MBlazeGenRegisterInfo.inc" - -using namespace llvm; - -MBlazeRegisterInfo:: -MBlazeRegisterInfo(const MBlazeSubtarget &ST) - : MBlazeGenRegisterInfo(MBlaze::R15), Subtarget(ST) {} - -unsigned MBlazeRegisterInfo::getPICCallReg() { - return MBlaze::R20; } - -//===----------------------------------------------------------------------===// -// Callee Saved Registers methods -//===----------------------------------------------------------------------===// - -/// MBlaze Callee Saved Registers -const uint16_t* MBlazeRegisterInfo:: -getCalleeSavedRegs(const MachineFunction *MF) const { - // MBlaze callee-save register range is R20 - R31 - static const uint16_t CalleeSavedRegs[] = { - MBlaze::R20, MBlaze::R21, MBlaze::R22, MBlaze::R23, - MBlaze::R24, MBlaze::R25, MBlaze::R26, MBlaze::R27, - MBlaze::R28, MBlaze::R29, MBlaze::R30, MBlaze::R31, - 0 - }; - - return CalleeSavedRegs; -} - -BitVector MBlazeRegisterInfo:: -getReservedRegs(const MachineFunction &MF) const { - BitVector Reserved(getNumRegs()); - Reserved.set(MBlaze::R0); - Reserved.set(MBlaze::R1); - Reserved.set(MBlaze::R2); - Reserved.set(MBlaze::R13); - Reserved.set(MBlaze::R14); - Reserved.set(MBlaze::R15); - Reserved.set(MBlaze::R16); - Reserved.set(MBlaze::R17); - Reserved.set(MBlaze::R18); - Reserved.set(MBlaze::R19); - return Reserved; -} - -// FrameIndexes represent objects inside an abstract stack. -// We must replace a FrameIndex with a direct stack/frame pointer -// reference. -void MBlazeRegisterInfo:: -eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, - unsigned FIOperandNum, RegScavenger *RS) const { - MachineInstr &MI = *II; - MachineFunction &MF = *MI.getParent()->getParent(); - MachineFrameInfo *MFI = MF.getFrameInfo(); - unsigned OFIOperandNum = FIOperandNum == 2 ?
1 : 2; - - DEBUG(dbgs() << "\nFunction : " << MF.getName() << "\n"; - dbgs() << "<--------->\n" << MI); - - int FrameIndex = MI.getOperand(FIOperandNum).getIndex(); - int stackSize = MFI->getStackSize(); - int spOffset = MFI->getObjectOffset(FrameIndex); - - DEBUG(MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>(); - dbgs() << "FrameIndex : " << FrameIndex << "\n" - << "spOffset : " << spOffset << "\n" - << "stackSize : " << stackSize << "\n" - << "isFixed : " << MFI->isFixedObjectIndex(FrameIndex) << "\n" - << "isLiveIn : " << MBlazeFI->isLiveIn(FrameIndex) << "\n" - << "isSpill : " << MFI->isSpillSlotObjectIndex(FrameIndex) - << "\n" ); - - // As explained in LowerFormalArguments, detect negative offsets - // and adjust SPOffsets considering the final stack size. - int Offset = (spOffset < 0) ? (stackSize - spOffset) : spOffset; - Offset += MI.getOperand(OFIOperandNum).getImm(); - - DEBUG(dbgs() << "Offset : " << Offset << "\n" << "<--------->\n"); - - MI.getOperand(OFIOperandNum).ChangeToImmediate(Offset); - MI.getOperand(FIOperandNum).ChangeToRegister(getFrameRegister(MF), false); -} - -void MBlazeRegisterInfo:: -processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *) const { - // Set the stack offset where GP must be saved/loaded from. - MachineFrameInfo *MFI = MF.getFrameInfo(); - MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>(); - if (MBlazeFI->needGPSaveRestore()) - MFI->setObjectOffset(MBlazeFI->getGPFI(), MBlazeFI->getGPStackOffset()); -} - -unsigned MBlazeRegisterInfo::getFrameRegister(const MachineFunction &MF) const { - const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); - - return TFI->hasFP(MF) ? MBlaze::R19 : MBlaze::R1; -} - -unsigned MBlazeRegisterInfo::getEHExceptionRegister() const { - llvm_unreachable("What is the exception register"); -} - -unsigned MBlazeRegisterInfo::getEHHandlerRegister() const { - llvm_unreachable("What is the exception handler register"); -} diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.h b/lib/Target/MBlaze/MBlazeRegisterInfo.h deleted file mode 100644 index b463478..0000000 --- a/lib/Target/MBlaze/MBlazeRegisterInfo.h +++ /dev/null @@ -1,69 +0,0 @@ -//===-- MBlazeRegisterInfo.h - MBlaze Register Information Impl -*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the MBlaze implementation of the TargetRegisterInfo -// class. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZEREGISTERINFO_H -#define MBLAZEREGISTERINFO_H - -#include "MBlaze.h" -#include "llvm/Target/TargetRegisterInfo.h" - -#define GET_REGINFO_HEADER -#include "MBlazeGenRegisterInfo.inc" - -namespace llvm { -class MBlazeSubtarget; -class TargetInstrInfo; -class Type; - -namespace MBlaze { - /// SubregIndex - The index of various sized subregister classes. Note that - /// these indices must be kept in sync with the class indices in the - /// MBlazeRegisterInfo.td file. - enum SubregIndex { - SUBREG_FPEVEN = 1, SUBREG_FPODD = 2 - }; -} - -struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo { - const MBlazeSubtarget &Subtarget; - - MBlazeRegisterInfo(const MBlazeSubtarget &Subtarget); - - /// Get PIC indirect call register - static unsigned getPICCallReg(); - - /// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const; - - BitVector getReservedRegs(const MachineFunction &MF) const; - - /// Stack Frame Processing Methods - void eliminateFrameIndex(MachineBasicBlock::iterator II, - int SPAdj, unsigned FIOperandNum, - RegScavenger *RS = NULL) const; - - void processFunctionBeforeFrameFinalized(MachineFunction &MF, - RegScavenger *RS = NULL) const; - - /// Debug information queries. - unsigned getFrameRegister(const MachineFunction &MF) const; - - /// Exception handling queries. - unsigned getEHExceptionRegister() const; - unsigned getEHHandlerRegister() const; -}; - -} // end namespace llvm - -#endif diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.td b/lib/Target/MBlaze/MBlazeRegisterInfo.td deleted file mode 100644 index 64cae5c..0000000 --- a/lib/Target/MBlaze/MBlazeRegisterInfo.td +++ /dev/null @@ -1,148 +0,0 @@ -//===-- MBlazeRegisterInfo.td - MBlaze Register defs -------*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Declarations that describe the MicroBlaze register file -//===----------------------------------------------------------------------===// - -// We have banks of 32 registers each. -class MBlazeReg<string n> : Register<n> { - field bits<5> Num; - let Namespace = "MBlaze"; -} - -// Special purpose registers have 15-bit values -class MBlazeSReg<string n> : Register<n> { - field bits<15> Num; - let Namespace = "MBlaze"; -} - -// MBlaze general purpose registers -class MBlazeGPRReg<bits<5> num, string n> : MBlazeReg<n> { - let Num = num; -} - -// MBlaze special purpose registers -class MBlazeSPRReg<bits<15> num, string n> : MBlazeSReg<n> { - let Num = num; -} - -//===----------------------------------------------------------------------===// -// Registers -//===----------------------------------------------------------------------===// - -let Namespace = "MBlaze" in { - // General Purpose Registers - def R0 : MBlazeGPRReg< 0, "r0">, DwarfRegNum<[0]>; - def R1 : MBlazeGPRReg< 1, "r1">, DwarfRegNum<[1]>; - def R2 : MBlazeGPRReg< 2, "r2">, DwarfRegNum<[2]>; - def R3 : MBlazeGPRReg< 3, "r3">, DwarfRegNum<[3]>; - def R4 : MBlazeGPRReg< 4, "r4">, DwarfRegNum<[4]>; - def R5 : MBlazeGPRReg< 5, "r5">, DwarfRegNum<[5]>; - def R6 : MBlazeGPRReg< 6, "r6">, DwarfRegNum<[6]>; - def R7 : MBlazeGPRReg< 7, "r7">, DwarfRegNum<[7]>; - def R8 : MBlazeGPRReg< 8, "r8">, DwarfRegNum<[8]>; - def R9 : MBlazeGPRReg< 9, "r9">, DwarfRegNum<[9]>; - def R10 : MBlazeGPRReg< 10, "r10">, DwarfRegNum<[10]>; - def R11 : MBlazeGPRReg< 11, "r11">, DwarfRegNum<[11]>; - def R12 : MBlazeGPRReg< 12, "r12">, DwarfRegNum<[12]>; - def R13 : MBlazeGPRReg< 13, "r13">, DwarfRegNum<[13]>; - def R14 : MBlazeGPRReg< 14, "r14">, DwarfRegNum<[14]>; - def R15 : MBlazeGPRReg< 15, "r15">, DwarfRegNum<[15]>; - def R16 : MBlazeGPRReg< 16, "r16">, DwarfRegNum<[16]>; - def R17 : MBlazeGPRReg< 17, "r17">, DwarfRegNum<[17]>; - def R18 : MBlazeGPRReg< 18, "r18">, DwarfRegNum<[18]>; - def R19 : MBlazeGPRReg< 19, "r19">, DwarfRegNum<[19]>; - def R20 : MBlazeGPRReg< 20, "r20">, DwarfRegNum<[20]>; - def R21 : MBlazeGPRReg< 21, "r21">, DwarfRegNum<[21]>; - def R22 : MBlazeGPRReg< 22, "r22">, DwarfRegNum<[22]>; - def R23 : MBlazeGPRReg< 23, "r23">, DwarfRegNum<[23]>; - def R24 : 
MBlazeGPRReg< 24, "r24">, DwarfRegNum<[24]>; - def R25 : MBlazeGPRReg< 25, "r25">, DwarfRegNum<[25]>; - def R26 : MBlazeGPRReg< 26, "r26">, DwarfRegNum<[26]>; - def R27 : MBlazeGPRReg< 27, "r27">, DwarfRegNum<[27]>; - def R28 : MBlazeGPRReg< 28, "r28">, DwarfRegNum<[28]>; - def R29 : MBlazeGPRReg< 29, "r29">, DwarfRegNum<[29]>; - def R30 : MBlazeGPRReg< 30, "r30">, DwarfRegNum<[30]>; - def R31 : MBlazeGPRReg< 31, "r31">, DwarfRegNum<[31]>; - - // Special Purpose Registers - def RPC : MBlazeSPRReg<0x0000, "rpc">, DwarfRegNum<[32]>; - def RMSR : MBlazeSPRReg<0x0001, "rmsr">, DwarfRegNum<[33]>; - def REAR : MBlazeSPRReg<0x0003, "rear">, DwarfRegNum<[34]>; - def RESR : MBlazeSPRReg<0x0005, "resr">, DwarfRegNum<[35]>; - def RFSR : MBlazeSPRReg<0x0007, "rfsr">, DwarfRegNum<[36]>; - def RBTR : MBlazeSPRReg<0x000B, "rbtr">, DwarfRegNum<[37]>; - def REDR : MBlazeSPRReg<0x000D, "redr">, DwarfRegNum<[38]>; - def RPID : MBlazeSPRReg<0x1000, "rpid">, DwarfRegNum<[39]>; - def RZPR : MBlazeSPRReg<0x1001, "rzpr">, DwarfRegNum<[40]>; - def RTLBX : MBlazeSPRReg<0x1002, "rtlbx">, DwarfRegNum<[41]>; - def RTLBLO : MBlazeSPRReg<0x1003, "rtlblo">, DwarfRegNum<[42]>; - def RTLBHI : MBlazeSPRReg<0x1004, "rtlbhi">, DwarfRegNum<[43]>; - def RTLBSX : MBlazeSPRReg<0x1004, "rtlbsx">, DwarfRegNum<[44]>; - def RPVR0 : MBlazeSPRReg<0x2000, "rpvr0">, DwarfRegNum<[45]>; - def RPVR1 : MBlazeSPRReg<0x2001, "rpvr1">, DwarfRegNum<[46]>; - def RPVR2 : MBlazeSPRReg<0x2002, "rpvr2">, DwarfRegNum<[47]>; - def RPVR3 : MBlazeSPRReg<0x2003, "rpvr3">, DwarfRegNum<[48]>; - def RPVR4 : MBlazeSPRReg<0x2004, "rpvr4">, DwarfRegNum<[49]>; - def RPVR5 : MBlazeSPRReg<0x2005, "rpvr5">, DwarfRegNum<[50]>; - def RPVR6 : MBlazeSPRReg<0x2006, "rpvr6">, DwarfRegNum<[51]>; - def RPVR7 : MBlazeSPRReg<0x2007, "rpvr7">, DwarfRegNum<[52]>; - def RPVR8 : MBlazeSPRReg<0x2008, "rpvr8">, DwarfRegNum<[53]>; - def RPVR9 : MBlazeSPRReg<0x2009, "rpvr9">, DwarfRegNum<[54]>; - def RPVR10 : MBlazeSPRReg<0x200A, "rpvr10">, DwarfRegNum<[55]>; - def RPVR11 : MBlazeSPRReg<0x200B, "rpvr11">, DwarfRegNum<[56]>; - - // The carry bit. In the Microblaze this is really bit 29 of the - // MSR register but this is the only bit of that register that we - // are interested in modeling. - def CARRY : MBlazeSPRReg<0x0000, "rmsr[c]">; -} - -//===----------------------------------------------------------------------===// -// Register Classes -//===----------------------------------------------------------------------===// - -def GPR : RegisterClass<"MBlaze", [i32,f32], 32, (sequence "R%u", 0, 31)>; - -def SPR : RegisterClass<"MBlaze", [i32], 32, (add - // Reserved - RPC, - RMSR, - REAR, - RESR, - RFSR, - RBTR, - REDR, - RPID, - RZPR, - RTLBX, - RTLBLO, - RTLBHI, - RPVR0, - RPVR1, - RPVR2, - RPVR3, - RPVR4, - RPVR5, - RPVR6, - RPVR7, - RPVR8, - RPVR9, - RPVR10, - RPVR11 - )> -{ - // None of the special purpose registers are allocatable. - let isAllocatable = 0; -} - -def CRC : RegisterClass<"MBlaze", [i32], 32, (add CARRY)> { - let CopyCost = -1; -} diff --git a/lib/Target/MBlaze/MBlazeRelocations.h b/lib/Target/MBlaze/MBlazeRelocations.h deleted file mode 100644 index 6387ee2..0000000 --- a/lib/Target/MBlaze/MBlazeRelocations.h +++ /dev/null @@ -1,47 +0,0 @@ -//===-- MBlazeRelocations.h - MBlaze Code Relocations -----------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// -// -// This file defines the MBlaze target-specific relocation types. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZERELOCATIONS_H -#define MBLAZERELOCATIONS_H - -#include "llvm/CodeGen/MachineRelocation.h" - -namespace llvm { - namespace MBlaze { - enum RelocationType { - /// reloc_pcrel_word - PC relative relocation, add the relocated value to - /// the value already in memory, after we adjust it for where the PC is. - reloc_pcrel_word = 0, - - /// reloc_picrel_word - PIC base relative relocation, add the relocated - /// value to the value already in memory, after we adjust it for where the - /// PIC base is. - reloc_picrel_word = 1, - - /// reloc_absolute_word - absolute relocation, just add the relocated - /// value to the value already in memory. - reloc_absolute_word = 2, - - /// reloc_absolute_word_sext - absolute relocation, just add the relocated - /// value to the value already in memory. In object files, it represents a - /// value which must be sign-extended when resolving the relocation. - reloc_absolute_word_sext = 3, - - /// reloc_absolute_dword - absolute relocation, just add the relocated - /// value to the value already in memory. - reloc_absolute_dword = 4 - }; - } -} - -#endif diff --git a/lib/Target/MBlaze/MBlazeSchedule.td b/lib/Target/MBlaze/MBlazeSchedule.td deleted file mode 100644 index cd5691c..0000000 --- a/lib/Target/MBlaze/MBlazeSchedule.td +++ /dev/null @@ -1,50 +0,0 @@ -//===-- MBlazeSchedule.td - MBlaze Scheduling Definitions --*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// MBlaze functional units. -//===----------------------------------------------------------------------===// -def IF : FuncUnit; -def ID : FuncUnit; -def EX : FuncUnit; -def MA : FuncUnit; -def WB : FuncUnit; - -//===----------------------------------------------------------------------===// -// Instruction Itinerary classes used for MBlaze -//===----------------------------------------------------------------------===// -def IIC_ALU : InstrItinClass; -def IIC_ALUm : InstrItinClass; -def IIC_ALUd : InstrItinClass; -def IIC_SHT : InstrItinClass; -def IIC_FSLg : InstrItinClass; -def IIC_FSLp : InstrItinClass; -def IIC_MEMs : InstrItinClass; -def IIC_MEMl : InstrItinClass; -def IIC_FPU : InstrItinClass; -def IIC_FPUd : InstrItinClass; -def IIC_FPUf : InstrItinClass; -def IIC_FPUi : InstrItinClass; -def IIC_FPUs : InstrItinClass; -def IIC_FPUc : InstrItinClass; -def IIC_BR : InstrItinClass; -def IIC_BRc : InstrItinClass; -def IIC_BRl : InstrItinClass; -def IIC_WDC : InstrItinClass; -def IIC_Pseudo : InstrItinClass; - -//===----------------------------------------------------------------------===// -// MBlaze instruction itineraries for three stage pipeline. -//===----------------------------------------------------------------------===// -include "MBlazeSchedule3.td" - -//===----------------------------------------------------------------------===// -// MBlaze instruction itineraries for five stage pipeline. 
-//===----------------------------------------------------------------------===// -include "MBlazeSchedule5.td" diff --git a/lib/Target/MBlaze/MBlazeSchedule3.td b/lib/Target/MBlaze/MBlazeSchedule3.td deleted file mode 100644 index 20257a6..0000000 --- a/lib/Target/MBlaze/MBlazeSchedule3.td +++ /dev/null @@ -1,236 +0,0 @@ -//===-- MBlazeSchedule3.td - MBlaze Scheduling Definitions -*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// MBlaze instruction itineraries for the three stage pipeline. -//===----------------------------------------------------------------------===// -def MBlazePipe3Itineraries : ProcessorItineraries< - [IF,ID,EX], [], [ - - // ALU instruction with one destination register and either two register - // source operands or one register source operand and one immediate operand. - // The instruction takes one cycle to execute in each of the stages. The - // two source operands are read during the decode stage and the result is - // ready after the execute stage. - InstrItinData< IIC_ALU, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]>], // one cycle in execute stage - [ 2 // result ready after two cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // ALU multiply instruction with one destination register and either two - // register source operands or one register source operand and one immediate - // operand. The instruction takes one cycle to execute in each of the - // pipeline stages except the execute stage, which takes three cycles. The - // two source operands are read during the decode stage and the result is - // ready after the execute stage. - InstrItinData< IIC_ALUm, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<3,[EX]>], // three cycles in execute stage - [ 4 // result ready after four cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // ALU divide instruction with one destination register and two register - // source operands. The instruction takes one cycle to execute in each of - // the pipeline stages except the execute stage, which takes 34 cycles. The two - // source operands are read during the decode stage and the result is ready - // after the execute stage. - InstrItinData< IIC_ALUd, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<34,[EX]>], // 34 cycles in execute stage - [ 35 // result ready after 35 cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Shift instruction with one destination register and either two register - // source operands or one register source operand and one immediate operand. - // The instruction takes one cycle to execute in each of the pipeline stages - // except the execute stage, which takes two cycles. The two source operands - // are read during the decode stage and the result is ready after the execute - // stage.
- InstrItinData< IIC_SHT, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<2,[EX]>], // two cycles in execute stage - [ 3 // result ready after three cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Branch instruction with one source operand register. The instruction takes - // one cycle to execute in each of the pipeline stages. The source operand is - // read during the decode stage. - InstrItinData< IIC_BR, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]>], // one cycle in execute stage - [ 1 ]>, // first operand read after one cycle - - // Conditional branch instruction with two source operand registers. The - // instruction takes one cycle to execute in each of the pipeline stages. The - // two source operands are read during the decode stage. - InstrItinData< IIC_BRc, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]>], // one cycle in execute stage - [ 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Branch and link instruction with one destination register and one source - // operand register. The instruction takes one cycle to execute in each of - // the pipeline stages. The source operand is read during the decode stage - // and the destination register is ready after the execute stage. - InstrItinData< IIC_BRl, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]>], // one cycle in execute stage - [ 2 // result ready after two cycles - , 1 ]>, // first operand read after one cycle - - // Cache control instruction with two source operand registers. The - // instruction takes one cycle to execute in each of the pipeline stages - // except the execute stage, which takes two cycles. The source - // operands are read during the decode stage. - InstrItinData< IIC_WDC, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<2,[EX]>], // two cycles in execute stage - [ 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Floating point instruction with one destination register and two source - // operand registers. The instruction takes one cycle to execute in each of - // the pipeline stages except the execute stage, which takes six cycles. The - // source operands are read during the decode stage and the results are ready - // after the execute stage. - InstrItinData< IIC_FPU, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<6,[EX]>], // six cycles in execute stage - [ 7 // result ready after seven cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Floating point divide instruction with one destination register and two - // source operand registers. The instruction takes one cycle to execute in - // each of the pipeline stages except the execute stage, which takes 30 - // cycles. The source operands are read during the decode stage and the - // results are ready after the execute stage.
- InstrItinData< IIC_FPUd, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<30,[EX]>], // 30 cycles in execute stage - [ 31 // result ready after 31 cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Convert floating point to integer instruction with one destination - // register and one source operand register. The instruction takes one cycle - // to execute in each of the pipeline stages except the execute stage, - // which takes seven cycles. The source operands are read during the decode - // stage and the results are ready after the execute stage. - InstrItinData< IIC_FPUi, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<7,[EX]>], // seven cycles in execute stage - [ 8 // result ready after eight cycles - , 1 ]>, // first operand read after one cycle - - // Convert integer to floating point instruction with one destination - // register and one source operand register. The instruction takes one cycle - // to execute in each of the pipeline stages except the execute stage, - // which takes six cycles. The source operands are read during the decode - // stage and the results are ready after the execute stage. - InstrItinData< IIC_FPUf, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<6,[EX]>], // six cycles in execute stage - [ 7 // result ready after seven cycles - , 1 ]>, // first operand read after one cycle - - // Floating point square root instruction with one destination register and - // one source operand register. The instruction takes one cycle to execute in - // each of the pipeline stages except the execute stage, which takes 29 - // cycles. The source operands are read during the decode stage and the - // results are ready after the execute stage. - InstrItinData< IIC_FPUs, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<29,[EX]>], // 29 cycles in execute stage - [ 30 // result ready after 30 cycles - , 1 ]>, // first operand read after one cycle - - // Floating point comparison instruction with one destination register and - // two source operand registers. The instruction takes one cycle to execute - // in each of the pipeline stages except the execute stage, which takes three - // cycles. The source operands are read during the decode stage and the - // results are ready after the execute stage. - InstrItinData< IIC_FPUc, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<3,[EX]>], // three cycles in execute stage - [ 4 // result ready after four cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // FSL get instruction with one register or immediate source operand and one - // destination register. The instruction takes one cycle to execute in each - // of the pipeline stages except the execute stage, which takes two cycles. - // The one source operand is read during the decode stage and the result is - // ready after the execute stage.
- InstrItinData< IIC_FSLg, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<2,[EX]>], // two cycles in execute stage - [ 3 // result ready after three cycles - , 1 ]>, // first operand read after one cycle - - // FSL put instruction with either two register source operands or one - // register source operand and one immediate operand. There is no result - // produced by the instruction. The instruction takes one cycle to execute in - // each of the pipeline stages except the execute stage, which takes two - // cycles. The two source operands are read during the decode stage. - InstrItinData< IIC_FSLp, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<2,[EX]>], // two cycles in execute stage - [ 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Memory store instruction with either three register source operands or two - // register source operands and one immediate operand. There is no result - // produced by the instruction. The instruction takes one cycle to execute in - // each of the pipeline stages except the execute stage, which takes two - // cycles. All of the source operands are read during the decode stage. - InstrItinData< IIC_MEMs, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<2,[EX]>], // two cycles in execute stage - [ 1 // first operand read after one cycle - , 1 // second operand read after one cycle - , 1 ]>, // third operand read after one cycle - - // Memory load instruction with one destination register and either two - // register source operands or one register source operand and one immediate - // operand. The instruction takes one cycle to execute in each of the - // pipeline stages except the execute stage, which takes two cycles. All of - // the source operands are read during the decode stage and the result is - // ready after the execute stage. - InstrItinData< IIC_MEMl, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<2,[EX]>], // two cycles in execute stage - [ 3 // result ready after three cycles - , 1 // second operand read after one cycle - , 1 ]> // third operand read after one cycle -]>; diff --git a/lib/Target/MBlaze/MBlazeSchedule5.td b/lib/Target/MBlaze/MBlazeSchedule5.td deleted file mode 100644 index ab53b42..0000000 --- a/lib/Target/MBlaze/MBlazeSchedule5.td +++ /dev/null @@ -1,267 +0,0 @@ -//===-- MBlazeSchedule5.td - MBlaze Scheduling Definitions -*- tablegen -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// MBlaze instruction itineraries for the five stage pipeline. -//===----------------------------------------------------------------------===// -def MBlazePipe5Itineraries : ProcessorItineraries< - [IF,ID,EX,MA,WB], [], [ - - // ALU instruction with one destination register and either two register - // source operands or one register source operand and one immediate operand. - // The instruction takes one cycle to execute in each of the stages.
The - // two source operands are read during the decode stage and the result is - // ready after the execute stage. - InstrItinData< IIC_ALU, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 2 // result ready after two cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // ALU multiply instruction with one destination register and either two - // register source operands or one register source operand and one immediate - // operand. The instruction takes one cycle to execute in each of the - // pipeline stages. The two source operands are read during the decode stage - // and the result is ready after the execute stage. - InstrItinData< IIC_ALUm, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 2 // result ready after two cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // ALU divide instruction with one destination register and two register - // source operands. The instruction takes one cycle to execute in each of - // the pipeline stages except the memory access stage, which takes 31 cycles. The two - // source operands are read during the decode stage and the result is ready - // after the memory access stage. - InstrItinData< IIC_ALUd, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<31,[MA]> // 31 cycles in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 33 // result ready after 33 cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Shift instruction with one destination register and either two register - // source operands or one register source operand and one immediate operand. - // The instruction takes one cycle to execute in each of the pipeline stages. - // The two source operands are read during the decode stage and the result is - // ready after the memory access stage. - InstrItinData< IIC_SHT, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 3 // result ready after three cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Branch instruction with one source operand register. The instruction takes - // one cycle to execute in each of the pipeline stages. The source operand is - // read during the decode stage. - InstrItinData< IIC_BR, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 1 ]>, // first operand read after one cycle - - // Conditional branch instruction with two source operand registers.
The - // instruction takes one cycle to execute in each of the pipeline stages. The - // two source operands are read during the decode stage. - InstrItinData< IIC_BRc, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Branch and link instruction with one destination register and one source - // operand register. The instruction takes one cycle to execute in each of - // the pipeline stages. The source operand is read during the decode stage - // and the destination register is ready after the writeback stage. - InstrItinData< IIC_BRl, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 4 // result ready after four cycles - , 1 ]>, // first operand read after one cycle - - // Cache control instruction with two source operand registers. The - // instruction takes one cycle to execute in each of the pipeline stages - // except the memory access stage, which takes two cycles. The source - // operands are read during the decode stage. - InstrItinData< IIC_WDC, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<2,[MA]> // two cycles in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Floating point instruction with one destination register and two source - // operand registers. The instruction takes one cycle to execute in each of - // the pipeline stages except the memory access stage, which takes two - // cycles. The source operands are read during the decode stage and the - // results are ready after the writeback stage. - InstrItinData< IIC_FPU, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<2,[MA]> // two cycles in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 5 // result ready after five cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Floating point divide instruction with one destination register and two - // source operand registers. The instruction takes one cycle to execute in - // each of the pipeline stages except the memory access stage, which takes 26 - // cycles. The source operands are read during the decode stage and the - // results are ready after the writeback stage. 
- InstrItinData< IIC_FPUd, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<26,[MA]> // 26 cycles in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 29 // result ready after 29 cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Convert floating point to integer instruction with one destination - // register and one source operand register. The instruction takes one cycle - // to execute in each of the pipeline stages except the memory access stage, - // which takes three cycles. The source operands are read during the decode - // stage and the results are ready after the writeback stage. - InstrItinData< IIC_FPUi, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<3,[MA]> // three cycles in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 6 // result ready after six cycles - , 1 ]>, // first operand read after one cycle - - // Convert integer to floating point instruction with one destination - // register and one source operand register. The instruction takes one cycle - // to execute in each of the pipeline stages except the memory access stage, - // which takes two cycles. The source operands are read during the decode - // stage and the results are ready after the writeback stage. - InstrItinData< IIC_FPUf, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<2,[MA]> // two cycles in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 5 // result ready after five cycles - , 1 ]>, // first operand read after one cycle - - // Floating point square root instruction with one destination register and - // one source operand register. The instruction takes one cycle to execute in - // each of the pipeline stages except the memory access stage, which takes 25 - // cycles. The source operands are read during the decode stage and the - // results are ready after the writeback stage. - InstrItinData< IIC_FPUs, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<25,[MA]> // 25 cycles in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 28 // result ready after 28 cycles - , 1 ]>, // first operand read after one cycle - - // Floating point comparison instruction with one destination register and - // two source operand registers. The instruction takes one cycle to execute - // in each of the pipeline stages. The source operands are read during the - // decode stage and the results are ready after the execute stage. - InstrItinData< IIC_FPUc, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 2 // result ready after two cycles - , 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // FSL get instruction with one register or immediate source operand and one - // destination register. 
The instruction takes one cycle to execute in each - // of the pipeline stages. The one source operand is read during the decode - // stage and the result is ready after the execute stage. - InstrItinData< IIC_FSLg, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 2 // result ready after two cycles - , 1 ]>, // first operand read after one cycle - - // FSL put instruction with either two register source operands or one - // register source operand and one immediate operand. There is no result - // produced by the instruction. The instruction takes one cycle to execute in - // each of the pipeline stages. The two source operands are read during the - // decode stage. - InstrItinData< IIC_FSLp, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 1 // first operand read after one cycle - , 1 ]>, // second operand read after one cycle - - // Memory store instruction with either three register source operands or two - // register source operands and one immediate operand. There is no result - // produced by the instruction. The instruction takes one cycle to execute in - // each of the pipeline stages. All of the source operands are read during - // the decode stage. - InstrItinData< IIC_MEMs, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 1 // first operand read after one cycle - , 1 // second operand read after one cycle - , 1 ]>, // third operand read after one cycle - - // Memory load instruction with one destination register and either two - // register source operands or one register source operand and one immediate - // operand. The instruction takes one cycle to execute in each of the - // pipeline stages. All of the source operands are read during the decode - // stage and the result is ready after the writeback stage. - InstrItinData< IIC_MEMl, - [ InstrStage<1,[IF]> // one cycle in fetch stage - , InstrStage<1,[ID]> // one cycle in decode stage - , InstrStage<1,[EX]> // one cycle in execute stage - , InstrStage<1,[MA]> // one cycle in memory access stage - , InstrStage<1,[WB]>], // one cycle in write back stage - [ 4 // result ready after four cycles - , 1 // second operand read after one cycle - , 1 ]> // third operand read after one cycle -]>; diff --git a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.cpp b/lib/Target/MBlaze/MBlazeSelectionDAGInfo.cpp deleted file mode 100644 index 6a115b2..0000000 --- a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.cpp +++ /dev/null @@ -1,23 +0,0 @@ -//===-- MBlazeSelectionDAGInfo.cpp - MBlaze SelectionDAG Info -------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the MBlazeSelectionDAGInfo class. 
-// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "mblaze-selectiondag-info" -#include "MBlazeTargetMachine.h" -using namespace llvm; - -MBlazeSelectionDAGInfo::MBlazeSelectionDAGInfo(const MBlazeTargetMachine &TM) - : TargetSelectionDAGInfo(TM) { -} - -MBlazeSelectionDAGInfo::~MBlazeSelectionDAGInfo() { -} diff --git a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.h b/lib/Target/MBlaze/MBlazeSelectionDAGInfo.h deleted file mode 100644 index 9f8e2aa..0000000 --- a/lib/Target/MBlaze/MBlazeSelectionDAGInfo.h +++ /dev/null @@ -1,31 +0,0 @@ -//===-- MBlazeSelectionDAGInfo.h - MBlaze SelectionDAG Info -----*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the MBlaze subclass for TargetSelectionDAGInfo. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZESELECTIONDAGINFO_H -#define MBLAZESELECTIONDAGINFO_H - -#include "llvm/Target/TargetSelectionDAGInfo.h" - -namespace llvm { - -class MBlazeTargetMachine; - -class MBlazeSelectionDAGInfo : public TargetSelectionDAGInfo { -public: - explicit MBlazeSelectionDAGInfo(const MBlazeTargetMachine &TM); - ~MBlazeSelectionDAGInfo(); -}; - -} - -#endif diff --git a/lib/Target/MBlaze/MBlazeSubtarget.cpp b/lib/Target/MBlaze/MBlazeSubtarget.cpp deleted file mode 100644 index dc2ad29..0000000 --- a/lib/Target/MBlaze/MBlazeSubtarget.cpp +++ /dev/null @@ -1,56 +0,0 @@ -//===-- MBlazeSubtarget.cpp - MBlaze Subtarget Information ----------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the MBlaze specific subclass of TargetSubtargetInfo. -// -//===----------------------------------------------------------------------===// - -#include "MBlazeSubtarget.h" -#include "MBlaze.h" -#include "MBlazeRegisterInfo.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/TargetRegistry.h" - -#define GET_SUBTARGETINFO_TARGET_DESC -#define GET_SUBTARGETINFO_CTOR -#include "MBlazeGenSubtargetInfo.inc" - -using namespace llvm; - -MBlazeSubtarget::MBlazeSubtarget(const std::string &TT, - const std::string &CPU, - const std::string &FS): - MBlazeGenSubtargetInfo(TT, CPU, FS), - HasBarrel(false), HasDiv(false), HasMul(false), HasPatCmp(false), - HasFPU(false), HasMul64(false), HasSqrt(false) -{ - // Parse features string. - std::string CPUName = CPU; - if (CPUName.empty()) - CPUName = "mblaze"; - ParseSubtargetFeatures(CPUName, FS); - - // Only use instruction scheduling if the selected CPU has an instruction - // itinerary (the default CPU is the only one that doesn't). - HasItin = CPUName != "mblaze"; - DEBUG(dbgs() << "CPU " << CPUName << "(" << HasItin << ")\n"); - - // Initialize scheduling itinerary for the specified CPU. 
- InstrItins = getInstrItineraryForCPU(CPUName); -} - -bool MBlazeSubtarget:: -enablePostRAScheduler(CodeGenOpt::Level OptLevel, - TargetSubtargetInfo::AntiDepBreakMode& Mode, - RegClassVector& CriticalPathRCs) const { - Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL; - CriticalPathRCs.clear(); - CriticalPathRCs.push_back(&MBlaze::GPRRegClass); - return HasItin && OptLevel >= CodeGenOpt::Default; -} diff --git a/lib/Target/MBlaze/MBlazeSubtarget.h b/lib/Target/MBlaze/MBlazeSubtarget.h deleted file mode 100644 index ed43d21..0000000 --- a/lib/Target/MBlaze/MBlazeSubtarget.h +++ /dev/null @@ -1,75 +0,0 @@ -//===-- MBlazeSubtarget.h - Define Subtarget for the MBlaze ----*- C++ -*--===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file declares the MBlaze specific subclass of TargetSubtargetInfo. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZESUBTARGET_H -#define MBLAZESUBTARGET_H - -#include "llvm/MC/MCInstrItineraries.h" -#include "llvm/Target/TargetSubtargetInfo.h" -#include <string> - -#define GET_SUBTARGETINFO_HEADER -#include "MBlazeGenSubtargetInfo.inc" - -namespace llvm { -class StringRef; - -class MBlazeSubtarget : public MBlazeGenSubtargetInfo { - -protected: - bool HasBarrel; - bool HasDiv; - bool HasMul; - bool HasPatCmp; - bool HasFPU; - bool HasMul64; - bool HasSqrt; - bool HasItin; - - InstrItineraryData InstrItins; - -public: - - /// This constructor initializes the data members to match those - /// of the specified triple. - MBlazeSubtarget(const std::string &TT, const std::string &CPU, - const std::string &FS); - - /// ParseSubtargetFeatures - Parses the features string, setting the - /// specified subtarget options. The definition of this function is auto - /// generated by tblgen. - void ParseSubtargetFeatures(StringRef CPU, StringRef FS); - - /// Compute the maximum number of issues per cycle for the - /// MBlaze scheduling itineraries. - void computeIssueWidth(); - - /// enablePostRAScheduler - True at 'More' optimization levels. - bool enablePostRAScheduler(CodeGenOpt::Level OptLevel, - TargetSubtargetInfo::AntiDepBreakMode& Mode, - RegClassVector& CriticalPathRCs) const; - - /// getInstrItineraryData - Return the instruction itineraries based on the - /// subtarget. - const InstrItineraryData &getInstrItineraryData() const { return InstrItins; } - - bool hasItin() const { return HasItin; } - bool hasPCMP() const { return HasPatCmp; } - bool hasFPU() const { return HasFPU; } - bool hasSqrt() const { return HasSqrt; } - bool hasMul() const { return HasMul; } - bool hasMul64() const { return HasMul64; } - bool hasDiv() const { return HasDiv; } - bool hasBarrel() const { return HasBarrel; } -}; -} // End llvm namespace - -#endif diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/lib/Target/MBlaze/MBlazeTargetMachine.cpp deleted file mode 100644 index c758955..0000000 --- a/lib/Target/MBlaze/MBlazeTargetMachine.cpp +++ /dev/null @@ -1,82 +0,0 @@ -//===-- MBlazeTargetMachine.cpp - Define TargetMachine for MBlaze ---------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// Implements the info about the MBlaze target spec.
-// -//===----------------------------------------------------------------------===// - -#include "MBlazeTargetMachine.h" -#include "MBlaze.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/PassManager.h" -#include "llvm/Support/FormattedStream.h" -#include "llvm/Support/TargetRegistry.h" -#include "llvm/Target/TargetOptions.h" -using namespace llvm; - -extern "C" void LLVMInitializeMBlazeTarget() { - // Register the target. - RegisterTargetMachine<MBlazeTargetMachine> X(TheMBlazeTarget); -} - -// DataLayout --> Big-endian, 32-bit pointer/ABI/alignment -// The stack is always 8 byte aligned -// On function prologue, the stack is created by decrementing -// its pointer. Once decremented, all references are done with positive -// offset from the stack/frame pointer, using StackGrowsUp enables -// an easier handling. -MBlazeTargetMachine:: -MBlazeTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, const TargetOptions &Options, - Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL) - : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), - Subtarget(TT, CPU, FS), - DL("E-p:32:32:32-i8:8:8-i16:16:16"), - InstrInfo(*this), - FrameLowering(Subtarget), - TLInfo(*this), TSInfo(*this), - InstrItins(Subtarget.getInstrItineraryData()) { - initAsmInfo(); -} - -namespace { -/// MBlaze Code Generator Pass Configuration Options. -class MBlazePassConfig : public TargetPassConfig { -public: - MBlazePassConfig(MBlazeTargetMachine *TM, PassManagerBase &PM) - : TargetPassConfig(TM, PM) {} - - MBlazeTargetMachine &getMBlazeTargetMachine() const { - return getTM<MBlazeTargetMachine>(); - } - - virtual bool addInstSelector(); - virtual bool addPreEmitPass(); -}; -} // namespace - -TargetPassConfig *MBlazeTargetMachine::createPassConfig(PassManagerBase &PM) { - return new MBlazePassConfig(this, PM); -} - -// Install an instruction selector pass using -// the ISelDag to gen MBlaze code. -bool MBlazePassConfig::addInstSelector() { - addPass(createMBlazeISelDag(getMBlazeTargetMachine())); - return false; -} - -// Implemented by targets that want to run passes immediately before -// machine code is emitted. return true if -print-machineinstrs should -// print out the code after the passes. -bool MBlazePassConfig::addPreEmitPass() { - addPass(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine())); - return true; -} diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.h b/lib/Target/MBlaze/MBlazeTargetMachine.h deleted file mode 100644 index 956794d..0000000 --- a/lib/Target/MBlaze/MBlazeTargetMachine.h +++ /dev/null @@ -1,80 +0,0 @@ -//===-- MBlazeTargetMachine.h - Define TargetMachine for MBlaze -*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file declares the MBlaze specific subclass of TargetMachine. 
-// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZE_TARGETMACHINE_H -#define MBLAZE_TARGETMACHINE_H - -#include "MBlazeFrameLowering.h" -#include "MBlazeISelLowering.h" -#include "MBlazeInstrInfo.h" -#include "MBlazeIntrinsicInfo.h" -#include "MBlazeSelectionDAGInfo.h" -#include "MBlazeSubtarget.h" -#include "llvm/IR/DataLayout.h" -#include "llvm/MC/MCStreamer.h" -#include "llvm/Target/TargetFrameLowering.h" -#include "llvm/Target/TargetMachine.h" - -namespace llvm { - class formatted_raw_ostream; - - class MBlazeTargetMachine : public LLVMTargetMachine { - MBlazeSubtarget Subtarget; - const DataLayout DL; // Calculates type size & alignment - MBlazeInstrInfo InstrInfo; - MBlazeFrameLowering FrameLowering; - MBlazeTargetLowering TLInfo; - MBlazeSelectionDAGInfo TSInfo; - MBlazeIntrinsicInfo IntrinsicInfo; - InstrItineraryData InstrItins; - - public: - MBlazeTargetMachine(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, - const TargetOptions &Options, - Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL); - - virtual const MBlazeInstrInfo *getInstrInfo() const - { return &InstrInfo; } - - virtual const InstrItineraryData *getInstrItineraryData() const - { return &InstrItins; } - - virtual const TargetFrameLowering *getFrameLowering() const - { return &FrameLowering; } - - virtual const MBlazeSubtarget *getSubtargetImpl() const - { return &Subtarget; } - - virtual const DataLayout *getDataLayout() const - { return &DL;} - - virtual const MBlazeRegisterInfo *getRegisterInfo() const - { return &InstrInfo.getRegisterInfo(); } - - virtual const MBlazeTargetLowering *getTargetLowering() const - { return &TLInfo; } - - virtual const MBlazeSelectionDAGInfo* getSelectionDAGInfo() const - { return &TSInfo; } - - const TargetIntrinsicInfo *getIntrinsicInfo() const - { return &IntrinsicInfo; } - - // Pass Pipeline Configuration - virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); - }; -} // End llvm namespace - -#endif diff --git a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp b/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp deleted file mode 100644 index a7a0a68..0000000 --- a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp +++ /dev/null @@ -1,90 +0,0 @@ -//===-- MBlazeTargetObjectFile.cpp - MBlaze object files ------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "MBlazeTargetObjectFile.h" -#include "MBlazeSubtarget.h" -#include "llvm/IR/DataLayout.h" -#include "llvm/IR/DerivedTypes.h" -#include "llvm/IR/GlobalVariable.h" -#include "llvm/MC/MCContext.h" -#include "llvm/MC/MCSectionELF.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/ELF.h" -#include "llvm/Target/TargetMachine.h" -using namespace llvm; - -void MBlazeTargetObjectFile:: -Initialize(MCContext &Ctx, const TargetMachine &TM) { - TargetLoweringObjectFileELF::Initialize(Ctx, TM); - - SmallDataSection = - getContext().getELFSection(".sdata", ELF::SHT_PROGBITS, - ELF::SHF_WRITE |ELF::SHF_ALLOC, - SectionKind::getDataRel()); - - SmallBSSSection = - getContext().getELFSection(".sbss", ELF::SHT_NOBITS, - ELF::SHF_WRITE |ELF::SHF_ALLOC, - SectionKind::getBSS()); - -} - -// A address must be loaded from a small section if its size is less than the -// small section size threshold. 
Data in this section must be addressed using -// gp_rel operator. -static bool IsInSmallSection(uint64_t Size) { - return Size > 0 && Size <= 8; -} - -bool MBlazeTargetObjectFile:: -IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM) const { - if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) - return false; - - return IsGlobalInSmallSection(GV, TM, getKindForGlobal(GV, TM)); -} - -/// IsGlobalInSmallSection - Return true if this global address should be -/// placed into small data/bss section. -bool MBlazeTargetObjectFile:: -IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM, - SectionKind Kind) const { - // Only global variables, not functions. - const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GV); - if (!GVA) - return false; - - // We can only do this for datarel or BSS objects for now. - if (!Kind.isBSS() && !Kind.isDataRel()) - return false; - - // If this is a internal constant string, there is a special - // section for it, but not in small data/bss. - if (Kind.isMergeable1ByteCString()) - return false; - - Type *Ty = GV->getType()->getElementType(); - return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty)); -} - -const MCSection *MBlazeTargetObjectFile:: -SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, - Mangler *Mang, const TargetMachine &TM) const { - // TODO: Could also support "weak" symbols as well with ".gnu.linkonce.s.*" - // sections? - - // Handle Small Section classification here. - if (Kind.isBSS() && IsGlobalInSmallSection(GV, TM, Kind)) - return SmallBSSSection; - if (Kind.isDataNoRel() && IsGlobalInSmallSection(GV, TM, Kind)) - return SmallDataSection; - - // Otherwise, we work the same as ELF. - return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang,TM); -} diff --git a/lib/Target/MBlaze/MBlazeTargetObjectFile.h b/lib/Target/MBlaze/MBlazeTargetObjectFile.h deleted file mode 100644 index c313722..0000000 --- a/lib/Target/MBlaze/MBlazeTargetObjectFile.h +++ /dev/null @@ -1,40 +0,0 @@ -//===-- llvm/Target/MBlazeTargetObjectFile.h - MBlaze Obj. Info -*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TARGET_MBLAZE_TARGETOBJECTFILE_H -#define LLVM_TARGET_MBLAZE_TARGETOBJECTFILE_H - -#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" - -namespace llvm { - - class MBlazeTargetObjectFile : public TargetLoweringObjectFileELF { - const MCSection *SmallDataSection; - const MCSection *SmallBSSSection; - public: - - void Initialize(MCContext &Ctx, const TargetMachine &TM); - - /// IsGlobalInSmallSection - Return true if this global address should be - /// placed into small data/bss section. 
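The small-data classification above keys on object size: anything from 1 to 8 bytes is a candidate for .sdata/.sbss and gp-relative addressing, subject to the kind checks that follow. The threshold itself, isolated as a standalone C++ check:

#include <cstdint>
#include <cstdio>

// Same 1..8-byte window as the deleted IsInSmallSection.
static bool isInSmallSection(uint64_t size) {
  return size > 0 && size <= 8;
}

int main() {
  for (uint64_t s : {0u, 4u, 8u, 16u})
    std::printf("size %2llu -> %s\n", (unsigned long long)s,
                isInSmallSection(s) ? "small section" : "regular section");
}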
- bool IsGlobalInSmallSection(const GlobalValue *GV, - const TargetMachine &TM, - SectionKind Kind) const; - - bool IsGlobalInSmallSection(const GlobalValue *GV, - const TargetMachine &TM) const; - - const MCSection *SelectSectionForGlobal(const GlobalValue *GV, - SectionKind Kind, - Mangler *Mang, - const TargetMachine &TM) const; - }; -} // end namespace llvm - -#endif diff --git a/lib/Target/MBlaze/MCTargetDesc/CMakeLists.txt b/lib/Target/MBlaze/MCTargetDesc/CMakeLists.txt deleted file mode 100644 index 36134a6..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -add_llvm_library(LLVMMBlazeDesc - MBlazeAsmBackend.cpp - MBlazeMCAsmInfo.cpp - MBlazeMCCodeEmitter.cpp - MBlazeMCTargetDesc.cpp - MBlazeELFObjectWriter.cpp - ) - -add_dependencies(LLVMMBlazeDesc MBlazeCommonTableGen) diff --git a/lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt b/lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt deleted file mode 100644 index 4982f0f..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt +++ /dev/null @@ -1,23 +0,0 @@ -;===- ./lib/Target/MBlaze/MCTargetDesc/LLVMBuild.txt -----------*- Conf -*--===; -; -; The LLVM Compiler Infrastructure -; -; This file is distributed under the University of Illinois Open Source -; License. See LICENSE.TXT for details. -; -;===------------------------------------------------------------------------===; -; -; This is an LLVMBuild description file for the components in this subdirectory. -; -; For more information on the LLVMBuild system, please see: -; -; http://llvm.org/docs/LLVMBuild.html -; -;===------------------------------------------------------------------------===; - -[component_0] -type = Library -name = MBlazeDesc -parent = MBlaze -required_libraries = MBlazeAsmPrinter MBlazeInfo MC Support -add_to_library_groups = MBlaze diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp deleted file mode 100644 index 6f9752c..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp +++ /dev/null @@ -1,171 +0,0 @@ -//===-- MBlazeAsmBackend.cpp - MBlaze Assembler Backend -------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -#include "MCTargetDesc/MBlazeMCTargetDesc.h" -#include "llvm/ADT/Twine.h" -#include "llvm/MC/MCAsmBackend.h" -#include "llvm/MC/MCAsmLayout.h" -#include "llvm/MC/MCAssembler.h" -#include "llvm/MC/MCELFObjectWriter.h" -#include "llvm/MC/MCELFSymbolFlags.h" -#include "llvm/MC/MCExpr.h" -#include "llvm/MC/MCObjectWriter.h" -#include "llvm/MC/MCSectionELF.h" -#include "llvm/MC/MCSectionMachO.h" -#include "llvm/MC/MCValue.h" -#include "llvm/Support/ELF.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/TargetRegistry.h" -#include "llvm/Support/raw_ostream.h" -using namespace llvm; - -static unsigned getFixupKindSize(unsigned Kind) { - switch (Kind) { - default: llvm_unreachable("invalid fixup kind!"); - case FK_Data_1: return 1; - case FK_PCRel_2: - case FK_Data_2: return 2; - case FK_PCRel_4: - case FK_Data_4: return 4; - case FK_Data_8: return 8; - } -} - - -namespace { - -class MBlazeAsmBackend : public MCAsmBackend { -public: - MBlazeAsmBackend(const Target &T) - : MCAsmBackend() { - } - - unsigned getNumFixupKinds() const { - return 2; - } - - bool mayNeedRelaxation(const MCInst &Inst) const; - - bool fixupNeedsRelaxation(const MCFixup &Fixup, - uint64_t Value, - const MCRelaxableFragment *DF, - const MCAsmLayout &Layout) const; - - void relaxInstruction(const MCInst &Inst, MCInst &Res) const; - - bool writeNopData(uint64_t Count, MCObjectWriter *OW) const; - - unsigned getPointerSize() const { - return 4; - } -}; - -static unsigned getRelaxedOpcode(unsigned Op) { - switch (Op) { - default: return Op; - case MBlaze::ADDIK: return MBlaze::ADDIK32; - case MBlaze::ORI: return MBlaze::ORI32; - case MBlaze::BRLID: return MBlaze::BRLID32; - } -} - -bool MBlazeAsmBackend::mayNeedRelaxation(const MCInst &Inst) const { - if (getRelaxedOpcode(Inst.getOpcode()) == Inst.getOpcode()) - return false; - - bool hasExprOrImm = false; - for (unsigned i = 0; i < Inst.getNumOperands(); ++i) - hasExprOrImm |= Inst.getOperand(i).isExpr(); - - return hasExprOrImm; -} - -bool MBlazeAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, - uint64_t Value, - const MCRelaxableFragment *DF, - const MCAsmLayout &Layout) const { - // FIXME: Is this right? It's what the "generic" code was doing before, - // but is X86 specific. Is it actually true for MBlaze also, or was it - // just close enough to not be a big deal? - // - // Relax if the value is too big for a (signed) i8. 
- return int64_t(Value) != int64_t(int8_t(Value)); -} - -void MBlazeAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const { - Res = Inst; - Res.setOpcode(getRelaxedOpcode(Inst.getOpcode())); -} - -bool MBlazeAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { - if ((Count % 4) != 0) - return false; - - for (uint64_t i = 0; i < Count; i += 4) - OW->Write32(0x00000000); - - return true; -} -} // end anonymous namespace - -namespace { -class ELFMBlazeAsmBackend : public MBlazeAsmBackend { -public: - uint8_t OSABI; - ELFMBlazeAsmBackend(const Target &T, uint8_t _OSABI) - : MBlazeAsmBackend(T), OSABI(_OSABI) { } - - void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, - uint64_t Value) const; - - MCObjectWriter *createObjectWriter(raw_ostream &OS) const { - return createMBlazeELFObjectWriter(OS, OSABI); - } -}; - -void ELFMBlazeAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, - unsigned DataSize, uint64_t Value) const { - unsigned Size = getFixupKindSize(Fixup.getKind()); - - assert(Fixup.getOffset() + Size <= DataSize && - "Invalid fixup offset!"); - - char *data = Data + Fixup.getOffset(); - switch (Size) { - default: llvm_unreachable("Cannot fixup unknown value."); - case 1: llvm_unreachable("Cannot fixup 1 byte value."); - case 8: llvm_unreachable("Cannot fixup 8 byte value."); - - case 4: - *(data+7) = uint8_t(Value); - *(data+6) = uint8_t(Value >> 8); - *(data+3) = uint8_t(Value >> 16); - *(data+2) = uint8_t(Value >> 24); - break; - - case 2: - *(data+3) = uint8_t(Value >> 0); - *(data+2) = uint8_t(Value >> 8); - } -} -} // end anonymous namespace - -MCAsmBackend *llvm::createMBlazeAsmBackend(const Target &T, StringRef TT, - StringRef CPU) { - Triple TheTriple(TT); - - if (TheTriple.isOSDarwin()) - assert(0 && "Mac not supported on MBlaze"); - - if (TheTriple.isOSWindows()) - assert(0 && "Windows not supported on MBlaze"); - - uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); - return new ELFMBlazeAsmBackend(T, OSABI); -} diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h b/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h deleted file mode 100644 index 437026e..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h +++ /dev/null @@ -1,237 +0,0 @@ -//===-- MBlazeBaseInfo.h - Top level definitions for MBlaze -- --*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains small standalone helper functions and enum definitions for -// the MBlaze target useful for the compiler back-end and the MC libraries. -// As such, it deliberately does not include references to LLVM core -// code gen types, passes, etc.. -// -//===----------------------------------------------------------------------===// - -#ifndef MBlazeBASEINFO_H -#define MBlazeBASEINFO_H - -#include "MBlazeMCTargetDesc.h" -#include "llvm/Support/ErrorHandling.h" - -namespace llvm { - -/// MBlazeII - This namespace holds all of the target specific flags that -/// instruction info tracks. -/// -namespace MBlazeII { - enum { - // PseudoFrm - This represents an instruction that is a pseudo instruction - // or one that has not been implemented yet. It is illegal to code generate - // it, but tolerated for intermediate implementation stages. 
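The fixupNeedsRelaxation body above is the usual sign-extension round-trip for asking whether a value fits a signed 8-bit immediate. A standalone cross-check (plain C++; the uint64_t-to-int8_t narrowing is two's-complement on every platform LLVM supports):

#include <cassert>
#include <cstdint>
#include <cstdio>

static bool fitsInSignedI8(uint64_t value) {
  // Fits iff truncating to i8 and sign-extending back is lossless.
  return int64_t(value) == int64_t(int8_t(value));
}

int main() {
  assert(fitsInSignedI8(127));
  assert(fitsInSignedI8(uint64_t(-128)));  // -128 round-trips
  assert(!fitsInSignedI8(128));            // out of i8 range: relax
  assert(!fitsInSignedI8(uint64_t(-129)));
  std::puts("i8 range checks pass");
}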
- FPseudo = 0, - FRRR, - FRRI, - FCRR, - FCRI, - FRCR, - FRCI, - FCCR, - FCCI, - FRRCI, - FRRC, - FRCX, - FRCS, - FCRCS, - FCRCX, - FCX, - FCR, - FRIR, - FRRRR, - FRI, - FC, - FRR, - FormMask = 63 - - //===------------------------------------------------------------------===// - // MBlaze Specific MachineOperand flags. - // MO_NO_FLAG, - - /// MO_GOT - Represents the offset into the global offset table at which - /// the address the relocation entry symbol resides during execution. - // MO_GOT, - - /// MO_GOT_CALL - Represents the offset into the global offset table at - /// which the address of a call site relocation entry symbol resides - /// during execution. This is different from the above since this flag - /// can only be present in call instructions. - // MO_GOT_CALL, - - /// MO_GPREL - Represents the offset from the current gp value to be used - /// for the relocatable object file being produced. - // MO_GPREL, - - /// MO_ABS_HILO - Represents the hi or low part of an absolute symbol - /// address. - // MO_ABS_HILO - - }; -} - -static inline bool isMBlazeRegister(unsigned Reg) { - return Reg <= 31; -} - -static inline bool isSpecialMBlazeRegister(unsigned Reg) { - switch (Reg) { - case 0x0000 : case 0x0001 : case 0x0003 : case 0x0005 : - case 0x0007 : case 0x000B : case 0x000D : case 0x1000 : - case 0x1001 : case 0x1002 : case 0x1003 : case 0x1004 : - case 0x2000 : case 0x2001 : case 0x2002 : case 0x2003 : - case 0x2004 : case 0x2005 : case 0x2006 : case 0x2007 : - case 0x2008 : case 0x2009 : case 0x200A : case 0x200B : - return true; - - default: - return false; - } -} - -/// getMBlazeRegisterNumbering - Given the enum value for some register, e.g. -/// MBlaze::R0, return the number that it corresponds to (e.g. 0). -static inline unsigned getMBlazeRegisterNumbering(unsigned RegEnum) { - switch (RegEnum) { - case MBlaze::R0 : return 0; - case MBlaze::R1 : return 1; - case MBlaze::R2 : return 2; - case MBlaze::R3 : return 3; - case MBlaze::R4 : return 4; - case MBlaze::R5 : return 5; - case MBlaze::R6 : return 6; - case MBlaze::R7 : return 7; - case MBlaze::R8 : return 8; - case MBlaze::R9 : return 9; - case MBlaze::R10 : return 10; - case MBlaze::R11 : return 11; - case MBlaze::R12 : return 12; - case MBlaze::R13 : return 13; - case MBlaze::R14 : return 14; - case MBlaze::R15 : return 15; - case MBlaze::R16 : return 16; - case MBlaze::R17 : return 17; - case MBlaze::R18 : return 18; - case MBlaze::R19 : return 19; - case MBlaze::R20 : return 20; - case MBlaze::R21 : return 21; - case MBlaze::R22 : return 22; - case MBlaze::R23 : return 23; - case MBlaze::R24 : return 24; - case MBlaze::R25 : return 25; - case MBlaze::R26 : return 26; - case MBlaze::R27 : return 27; - case MBlaze::R28 : return 28; - case MBlaze::R29 : return 29; - case MBlaze::R30 : return 30; - case MBlaze::R31 : return 31; - case MBlaze::RPC : return 0x0000; - case MBlaze::RMSR : return 0x0001; - case MBlaze::REAR : return 0x0003; - case MBlaze::RESR : return 0x0005; - case MBlaze::RFSR : return 0x0007; - case MBlaze::RBTR : return 0x000B; - case MBlaze::REDR : return 0x000D; - case MBlaze::RPID : return 0x1000; - case MBlaze::RZPR : return 0x1001; - case MBlaze::RTLBX : return 0x1002; - case MBlaze::RTLBLO : return 0x1003; - case MBlaze::RTLBHI : return 0x1004; - case MBlaze::RPVR0 : return 0x2000; - case MBlaze::RPVR1 : return 0x2001; - case MBlaze::RPVR2 : return 0x2002; - case MBlaze::RPVR3 : return 0x2003; - case MBlaze::RPVR4 : return 0x2004; - case MBlaze::RPVR5 : return 0x2005; - case MBlaze::RPVR6 : 
return 0x2006; - case MBlaze::RPVR7 : return 0x2007; - case MBlaze::RPVR8 : return 0x2008; - case MBlaze::RPVR9 : return 0x2009; - case MBlaze::RPVR10 : return 0x200A; - case MBlaze::RPVR11 : return 0x200B; - default: llvm_unreachable("Unknown register number!"); - } -} - -/// getRegisterFromNumbering - Given the enum value for some register, e.g. -/// MBlaze::R0, return the number that it corresponds to (e.g. 0). -static inline unsigned getMBlazeRegisterFromNumbering(unsigned Reg) { - switch (Reg) { - case 0 : return MBlaze::R0; - case 1 : return MBlaze::R1; - case 2 : return MBlaze::R2; - case 3 : return MBlaze::R3; - case 4 : return MBlaze::R4; - case 5 : return MBlaze::R5; - case 6 : return MBlaze::R6; - case 7 : return MBlaze::R7; - case 8 : return MBlaze::R8; - case 9 : return MBlaze::R9; - case 10 : return MBlaze::R10; - case 11 : return MBlaze::R11; - case 12 : return MBlaze::R12; - case 13 : return MBlaze::R13; - case 14 : return MBlaze::R14; - case 15 : return MBlaze::R15; - case 16 : return MBlaze::R16; - case 17 : return MBlaze::R17; - case 18 : return MBlaze::R18; - case 19 : return MBlaze::R19; - case 20 : return MBlaze::R20; - case 21 : return MBlaze::R21; - case 22 : return MBlaze::R22; - case 23 : return MBlaze::R23; - case 24 : return MBlaze::R24; - case 25 : return MBlaze::R25; - case 26 : return MBlaze::R26; - case 27 : return MBlaze::R27; - case 28 : return MBlaze::R28; - case 29 : return MBlaze::R29; - case 30 : return MBlaze::R30; - case 31 : return MBlaze::R31; - default: llvm_unreachable("Unknown register number!"); - } -} - -static inline unsigned getSpecialMBlazeRegisterFromNumbering(unsigned Reg) { - switch (Reg) { - case 0x0000 : return MBlaze::RPC; - case 0x0001 : return MBlaze::RMSR; - case 0x0003 : return MBlaze::REAR; - case 0x0005 : return MBlaze::RESR; - case 0x0007 : return MBlaze::RFSR; - case 0x000B : return MBlaze::RBTR; - case 0x000D : return MBlaze::REDR; - case 0x1000 : return MBlaze::RPID; - case 0x1001 : return MBlaze::RZPR; - case 0x1002 : return MBlaze::RTLBX; - case 0x1003 : return MBlaze::RTLBLO; - case 0x1004 : return MBlaze::RTLBHI; - case 0x2000 : return MBlaze::RPVR0; - case 0x2001 : return MBlaze::RPVR1; - case 0x2002 : return MBlaze::RPVR2; - case 0x2003 : return MBlaze::RPVR3; - case 0x2004 : return MBlaze::RPVR4; - case 0x2005 : return MBlaze::RPVR5; - case 0x2006 : return MBlaze::RPVR6; - case 0x2007 : return MBlaze::RPVR7; - case 0x2008 : return MBlaze::RPVR8; - case 0x2009 : return MBlaze::RPVR9; - case 0x200A : return MBlaze::RPVR10; - case 0x200B : return MBlaze::RPVR11; - default: llvm_unreachable("Unknown register number!"); - } -} - -} // end namespace llvm; - -#endif diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp deleted file mode 100644 index 2824b3c..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp +++ /dev/null @@ -1,77 +0,0 @@ -//===-- MBlazeELFObjectWriter.cpp - MBlaze ELF Writer ---------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
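The forward and inverse switches above hand-maintain a bijection between LLVM register enums and the sparse MicroBlaze hardware numbers. A table-driven alternative derives the inverse from the forward table so the two directions cannot drift apart; a toy C++ sketch with a cut-down enum, not the generated MBlaze definitions:

#include <cstdint>
#include <cstdio>
#include <map>

enum SpecialReg { RPC, RMSR, REAR, RESR };

// Forward table: enum -> sparse hardware number.
static const std::map<SpecialReg, uint16_t> RegToNum = {
  {RPC, 0x0000}, {RMSR, 0x0001}, {REAR, 0x0003}, {RESR, 0x0005}};

int main() {
  // Inverse built once from the forward table.
  std::map<uint16_t, SpecialReg> NumToReg;
  for (const auto &kv : RegToNum)
    NumToReg[kv.second] = kv.first;
  std::printf("REAR -> 0x%04X, 0x0005 -> enum %d\n",
              RegToNum.at(REAR), int(NumToReg.at(0x0005)));
}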
-// -//===----------------------------------------------------------------------===// - -#include "MCTargetDesc/MBlazeMCTargetDesc.h" -#include "llvm/MC/MCELFObjectWriter.h" -#include "llvm/MC/MCFixup.h" -#include "llvm/Support/ErrorHandling.h" - -using namespace llvm; - -namespace { - class MBlazeELFObjectWriter : public MCELFObjectTargetWriter { - public: - MBlazeELFObjectWriter(uint8_t OSABI); - - virtual ~MBlazeELFObjectWriter(); - protected: - virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, - bool IsPCRel, bool IsRelocWithSymbol, - int64_t Addend) const; - }; -} - -MBlazeELFObjectWriter::MBlazeELFObjectWriter(uint8_t OSABI) - : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_MBLAZE, - /*HasRelocationAddend*/ false) {} - -MBlazeELFObjectWriter::~MBlazeELFObjectWriter() { -} - -unsigned MBlazeELFObjectWriter::GetRelocType(const MCValue &Target, - const MCFixup &Fixup, - bool IsPCRel, - bool IsRelocWithSymbol, - int64_t Addend) const { - // determine the type of the relocation - unsigned Type; - if (IsPCRel) { - switch ((unsigned)Fixup.getKind()) { - default: - llvm_unreachable("Unimplemented"); - case FK_PCRel_4: - Type = ELF::R_MICROBLAZE_64_PCREL; - break; - case FK_PCRel_2: - Type = ELF::R_MICROBLAZE_32_PCREL; - break; - } - } else { - switch ((unsigned)Fixup.getKind()) { - default: llvm_unreachable("invalid fixup kind!"); - case FK_Data_4: - Type = ((IsRelocWithSymbol || Addend !=0) - ? ELF::R_MICROBLAZE_32 - : ELF::R_MICROBLAZE_64); - break; - case FK_Data_2: - Type = ELF::R_MICROBLAZE_32; - break; - } - } - return Type; -} - - - -MCObjectWriter *llvm::createMBlazeELFObjectWriter(raw_ostream &OS, - uint8_t OSABI) { - MCELFObjectTargetWriter *MOTW = new MBlazeELFObjectWriter(OSABI); - return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/ false); -} diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp deleted file mode 100644 index 8231f07..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp +++ /dev/null @@ -1,26 +0,0 @@ -//===-- MBlazeMCAsmInfo.cpp - MBlaze asm properties -----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the declarations of the MBlazeMCAsmInfo properties. -// -//===----------------------------------------------------------------------===// - -#include "MBlazeMCAsmInfo.h" -using namespace llvm; - -void MBlazeMCAsmInfo::anchor() { } - -MBlazeMCAsmInfo::MBlazeMCAsmInfo() { - IsLittleEndian = false; - StackGrowsUp = false; - SupportsDebugInformation = true; - AlignmentIsInBytes = false; - PrivateGlobalPrefix = "$"; - GPRel32Directive = "\t.gpword\t"; -} diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h deleted file mode 100644 index 977f9a6..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h +++ /dev/null @@ -1,30 +0,0 @@ -//===-- MBlazeMCAsmInfo.h - MBlaze asm properties --------------*- C++ -*--===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains the declaration of the MBlazeMCAsmInfo class. 
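GetRelocType above is a pure function of the fixup kind, PC-relativity, and whether a symbol or addend is involved. Note the naming mismatch the backend's own TODO file flags: a 4-byte PC-relative fixup maps to R_MICROBLAZE_64_PCREL, apparently because the Xilinx names count the bytes of instructions patched (IMM prefix plus instruction) rather than the immediate width. A condensed C++ restatement:

#include <cstdint>
#include <cstdio>

enum RelocType {
  R_MICROBLAZE_32, R_MICROBLAZE_64,
  R_MICROBLAZE_32_PCREL, R_MICROBLAZE_64_PCREL
};

// Same decision tree as the deleted GetRelocType, with the fixup kind
// reduced to its size in bytes (2 or 4).
static RelocType chooseReloc(bool isPCRel, unsigned fixupBytes,
                             bool relocWithSymbol, int64_t addend) {
  if (isPCRel)
    return fixupBytes == 4 ? R_MICROBLAZE_64_PCREL : R_MICROBLAZE_32_PCREL;
  if (fixupBytes == 2)
    return R_MICROBLAZE_32;
  // 4-byte data: _32 when a symbol or addend is present, else _64.
  return (relocWithSymbol || addend != 0) ? R_MICROBLAZE_32
                                          : R_MICROBLAZE_64;
}

int main() {
  std::printf("pc-relative 4-byte fixup -> reloc %d\n",
              int(chooseReloc(true, 4, false, 0)));
}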
-// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZETARGETASMINFO_H -#define MBLAZETARGETASMINFO_H - -#include "llvm/MC/MCAsmInfo.h" - -namespace llvm { - class Target; - - class MBlazeMCAsmInfo : public MCAsmInfo { - virtual void anchor(); - public: - explicit MBlazeMCAsmInfo(); - }; - -} // namespace llvm - -#endif diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp deleted file mode 100644 index 8faff6a..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp +++ /dev/null @@ -1,222 +0,0 @@ -//===-- MBlazeMCCodeEmitter.cpp - Convert MBlaze code to machine code -----===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the MBlazeMCCodeEmitter class. -// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "mccodeemitter" -#include "MCTargetDesc/MBlazeMCTargetDesc.h" -#include "MCTargetDesc/MBlazeBaseInfo.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/MC/MCCodeEmitter.h" -#include "llvm/MC/MCExpr.h" -#include "llvm/MC/MCFixup.h" -#include "llvm/MC/MCInst.h" -#include "llvm/MC/MCInstrInfo.h" -#include "llvm/MC/MCSubtargetInfo.h" -#include "llvm/MC/MCSymbol.h" -#include "llvm/Support/raw_ostream.h" -using namespace llvm; - -STATISTIC(MCNumEmitted, "Number of MC instructions emitted"); - -namespace { -class MBlazeMCCodeEmitter : public MCCodeEmitter { - MBlazeMCCodeEmitter(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION; - void operator=(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION; - const MCInstrInfo &MCII; - -public: - MBlazeMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, - MCContext &ctx) - : MCII(mcii) { - } - - ~MBlazeMCCodeEmitter() {} - - // getBinaryCodeForInstr - TableGen'erated function for getting the - // binary encoding for an instruction. - uint64_t getBinaryCodeForInstr(const MCInst &MI) const; - - /// getMachineOpValue - Return binary encoding of operand. If the machine - /// operand requires relocation, record the relocation and return zero. - unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO) const; - unsigned getMachineOpValue(const MCInst &MI, unsigned OpIdx) const { - return getMachineOpValue(MI, MI.getOperand(OpIdx)); - } - - static unsigned GetMBlazeRegNum(const MCOperand &MO) { - // FIXME: getMBlazeRegisterNumbering() is sufficient? - llvm_unreachable("MBlazeMCCodeEmitter::GetMBlazeRegNum() not yet " - "implemented."); - } - - void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const { - // The MicroBlaze uses a bit reversed format so we need to reverse the - // order of the bits. 
Taken from: - // http://graphics.stanford.edu/~seander/bithacks.html - C = ((C * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32; - - OS << (char)C; - ++CurByte; - } - - void EmitRawByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const { - OS << (char)C; - ++CurByte; - } - - void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte, - raw_ostream &OS) const { - assert(Size <= 8 && "size too big in emit constant"); - - for (unsigned i = 0; i != Size; ++i) { - EmitByte(Val & 255, CurByte, OS); - Val >>= 8; - } - } - - void EmitIMM(const MCOperand &imm, unsigned &CurByte, raw_ostream &OS) const; - void EmitIMM(const MCInst &MI, unsigned &CurByte, raw_ostream &OS) const; - - void EmitImmediate(const MCInst &MI, unsigned opNo, bool pcrel, - unsigned &CurByte, raw_ostream &OS, - SmallVectorImpl<MCFixup> &Fixups) const; - - void EncodeInstruction(const MCInst &MI, raw_ostream &OS, - SmallVectorImpl<MCFixup> &Fixups) const; -}; - -} // end anonymous namespace - - -MCCodeEmitter *llvm::createMBlazeMCCodeEmitter(const MCInstrInfo &MCII, - const MCRegisterInfo &MRI, - const MCSubtargetInfo &STI, - MCContext &Ctx) { - return new MBlazeMCCodeEmitter(MCII, STI, Ctx); -} - -/// getMachineOpValue - Return binary encoding of operand. If the machine -/// operand requires relocation, record the relocation and return zero. -unsigned MBlazeMCCodeEmitter::getMachineOpValue(const MCInst &MI, - const MCOperand &MO) const { - if (MO.isReg()) - return getMBlazeRegisterNumbering(MO.getReg()); - if (MO.isImm()) - return static_cast<unsigned>(MO.getImm()); - if (MO.isExpr()) - return 0; // The relocation has already been recorded at this point. -#ifndef NDEBUG - errs() << MO; -#endif - llvm_unreachable(0); -} - -void MBlazeMCCodeEmitter:: -EmitIMM(const MCOperand &imm, unsigned &CurByte, raw_ostream &OS) const { - int32_t val = (int32_t)imm.getImm(); - if (val > 32767 || val < -32768) { - EmitByte(0x0D, CurByte, OS); - EmitByte(0x00, CurByte, OS); - EmitRawByte((val >> 24) & 0xFF, CurByte, OS); - EmitRawByte((val >> 16) & 0xFF, CurByte, OS); - } -} - -void MBlazeMCCodeEmitter:: -EmitIMM(const MCInst &MI, unsigned &CurByte,raw_ostream &OS) const { - switch (MI.getOpcode()) { - default: break; - - case MBlaze::ADDIK32: - case MBlaze::ORI32: - case MBlaze::BRLID32: - EmitByte(0x0D, CurByte, OS); - EmitByte(0x00, CurByte, OS); - EmitRawByte(0, CurByte, OS); - EmitRawByte(0, CurByte, OS); - } -} - -void MBlazeMCCodeEmitter:: -EmitImmediate(const MCInst &MI, unsigned opNo, bool pcrel, unsigned &CurByte, - raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const { - assert(MI.getNumOperands()>opNo && "Not enought operands for instruction"); - - MCOperand oper = MI.getOperand(opNo); - - if (oper.isImm()) { - EmitIMM(oper, CurByte, OS); - } else if (oper.isExpr()) { - MCFixupKind FixupKind; - switch (MI.getOpcode()) { - default: - FixupKind = pcrel ? FK_PCRel_2 : FK_Data_2; - Fixups.push_back(MCFixup::Create(0,oper.getExpr(),FixupKind)); - break; - case MBlaze::ORI32: - case MBlaze::ADDIK32: - case MBlaze::BRLID32: - FixupKind = pcrel ? FK_PCRel_4 : FK_Data_4; - Fixups.push_back(MCFixup::Create(0,oper.getExpr(),FixupKind)); - break; - } - } -} - - - -void MBlazeMCCodeEmitter:: -EncodeInstruction(const MCInst &MI, raw_ostream &OS, - SmallVectorImpl<MCFixup> &Fixups) const { - unsigned Opcode = MI.getOpcode(); - const MCInstrDesc &Desc = MCII.get(Opcode); - uint64_t TSFlags = Desc.TSFlags; - // Keep track of the current byte being emitted. 
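The one-liner in EmitByte above is the classic reverse-a-byte-in-four-operations trick (64-bit multiply, no division) from the cited bithacks page. It is cheap to verify exhaustively; a standalone C++ cross-check against a naive bit loop:

#include <cstdint>
#include <cstdio>

// Four-operation reversal, exactly as in the deleted EmitByte.
static uint8_t reverseByte(uint8_t c) {
  return uint8_t(((c * 0x80200802ULL) & 0x0884422110ULL)
                 * 0x0101010101ULL >> 32);
}

// Naive reference: shift bits out one at a time.
static uint8_t reverseByteNaive(uint8_t c) {
  uint8_t r = 0;
  for (int i = 0; i < 8; ++i)
    r = uint8_t((r << 1) | ((c >> i) & 1));
  return r;
}

int main() {
  for (unsigned v = 0; v < 256; ++v)
    if (reverseByte(uint8_t(v)) != reverseByteNaive(uint8_t(v)))
      return std::printf("mismatch at %u\n", v), 1;
  std::puts("all 256 byte values reverse correctly");
}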
- unsigned CurByte = 0; - - // Emit an IMM instruction if the instruction we are encoding requires it - EmitIMM(MI,CurByte,OS); - - switch ((TSFlags & MBlazeII::FormMask)) { - default: break; - case MBlazeII::FPseudo: - // Pseudo instructions don't get encoded. - return; - case MBlazeII::FRRI: - EmitImmediate(MI, 2, false, CurByte, OS, Fixups); - break; - case MBlazeII::FRIR: - EmitImmediate(MI, 1, false, CurByte, OS, Fixups); - break; - case MBlazeII::FCRI: - EmitImmediate(MI, 1, true, CurByte, OS, Fixups); - break; - case MBlazeII::FRCI: - EmitImmediate(MI, 1, true, CurByte, OS, Fixups); - case MBlazeII::FCCI: - EmitImmediate(MI, 0, true, CurByte, OS, Fixups); - break; - } - - ++MCNumEmitted; // Keep track of the # of mi's emitted - unsigned Value = getBinaryCodeForInstr(MI); - EmitConstant(Value, 4, CurByte, OS); -} - -// FIXME: These #defines shouldn't be necessary. Instead, tblgen should -// be able to generate code emitter helpers for either variant, like it -// does for the AsmWriter. -#define MBlazeCodeEmitter MBlazeMCCodeEmitter -#define MachineInstr MCInst -#include "MBlazeGenCodeEmitter.inc" -#undef MBlazeCodeEmitter -#undef MachineInstr diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp deleted file mode 100644 index 5bc0668..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp +++ /dev/null @@ -1,141 +0,0 @@ -//===-- MBlazeMCTargetDesc.cpp - MBlaze Target Descriptions ---------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file provides MBlaze specific target descriptions. 
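EmitIMM above implements the MicroBlaze IMM convention: immediate fields are only 16 bits wide, so a value outside [-32768, 32767] is encoded as a preceding IMM instruction carrying the high half, with the instruction proper carrying the low half. The split in isolation, as a standalone C++ sketch:

#include <cstdint>
#include <cstdio>

struct ImmSplit { bool needsImm; uint16_t high, low; };

// Same range test as the deleted EmitIMM.
static ImmSplit splitImm32(int32_t v) {
  bool wide = v > 32767 || v < -32768;
  uint32_t u = uint32_t(v);
  return { wide, uint16_t(u >> 16), uint16_t(u & 0xFFFF) };
}

int main() {
  for (int32_t v : {100, -5, 70000, -700000}) {
    ImmSplit s = splitImm32(v);
    std::printf("%8ld -> %s high=0x%04X low=0x%04X\n", long(v),
                s.needsImm ? "IMM prefix +" : "single insn,", s.high, s.low);
  }
}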
-// -//===----------------------------------------------------------------------===// - -#include "MBlazeMCTargetDesc.h" -#include "InstPrinter/MBlazeInstPrinter.h" -#include "MBlazeMCAsmInfo.h" -#include "llvm/MC/MCCodeGenInfo.h" -#include "llvm/MC/MCInstrInfo.h" -#include "llvm/MC/MCRegisterInfo.h" -#include "llvm/MC/MCStreamer.h" -#include "llvm/MC/MCSubtargetInfo.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/TargetRegistry.h" - -#define GET_INSTRINFO_MC_DESC -#include "MBlazeGenInstrInfo.inc" - -#define GET_SUBTARGETINFO_MC_DESC -#include "MBlazeGenSubtargetInfo.inc" - -#define GET_REGINFO_MC_DESC -#include "MBlazeGenRegisterInfo.inc" - -using namespace llvm; - - -static MCInstrInfo *createMBlazeMCInstrInfo() { - MCInstrInfo *X = new MCInstrInfo(); - InitMBlazeMCInstrInfo(X); - return X; -} - -static MCRegisterInfo *createMBlazeMCRegisterInfo(StringRef TT) { - MCRegisterInfo *X = new MCRegisterInfo(); - InitMBlazeMCRegisterInfo(X, MBlaze::R15); - return X; -} - -static MCSubtargetInfo *createMBlazeMCSubtargetInfo(StringRef TT, StringRef CPU, - StringRef FS) { - MCSubtargetInfo *X = new MCSubtargetInfo(); - InitMBlazeMCSubtargetInfo(X, TT, CPU, FS); - return X; -} - -static MCAsmInfo *createMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { - Triple TheTriple(TT); - switch (TheTriple.getOS()) { - default: - return new MBlazeMCAsmInfo(); - } -} - -static MCCodeGenInfo *createMBlazeMCCodeGenInfo(StringRef TT, Reloc::Model RM, - CodeModel::Model CM, - CodeGenOpt::Level OL) { - MCCodeGenInfo *X = new MCCodeGenInfo(); - if (RM == Reloc::Default) - RM = Reloc::Static; - if (CM == CodeModel::Default) - CM = CodeModel::Small; - X->InitMCCodeGenInfo(RM, CM, OL); - return X; -} - -static MCStreamer *createMCStreamer(const Target &T, StringRef TT, - MCContext &Ctx, MCAsmBackend &MAB, - raw_ostream &_OS, - MCCodeEmitter *_Emitter, - bool RelaxAll, - bool NoExecStack) { - Triple TheTriple(TT); - - if (TheTriple.isOSDarwin()) { - llvm_unreachable("MBlaze does not support Darwin MACH-O format"); - } - - if (TheTriple.isOSWindows()) { - llvm_unreachable("MBlaze does not support Windows COFF format"); - } - - return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack); -} - -static MCInstPrinter *createMBlazeMCInstPrinter(const Target &T, - unsigned SyntaxVariant, - const MCAsmInfo &MAI, - const MCInstrInfo &MII, - const MCRegisterInfo &MRI, - const MCSubtargetInfo &STI) { - if (SyntaxVariant == 0) - return new MBlazeInstPrinter(MAI, MII, MRI); - return 0; -} - -// Force static initialization. -extern "C" void LLVMInitializeMBlazeTargetMC() { - // Register the MC asm info. - RegisterMCAsmInfoFn X(TheMBlazeTarget, createMCAsmInfo); - - // Register the MC codegen info. - TargetRegistry::RegisterMCCodeGenInfo(TheMBlazeTarget, - createMBlazeMCCodeGenInfo); - - // Register the MC instruction info. - TargetRegistry::RegisterMCInstrInfo(TheMBlazeTarget, createMBlazeMCInstrInfo); - - // Register the MC register info. - TargetRegistry::RegisterMCRegInfo(TheMBlazeTarget, - createMBlazeMCRegisterInfo); - - // Register the MC subtarget info. 
- TargetRegistry::RegisterMCSubtargetInfo(TheMBlazeTarget, - createMBlazeMCSubtargetInfo); - - // Register the MC code emitter - TargetRegistry::RegisterMCCodeEmitter(TheMBlazeTarget, - llvm::createMBlazeMCCodeEmitter); - - // Register the asm backend - TargetRegistry::RegisterMCAsmBackend(TheMBlazeTarget, - createMBlazeAsmBackend); - - // Register the object streamer - TargetRegistry::RegisterMCObjectStreamer(TheMBlazeTarget, - createMCStreamer); - - // Register the MCInstPrinter. - TargetRegistry::RegisterMCInstPrinter(TheMBlazeTarget, - createMBlazeMCInstPrinter); -} diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h deleted file mode 100644 index 7bc7d8f..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h +++ /dev/null @@ -1,56 +0,0 @@ -//===-- MBlazeMCTargetDesc.h - MBlaze Target Descriptions -------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file provides MBlaze specific target descriptions. -// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZEMCTARGETDESC_H -#define MBLAZEMCTARGETDESC_H - -#include "llvm/Support/DataTypes.h" - -namespace llvm { -class MCAsmBackend; -class MCContext; -class MCCodeEmitter; -class MCInstrInfo; -class MCObjectWriter; -class MCRegisterInfo; -class MCSubtargetInfo; -class Target; -class StringRef; -class raw_ostream; - -extern Target TheMBlazeTarget; - -MCCodeEmitter *createMBlazeMCCodeEmitter(const MCInstrInfo &MCII, - const MCRegisterInfo &MRI, - const MCSubtargetInfo &STI, - MCContext &Ctx); - -MCAsmBackend *createMBlazeAsmBackend(const Target &T, StringRef TT, - StringRef CPU); - -MCObjectWriter *createMBlazeELFObjectWriter(raw_ostream &OS, uint8_t OSABI); -} // End llvm namespace - -// Defines symbolic names for MBlaze registers. This defines a mapping from -// register name to register number. -#define GET_REGINFO_ENUM -#include "MBlazeGenRegisterInfo.inc" - -// Defines symbolic names for the MBlaze instructions. -#define GET_INSTRINFO_ENUM -#include "MBlazeGenInstrInfo.inc" - -#define GET_SUBTARGETINFO_ENUM -#include "MBlazeGenSubtargetInfo.inc" - -#endif diff --git a/lib/Target/MBlaze/MCTargetDesc/Makefile b/lib/Target/MBlaze/MCTargetDesc/Makefile deleted file mode 100644 index 71075ff..0000000 --- a/lib/Target/MBlaze/MCTargetDesc/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -##===- lib/Target/MBlaze/TargetDesc/Makefile ---------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## - -LEVEL = ../../../.. -LIBRARYNAME = LLVMMBlazeDesc - -# Hack: we need to include 'main' target directory to grab private headers -CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. - -include $(LEVEL)/Makefile.common diff --git a/lib/Target/MBlaze/Makefile b/lib/Target/MBlaze/Makefile deleted file mode 100644 index 512ce9a..0000000 --- a/lib/Target/MBlaze/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -##===- lib/Target/MBlaze/Makefile --------------------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. 
See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## -LEVEL = ../../.. -LIBRARYNAME = LLVMMBlazeCodeGen -TARGET = MBlaze - -# Make sure that tblgen is run, first thing. -BUILT_SOURCES = MBlazeGenRegisterInfo.inc MBlazeGenInstrInfo.inc \ - MBlazeGenAsmWriter.inc \ - MBlazeGenDAGISel.inc MBlazeGenAsmMatcher.inc \ - MBlazeGenCodeEmitter.inc MBlazeGenCallingConv.inc \ - MBlazeGenSubtargetInfo.inc MBlazeGenIntrinsics.inc - -DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc - -include $(LEVEL)/Makefile.common - diff --git a/lib/Target/MBlaze/TODO b/lib/Target/MBlaze/TODO deleted file mode 100644 index 317d7c0..0000000 --- a/lib/Target/MBlaze/TODO +++ /dev/null @@ -1,21 +0,0 @@ -* Writing out ELF files is close to working but the following needs to - be examined more closely: - - Relocations use 2-byte / 4-byte to terminology in reference to - the size of the immediate value being changed. The Xilinx - terminology seems to be (???) 4-byte / 8-byte in reference - to the number of bytes of instructions that are being changed. - -* Code generation seems to work relatively well now but the following - needs to be examined more closely: - - The stack layout needs to be examined to make sure it meets - the standard, especially in regards to var arg functions. - - Look at the MBlazeGenFastISel.inc stuff and make use of it - if appropriate. - -* A basic assembly parser is present now and seems to parse most things. - There are a few things that need to be looked at: - - There are some instructions that are not generated by the backend - and have not been tested as far as the parser is concerned. - - The assembly parser does not use many MicroBlaze specific directives. - I should investigate if there are MicroBlaze specific directive and, - if there are, add them. diff --git a/lib/Target/MBlaze/TargetInfo/CMakeLists.txt b/lib/Target/MBlaze/TargetInfo/CMakeLists.txt deleted file mode 100644 index b554d9b..0000000 --- a/lib/Target/MBlaze/TargetInfo/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. - ${CMAKE_CURRENT_SOURCE_DIR}/.. ) - -add_llvm_library(LLVMMBlazeInfo - MBlazeTargetInfo.cpp - ) - -add_dependencies(LLVMMBlazeInfo MBlazeCommonTableGen) diff --git a/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt b/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt deleted file mode 100644 index ba7ee5d..0000000 --- a/lib/Target/MBlaze/TargetInfo/LLVMBuild.txt +++ /dev/null @@ -1,23 +0,0 @@ -;===- ./lib/Target/MBlaze/TargetInfo/LLVMBuild.txt -------------*- Conf -*--===; -; -; The LLVM Compiler Infrastructure -; -; This file is distributed under the University of Illinois Open Source -; License. See LICENSE.TXT for details. -; -;===------------------------------------------------------------------------===; -; -; This is an LLVMBuild description file for the components in this subdirectory. 
-; -; For more information on the LLVMBuild system, please see: -; -; http://llvm.org/docs/LLVMBuild.html -; -;===------------------------------------------------------------------------===; - -[component_0] -type = Library -name = MBlazeInfo -parent = MBlaze -required_libraries = MC Support Target -add_to_library_groups = MBlaze diff --git a/lib/Target/MBlaze/TargetInfo/MBlazeTargetInfo.cpp b/lib/Target/MBlaze/TargetInfo/MBlazeTargetInfo.cpp deleted file mode 100644 index 323a7f6..0000000 --- a/lib/Target/MBlaze/TargetInfo/MBlazeTargetInfo.cpp +++ /dev/null @@ -1,19 +0,0 @@ -//===-- MBlazeTargetInfo.cpp - MBlaze Target Implementation ---------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "MBlaze.h" -#include "llvm/IR/Module.h" -#include "llvm/Support/TargetRegistry.h" -using namespace llvm; - -Target llvm::TheMBlazeTarget; - -extern "C" void LLVMInitializeMBlazeTargetInfo() { - RegisterTarget<Triple::mblaze> X(TheMBlazeTarget, "mblaze", "MBlaze"); -} diff --git a/lib/Target/MBlaze/TargetInfo/Makefile b/lib/Target/MBlaze/TargetInfo/Makefile deleted file mode 100644 index fb7ea11..0000000 --- a/lib/Target/MBlaze/TargetInfo/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -##===- lib/Target/MBlaze/TargetInfo/Makefile ---------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## -LEVEL = ../../../.. -LIBRARYNAME = LLVMMBlazeInfo - -# Hack: we need to include 'main' target directory to grab private headers -CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. - -include $(LEVEL)/Makefile.common diff --git a/lib/Target/MSP430/CMakeLists.txt b/lib/Target/MSP430/CMakeLists.txt index f9ecaed..c9b3c3d 100644 --- a/lib/Target/MSP430/CMakeLists.txt +++ b/lib/Target/MSP430/CMakeLists.txt @@ -23,7 +23,7 @@ add_llvm_target(MSP430CodeGen MSP430MCInstLower.cpp ) -add_dependencies(LLVMMSP430CodeGen intrinsics_gen) +add_dependencies(LLVMMSP430CodeGen MSP430CommonTableGen intrinsics_gen) add_subdirectory(InstPrinter) add_subdirectory(TargetInfo) diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp index 76bc1e7..543f54c 100644 --- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp +++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp @@ -259,7 +259,8 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue N, } Base = (AM.BaseType == MSP430ISelAddressMode::FrameIndexBase) ? - CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI->getPointerTy()) : + CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, + getTargetLowering()->getPointerTy()) : AM.Base.Reg; if (AM.GV) diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index a4818b2..803e899 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -169,6 +169,7 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) : setOperationAction(ISD::VAARG, MVT::Other, Expand); setOperationAction(ISD::VAEND, MVT::Other, Expand); setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction(ISD::JumpTable, MVT::i16, Custom); // Libcalls names. 
if (HWMultMode == HWMultIntr) { @@ -199,6 +200,7 @@ SDValue MSP430TargetLowering::LowerOperation(SDValue Op, case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG); + case ISD::JumpTable: return LowerJumpTable(Op, DAG); default: llvm_unreachable("unimplemented operand"); } @@ -226,7 +228,7 @@ MSP430TargetLowering::getConstraintType(const std::string &Constraint) const { std::pair<unsigned, const TargetRegisterClass*> MSP430TargetLowering:: getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { // GCC Constraint Letters switch (Constraint[0]) { @@ -277,9 +279,9 @@ MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -454,7 +456,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain, /// LowerCCCCallTo - functions arguments are copied from virtual regs to /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. -/// TODO: sret. +// TODO: sret. SDValue MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, @@ -981,6 +983,14 @@ SDValue MSP430TargetLowering::LowerVASTART(SDValue Op, false, false, 0); } +SDValue MSP430TargetLowering::LowerJumpTable(SDValue Op, + SelectionDAG &DAG) const { + JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); + SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); + return DAG.getNode(MSP430ISD::Wrapper, SDLoc(JT), + getPointerTy(), Result); +} + /// getPostIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if this node can be /// combined with a load / store to form a post-indexed load / store. diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h index 9570ef2..85a861e 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.h +++ b/lib/Target/MSP430/MSP430ISelLowering.h @@ -93,12 +93,13 @@ namespace llvm { SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const; TargetLowering::ConstraintType getConstraintType(const std::string &Constraint) const; std::pair<unsigned, const TargetRegisterClass*> - getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const; + getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const; /// isTruncateFree - Return true if it's free to truncate a value of type /// Ty1 to type Ty2. e.g. 
On msp430 it's free to truncate a i16 value in diff --git a/lib/Target/MSP430/MSP430InstrInfo.td b/lib/Target/MSP430/MSP430InstrInfo.td index e45780d..50e3fda 100644 --- a/lib/Target/MSP430/MSP430InstrInfo.td +++ b/lib/Target/MSP430/MSP430InstrInfo.td @@ -183,10 +183,10 @@ let isBarrier = 1 in { "br\t$brdst", [(brind tblockaddress:$brdst)]>; def Br : I16rr<0, (outs), (ins GR16:$brdst), - "mov.w\t{$brdst, pc}", + "br\t$brdst", [(brind GR16:$brdst)]>; def Bm : I16rm<0, (outs), (ins memsrc:$brdst), - "mov.w\t{$brdst, pc}", + "br\t$brdst", [(brind (load addr:$brdst))]>; } } diff --git a/lib/Target/Mangler.cpp b/lib/Target/Mangler.cpp index 2269b73..f86428c 100644 --- a/lib/Target/Mangler.cpp +++ b/lib/Target/Mangler.cpp @@ -47,18 +47,18 @@ static void MangleLetter(SmallVectorImpl<char> &OutName, unsigned char C) { /// NameNeedsEscaping - Return true if the identifier \p Str needs quotes /// for this assembler. -static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) { +static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo *MAI) { assert(!Str.empty() && "Cannot create an empty MCSymbol"); // If the first character is a number and the target does not allow this, we // need quotes. - if (!MAI.doesAllowNameToStartWithDigit() && Str[0] >= '0' && Str[0] <= '9') + if (!MAI->doesAllowNameToStartWithDigit() && Str[0] >= '0' && Str[0] <= '9') return true; // If any of the characters in the string is an unacceptable character, force // quotes. - bool AllowPeriod = MAI.doesAllowPeriodsInName(); - bool AllowUTF8 = MAI.doesAllowUTF8(); + bool AllowPeriod = MAI->doesAllowPeriodsInName(); + bool AllowUTF8 = MAI->doesAllowUTF8(); for (unsigned i = 0, e = Str.size(); i != e; ++i) if (!isAcceptableChar(Str[i], AllowPeriod, AllowUTF8)) return true; @@ -68,16 +68,16 @@ static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) { /// appendMangledName - Add the specified string in mangled form if it uses /// any unusual characters. static void appendMangledName(SmallVectorImpl<char> &OutName, StringRef Str, - const MCAsmInfo &MAI) { + const MCAsmInfo *MAI) { // The first character is not allowed to be a number unless the target // explicitly allows it. - if (!MAI.doesAllowNameToStartWithDigit() && Str[0] >= '0' && Str[0] <= '9') { + if (!MAI->doesAllowNameToStartWithDigit() && Str[0] >= '0' && Str[0] <= '9') { MangleLetter(OutName, Str[0]); Str = Str.substr(1); } - bool AllowPeriod = MAI.doesAllowPeriodsInName(); - bool AllowUTF8 = MAI.doesAllowUTF8(); + bool AllowPeriod = MAI->doesAllowPeriodsInName(); + bool AllowUTF8 = MAI->doesAllowUTF8(); for (unsigned i = 0, e = Str.size(); i != e; ++i) { if (!isAcceptableChar(Str[i], AllowPeriod, AllowUTF8)) MangleLetter(OutName, Str[i]); @@ -105,39 +105,43 @@ static void appendMangledQuotedName(SmallVectorImpl<char> &OutName, /// and the specified name as the global variable name. GVName must not be /// empty. void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName, - const Twine &GVName, ManglerPrefixTy PrefixTy) { + const Twine &GVName, ManglerPrefixTy PrefixTy, + bool UseGlobalPrefix) { SmallString<256> TmpData; StringRef Name = GVName.toStringRef(TmpData); assert(!Name.empty() && "getNameWithPrefix requires non-empty name"); - const MCAsmInfo &MAI = Context.getAsmInfo(); + const MCAsmInfo *MAI = Context.getAsmInfo(); // If the global name is not led with \1, add the appropriate prefixes. 
if (Name[0] == '\1') { Name = Name.substr(1); } else { if (PrefixTy == Mangler::Private) { - const char *Prefix = MAI.getPrivateGlobalPrefix(); + const char *Prefix = MAI->getPrivateGlobalPrefix(); OutName.append(Prefix, Prefix+strlen(Prefix)); } else if (PrefixTy == Mangler::LinkerPrivate) { - const char *Prefix = MAI.getLinkerPrivateGlobalPrefix(); + const char *Prefix = MAI->getLinkerPrivateGlobalPrefix(); OutName.append(Prefix, Prefix+strlen(Prefix)); } - const char *Prefix = MAI.getGlobalPrefix(); - if (Prefix[0] == 0) - ; // Common noop, no prefix. - else if (Prefix[1] == 0) - OutName.push_back(Prefix[0]); // Common, one character prefix. - else - OutName.append(Prefix, Prefix+strlen(Prefix)); // Arbitrary length prefix. + if (UseGlobalPrefix) { + const char *Prefix = MAI->getGlobalPrefix(); + if (Prefix[0] == 0) + ; // Common noop, no prefix. + else if (Prefix[1] == 0) + OutName.push_back(Prefix[0]); // Common, one character prefix. + else + // Arbitrary length prefix. + OutName.append(Prefix, Prefix+strlen(Prefix)); + } } // If this is a simple string that doesn't need escaping, just append it. if (!NameNeedsEscaping(Name, MAI) || // If quotes are supported, they can be used unless the string contains // a quote or newline. - (MAI.doesAllowQuotesInName() && + (MAI->doesAllowQuotesInName() && Name.find_first_of("\n\"") == StringRef::npos)) { OutName.append(Name.begin(), Name.end()); return; @@ -145,7 +149,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName, // On systems that do not allow quoted names, we need to mangle most // strange characters. - if (!MAI.doesAllowQuotesInName()) + if (!MAI->doesAllowQuotesInName()) return appendMangledName(OutName, Name, MAI); // Okay, the system allows quoted strings. We can quote most anything, the @@ -179,8 +183,8 @@ static void AddFastCallStdCallSuffix(SmallVectorImpl<char> &OutName, /// and the specified global variable's name. If the global variable doesn't /// have a name, this fills in a unique name for the global. void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName, - const GlobalValue *GV, - bool isImplicitlyPrivate) { + const GlobalValue *GV, bool isImplicitlyPrivate, + bool UseGlobalPrefix) { ManglerPrefixTy PrefixTy = Mangler::Default; if (GV->hasPrivateLinkage() || isImplicitlyPrivate) PrefixTy = Mangler::Private; @@ -190,7 +194,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName, // If this global has a name, handle it simply. if (GV->hasName()) { StringRef Name = GV->getName(); - getNameWithPrefix(OutName, Name, PrefixTy); + getNameWithPrefix(OutName, Name, PrefixTy, UseGlobalPrefix); // No need to do anything else if the global has the special "do not mangle" // flag in the name. if (Name[0] == 1) @@ -202,12 +206,13 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName, if (ID == 0) ID = NextAnonGlobalID++; // Must mangle the global into a unique ID. - getNameWithPrefix(OutName, "__unnamed_" + Twine(ID), PrefixTy); + getNameWithPrefix(OutName, "__unnamed_" + Twine(ID), PrefixTy, + UseGlobalPrefix); } // If we are supposed to add a microsoft-style suffix for stdcall/fastcall, // add it. 
- if (Context.getAsmInfo().hasMicrosoftFastStdCallMangling()) { + if (Context.getAsmInfo()->hasMicrosoftFastStdCallMangling()) { if (const Function *F = dyn_cast<Function>(GV)) { CallingConv::ID CC = F->getCallingConv(); diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index d1d69d8..3dd6562 100644 --- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -78,19 +78,20 @@ class MipsAsmParser : public MCTargetAsmParser { SMLoc NameLoc, SmallVectorImpl<MCParsedAsmOperand*> &Operands); - bool parseMathOperation(StringRef Name, SMLoc NameLoc, - SmallVectorImpl<MCParsedAsmOperand*> &Operands); - bool ParseDirective(AsmToken DirectiveID); MipsAsmParser::OperandMatchResultTy + parseRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands, + int RegKind); + + MipsAsmParser::OperandMatchResultTy parseMemOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands); MipsAsmParser::OperandMatchResultTy - parseCPURegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + parseGPR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands); MipsAsmParser::OperandMatchResultTy - parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + parseGPR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands); MipsAsmParser::OperandMatchResultTy parseHWRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); @@ -101,8 +102,23 @@ class MipsAsmParser : public MCTargetAsmParser { MipsAsmParser::OperandMatchResultTy parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + MipsAsmParser::OperandMatchResultTy + parseAFGR64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + + MipsAsmParser::OperandMatchResultTy + parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + + MipsAsmParser::OperandMatchResultTy + parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + + MipsAsmParser::OperandMatchResultTy + parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + + MipsAsmParser::OperandMatchResultTy + parseACRegsDSP(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + bool searchSymbolAlias(SmallVectorImpl<MCParsedAsmOperand*> &Operands, - unsigned RegisterClass); + unsigned RegKind); bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); @@ -162,6 +178,8 @@ class MipsAsmParser : public MCTargetAsmParser { int matchRegisterByNumber(unsigned RegNum, unsigned RegClass); + int matchFPURegisterName(StringRef Name, FpFormatTy Format); + void setFpFormat(FpFormatTy Format) { FpFormat = Format; } @@ -172,8 +190,6 @@ class MipsAsmParser : public MCTargetAsmParser { FpFormatTy getFpFormat() {return FpFormat;} - bool requestsDoubleOperand(StringRef Mnemonic); - unsigned getReg(int RC, int RegNo); int getATReg(); @@ -202,14 +218,16 @@ class MipsOperand : public MCParsedAsmOperand { public: enum RegisterKind { Kind_None, - Kind_CPURegs, - Kind_CPU64Regs, + Kind_GPR32, + Kind_GPR64, Kind_HWRegs, Kind_HW64Regs, Kind_FGR32Regs, Kind_FGR64Regs, Kind_AFGR64Regs, - Kind_CCRRegs + Kind_CCRRegs, + Kind_FCCRegs, + Kind_ACRegsDSP }; private: @@ -354,45 +372,52 @@ public: return Op; } - bool isCPURegsAsm() const { - return Kind == k_Register && Reg.Kind == Kind_CPURegs; + bool isGPR32Asm() const { + return Kind == k_Register && Reg.Kind == Kind_GPR32; } - void addCPURegsAsmOperands(MCInst &Inst, unsigned N) const { + void addRegAsmOperands(MCInst &Inst, unsigned N) const { Inst.addOperand(MCOperand::CreateReg(Reg.RegNum)); } - bool isCPU64RegsAsm() const { - return Kind == k_Register && Reg.Kind == Kind_CPU64Regs; - 
} - void addCPU64RegsAsmOperands(MCInst &Inst, unsigned N) const { - Inst.addOperand(MCOperand::CreateReg(Reg.RegNum)); + bool isGPR64Asm() const { + return Kind == k_Register && Reg.Kind == Kind_GPR64; } bool isHWRegsAsm() const { assert((Kind == k_Register) && "Invalid access!"); return Reg.Kind == Kind_HWRegs; } - void addHWRegsAsmOperands(MCInst &Inst, unsigned N) const { - Inst.addOperand(MCOperand::CreateReg(Reg.RegNum)); - } bool isHW64RegsAsm() const { assert((Kind == k_Register) && "Invalid access!"); return Reg.Kind == Kind_HW64Regs; } - void addHW64RegsAsmOperands(MCInst &Inst, unsigned N) const { - Inst.addOperand(MCOperand::CreateReg(Reg.RegNum)); - } - - void addCCRAsmOperands(MCInst &Inst, unsigned N) const { - Inst.addOperand(MCOperand::CreateReg(Reg.RegNum)); - } bool isCCRAsm() const { assert((Kind == k_Register) && "Invalid access!"); return Reg.Kind == Kind_CCRRegs; } + bool isAFGR64Asm() const { + return Kind == k_Register && Reg.Kind == Kind_AFGR64Regs; + } + + bool isFGR64Asm() const { + return Kind == k_Register && Reg.Kind == Kind_FGR64Regs; + } + + bool isFGR32Asm() const { + return (Kind == k_Register) && Reg.Kind == Kind_FGR32Regs; + } + + bool isFCCRegsAsm() const { + return (Kind == k_Register) && Reg.Kind == Kind_FCCRegs; + } + + bool isACRegsDSPAsm() const { + return Kind == k_Register && Reg.Kind == Kind_ACRegsDSP; + } + /// getStartLoc - Get the location of the first token of this operand. SMLoc getStartLoc() const { return StartLoc; @@ -624,8 +649,8 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, unsigned ImmOffset, HiOffset, LoOffset; const MCExpr *ExprOffset; unsigned TmpRegNum; - unsigned AtRegNum = getReg((isMips64()) ? Mips::CPU64RegsRegClassID - : Mips::CPURegsRegClassID, getATReg()); + unsigned AtRegNum = getReg((isMips64()) ? Mips::GPR64RegClassID + : Mips::GPR32RegClassID, getATReg()); // 1st operand is either the source or destination register. assert(Inst.getOperand(0).isReg() && "expected register operand kind"); unsigned RegOpNum = Inst.getOperand(0).getReg(); @@ -800,16 +825,7 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) { return CC; } -int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) { - - if (Name.equals("fcc0")) - return Mips::FCC0; - - int CC; - CC = matchCPURegisterName(Name); - if (CC != -1) - return matchRegisterByNumber(CC, is64BitReg ? Mips::CPU64RegsRegClassID - : Mips::CPURegsRegClassID); +int MipsAsmParser::matchFPURegisterName(StringRef Name, FpFormatTy Format) { if (Name[0] == 'f') { StringRef NumString = Name.substr(1); @@ -819,8 +835,6 @@ int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) { if (IntVal > 31) return -1; - FpFormatTy Format = getFpFormat(); - if (Format == FP_FORMAT_S || Format == FP_FORMAT_W) return getReg(Mips::FGR32RegClassID, IntVal); if (Format == FP_FORMAT_D) { @@ -833,10 +847,22 @@ int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) { return getReg(Mips::AFGR64RegClassID, IntVal / 2); } } - return -1; } +int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) { + + if (Name.equals("fcc0")) + return Mips::FCC0; + + int CC; + CC = matchCPURegisterName(Name); + if (CC != -1) + return matchRegisterByNumber(CC, is64BitReg ? 
Mips::GPR64RegClassID + : Mips::GPR32RegClassID); + return matchFPURegisterName(Name, getFpFormat()); +} + void MipsAsmParser::setDefaultFpFormat() { if (isMips64() || isFP64()) @@ -845,18 +871,6 @@ void MipsAsmParser::setDefaultFpFormat() { FpFormat = FP_FORMAT_S; } -bool MipsAsmParser::requestsDoubleOperand(StringRef Mnemonic){ - - bool IsDouble = StringSwitch<bool>(Mnemonic.lower()) - .Case("ldxc1", true) - .Case("ldc1", true) - .Case("sdxc1", true) - .Case("sdc1", true) - .Default(false); - - return IsDouble; -} - void MipsAsmParser::setFpFormat(StringRef Format) { FpFormat = StringSwitch<FpFormatTy>(Format.lower()) @@ -880,7 +894,7 @@ int MipsAsmParser::getATReg() { } unsigned MipsAsmParser::getReg(int RC, int RegNo) { - return *(getContext().getRegisterInfo().getRegClass(RC).begin() + RegNo); + return *(getContext().getRegisterInfo()->getRegClass(RC).begin() + RegNo); } int MipsAsmParser::matchRegisterByNumber(unsigned RegNum, unsigned RegClass) { @@ -900,7 +914,7 @@ int MipsAsmParser::tryParseRegister(bool is64BitReg) { RegNum = matchRegisterName(lowerCase, is64BitReg); } else if (Tok.is(AsmToken::Integer)) RegNum = matchRegisterByNumber(static_cast<unsigned>(Tok.getIntVal()), - is64BitReg ? Mips::CPU64RegsRegClassID : Mips::CPURegsRegClassID); + is64BitReg ? Mips::GPR64RegClassID : Mips::GPR32RegClassID); return RegNum; } @@ -1253,12 +1267,11 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand( } MipsAsmParser::OperandMatchResultTy -MipsAsmParser::parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - - if (!isMips64()) - return MatchOperand_NoMatch; +MipsAsmParser::parseRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands, + int RegKind) { + MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind; if (getLexer().getKind() == AsmToken::Identifier) { - if (searchSymbolAlias(Operands, MipsOperand::Kind_CPU64Regs)) + if (searchSymbolAlias(Operands, Kind)) return MatchOperand_Success; return MatchOperand_NoMatch; } @@ -1267,17 +1280,137 @@ MipsAsmParser::parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { return MatchOperand_NoMatch; Parser.Lex(); // Eat $ - if (!tryParseRegisterOperand(Operands, true)) { + if (!tryParseRegisterOperand(Operands, + RegKind == MipsOperand::Kind_GPR64)) { // Set the proper register kind. MipsOperand* op = static_cast<MipsOperand*>(Operands.back()); - op->setRegKind(MipsOperand::Kind_CPU64Regs); + op->setRegKind(Kind); + if ((Kind == MipsOperand::Kind_GPR32) + && (getLexer().is(AsmToken::LParen))) { + // Check if it is indexed addressing operand. + Operands.push_back(MipsOperand::CreateToken("(", getLexer().getLoc())); + Parser.Lex(); // Eat the parenthesis. 
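Besides unifying the old parseCPURegs/parseCPU64Regs pair, parseRegs now accepts the indexed form $rX($rY) for GPR32 operands: on seeing '(' it pushes the token, recurses for the inner register, and requires a closing ')'. A standalone sketch of that grammar over a pre-lexed token stream (the Token type and helper are illustrative, not LLVM's):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct Token { std::string Text; };

// Returns true if the tokens starting at I match: reg '(' reg ')'.
static bool matchIndexedOperand(const std::vector<Token> &Toks, std::size_t I) {
  auto IsReg = [](const Token &T) {
    return !T.Text.empty() && T.Text[0] == '$';
  };
  return I + 3 < Toks.size() && IsReg(Toks[I]) && Toks[I + 1].Text == "(" &&
         IsReg(Toks[I + 2]) && Toks[I + 3].Text == ")";
}

int main() {
  std::vector<Token> Toks = {{"$4"}, {"("}, {"$5"}, {")"}};
  assert(matchIndexedOperand(Toks, 0)); // lw-style base+index operand
  return 0;
}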
+ if (parseRegs(Operands,RegKind) != MatchOperand_Success) + return MatchOperand_NoMatch; + if (getLexer().isNot(AsmToken::RParen)) + return MatchOperand_NoMatch; + Operands.push_back(MipsOperand::CreateToken(")", getLexer().getLoc())); + Parser.Lex(); + } return MatchOperand_Success; } return MatchOperand_NoMatch; } +MipsAsmParser::OperandMatchResultTy +MipsAsmParser::parseGPR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + + if (!isMips64()) + return MatchOperand_NoMatch; + return parseRegs(Operands, (int) MipsOperand::Kind_GPR64); +} + +MipsAsmParser::OperandMatchResultTy +MipsAsmParser::parseGPR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + return parseRegs(Operands, (int) MipsOperand::Kind_GPR32); +} + +MipsAsmParser::OperandMatchResultTy +MipsAsmParser::parseAFGR64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + + if (isFP64()) + return MatchOperand_NoMatch; + // Double operand is expected, set appropriate format + setFpFormat(FP_FORMAT_D); + + return parseRegs(Operands, (int) MipsOperand::Kind_AFGR64Regs); +} + +MipsAsmParser::OperandMatchResultTy +MipsAsmParser::parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + if (!isFP64()) + return MatchOperand_NoMatch; + // Double operand is expected, set appropriate format + setFpFormat(FP_FORMAT_D); + + return parseRegs(Operands, (int) MipsOperand::Kind_FGR64Regs); +} + +MipsAsmParser::OperandMatchResultTy +MipsAsmParser::parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + // Single operand is expected, set appropriate format + setFpFormat(FP_FORMAT_S); + return parseRegs(Operands, (int) MipsOperand::Kind_FGR32Regs); +} + +MipsAsmParser::OperandMatchResultTy +MipsAsmParser::parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + // If the first token is not '$' we have an error. + if (Parser.getTok().isNot(AsmToken::Dollar)) + return MatchOperand_NoMatch; + + SMLoc S = Parser.getTok().getLoc(); + Parser.Lex(); // Eat the '$' + + const AsmToken &Tok = Parser.getTok(); // Get next token. + + if (Tok.isNot(AsmToken::Identifier)) + return MatchOperand_NoMatch; + + if (!Tok.getIdentifier().startswith("fcc")) + return MatchOperand_NoMatch; + + StringRef NumString = Tok.getIdentifier().substr(3); + + unsigned IntVal; + if (NumString.getAsInteger(10, IntVal)) + return MatchOperand_NoMatch; + + unsigned Reg = matchRegisterByNumber(IntVal, Mips::FCCRegClassID); + + MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc()); + Op->setRegKind(MipsOperand::Kind_FCCRegs); + Operands.push_back(Op); + + Parser.Lex(); // Eat the register number. + return MatchOperand_Success; +} + +MipsAsmParser::OperandMatchResultTy +MipsAsmParser::parseACRegsDSP(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + // If the first token is not '$' we have an error. + if (Parser.getTok().isNot(AsmToken::Dollar)) + return MatchOperand_NoMatch; + + SMLoc S = Parser.getTok().getLoc(); + Parser.Lex(); // Eat the '$' + + const AsmToken &Tok = Parser.getTok(); // Get next token. 
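parseFCCRegs recognizes condition-code registers by name: strip the "fcc" prefix, convert the remainder with getAsInteger, and match the number against the FCC register class. A minimal standalone equivalent, with std::string in place of StringRef (the explicit 0-7 range check mirrors the eight FCC registers; the real parser leaves that to matchRegisterByNumber):

#include <cstdlib>
#include <string>

// Returns the index for names like "fcc0".."fcc7", or -1 on no match.
static int parseFCCName(const std::string &Name) {
  if (Name.compare(0, 3, "fcc") != 0)
    return -1;
  const char *Num = Name.c_str() + 3;
  char *End = nullptr;
  long N = std::strtol(Num, &End, 10);
  if (End == Num || *End != '\0' || N < 0 || N > 7)
    return -1; // empty number, trailing junk, or out of range
  return static_cast<int>(N);
}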
+ + if (Tok.isNot(AsmToken::Identifier)) + return MatchOperand_NoMatch; + + if (!Tok.getIdentifier().startswith("acc")) + return MatchOperand_NoMatch; + + StringRef NumString = Tok.getIdentifier().substr(3); + + unsigned IntVal; + if (NumString.getAsInteger(10, IntVal)) + return MatchOperand_NoMatch; + + unsigned Reg = matchRegisterByNumber(IntVal, Mips::ACRegsDSPRegClassID); + + MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc()); + Op->setRegKind(MipsOperand::Kind_ACRegsDSP); + Operands.push_back(Op); + + Parser.Lex(); // Eat the register number. + return MatchOperand_Success; +} + bool MipsAsmParser::searchSymbolAlias( - SmallVectorImpl<MCParsedAsmOperand*> &Operands, unsigned RegisterKind) { + SmallVectorImpl<MCParsedAsmOperand*> &Operands, unsigned RegKind) { MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier()); if (Sym) { @@ -1288,6 +1421,7 @@ bool MipsAsmParser::searchSymbolAlias( else return false; if (Expr->getKind() == MCExpr::SymbolRef) { + MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind) RegKind; const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr); const StringRef DefSymbol = Ref->getSymbol().getName(); if (DefSymbol.startswith("$")) { @@ -1295,17 +1429,31 @@ bool MipsAsmParser::searchSymbolAlias( APInt IntVal(32, -1); if (!DefSymbol.substr(1).getAsInteger(10, IntVal)) RegNum = matchRegisterByNumber(IntVal.getZExtValue(), - isMips64() - ? Mips::CPU64RegsRegClassID - : Mips::CPURegsRegClassID); - else - // Lookup for the register with corresponding name - RegNum = matchRegisterName(DefSymbol.substr(1), isMips64()); + isMips64() + ? Mips::GPR64RegClassID + : Mips::GPR32RegClassID); + else { + // Lookup for the register with the corresponding name. + switch (Kind) { + case MipsOperand::Kind_AFGR64Regs: + case MipsOperand::Kind_FGR64Regs: + RegNum = matchFPURegisterName(DefSymbol.substr(1), FP_FORMAT_D); + break; + case MipsOperand::Kind_FGR32Regs: + RegNum = matchFPURegisterName(DefSymbol.substr(1), FP_FORMAT_S); + break; + case MipsOperand::Kind_GPR64: + case MipsOperand::Kind_GPR32: + default: + RegNum = matchRegisterName(DefSymbol.substr(1), isMips64()); + break; + } + } if (RegNum > -1) { Parser.Lex(); MipsOperand *op = MipsOperand::CreateReg(RegNum, S, Parser.getTok().getLoc()); - op->setRegKind((MipsOperand::RegisterKind) RegisterKind); + op->setRegKind(Kind); Operands.push_back(op); return true; } @@ -1314,7 +1462,7 @@ bool MipsAsmParser::searchSymbolAlias( Parser.Lex(); const MCConstantExpr *Const = static_cast<const MCConstantExpr*>(Expr); MipsOperand *op = MipsOperand::CreateImm(Const, S, - Parser.getTok().getLoc()); + Parser.getTok().getLoc()); Operands.push_back(op); return true; } @@ -1323,33 +1471,8 @@ bool MipsAsmParser::searchSymbolAlias( } MipsAsmParser::OperandMatchResultTy -MipsAsmParser::parseCPURegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - - if (getLexer().getKind() == AsmToken::Identifier) { - if (searchSymbolAlias(Operands, MipsOperand::Kind_CPURegs)) - return MatchOperand_Success; - return MatchOperand_NoMatch; - } - // If the first token is not '$' we have an error. - if (Parser.getTok().isNot(AsmToken::Dollar)) - return MatchOperand_NoMatch; - - Parser.Lex(); // Eat $ - if (!tryParseRegisterOperand(Operands, false)) { - // Set the proper register kind. 
- MipsOperand* op = static_cast<MipsOperand*>(Operands.back()); - op->setRegKind(MipsOperand::Kind_CPURegs); - return MatchOperand_Success; - } - return MatchOperand_NoMatch; -} - -MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseHWRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - if (isMips64()) - return MatchOperand_NoMatch; - // If the first token is not '$' we have error. if (Parser.getTok().isNot(AsmToken::Dollar)) return MatchOperand_NoMatch; @@ -1406,30 +1529,23 @@ MipsAsmParser::parseHW64Regs( MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - unsigned RegNum; // If the first token is not '$' we have an error. if (Parser.getTok().isNot(AsmToken::Dollar)) return MatchOperand_NoMatch; + SMLoc S = Parser.getTok().getLoc(); Parser.Lex(); // Eat the '$' const AsmToken &Tok = Parser.getTok(); // Get next token. - if (Tok.is(AsmToken::Integer)) { - RegNum = Tok.getIntVal(); - // At the moment only fcc0 is supported. - if (RegNum != 0) - return MatchOperand_ParseFail; - } else if (Tok.is(AsmToken::Identifier)) { - // At the moment only fcc0 is supported. - if (Tok.getIdentifier() != "fcc0") - return MatchOperand_ParseFail; - } else + + if (Tok.isNot(AsmToken::Integer)) return MatchOperand_NoMatch; - MipsOperand *op = MipsOperand::CreateReg(Mips::FCC0, S, - Parser.getTok().getLoc()); - op->setRegKind(MipsOperand::Kind_CCRRegs); - Operands.push_back(op); + unsigned Reg = matchRegisterByNumber(Tok.getIntVal(), Mips::CCRRegClassID); + + MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc()); + Op->setRegKind(MipsOperand::Kind_CCRRegs); + Operands.push_back(Op); Parser.Lex(); // Eat the register number. return MatchOperand_Success; @@ -1460,136 +1576,22 @@ MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) { return VK; } -// Converts condition string to immediate operand value. -static int ConvertCcString(StringRef CondString) { - int CC = StringSwitch<unsigned>(CondString) - .Case(".f", 0) - .Case(".un", 1) - .Case(".eq", 2) - .Case(".ueq", 3) - .Case(".olt", 4) - .Case(".ult", 5) - .Case(".ole", 6) - .Case(".ule", 7) - .Case(".sf", 8) - .Case(".ngle", 9) - .Case(".seq", 10) - .Case(".ngl", 11) - .Case(".lt", 12) - .Case(".nge", 13) - .Case(".le", 14) - .Case(".ngt", 15) - .Default(-1); - - return CC; -} - -bool MipsAsmParser:: -parseMathOperation(StringRef Name, SMLoc NameLoc, - SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - // Split the format. - size_t Start = Name.find('.'), Next = Name.rfind('.'); - StringRef Format1 = Name.slice(Start, Next); - // Add the first format to the operands. - Operands.push_back(MipsOperand::CreateToken(Format1, NameLoc)); - // Now for the second format. - StringRef Format2 = Name.slice(Next, StringRef::npos); - Operands.push_back(MipsOperand::CreateToken(Format2, NameLoc)); - - // Set the format for the first register. - setFpFormat(Format1); - - // Read the remaining operands. - if (getLexer().isNot(AsmToken::EndOfStatement)) { - // Read the first operand. - if (ParseOperand(Operands, Name)) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "unexpected token in argument list"); - } - - if (getLexer().isNot(AsmToken::Comma)) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "unexpected token in argument list"); - } - Parser.Lex(); // Eat the comma. - - // Set the format for the first register - setFpFormat(Format2); - - // Parse and remember the operand. 
- if (ParseOperand(Operands, Name)) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "unexpected token in argument list"); - } - } - - if (getLexer().isNot(AsmToken::EndOfStatement)) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "unexpected token in argument list"); - } - - Parser.Lex(); // Consume the EndOfStatement. - return false; -} bool MipsAsmParser:: ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - StringRef Mnemonic; - // Floating point instructions: Should the register be treated as a double? - if (requestsDoubleOperand(Name)) { - setFpFormat(FP_FORMAT_D); - Operands.push_back(MipsOperand::CreateToken(Name, NameLoc)); - Mnemonic = Name; - } else { - setDefaultFpFormat(); - // Create the leading tokens for the mnemonic, split by '.' characters. - size_t Start = 0, Next = Name.find('.'); - Mnemonic = Name.slice(Start, Next); - - Operands.push_back(MipsOperand::CreateToken(Mnemonic, NameLoc)); - - if (Next != StringRef::npos) { - // There is a format token in mnemonic. - size_t Dot = Name.find('.', Next + 1); - StringRef Format = Name.slice(Next, Dot); - if (Dot == StringRef::npos) // Only one '.' in a string, it's a format. - Operands.push_back(MipsOperand::CreateToken(Format, NameLoc)); - else { - if (Name.startswith("c.")) { - // Floating point compare, add '.' and immediate represent for cc. - Operands.push_back(MipsOperand::CreateToken(".", NameLoc)); - int Cc = ConvertCcString(Format); - if (Cc == -1) { - return Error(NameLoc, "Invalid conditional code"); - } - SMLoc E = SMLoc::getFromPointer( - Parser.getTok().getLoc().getPointer() - 1); - Operands.push_back( - MipsOperand::CreateImm(MCConstantExpr::Create(Cc, getContext()), - NameLoc, E)); - } else { - // trunc, ceil, floor ... - return parseMathOperation(Name, NameLoc, Operands); - } - - // The rest is a format. - Format = Name.slice(Dot, StringRef::npos); - Operands.push_back(MipsOperand::CreateToken(Format, NameLoc)); - } - - setFpFormat(Format); - } + // Check if we have valid mnemonic + if (!mnemonicIsValid(Name, 0)) { + Parser.eatToEndOfStatement(); + return Error(NameLoc, "Unknown instruction"); } + // First operand in MCInst is instruction mnemonic. + Operands.push_back(MipsOperand::CreateToken(Name, NameLoc)); // Read the remaining operands. if (getLexer().isNot(AsmToken::EndOfStatement)) { // Read the first operand. - if (ParseOperand(Operands, Mnemonic)) { + if (ParseOperand(Operands, Name)) { SMLoc Loc = getLexer().getLoc(); Parser.eatToEndOfStatement(); return Error(Loc, "unexpected token in argument list"); @@ -1597,7 +1599,6 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, while (getLexer().is(AsmToken::Comma)) { Parser.Lex(); // Eat the comma. - // Parse and remember the operand. if (ParseOperand(Operands, Name)) { SMLoc Loc = getLexer().getLoc(); @@ -1606,13 +1607,11 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, } } } - if (getLexer().isNot(AsmToken::EndOfStatement)) { SMLoc Loc = getLexer().getLoc(); Parser.eatToEndOfStatement(); return Error(Loc, "unexpected token in argument list"); } - Parser.Lex(); // Consume the EndOfStatement. 
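The rewritten ParseInstruction drops the special-cased parseMathOperation and format-suffix handling: it rejects unknown mnemonics up front, pushes the mnemonic as the first token, then reads comma-separated operands until the end of the statement. A sketch of that shape over a plain text line (illustrative splitter, not the LLVM lexer):

#include <cstddef>
#include <sstream>
#include <string>
#include <vector>

// Splits "addu $4, $5, $6" into {"addu", "$4", "$5", "$6"}: mnemonic first,
// then one entry per comma-separated operand.
static std::vector<std::string> splitInstruction(const std::string &Line) {
  std::vector<std::string> Out;
  std::istringstream In(Line);
  std::string Mnemonic;
  In >> Mnemonic; // the first whitespace-delimited token is the mnemonic
  Out.push_back(Mnemonic);
  std::string Operand;
  while (std::getline(In, Operand, ',')) {
    std::size_t B = Operand.find_first_not_of(" \t");
    std::size_t E = Operand.find_last_not_of(" \t");
    if (B != std::string::npos) // skip empty pieces, trim the rest
      Out.push_back(Operand.substr(B, E - B + 1));
  }
  return Out;
}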
return false; } diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt index 834a998..aedb78b 100644 --- a/lib/Target/Mips/CMakeLists.txt +++ b/lib/Target/Mips/CMakeLists.txt @@ -48,10 +48,11 @@ add_llvm_target(MipsCodeGen MipsSelectionDAGInfo.cpp ) -add_dependencies(LLVMMipsCodeGen intrinsics_gen) +add_dependencies(LLVMMipsCodeGen MipsCommonTableGen intrinsics_gen) add_subdirectory(InstPrinter) add_subdirectory(Disassembler) add_subdirectory(TargetInfo) add_subdirectory(MCTargetDesc) add_subdirectory(AsmParser) + diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp index 4af6703..d99df4d 100644 --- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp +++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp @@ -39,10 +39,10 @@ public: virtual ~MipsDisassemblerBase() {} - const MCRegisterInfo *getRegInfo() const { return RegInfo; } + const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); } private: - const MCRegisterInfo *RegInfo; + OwningPtr<const MCRegisterInfo> RegInfo; protected: bool isBigEndian; }; @@ -88,20 +88,20 @@ public: // Forward declare these because the autogenerated code will reference them. // Definitions are further down. -static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst, - unsigned RegNo, - uint64_t Address, - const void *Decoder); +static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); static DecodeStatus DecodeCPU16RegsRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, const void *Decoder); -static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst, - unsigned RegNo, - uint64_t Address, - const void *Decoder); +static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst, unsigned RegNo, @@ -123,6 +123,11 @@ static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -133,11 +138,6 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst, uint64_t Address, const void *Decoder); -static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst, - unsigned Insn, - uint64_t Address, - const void *Decoder); - static DecodeStatus DecodeACRegsDSPRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, @@ -158,12 +158,6 @@ static DecodeStatus DecodeBranchTarget(MCInst &Inst, uint64_t Address, const void *Decoder); -static DecodeStatus DecodeBC1(MCInst &Inst, - unsigned Insn, - uint64_t Address, - const void *Decoder); - - static DecodeStatus DecodeJumpTarget(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -183,11 +177,6 @@ static DecodeStatus DecodeSimm16(MCInst &Inst, uint64_t Address, const void *Decoder); -static DecodeStatus DecodeCondCode(MCInst &Inst, - unsigned Insn, - uint64_t Address, - const void *Decoder); - static DecodeStatus DecodeInsSize(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -346,26 +335,26 @@ static DecodeStatus DecodeCPU16RegsRegisterClass(MCInst &Inst, } -static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst, - unsigned RegNo, - uint64_t Address, - const void *Decoder) { +static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void 
*Decoder) { if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::CPU64RegsRegClassID, RegNo); + unsigned Reg = getReg(Decoder, Mips::GPR64RegClassID, RegNo); Inst.addOperand(MCOperand::CreateReg(Reg)); return MCDisassembler::Success; } -static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst, - unsigned RegNo, - uint64_t Address, - const void *Decoder) { +static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::CPURegsRegClassID, RegNo); + unsigned Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); Inst.addOperand(MCOperand::CreateReg(Reg)); return MCDisassembler::Success; } @@ -374,7 +363,7 @@ static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, const void *Decoder) { - return DecodeCPURegsRegisterClass(Inst, RegNo, Address, Decoder); + return DecodeGPR32RegisterClass(Inst, RegNo, Address, Decoder); } static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst, @@ -405,7 +394,21 @@ static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, const void *Decoder) { - Inst.addOperand(MCOperand::CreateReg(RegNo)); + if (RegNo > 31) + return MCDisassembler::Fail; + unsigned Reg = getReg(Decoder, Mips::CCRRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 7) + return MCDisassembler::Fail; + unsigned Reg = getReg(Decoder, Mips::FCCRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Reg)); return MCDisassembler::Success; } @@ -417,8 +420,8 @@ static DecodeStatus DecodeMem(MCInst &Inst, unsigned Reg = fieldFromInstruction(Insn, 16, 5); unsigned Base = fieldFromInstruction(Insn, 21, 5); - Reg = getReg(Decoder, Mips::CPURegsRegClassID, Reg); - Base = getReg(Decoder, Mips::CPURegsRegClassID, Base); + Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); + Base = getReg(Decoder, Mips::GPR32RegClassID, Base); if(Inst.getOpcode() == Mips::SC){ Inst.addOperand(MCOperand::CreateReg(Reg)); @@ -440,7 +443,7 @@ static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Base = fieldFromInstruction(Insn, 21, 5); Reg = getReg(Decoder, Mips::FGR64RegClassID, Reg); - Base = getReg(Decoder, Mips::CPURegsRegClassID, Base); + Base = getReg(Decoder, Mips::GPR32RegClassID, Base); Inst.addOperand(MCOperand::CreateReg(Reg)); Inst.addOperand(MCOperand::CreateReg(Base)); @@ -461,15 +464,6 @@ static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst, return MCDisassembler::Success; } -static DecodeStatus DecodeCondCode(MCInst &Inst, - unsigned Insn, - uint64_t Address, - const void *Decoder) { - int CondCode = Insn & 0xf; - Inst.addOperand(MCOperand::CreateImm(CondCode)); - return MCDisassembler::Success; -} - static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, @@ -483,17 +477,6 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst, return MCDisassembler::Success; } -static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst, - unsigned RegNo, - uint64_t Address, - const void *Decoder) { - //Currently only hardware register 29 is supported - if (RegNo != 29) - return MCDisassembler::Fail; - Inst.addOperand(MCOperand::CreateReg(Mips::HWR29_64)); - return MCDisassembler::Success; -} - static DecodeStatus 
DecodeACRegsDSPRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, @@ -540,16 +523,6 @@ static DecodeStatus DecodeBranchTarget(MCInst &Inst, return MCDisassembler::Success; } -static DecodeStatus DecodeBC1(MCInst &Inst, - unsigned Insn, - uint64_t Address, - const void *Decoder) { - unsigned BranchOffset = Insn & 0xffff; - BranchOffset = SignExtend32<18>(BranchOffset << 2) + 4; - Inst.addOperand(MCOperand::CreateImm(BranchOffset)); - return MCDisassembler::Success; -} - static DecodeStatus DecodeJumpTarget(MCInst &Inst, unsigned Insn, uint64_t Address, diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp index fc23cd3..369fece 100644 --- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp +++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp @@ -26,6 +26,12 @@ using namespace llvm; #define PRINT_ALIAS_INSTR #include "MipsGenAsmWriter.inc" +template<unsigned R> +static bool isReg(const MCInst &MI, unsigned OpNo) { + assert(MI.getOperand(OpNo).isReg() && "Register operand expected."); + return MI.getOperand(OpNo).getReg() == R; +} + const char* Mips::MipsFCCToString(Mips::CondCode CC) { switch (CC) { case FCOND_F: @@ -80,7 +86,7 @@ void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O, } // Try to print any aliases first. - if (!printAliasInstr(MI, O)) + if (!printAliasInstr(MI, O) && !printAlias(*MI, O)) printInstruction(MI, O); printAnnotation(O, Annot); @@ -152,11 +158,6 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) { OS << ')'; } -void MipsInstPrinter::printCPURegs(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printRegName(O, MI->getOperand(OpNo).getReg()); -} - void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); @@ -209,3 +210,61 @@ printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O) { const MCOperand& MO = MI->getOperand(opNum); O << MipsFCCToString((Mips::CondCode)MO.getImm()); } + +bool MipsInstPrinter::printAlias(const char *Str, const MCInst &MI, + unsigned OpNo, raw_ostream &OS) { + OS << "\t" << Str << "\t"; + printOperand(&MI, OpNo, OS); + return true; +} + +bool MipsInstPrinter::printAlias(const char *Str, const MCInst &MI, + unsigned OpNo0, unsigned OpNo1, + raw_ostream &OS) { + printAlias(Str, MI, OpNo0, OS); + OS << ", "; + printOperand(&MI, OpNo1, OS); + return true; +} + +bool MipsInstPrinter::printAlias(const MCInst &MI, raw_ostream &OS) { + switch (MI.getOpcode()) { + case Mips::BEQ: + // beq $r0, $zero, $L2 => beqz $r0, $L2 + return isReg<Mips::ZERO>(MI, 1) && printAlias("beqz", MI, 0, 2, OS); + case Mips::BEQ64: + // beq $r0, $zero, $L2 => beqz $r0, $L2 + return isReg<Mips::ZERO_64>(MI, 1) && printAlias("beqz", MI, 0, 2, OS); + case Mips::BNE: + // bne $r0, $zero, $L2 => bnez $r0, $L2 + return isReg<Mips::ZERO>(MI, 1) && printAlias("bnez", MI, 0, 2, OS); + case Mips::BNE64: + // bne $r0, $zero, $L2 => bnez $r0, $L2 + return isReg<Mips::ZERO_64>(MI, 1) && printAlias("bnez", MI, 0, 2, OS); + case Mips::BGEZAL: + // bgezal $zero, $L1 => bal $L1 + return isReg<Mips::ZERO>(MI, 0) && printAlias("bal", MI, 1, OS); + case Mips::BC1T: + // bc1t $fcc0, $L1 => bc1t $L1 + return isReg<Mips::FCC0>(MI, 0) && printAlias("bc1t", MI, 1, OS); + case Mips::BC1F: + // bc1f $fcc0, $L1 => bc1f $L1 + return isReg<Mips::FCC0>(MI, 0) && printAlias("bc1f", MI, 1, OS); + case Mips::JALR: + // jalr $ra, $r1 => jalr $r1 + return isReg<Mips::RA>(MI, 0) && printAlias("jalr", MI, 1, OS); + case Mips::JALR64: + // 
jalr $ra, $r1 => jalr $r1 + return isReg<Mips::RA_64>(MI, 0) && printAlias("jalr", MI, 1, OS); + case Mips::NOR: + // nor $r0, $r1, $zero => not $r0, $r1 + return isReg<Mips::ZERO>(MI, 2) && printAlias("not", MI, 0, 1, OS); + case Mips::NOR64: + // nor $r0, $r1, $zero => not $r0, $r1 + return isReg<Mips::ZERO_64>(MI, 2) && printAlias("not", MI, 0, 1, OS); + case Mips::OR: + // or $r0, $r1, $zero => move $r0, $r1 + return isReg<Mips::ZERO>(MI, 2) && printAlias("move", MI, 0, 1, OS); + default: return false; + } +} diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h index d1b561f..1253ab0 100644 --- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h +++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h @@ -87,7 +87,6 @@ public: virtual void printRegName(raw_ostream &OS, unsigned RegNo) const; virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot); - void printCPURegs(const MCInst *MI, unsigned OpNo, raw_ostream &O); bool printAliasInstr(const MCInst *MI, raw_ostream &OS); @@ -97,6 +96,12 @@ private: void printMemOperand(const MCInst *MI, int opNum, raw_ostream &O); void printMemOperandEA(const MCInst *MI, int opNum, raw_ostream &O); void printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O); + + bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo, + raw_ostream &OS); + bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo0, + unsigned OpNo1, raw_ostream &OS); + bool printAlias(const MCInst &MI, raw_ostream &OS); }; } // end namespace llvm diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp index c33bc9a..cfcb877 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp @@ -36,6 +36,10 @@ namespace llvm { MCAssembler& MCA = getAssembler(); unsigned EFlags = MCA.getELFHeaderEFlags(); + // TODO: Need to add -mabicalls and -mno-abicalls flags. + // Currently we assume that -mabicalls is the default. 
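Setting EF_MIPS_CPIC below is a plain bitwise OR into the ELF header's e_flags word, alongside the existing MIPS16 ASE flag. A sketch of the accumulation with the constants written out (values from the MIPS ELF ABI; treat the exact numbers as assumptions of this note):

#include <cstdint>

static const uint32_t EF_MIPS_CPIC = 0x00000004;         // code calls PIC code
static const uint32_t EF_MIPS_ARCH_ASE_M16 = 0x04000000; // MIPS16 ASE present

static uint32_t addMipsEFlags(uint32_t EFlags, bool InMips16Mode) {
  EFlags |= EF_MIPS_CPIC; // assume -mabicalls by default, as the TODO notes
  if (InMips16Mode)
    EFlags |= EF_MIPS_ARCH_ASE_M16;
  return EFlags;
}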
+ EFlags |= ELF::EF_MIPS_CPIC; + if (Subtarget.inMips16Mode()) EFlags |= ELF::EF_MIPS_ARCH_ASE_M16; else diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp index a464dfe..4dc6917 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp @@ -380,7 +380,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups) const { if (MO.isReg()) { unsigned Reg = MO.getReg(); - unsigned RegNo = Ctx.getRegisterInfo().getEncodingValue(Reg); + unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Reg); return RegNo; } else if (MO.isImm()) { return static_cast<unsigned>(MO.getImm()); diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td index 7a42719..249d712 100644 --- a/lib/Target/Mips/MicroMipsInstrInfo.td +++ b/lib/Target/Mips/MicroMipsInstrInfo.td @@ -1,69 +1,67 @@ let isCodeGenOnly = 1 in { /// Arithmetic Instructions (ALU Immediate) - def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd>, + def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd>, ADDI_FM_MM<0xc>; - def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>, + def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>, ADDI_FM_MM<0x4>; - def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>, + def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>, SLTI_FM_MM<0x24>; - def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>, + def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>, SLTI_FM_MM<0x2c>; - def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, - and>, + def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd>, ADDI_FM_MM<0x34>; - def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>, + def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd>, ADDI_FM_MM<0x14>; - def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, - xor>, + def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd>, ADDI_FM_MM<0x1c>; - def LUi_MM : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM_MM; + def LUi_MM : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM_MM; /// Arithmetic Instructions (3-Operand, R-Type) - def ADDu_MM : MMRel, ArithLogicR<"addu", CPURegsOpnd>, ADD_FM_MM<0, 0x150>; - def SUBu_MM : MMRel, ArithLogicR<"subu", CPURegsOpnd>, ADD_FM_MM<0, 0x1d0>; - def MUL_MM : MMRel, ArithLogicR<"mul", CPURegsOpnd>, ADD_FM_MM<0, 0x210>; - def ADD_MM : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM_MM<0, 0x110>; - def SUB_MM : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM_MM<0, 0x190>; - def SLT_MM : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM_MM<0, 0x350>; - def SLTu_MM : MMRel, SetCC_R<"sltu", setult, CPURegs>, + def ADDu_MM : MMRel, ArithLogicR<"addu", GPR32Opnd>, ADD_FM_MM<0, 0x150>; + def SUBu_MM : MMRel, ArithLogicR<"subu", GPR32Opnd>, ADD_FM_MM<0, 0x1d0>; + def MUL_MM : MMRel, ArithLogicR<"mul", GPR32Opnd>, ADD_FM_MM<0, 0x210>; + def ADD_MM : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM_MM<0, 0x110>; + def SUB_MM : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM_MM<0, 0x190>; + def SLT_MM : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM_MM<0, 0x350>; + def SLTu_MM : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>, ADD_FM_MM<0, 0x390>; - def AND_MM : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>, + def AND_MM : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IIAlu, and>, ADD_FM_MM<0, 
0x250>; - def OR_MM : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>, + def OR_MM : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IIAlu, or>, ADD_FM_MM<0, 0x290>; - def XOR_MM : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>, + def XOR_MM : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IIAlu, xor>, ADD_FM_MM<0, 0x310>; - def NOR_MM : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM_MM<0, 0x2d0>; - def MULT_MM : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>, + def NOR_MM : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM_MM<0, 0x2d0>; + def MULT_MM : MMRel, Mult<"mult", IIImul, GPR32Opnd, [HI, LO]>, MULT_FM_MM<0x22c>; - def MULTu_MM : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>, + def MULTu_MM : MMRel, Mult<"multu", IIImul, GPR32Opnd, [HI, LO]>, MULT_FM_MM<0x26c>; /// Shift Instructions - def SLL_MM : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd>, + def SLL_MM : MMRel, shift_rotate_imm<"sll", shamt, GPR32Opnd>, SRA_FM_MM<0, 0>; - def SRL_MM : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd>, + def SRL_MM : MMRel, shift_rotate_imm<"srl", shamt, GPR32Opnd>, SRA_FM_MM<0x40, 0>; - def SRA_MM : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd>, + def SRA_MM : MMRel, shift_rotate_imm<"sra", shamt, GPR32Opnd>, SRA_FM_MM<0x80, 0>; - def SLLV_MM : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd>, + def SLLV_MM : MMRel, shift_rotate_reg<"sllv", GPR32Opnd>, SRLV_FM_MM<0x10, 0>; - def SRLV_MM : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd>, + def SRLV_MM : MMRel, shift_rotate_reg<"srlv", GPR32Opnd>, SRLV_FM_MM<0x50, 0>; - def SRAV_MM : MMRel, shift_rotate_reg<"srav", CPURegsOpnd>, + def SRAV_MM : MMRel, shift_rotate_reg<"srav", GPR32Opnd>, SRLV_FM_MM<0x90, 0>; - def ROTR_MM : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd>, + def ROTR_MM : MMRel, shift_rotate_imm<"rotr", shamt, GPR32Opnd>, SRA_FM_MM<0xc0, 0>; - def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd>, + def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd>, SRLV_FM_MM<0xd0, 0>; /// Load and Store Instructions - aligned - defm LB_MM : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM_MM<0x7>; - defm LBu_MM : LoadM<"lbu", CPURegs, zextloadi8>, MMRel, LW_FM_MM<0x5>; - defm LH_MM : LoadM<"lh", CPURegs, sextloadi16>, MMRel, LW_FM_MM<0xf>; - defm LHu_MM : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM_MM<0xd>; - defm LW_MM : LoadM<"lw", CPURegs>, MMRel, LW_FM_MM<0x3f>; - defm SB_MM : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM_MM<0x6>; - defm SH_MM : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM_MM<0xe>; - defm SW_MM : StoreM<"sw", CPURegs>, MMRel, LW_FM_MM<0x3e>; + defm LB_MM : LoadM<"lb", GPR32Opnd, sextloadi8>, MMRel, LW_FM_MM<0x7>; + defm LBu_MM : LoadM<"lbu", GPR32Opnd, zextloadi8>, MMRel, LW_FM_MM<0x5>; + defm LH_MM : LoadM<"lh", GPR32Opnd, sextloadi16>, MMRel, LW_FM_MM<0xf>; + defm LHu_MM : LoadM<"lhu", GPR32Opnd, zextloadi16>, MMRel, LW_FM_MM<0xd>; + defm LW_MM : LoadM<"lw", GPR32Opnd>, MMRel, LW_FM_MM<0x3f>; + defm SB_MM : StoreM<"sb", GPR32Opnd, truncstorei8>, MMRel, LW_FM_MM<0x6>; + defm SH_MM : StoreM<"sh", GPR32Opnd, truncstorei16>, MMRel, LW_FM_MM<0xe>; + defm SW_MM : StoreM<"sw", GPR32Opnd>, MMRel, LW_FM_MM<0x3e>; } diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td index eefb02a..2595e41 100644 --- a/lib/Target/Mips/Mips.td +++ b/lib/Target/Mips/Mips.td @@ -101,6 +101,7 @@ def MipsAsmWriter : AsmWriter { def MipsAsmParser : AsmParser { let ShouldEmitMatchRegisterName = 0; + let MnemonicContainsDot = 1; } def MipsAsmParserVariant : AsmParserVariant { diff --git 
a/lib/Target/Mips/Mips16FrameLowering.cpp b/lib/Target/Mips/Mips16FrameLowering.cpp index e180c49..6655ff9 100644 --- a/lib/Target/Mips/Mips16FrameLowering.cpp +++ b/lib/Target/Mips/Mips16FrameLowering.cpp @@ -40,7 +40,7 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const { if (StackSize == 0 && !MFI->adjustsStack()) return; MachineModuleInfo &MMI = MF.getMMI(); - const MCRegisterInfo &MRI = MMI.getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); MachineLocation DstML, SrcML; // Adjust stack. @@ -56,13 +56,16 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const { MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol(); BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel); - unsigned S1 = MRI.getDwarfRegNum(Mips::S1, true); - MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S1, -8)); + unsigned S2 = MRI->getDwarfRegNum(Mips::S2, true); + MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S2, -8)); - unsigned S0 = MRI.getDwarfRegNum(Mips::S0, true); - MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S0, -12)); + unsigned S1 = MRI->getDwarfRegNum(Mips::S1, true); + MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S1, -12)); - unsigned RA = MRI.getDwarfRegNum(Mips::RA, true); + unsigned S0 = MRI->getDwarfRegNum(Mips::S0, true); + MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S0, -16)); + + unsigned RA = MRI->getDwarfRegNum(Mips::RA, true); MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, RA, -4)); if (hasFP(MF)) @@ -168,6 +171,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF, MF.getRegInfo().setPhysRegUsed(Mips::RA); MF.getRegInfo().setPhysRegUsed(Mips::S0); MF.getRegInfo().setPhysRegUsed(Mips::S1); + MF.getRegInfo().setPhysRegUsed(Mips::S2); } const MipsFrameLowering * diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp index 45dd5d7..7e456aa 100644 --- a/lib/Target/Mips/Mips16HardFloat.cpp +++ b/lib/Target/Mips/Mips16HardFloat.cpp @@ -247,7 +247,7 @@ static void assureFPCallStub(Function &F, Module *M, bool LE = Subtarget.isLittle(); std::string Name = F.getName(); std::string SectionName = ".mips16.call.fp." 
+ Name; - std::string StubName = "__call_stub_" + Name; + std::string StubName = "__call_stub_fp_" + Name; // // see if we already have the stub // @@ -257,11 +257,13 @@ static void assureFPCallStub(Function &F, Module *M, Function::InternalLinkage, StubName, M); FStub->addFnAttr("mips16_fp_stub"); FStub->addFnAttr(llvm::Attribute::Naked); + FStub->addFnAttr(llvm::Attribute::NoInline); FStub->addFnAttr(llvm::Attribute::NoUnwind); FStub->addFnAttr("nomips16"); FStub->setSection(SectionName); BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub); InlineAsmHelper IAH(Context, BB); + IAH.Out(".set reorder"); FPReturnVariant RV = whichFPReturnVariant(FStub->getReturnType()); FPParamVariant PV = whichFPParamVariantNeeded(F); swapFPIntParams(PV, M, IAH, LE, true); @@ -361,6 +363,8 @@ static bool fixupFPReturnAndCall "__Mips16RetHelper"); A = A.addAttribute(C, AttributeSet::FunctionIndex, Attribute::ReadNone); + A = A.addAttribute(C, AttributeSet::FunctionIndex, + Attribute::NoInline); Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL)); CallInst::Create(F, Params, "", &Inst ); } else if (const CallInst *CI = dyn_cast<CallInst>(I)) { @@ -389,10 +393,11 @@ static void createFPFnStub(Function *F, Module *M, FPParamVariant PV, std::string LocalName = "__fn_local_" + Name; Function *FStub = Function::Create (F->getFunctionType(), - Function::ExternalLinkage, StubName, M); + Function::InternalLinkage, StubName, M); FStub->addFnAttr("mips16_fp_stub"); FStub->addFnAttr(llvm::Attribute::Naked); FStub->addFnAttr(llvm::Attribute::NoUnwind); + FStub->addFnAttr(llvm::Attribute::NoInline); FStub->addFnAttr("nomips16"); FStub->setSection(SectionName); BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub); diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp index f70abda..0caa277 100644 --- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp +++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp @@ -118,11 +118,13 @@ void Mips16DAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) { SDValue Mips16DAGToDAGISel::getMips16SPAliasReg() { unsigned Mips16SPAliasReg = MF->getInfo<MipsFunctionInfo>()->getMips16SPAliasReg(); - return CurDAG->getRegister(Mips16SPAliasReg, TLI->getPointerTy()); + return CurDAG->getRegister(Mips16SPAliasReg, + getTargetLowering()->getPointerTy()); } void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) { - SDValue AliasFPReg = CurDAG->getRegister(Mips::S0, TLI->getPointerTy()); + SDValue AliasFPReg = CurDAG->getRegister(Mips::S0, + getTargetLowering()->getPointerTy()); if (Parent) { switch (Parent->getOpcode()) { case ISD::LOAD: { @@ -149,7 +151,7 @@ void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) { } } } - AliasReg = CurDAG->getRegister(Mips::SP, TLI->getPointerTy()); + AliasReg = CurDAG->getRegister(Mips::SP, getTargetLowering()->getPointerTy()); return; } diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp index d8dd88c..6ed1d9e 100644 --- a/lib/Target/Mips/Mips16ISelLowering.cpp +++ b/lib/Target/Mips/Mips16ISelLowering.cpp @@ -18,7 +18,6 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/Support/CommandLine.h" #include "llvm/Target/TargetInstrInfo.h" -#include <set> using namespace llvm; @@ -30,15 +29,97 @@ static cl::opt<bool> DontExpandCondPseudos16( cl::Hidden); namespace { - std::set<const char*, MipsTargetLowering::LTStr> NoHelperNeeded; +struct Mips16Libcall { + RTLIB::Libcall Libcall; + const char *Name; + + bool 
operator<(const Mips16Libcall &RHS) const { + return std::strcmp(Name, RHS.Name) < 0; + } +}; + +struct Mips16IntrinsicHelperType{ + const char* Name; + const char* Helper; + + bool operator<(const Mips16IntrinsicHelperType &RHS) const { + return std::strcmp(Name, RHS.Name) < 0; + } + bool operator==(const Mips16IntrinsicHelperType &RHS) const { + return std::strcmp(Name, RHS.Name) == 0; + } +}; } +// Libcalls for which no helper is generated. Sorted by name for binary search. +static const Mips16Libcall HardFloatLibCalls[] = { + { RTLIB::ADD_F64, "__mips16_adddf3" }, + { RTLIB::ADD_F32, "__mips16_addsf3" }, + { RTLIB::DIV_F64, "__mips16_divdf3" }, + { RTLIB::DIV_F32, "__mips16_divsf3" }, + { RTLIB::OEQ_F64, "__mips16_eqdf2" }, + { RTLIB::OEQ_F32, "__mips16_eqsf2" }, + { RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2" }, + { RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi" }, + { RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi" }, + { RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf" }, + { RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf" }, + { RTLIB::UINTTOFP_I32_F64, "__mips16_floatunsidf" }, + { RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf" }, + { RTLIB::OGE_F64, "__mips16_gedf2" }, + { RTLIB::OGE_F32, "__mips16_gesf2" }, + { RTLIB::OGT_F64, "__mips16_gtdf2" }, + { RTLIB::OGT_F32, "__mips16_gtsf2" }, + { RTLIB::OLE_F64, "__mips16_ledf2" }, + { RTLIB::OLE_F32, "__mips16_lesf2" }, + { RTLIB::OLT_F64, "__mips16_ltdf2" }, + { RTLIB::OLT_F32, "__mips16_ltsf2" }, + { RTLIB::MUL_F64, "__mips16_muldf3" }, + { RTLIB::MUL_F32, "__mips16_mulsf3" }, + { RTLIB::UNE_F64, "__mips16_nedf2" }, + { RTLIB::UNE_F32, "__mips16_nesf2" }, + { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_dc" }, // No associated libcall. + { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_df" }, // No associated libcall. + { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sc" }, // No associated libcall. + { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sf" }, // No associated libcall. 
+ { RTLIB::SUB_F64, "__mips16_subdf3" }, + { RTLIB::SUB_F32, "__mips16_subsf3" }, + { RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2" }, + { RTLIB::UO_F64, "__mips16_unorddf2" }, + { RTLIB::UO_F32, "__mips16_unordsf2" } +}; + +static const Mips16IntrinsicHelperType Mips16IntrinsicHelper[] = { + {"ceil", "__mips16_call_stub_df_2"}, + {"ceilf", "__mips16_call_stub_sf_1"}, + {"copysign", "__mips16_call_stub_df_10"}, + {"copysignf", "__mips16_call_stub_sf_5"}, + {"cos", "__mips16_call_stub_df_2"}, + {"cosf", "__mips16_call_stub_sf_1"}, + {"exp2", "__mips16_call_stub_df_2"}, + {"exp2f", "__mips16_call_stub_sf_1"}, + {"floor", "__mips16_call_stub_df_2"}, + {"floorf", "__mips16_call_stub_sf_1"}, + {"log2", "__mips16_call_stub_df_2"}, + {"log2f", "__mips16_call_stub_sf_1"}, + {"nearbyint", "__mips16_call_stub_df_2"}, + {"nearbyintf", "__mips16_call_stub_sf_1"}, + {"rint", "__mips16_call_stub_df_2"}, + {"rintf", "__mips16_call_stub_sf_1"}, + {"sin", "__mips16_call_stub_df_2"}, + {"sinf", "__mips16_call_stub_sf_1"}, + {"sqrt", "__mips16_call_stub_df_2"}, + {"sqrtf", "__mips16_call_stub_sf_1"}, + {"trunc", "__mips16_call_stub_df_2"}, + {"truncf", "__mips16_call_stub_sf_1"}, +}; + Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM) : MipsTargetLowering(TM) { // // set up as if mips32 and then revert so we can test the mechanism // for switching - addRegisterClass(MVT::i32, &Mips::CPURegsRegClass); + addRegisterClass(MVT::i32, &Mips::GPR32RegClass); addRegisterClass(MVT::f32, &Mips::FGR32RegClass); computeRegisterProperties(); clearRegisterClasses(); @@ -46,13 +127,9 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM) // Set up the register classes addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass); - if (Subtarget->inMips16HardFloat()) { + if (Subtarget->inMips16HardFloat()) setMips16HardFloatLibCalls(); - NoHelperNeeded.insert("__mips16_ret_sf"); - NoHelperNeeded.insert("__mips16_ret_df"); - NoHelperNeeded.insert("__mips16_ret_sc"); - NoHelperNeeded.insert("__mips16_ret_dc"); - } + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); @@ -166,47 +243,17 @@ isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo, return false; } -void Mips16TargetLowering::setMips16LibcallName - (RTLIB::Libcall L, const char *Name) { - setLibcallName(L, Name); - NoHelperNeeded.insert(Name); -} - void Mips16TargetLowering::setMips16HardFloatLibCalls() { - setMips16LibcallName(RTLIB::ADD_F32, "__mips16_addsf3"); - setMips16LibcallName(RTLIB::ADD_F64, "__mips16_adddf3"); - setMips16LibcallName(RTLIB::SUB_F32, "__mips16_subsf3"); - setMips16LibcallName(RTLIB::SUB_F64, "__mips16_subdf3"); - setMips16LibcallName(RTLIB::MUL_F32, "__mips16_mulsf3"); - setMips16LibcallName(RTLIB::MUL_F64, "__mips16_muldf3"); - setMips16LibcallName(RTLIB::DIV_F32, "__mips16_divsf3"); - setMips16LibcallName(RTLIB::DIV_F64, "__mips16_divdf3"); - setMips16LibcallName(RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2"); - setMips16LibcallName(RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2"); - setMips16LibcallName(RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi"); - setMips16LibcallName(RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi"); - setMips16LibcallName(RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf"); - setMips16LibcallName(RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf"); - setMips16LibcallName(RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf"); - setMips16LibcallName(RTLIB::UINTTOFP_I32_F64, 
"__mips16_floatunsidf"); - setMips16LibcallName(RTLIB::OEQ_F32, "__mips16_eqsf2"); - setMips16LibcallName(RTLIB::OEQ_F64, "__mips16_eqdf2"); - setMips16LibcallName(RTLIB::UNE_F32, "__mips16_nesf2"); - setMips16LibcallName(RTLIB::UNE_F64, "__mips16_nedf2"); - setMips16LibcallName(RTLIB::OGE_F32, "__mips16_gesf2"); - setMips16LibcallName(RTLIB::OGE_F64, "__mips16_gedf2"); - setMips16LibcallName(RTLIB::OLT_F32, "__mips16_ltsf2"); - setMips16LibcallName(RTLIB::OLT_F64, "__mips16_ltdf2"); - setMips16LibcallName(RTLIB::OLE_F32, "__mips16_lesf2"); - setMips16LibcallName(RTLIB::OLE_F64, "__mips16_ledf2"); - setMips16LibcallName(RTLIB::OGT_F32, "__mips16_gtsf2"); - setMips16LibcallName(RTLIB::OGT_F64, "__mips16_gtdf2"); - setMips16LibcallName(RTLIB::UO_F32, "__mips16_unordsf2"); - setMips16LibcallName(RTLIB::UO_F64, "__mips16_unorddf2"); - setMips16LibcallName(RTLIB::O_F32, "__mips16_unordsf2"); - setMips16LibcallName(RTLIB::O_F64, "__mips16_unorddf2"); -} + for (unsigned I = 0; I != array_lengthof(HardFloatLibCalls); ++I) { + assert((I == 0 || HardFloatLibCalls[I - 1] < HardFloatLibCalls[I]) && + "Array not sorted!"); + if (HardFloatLibCalls[I].Libcall != RTLIB::UNKNOWN_LIBCALL) + setLibcallName(HardFloatLibCalls[I].Libcall, HardFloatLibCalls[I].Name); + } + setLibcallName(RTLIB::O_F64, "__mips16_unorddf2"); + setLibcallName(RTLIB::O_F32, "__mips16_unordsf2"); +} // // The Mips16 hard float is a crazy quilt inherited from gcc. I have a much @@ -383,16 +430,34 @@ getOpndList(SmallVectorImpl<SDValue> &Ops, // bool LookupHelper = true; if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(CLI.Callee)) { - if (NoHelperNeeded.find(S->getSymbol()) != NoHelperNeeded.end()) { + Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL, S->getSymbol() }; + + if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls), + Find)) LookupHelper = false; + else { + Mips16IntrinsicHelperType IntrinsicFind = {S->getSymbol(), ""}; + // one more look at list of intrinsics + if (std::binary_search(Mips16IntrinsicHelper, + array_endof(Mips16IntrinsicHelper), + IntrinsicFind)) { + const Mips16IntrinsicHelperType *h =(std::find(Mips16IntrinsicHelper, + array_endof(Mips16IntrinsicHelper), + IntrinsicFind)); + Mips16HelperFunction = h->Helper; + NeedMips16Helper = true; + LookupHelper = false; + } + } - } - else if (GlobalAddressSDNode *G = - dyn_cast<GlobalAddressSDNode>(CLI.Callee)) { - if (NoHelperNeeded.find(G->getGlobal()->getName().data()) != - NoHelperNeeded.end()) { + } else if (GlobalAddressSDNode *G = + dyn_cast<GlobalAddressSDNode>(CLI.Callee)) { + Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL, + G->getGlobal()->getName().data() }; + + if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls), + Find)) LookupHelper = false; - } } if (LookupHelper) Mips16HelperFunction = getMips16HelperFunction(CLI.RetTy, CLI.Args, NeedMips16Helper); diff --git a/lib/Target/Mips/Mips16ISelLowering.h b/lib/Target/Mips/Mips16ISelLowering.h index d3c7028..33b953f 100644 --- a/lib/Target/Mips/Mips16ISelLowering.h +++ b/lib/Target/Mips/Mips16ISelLowering.h @@ -32,8 +32,6 @@ namespace llvm { unsigned NextStackOffset, const MipsFunctionInfo& FI) const; - void setMips16LibcallName(RTLIB::Libcall, const char *Name); - void setMips16HardFloatLibCalls(); unsigned int diff --git a/lib/Target/Mips/Mips16InstrFormats.td b/lib/Target/Mips/Mips16InstrFormats.td index 1e49934..da3a1f1 100644 --- a/lib/Target/Mips/Mips16InstrFormats.td +++ b/lib/Target/Mips/Mips16InstrFormats.td @@ -148,6 +148,20 @@ class FRR16<bits<5> 
_funct, dag outs, dag ins, string asmstr, let Inst{4-0} = funct; } +class FRRBreak16<dag outs, dag ins, string asmstr, + list<dag> pattern, InstrItinClass itin>: + MipsInst16<outs, ins, asmstr, pattern, itin> +{ + bits<6> Code; + bits<5> funct; + + let Opcode = 0b11101; + let funct = 0b00101; + + let Inst{10-5} = Code; + let Inst{4-0} = funct; +} + // // For conversion functions. // diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp index c2a496c..05e70ab 100644 --- a/lib/Target/Mips/Mips16InstrInfo.cpp +++ b/lib/Target/Mips/Mips16InstrInfo.cpp @@ -10,7 +10,7 @@ // This file contains the Mips16 implementation of the TargetInstrInfo class. // //===----------------------------------------------------------------------===// - +#include <stdio.h> #include "Mips16InstrInfo.h" #include "InstPrinter/MipsInstPrinter.h" #include "MipsMachineFunction.h" @@ -72,9 +72,9 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB, unsigned Opc = 0; if (Mips::CPU16RegsRegClass.contains(DestReg) && - Mips::CPURegsRegClass.contains(SrcReg)) + Mips::GPR32RegClass.contains(SrcReg)) Opc = Mips::MoveR3216; - else if (Mips::CPURegsRegClass.contains(DestReg) && + else if (Mips::GPR32RegClass.contains(DestReg) && Mips::CPU16RegsRegClass.contains(SrcReg)) Opc = Mips::Move32R16; else if ((SrcReg == Mips::HI) && @@ -109,8 +109,9 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, if (Mips::CPU16RegsRegClass.hasSubClassEq(RC)) Opc = Mips::SwRxSpImmX16; assert(Opc && "Register class not handled!"); - BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)) - .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO); + BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)). + addFrameIndex(FI).addImm(Offset) + .addMemOperand(MMO); } void Mips16InstrInfo:: @@ -323,48 +324,88 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg, // RegScavenger rs; int32_t lo = Imm & 0xFFFF; - int32_t hi = ((Imm >> 16) + (lo >> 15)) & 0xFFFF; NewImm = lo; - unsigned Reg =0; - unsigned SpReg = 0; + int Reg =0; + int SpReg = 0; + rs.enterBasicBlock(&MBB); rs.forward(II); // + // We need to know which registers can be used, in the case where there + // are not enough free registers. We exclude all registers that + // are used in the instruction that we are helping. + // // Consider all allocatable registers in the register class initially + BitVector Candidates = + RI.getAllocatableSet + (*II->getParent()->getParent(), &Mips::CPU16RegsRegClass); + // Exclude all the registers being used by the instruction. + for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) { + MachineOperand &MO = II->getOperand(i); + if (MO.isReg() && MO.getReg() != 0 && !MO.isDef() && + !TargetRegisterInfo::isVirtualRegister(MO.getReg())) + Candidates.reset(MO.getReg()); + } + // + // If the same register was used and defined in an instruction, then + // it will not be in the list of candidates. + // + // we need to analyze the instruction that we are helping. + // we need to know if it defines register x but register x is not + // present as an operand of the instruction. this tells + // whether the register is live before the instruction. if it's not + // then we don't need to save it in case there are no free registers. 
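Aside: the getOpndList change above drops the dynamically built NoHelperNeeded string set in favour of static tables (HardFloatLibCalls, Mips16IntrinsicHelper) kept sorted by symbol name and probed with std::binary_search; the assert in setMips16HardFloatLibCalls checks that ordering in debug builds. A minimal standalone sketch of the lookup scheme follows; the struct and function names here are hypothetical, not the in-tree ones.

#include <algorithm>
#include <cstring>

// Table entry ordered by stub name, mirroring how Mips16Libcall's
// operator< must behave for std::binary_search to be valid.
struct StubEntry {
  const char *Name;
  bool operator<(const StubEntry &RHS) const {
    return std::strcmp(Name, RHS.Name) < 0;
  }
};

// Must stay sorted by Name; an out-of-order entry silently breaks lookups.
static const StubEntry Stubs[] = {
  { "__mips16_adddf3" }, { "__mips16_addsf3" }, { "__mips16_divdf3" },
};

static bool isKnownStub(const char *Sym) {
  StubEntry Key = { Sym };
  return std::binary_search(Stubs, Stubs + sizeof(Stubs) / sizeof(Stubs[0]),
                            Key);
}

The same trick carries over to the intrinsic table, where a std::find follows the membership test to fetch the helper name, as the hunk above shows.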
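The loadImmediate rewrite in progress here stops trusting a bare FindUnusedReg: it builds a Candidates set of allocatable CPU16 registers, removes every register the instruction being fixed already uses, intersects that with what the scavenger reports free, and only saves a victim to T0/T1 when nothing survives. A condensed sketch of just that selection step, using the same LLVM BitVector API as the hunk; pickScratchReg is a hypothetical name.

#include "llvm/ADT/BitVector.h"

// Prefer a register that is both allocatable (Candidates) and reported
// free by the scavenger (Available); otherwise pick any candidate and
// tell the caller it must be saved and restored around the use.
static int pickScratchReg(llvm::BitVector Available,
                          const llvm::BitVector &Candidates,
                          bool &MustSave) {
  Available &= Candidates;          // free AND allocatable
  int Reg = Available.find_first(); // -1 when the intersection is empty
  MustSave = (Reg == -1);
  return MustSave ? Candidates.find_first() : Reg;
}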
+ // + int DefReg = 0; + for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) { + MachineOperand &MO = II->getOperand(i); + if (MO.isReg() && MO.isDef()) { + DefReg = MO.getReg(); + break; + } + } + // + BitVector Available = rs.getRegsAvailable(&Mips::CPU16RegsRegClass); + + Available &= Candidates; + // // we use T0 for the first register, if we need to save something away. // we use T1 for the second register, if we need to save something away. // unsigned FirstRegSaved =0, SecondRegSaved=0; unsigned FirstRegSavedTo = 0, SecondRegSavedTo = 0; - Reg = rs.FindUnusedReg(&Mips::CPU16RegsRegClass); - if (Reg == 0) { - FirstRegSaved = Reg = Mips::V0; - FirstRegSavedTo = Mips::T0; - copyPhysReg(MBB, II, DL, FirstRegSavedTo, FirstRegSaved, true); + + Reg = Available.find_first(); + + if (Reg == -1) { + Reg = Candidates.find_first(); + Candidates.reset(Reg); + if (DefReg != Reg) { + FirstRegSaved = Reg; + FirstRegSavedTo = Mips::T0; + copyPhysReg(MBB, II, DL, FirstRegSavedTo, FirstRegSaved, true); + } } else - rs.setUsed(Reg); - BuildMI(MBB, II, DL, get(Mips::LiRxImmX16), Reg).addImm(hi); - BuildMI(MBB, II, DL, get(Mips::SllX16), Reg).addReg(Reg). - addImm(16); + Available.reset(Reg); + BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm); + NewImm = 0; if (FrameReg == Mips::SP) { - SpReg = rs.FindUnusedReg(&Mips::CPU16RegsRegClass); - if (SpReg == 0) { - if (Reg != Mips::V1) { - SecondRegSaved = SpReg = Mips::V1; + SpReg = Available.find_first(); + if (SpReg == -1) { + SpReg = Candidates.find_first(); + // Candidates.reset(SpReg); // not really needed + if (DefReg!= SpReg) { + SecondRegSaved = SpReg; SecondRegSavedTo = Mips::T1; } - else { - SecondRegSaved = SpReg = Mips::V0; - SecondRegSavedTo = Mips::T0; - } - copyPhysReg(MBB, II, DL, SecondRegSavedTo, SecondRegSaved, true); + if (SecondRegSaved) + copyPhysReg(MBB, II, DL, SecondRegSavedTo, SecondRegSaved, true); } - else - rs.setUsed(SpReg); - + else + Available.reset(SpReg); copyPhysReg(MBB, II, DL, SpReg, Mips::SP, false); - BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(SpReg) + BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(SpReg, RegState::Kill) .addReg(Reg); } else @@ -380,6 +421,22 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg, return Reg; } +/// This function generates the sequence of instructions needed to get the +/// result of adding register REG and immediate IMM. 
+unsigned +Mips16InstrInfo::basicLoadImmediate( + unsigned FrameReg, + int64_t Imm, MachineBasicBlock &MBB, + MachineBasicBlock::iterator II, DebugLoc DL, + unsigned &NewImm) const { + const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass; + MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); + unsigned Reg = RegInfo.createVirtualRegister(RC); + BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm); + NewImm = 0; + return Reg; +} + unsigned Mips16InstrInfo::getAnalyzableBrOpc(unsigned Opc) const { return (Opc == Mips::BeqzRxImmX16 || Opc == Mips::BimmX16 || Opc == Mips::BnezRxImmX16 || Opc == Mips::BteqzX16 || @@ -415,3 +472,27 @@ void Mips16InstrInfo::BuildAddiuSpImm const MipsInstrInfo *llvm::createMips16InstrInfo(MipsTargetMachine &TM) { return new Mips16InstrInfo(TM); } + +#include <stdio.h> +bool Mips16InstrInfo::validImmediate(unsigned Opcode, unsigned Reg, + int64_t Amount) { + switch (Opcode) { + case Mips::LbRxRyOffMemX16: + case Mips::LbuRxRyOffMemX16: + case Mips::LhRxRyOffMemX16: + case Mips::LhuRxRyOffMemX16: + case Mips::SbRxRyOffMemX16: + case Mips::ShRxRyOffMemX16: + case Mips::LwRxRyOffMemX16: + case Mips::SwRxRyOffMemX16: + case Mips::SwRxSpImmX16: + case Mips::LwRxSpImmX16: + return isInt<16>(Amount); + case Mips::AddiuRxRyOffMemX16: + if ((Reg == Mips::PC) || (Reg == Mips::SP)) + return isInt<16>(Amount); + return isInt<15>(Amount); + } + printf("Unexpected opcode %i \n", Opcode); + llvm_unreachable("unexpected Opcode in validImmediate"); +} diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h index a3bd31e..118d258 100644 --- a/lib/Target/Mips/Mips16InstrInfo.h +++ b/lib/Target/Mips/Mips16InstrInfo.h @@ -68,7 +68,7 @@ public: // Adjust SP by FrameSize bytes. Save RA, S0, S1 void makeFrame(unsigned SP, int64_t FrameSize, MachineBasicBlock &MBB, - MachineBasicBlock::iterator I) const; + MachineBasicBlock::iterator I) const; // Adjust SP by FrameSize bytes. 
Restore RA, S0, S1 void restoreFrame(unsigned SP, int64_t FrameSize, MachineBasicBlock &MBB, @@ -88,6 +88,13 @@ public: MachineBasicBlock::iterator II, DebugLoc DL, unsigned &NewImm) const; + unsigned basicLoadImmediate(unsigned FrameReg, + int64_t Imm, MachineBasicBlock &MBB, + MachineBasicBlock::iterator II, DebugLoc DL, + unsigned &NewImm) const; + + static bool validImmediate(unsigned Opcode, unsigned Reg, int64_t Amount); + static bool validSpImm8(int offset) { return ((offset & 7) == 0) && isInt<11>(offset); } diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td index aa51aaf..aef4e92 100644 --- a/lib/Target/Mips/Mips16InstrInfo.td +++ b/lib/Target/Mips/Mips16InstrInfo.td @@ -21,13 +21,13 @@ def addr16 : // Address operand def mem16 : Operand<i32> { let PrintMethod = "printMemOperand"; - let MIOperandInfo = (ops CPU16Regs, simm16, CPU16Regs); + let MIOperandInfo = (ops CPU16Regs, simm16, CPU16RegsPlusSP); let EncoderMethod = "getMemEncoding"; } def mem16_ea : Operand<i32> { let PrintMethod = "printMemOperandEA"; - let MIOperandInfo = (ops CPU16Regs, simm16); + let MIOperandInfo = (ops CPU16RegsPlusSP, simm16); let EncoderMethod = "getMemEncoding"; } @@ -187,6 +187,11 @@ class FEXT_RI16_SP_explicit_ins<bits<5> _op, string asmstr, FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPUSPReg:$ry, simm16:$imm), !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>; +class FEXT_RI16_SP_Store_explicit_ins<bits<5> _op, string asmstr, + InstrItinClass itin>: + FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, CPUSPReg:$ry, simm16:$imm), + !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>; + // // EXT-RRI instruction format // @@ -248,7 +253,7 @@ class FEXT_T8I8I16_ins<string asmstr, string asmstr2>: // I8_MOVR32 instruction format (used only by the MOVR32 instructio // class FI8_MOVR3216_ins<string asmstr, InstrItinClass itin>: - FI8_MOVR3216<(outs CPU16Regs:$rz), (ins CPURegs:$r32), + FI8_MOVR3216<(outs CPU16Regs:$rz), (ins GPR32:$r32), !strconcat(asmstr, "\t$rz, $r32"), [], itin>; // @@ -256,7 +261,7 @@ class FI8_MOVR3216_ins<string asmstr, InstrItinClass itin>: // class FI8_MOV32R16_ins<string asmstr, InstrItinClass itin>: - FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz), + FI8_MOV32R16<(outs GPR32:$r32), (ins CPU16Regs:$rz), !strconcat(asmstr, "\t$r32, $rz"), [], itin>; // @@ -287,6 +292,11 @@ class FRR16_ins<bits<5> f, string asmstr, InstrItinClass itin> : !strconcat(asmstr, "\t$rx, $ry"), [], itin> { } +class FRRBreakNull16_ins<string asmstr, InstrItinClass itin> : + FRRBreak16<(outs), (ins), asmstr, [], itin> { + let Code=0; +} + class FRR16R_ins<bits<5> f, string asmstr, InstrItinClass itin> : FRR16<f, (outs), (ins CPU16Regs:$rx, CPU16Regs:$ry), !strconcat(asmstr, "\t$rx, $ry"), [], itin> { @@ -437,7 +447,7 @@ def Constant32: MipsPseudo16<(outs), (ins imm32:$imm), "\t.word $imm", []>; def LwConstant32: - MipsPseudo16<(outs), (ins CPU16Regs:$rx, imm32:$imm), + MipsPseudo16<(outs CPU16Regs:$rx), (ins imm32:$imm), "lw\t$rx, 1f\n\tb\t2f\n\t.align\t2\n1: \t.word\t$imm\n2:", []>; @@ -569,6 +579,13 @@ def BnezRxImm16: FRI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16; // def BnezRxImmX16: FEXT_RI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16; + +// +//Format: BREAK immediate +// Purpose: Breakpoint +// To cause a Breakpoint exception. 
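The validImmediate hook added above gives each MIPS16 load/store opcode the signed-range check its encoding can actually hold (the Mips16RegisterInfo.cpp hunk further down switches eliminateFI over to it). A small sketch of the same shape using LLVM's isInt<N> templates; offsetFits and its parameters are illustrative only.

#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Extended MIPS16 loads/stores encode a signed 16-bit offset; addiu from
// a plain register is the odd one out with only 15 usable bits, while
// addiu from $pc or $sp keeps the full 16 (matching validImmediate above).
static bool offsetFits(bool IsAddiu, bool BaseIsPCOrSP, int64_t Offset) {
  if (IsAddiu && !BaseIsPCOrSP)
    return llvm::isInt<15>(Offset);
  return llvm::isInt<16>(Offset);
}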
+ +def Break16: FRRBreakNull16_ins<"break 0", NoItinerary>; // // Format: BTEQZ offset MIPS16e // Purpose: Branch on T Equal to Zero (Extended) @@ -671,6 +688,7 @@ def Jal16 : FJAL16_ins<0b0, "jal", IIAlu> { let hasDelaySlot = 0; // not true, but we add the nop for now let isTerminator=1; let isBarrier=1; + let isCall=1; } // @@ -878,9 +896,9 @@ def OrRxRxRy16: FRxRxRy16_ins<0b01101, "or", IIAlu>, ArithLogic16Defs<1>; let ra=1, s=0,s0=1,s1=1 in def RestoreRaF16: FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size), - "restore\t$$ra, $$s0, $$s1, $frame_size", [], IILoad >, MayLoad { + "restore\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IILoad >, MayLoad { let isCodeGenOnly = 1; - let Defs = [S0, S1, RA, SP]; + let Defs = [S0, S1, S2, RA, SP]; let Uses = [SP]; } @@ -906,9 +924,9 @@ def RestoreIncSpF16: let ra=1, s=1,s0=1,s1=1 in def SaveRaF16: FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size), - "save\t$$ra, $$s0, $$s1, $frame_size", [], IIStore >, MayStore { + "save\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IIStore >, MayStore { let isCodeGenOnly = 1; - let Uses = [RA, SP, S0, S1]; + let Uses = [RA, SP, S0, S1, S2]; let Defs = [SP]; } @@ -1195,7 +1213,8 @@ def SwRxRyOffMemX16: // Purpose: Store Word rx (SP-Relative) // To store an SP-relative word to memory. // -def SwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b11010, "sw", IIStore>, MayStore; +def SwRxSpImmX16: FEXT_RI16_SP_Store_explicit_ins + <0b11010, "sw", IIStore>, MayStore; // // @@ -1789,3 +1808,6 @@ def : Mips16Pat<(i32 (extloadi8 addr16:$src)), (LbuRxRyOffMemX16 addr16:$src)>; def : Mips16Pat<(i32 (extloadi16 addr16:$src)), (LhuRxRyOffMemX16 addr16:$src)>; + +def: Mips16Pat<(trap), (Break16)>; + diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp index 018f56c..9d0f2c9 100644 --- a/lib/Target/Mips/Mips16RegisterInfo.cpp +++ b/lib/Target/Mips/Mips16RegisterInfo.cpp @@ -134,8 +134,8 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II, DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n"); - if (!MI.isDebugValue() && ( ((FrameReg != Mips::SP) && !isInt<16>(Offset)) || - ((FrameReg == Mips::SP) && !isInt<15>(Offset)) )) { + if (!MI.isDebugValue() && + !Mips16InstrInfo::validImmediate(MI.getOpcode(), FrameReg, Offset)) { MachineBasicBlock &MBB = *MI.getParent(); DebugLoc DL = II->getDebugLoc(); unsigned NewImm; diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td index df717fe..a752ab8 100644 --- a/lib/Target/Mips/Mips64InstrInfo.td +++ b/lib/Target/Mips/Mips64InstrInfo.td @@ -37,21 +37,15 @@ def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>; let DecoderNamespace = "Mips64" in { multiclass Atomic2Ops64<PatFrag Op> { - def NAME : Atomic2Ops<Op, CPU64Regs, CPURegs>, - Requires<[NotN64, HasStdEnc]>; - def _P8 : Atomic2Ops<Op, CPU64Regs, CPU64Regs>, - Requires<[IsN64, HasStdEnc]> { - let isCodeGenOnly = 1; - } + def NAME : Atomic2Ops<Op, GPR64, GPR32>, Requires<[NotN64, HasStdEnc]>; + def _P8 : Atomic2Ops<Op, GPR64, GPR64>, Requires<[IsN64, HasStdEnc]>; } multiclass AtomicCmpSwap64<PatFrag Op> { - def NAME : AtomicCmpSwap<Op, CPU64Regs, CPURegs>, + def NAME : AtomicCmpSwap<Op, GPR64, GPR32>, Requires<[NotN64, HasStdEnc]>; - def _P8 : AtomicCmpSwap<Op, CPU64Regs, CPU64Regs>, - Requires<[IsN64, HasStdEnc]> { - let isCodeGenOnly = 1; - } + def _P8 : AtomicCmpSwap<Op, GPR64, GPR64>, + Requires<[IsN64, HasStdEnc]>; } } let usesCustomInserter = 1, Predicates = [HasStdEnc], @@ -67,9 +61,9 @@ let usesCustomInserter = 1, Predicates = 
[HasStdEnc], } /// Pseudo instructions for loading and storing accumulator registers. -let isPseudo = 1 in { - defm LOAD_AC128 : LoadM<"load_ac128", ACRegs128>; - defm STORE_AC128 : StoreM<"store_ac128", ACRegs128>; +let isPseudo = 1, isCodeGenOnly = 1 in { + defm LOAD_AC128 : LoadM<"", ACRegs128>; + defm STORE_AC128 : StoreM<"", ACRegs128>; } //===----------------------------------------------------------------------===// @@ -77,166 +71,179 @@ let isPseudo = 1 in { //===----------------------------------------------------------------------===// let DecoderNamespace = "Mips64" in { /// Arithmetic Instructions (ALU Immediate) -def DADDi : ArithLogicI<"daddi", simm16_64, CPU64RegsOpnd>, ADDI_FM<0x18>; -def DADDiu : ArithLogicI<"daddiu", simm16_64, CPU64RegsOpnd, immSExt16, add>, +def DADDi : ArithLogicI<"daddi", simm16_64, GPR64Opnd>, ADDI_FM<0x18>; +def DADDiu : ArithLogicI<"daddiu", simm16_64, GPR64Opnd, IIArith, + immSExt16, add>, ADDI_FM<0x19>, IsAsCheapAsAMove; -def DANDi : ArithLogicI<"andi", uimm16_64, CPU64RegsOpnd, immZExt16, and>, - ADDI_FM<0xc>; -def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, CPU64Regs>, + +let isCodeGenOnly = 1 in { +def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, GPR64Opnd>, SLTI_FM<0xa>; -def SLTiu64 : SetCC_I<"sltiu", setult, simm16_64, immSExt16, CPU64Regs>, +def SLTiu64 : SetCC_I<"sltiu", setult, simm16_64, immSExt16, GPR64Opnd>, SLTI_FM<0xb>; -def ORi64 : ArithLogicI<"ori", uimm16_64, CPU64RegsOpnd, immZExt16, or>, +def ANDi64 : ArithLogicI<"andi", uimm16_64, GPR64Opnd, IILogic, immZExt16, + and>, + ADDI_FM<0xc>; +def ORi64 : ArithLogicI<"ori", uimm16_64, GPR64Opnd, IILogic, immZExt16, + or>, ADDI_FM<0xd>; -def XORi64 : ArithLogicI<"xori", uimm16_64, CPU64RegsOpnd, immZExt16, xor>, +def XORi64 : ArithLogicI<"xori", uimm16_64, GPR64Opnd, IILogic, immZExt16, + xor>, ADDI_FM<0xe>; -def LUi64 : LoadUpper<"lui", CPU64Regs, uimm16_64>, LUI_FM; +def LUi64 : LoadUpper<"lui", GPR64Opnd, uimm16_64>, LUI_FM; +} /// Arithmetic Instructions (3-Operand, R-Type) -def DADD : ArithLogicR<"dadd", CPU64RegsOpnd>, ADD_FM<0, 0x2c>; -def DADDu : ArithLogicR<"daddu", CPU64RegsOpnd, 1, IIAlu, add>, +def DADD : ArithLogicR<"dadd", GPR64Opnd>, ADD_FM<0, 0x2c>; +def DADDu : ArithLogicR<"daddu", GPR64Opnd, 1, IIArith, add>, ADD_FM<0, 0x2d>; -def DSUBu : ArithLogicR<"dsubu", CPU64RegsOpnd, 0, IIAlu, sub>, +def DSUBu : ArithLogicR<"dsubu", GPR64Opnd, 0, IIArith, sub>, ADD_FM<0, 0x2f>; -def SLT64 : SetCC_R<"slt", setlt, CPU64Regs>, ADD_FM<0, 0x2a>; -def SLTu64 : SetCC_R<"sltu", setult, CPU64Regs>, ADD_FM<0, 0x2b>; -def AND64 : ArithLogicR<"and", CPU64RegsOpnd, 1, IIAlu, and>, ADD_FM<0, 0x24>; -def OR64 : ArithLogicR<"or", CPU64RegsOpnd, 1, IIAlu, or>, ADD_FM<0, 0x25>; -def XOR64 : ArithLogicR<"xor", CPU64RegsOpnd, 1, IIAlu, xor>, ADD_FM<0, 0x26>; -def NOR64 : LogicNOR<"nor", CPU64RegsOpnd>, ADD_FM<0, 0x27>; + +let isCodeGenOnly = 1 in { +def SLT64 : SetCC_R<"slt", setlt, GPR64Opnd>, ADD_FM<0, 0x2a>; +def SLTu64 : SetCC_R<"sltu", setult, GPR64Opnd>, ADD_FM<0, 0x2b>; +def AND64 : ArithLogicR<"and", GPR64Opnd, 1, IIArith, and>, ADD_FM<0, 0x24>; +def OR64 : ArithLogicR<"or", GPR64Opnd, 1, IIArith, or>, ADD_FM<0, 0x25>; +def XOR64 : ArithLogicR<"xor", GPR64Opnd, 1, IIArith, xor>, ADD_FM<0, 0x26>; +def NOR64 : LogicNOR<"nor", GPR64Opnd>, ADD_FM<0, 0x27>; +} /// Shift Instructions -def DSLL : shift_rotate_imm<"dsll", shamt, CPU64RegsOpnd, shl, immZExt6>, +def DSLL : shift_rotate_imm<"dsll", shamt, GPR64Opnd, shl, immZExt6>, SRA_FM<0x38, 0>; -def DSRL : 
shift_rotate_imm<"dsrl", shamt, CPU64RegsOpnd, srl, immZExt6>, +def DSRL : shift_rotate_imm<"dsrl", shamt, GPR64Opnd, srl, immZExt6>, SRA_FM<0x3a, 0>; -def DSRA : shift_rotate_imm<"dsra", shamt, CPU64RegsOpnd, sra, immZExt6>, +def DSRA : shift_rotate_imm<"dsra", shamt, GPR64Opnd, sra, immZExt6>, SRA_FM<0x3b, 0>; -def DSLLV : shift_rotate_reg<"dsllv", CPU64RegsOpnd, shl>, SRLV_FM<0x14, 0>; -def DSRLV : shift_rotate_reg<"dsrlv", CPU64RegsOpnd, srl>, SRLV_FM<0x16, 0>; -def DSRAV : shift_rotate_reg<"dsrav", CPU64RegsOpnd, sra>, SRLV_FM<0x17, 0>; -def DSLL32 : shift_rotate_imm<"dsll32", shamt, CPU64RegsOpnd>, SRA_FM<0x3c, 0>; -def DSRL32 : shift_rotate_imm<"dsrl32", shamt, CPU64RegsOpnd>, SRA_FM<0x3e, 0>; -def DSRA32 : shift_rotate_imm<"dsra32", shamt, CPU64RegsOpnd>, SRA_FM<0x3f, 0>; -} +def DSLLV : shift_rotate_reg<"dsllv", GPR64Opnd, shl>, SRLV_FM<0x14, 0>; +def DSRLV : shift_rotate_reg<"dsrlv", GPR64Opnd, srl>, SRLV_FM<0x16, 0>; +def DSRAV : shift_rotate_reg<"dsrav", GPR64Opnd, sra>, SRLV_FM<0x17, 0>; +def DSLL32 : shift_rotate_imm<"dsll32", shamt, GPR64Opnd>, SRA_FM<0x3c, 0>; +def DSRL32 : shift_rotate_imm<"dsrl32", shamt, GPR64Opnd>, SRA_FM<0x3e, 0>; +def DSRA32 : shift_rotate_imm<"dsra32", shamt, GPR64Opnd>, SRA_FM<0x3f, 0>; + // Rotate Instructions -let Predicates = [HasMips64r2, HasStdEnc], - DecoderNamespace = "Mips64" in { - def DROTR : shift_rotate_imm<"drotr", shamt, CPU64RegsOpnd, rotr, immZExt6>, +let Predicates = [HasMips64r2, HasStdEnc] in { + def DROTR : shift_rotate_imm<"drotr", shamt, GPR64Opnd, rotr, immZExt6>, SRA_FM<0x3a, 1>; - def DROTRV : shift_rotate_reg<"drotrv", CPU64RegsOpnd, rotr>, + def DROTRV : shift_rotate_reg<"drotrv", GPR64Opnd, rotr>, SRLV_FM<0x16, 1>; } -let DecoderNamespace = "Mips64" in { /// Load and Store Instructions /// aligned -defm LB64 : LoadM<"lb", CPU64Regs, sextloadi8>, LW_FM<0x20>; -defm LBu64 : LoadM<"lbu", CPU64Regs, zextloadi8>, LW_FM<0x24>; -defm LH64 : LoadM<"lh", CPU64Regs, sextloadi16>, LW_FM<0x21>; -defm LHu64 : LoadM<"lhu", CPU64Regs, zextloadi16>, LW_FM<0x25>; -defm LW64 : LoadM<"lw", CPU64Regs, sextloadi32>, LW_FM<0x23>; -defm LWu64 : LoadM<"lwu", CPU64Regs, zextloadi32>, LW_FM<0x27>; -defm SB64 : StoreM<"sb", CPU64Regs, truncstorei8>, LW_FM<0x28>; -defm SH64 : StoreM<"sh", CPU64Regs, truncstorei16>, LW_FM<0x29>; -defm SW64 : StoreM<"sw", CPU64Regs, truncstorei32>, LW_FM<0x2b>; -defm LD : LoadM<"ld", CPU64Regs, load>, LW_FM<0x37>; -defm SD : StoreM<"sd", CPU64Regs, store>, LW_FM<0x3f>; +let isCodeGenOnly = 1 in { +defm LB64 : LoadM<"lb", GPR64Opnd, sextloadi8, IILoad>, LW_FM<0x20>; +defm LBu64 : LoadM<"lbu", GPR64Opnd, zextloadi8, IILoad>, LW_FM<0x24>; +defm LH64 : LoadM<"lh", GPR64Opnd, sextloadi16, IILoad>, LW_FM<0x21>; +defm LHu64 : LoadM<"lhu", GPR64Opnd, zextloadi16, IILoad>, LW_FM<0x25>; +defm LW64 : LoadM<"lw", GPR64Opnd, sextloadi32, IILoad>, LW_FM<0x23>; +defm SB64 : StoreM<"sb", GPR64Opnd, truncstorei8, IIStore>, LW_FM<0x28>; +defm SH64 : StoreM<"sh", GPR64Opnd, truncstorei16, IIStore>, LW_FM<0x29>; +defm SW64 : StoreM<"sw", GPR64Opnd, truncstorei32, IIStore>, LW_FM<0x2b>; +} + +defm LWu : LoadM<"lwu", GPR64Opnd, zextloadi32, IILoad>, LW_FM<0x27>; +defm LD : LoadM<"ld", GPR64Opnd, load, IILoad>, LW_FM<0x37>; +defm SD : StoreM<"sd", GPR64Opnd, store, IIStore>, LW_FM<0x3f>; /// load/store left/right -defm LWL64 : LoadLeftRightM<"lwl", MipsLWL, CPU64Regs>, LW_FM<0x22>; -defm LWR64 : LoadLeftRightM<"lwr", MipsLWR, CPU64Regs>, LW_FM<0x26>; -defm SWL64 : StoreLeftRightM<"swl", MipsSWL, CPU64Regs>, LW_FM<0x2a>; -defm SWR64 : 
StoreLeftRightM<"swr", MipsSWR, CPU64Regs>, LW_FM<0x2e>; +let isCodeGenOnly = 1 in { +defm LWL64 : LoadLeftRightM<"lwl", MipsLWL, GPR64Opnd>, LW_FM<0x22>; +defm LWR64 : LoadLeftRightM<"lwr", MipsLWR, GPR64Opnd>, LW_FM<0x26>; +defm SWL64 : StoreLeftRightM<"swl", MipsSWL, GPR64Opnd>, LW_FM<0x2a>; +defm SWR64 : StoreLeftRightM<"swr", MipsSWR, GPR64Opnd>, LW_FM<0x2e>; +} -defm LDL : LoadLeftRightM<"ldl", MipsLDL, CPU64Regs>, LW_FM<0x1a>; -defm LDR : LoadLeftRightM<"ldr", MipsLDR, CPU64Regs>, LW_FM<0x1b>; -defm SDL : StoreLeftRightM<"sdl", MipsSDL, CPU64Regs>, LW_FM<0x2c>; -defm SDR : StoreLeftRightM<"sdr", MipsSDR, CPU64Regs>, LW_FM<0x2d>; +defm LDL : LoadLeftRightM<"ldl", MipsLDL, GPR64Opnd>, LW_FM<0x1a>; +defm LDR : LoadLeftRightM<"ldr", MipsLDR, GPR64Opnd>, LW_FM<0x1b>; +defm SDL : StoreLeftRightM<"sdl", MipsSDL, GPR64Opnd>, LW_FM<0x2c>; +defm SDR : StoreLeftRightM<"sdr", MipsSDR, GPR64Opnd>, LW_FM<0x2d>; /// Load-linked, Store-conditional let Predicates = [NotN64, HasStdEnc] in { - def LLD : LLBase<"lld", CPU64RegsOpnd, mem>, LW_FM<0x34>; - def SCD : SCBase<"scd", CPU64RegsOpnd, mem>, LW_FM<0x3c>; + def LLD : LLBase<"lld", GPR64Opnd, mem>, LW_FM<0x34>; + def SCD : SCBase<"scd", GPR64Opnd, mem>, LW_FM<0x3c>; } let Predicates = [IsN64, HasStdEnc], isCodeGenOnly = 1 in { - def LLD_P8 : LLBase<"lld", CPU64RegsOpnd, mem64>, LW_FM<0x34>; - def SCD_P8 : SCBase<"scd", CPU64RegsOpnd, mem64>, LW_FM<0x3c>; + def LLD_P8 : LLBase<"lld", GPR64Opnd, mem64>, LW_FM<0x34>; + def SCD_P8 : SCBase<"scd", GPR64Opnd, mem64>, LW_FM<0x3c>; } /// Jump and Branch Instructions -def JR64 : IndirectBranch<CPU64Regs>, MTLO_FM<8>; -def BEQ64 : CBranch<"beq", seteq, CPU64RegsOpnd>, BEQ_FM<4>; -def BNE64 : CBranch<"bne", setne, CPU64RegsOpnd>, BEQ_FM<5>; -def BGEZ64 : CBranchZero<"bgez", setge, CPU64RegsOpnd>, BGEZ_FM<1, 1>; -def BGTZ64 : CBranchZero<"bgtz", setgt, CPU64RegsOpnd>, BGEZ_FM<7, 0>; -def BLEZ64 : CBranchZero<"blez", setle, CPU64RegsOpnd>, BGEZ_FM<6, 0>; -def BLTZ64 : CBranchZero<"bltz", setlt, CPU64RegsOpnd>, BGEZ_FM<1, 0>; +let isCodeGenOnly = 1 in { +def JR64 : IndirectBranch<GPR64Opnd>, MTLO_FM<8>; +def BEQ64 : CBranch<"beq", seteq, GPR64Opnd>, BEQ_FM<4>; +def BNE64 : CBranch<"bne", setne, GPR64Opnd>, BEQ_FM<5>; +def BGEZ64 : CBranchZero<"bgez", setge, GPR64Opnd>, BGEZ_FM<1, 1>; +def BGTZ64 : CBranchZero<"bgtz", setgt, GPR64Opnd>, BGEZ_FM<7, 0>; +def BLEZ64 : CBranchZero<"blez", setle, GPR64Opnd>, BGEZ_FM<6, 0>; +def BLTZ64 : CBranchZero<"bltz", setlt, GPR64Opnd>, BGEZ_FM<1, 0>; +def JALR64 : JumpLinkReg<"jalr", GPR64Opnd>, JALR_FM; +def JALR64Pseudo : JumpLinkRegPseudo<GPR64Opnd, JALR, RA, GPR32Opnd>; +def TAILCALL64_R : JumpFR<GPR64Opnd, MipsTailCall>, MTLO_FM<8>, IsTailCall; } -let DecoderNamespace = "Mips64" in -def JALR64 : JumpLinkReg<"jalr", CPU64Regs>, JALR_FM; -def JALR64Pseudo : JumpLinkRegPseudo<CPU64Regs, JALR64, RA_64>; -def TAILCALL64_R : JumpFR<CPU64Regs, MipsTailCall>, MTLO_FM<8>, IsTailCall; -let DecoderNamespace = "Mips64" in { /// Multiply and Divide Instructions. 
-def DMULT : Mult<"dmult", IIImul, CPU64RegsOpnd, [HI64, LO64]>, +def DMULT : Mult<"dmult", IIImult, GPR64Opnd, [HI64, LO64]>, MULT_FM<0, 0x1c>; -def DMULTu : Mult<"dmultu", IIImul, CPU64RegsOpnd, [HI64, LO64]>, +def DMULTu : Mult<"dmultu", IIImult, GPR64Opnd, [HI64, LO64]>, MULT_FM<0, 0x1d>; -def PseudoDMULT : MultDivPseudo<DMULT, ACRegs128, CPU64RegsOpnd, MipsMult, - IIImul>; -def PseudoDMULTu : MultDivPseudo<DMULTu, ACRegs128, CPU64RegsOpnd, MipsMultu, - IIImul>; -def DSDIV : Div<"ddiv", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1e>; -def DUDIV : Div<"ddivu", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1f>; -def PseudoDSDIV : MultDivPseudo<DSDIV, ACRegs128, CPU64RegsOpnd, MipsDivRem, +def PseudoDMULT : MultDivPseudo<DMULT, ACRegs128, GPR64Opnd, MipsMult, + IIImult>; +def PseudoDMULTu : MultDivPseudo<DMULTu, ACRegs128, GPR64Opnd, MipsMultu, + IIImult>; +def DSDIV : Div<"ddiv", IIIdiv, GPR64Opnd, [HI64, LO64]>, MULT_FM<0, 0x1e>; +def DUDIV : Div<"ddivu", IIIdiv, GPR64Opnd, [HI64, LO64]>, MULT_FM<0, 0x1f>; +def PseudoDSDIV : MultDivPseudo<DSDIV, ACRegs128, GPR64Opnd, MipsDivRem, IIIdiv, 0, 1, 1>; -def PseudoDUDIV : MultDivPseudo<DUDIV, ACRegs128, CPU64RegsOpnd, MipsDivRemU, +def PseudoDUDIV : MultDivPseudo<DUDIV, ACRegs128, GPR64Opnd, MipsDivRemU, IIIdiv, 0, 1, 1>; -def MTHI64 : MoveToLOHI<"mthi", CPU64Regs, [HI64]>, MTLO_FM<0x11>; -def MTLO64 : MoveToLOHI<"mtlo", CPU64Regs, [LO64]>, MTLO_FM<0x13>; -def MFHI64 : MoveFromLOHI<"mfhi", CPU64Regs, [HI64]>, MFLO_FM<0x10>; -def MFLO64 : MoveFromLOHI<"mflo", CPU64Regs, [LO64]>, MFLO_FM<0x12>; +let isCodeGenOnly = 1 in { +def MTHI64 : MoveToLOHI<"mthi", GPR64Opnd, [HI64]>, MTLO_FM<0x11>; +def MTLO64 : MoveToLOHI<"mtlo", GPR64Opnd, [LO64]>, MTLO_FM<0x13>; +def MFHI64 : MoveFromLOHI<"mfhi", GPR64Opnd, [HI64]>, MFLO_FM<0x10>; +def MFLO64 : MoveFromLOHI<"mflo", GPR64Opnd, [LO64]>, MFLO_FM<0x12>; /// Sign Ext In Register Instructions. 
-def SEB64 : SignExtInReg<"seb", i8, CPU64Regs>, SEB_FM<0x10, 0x20>; -def SEH64 : SignExtInReg<"seh", i16, CPU64Regs>, SEB_FM<0x18, 0x20>; +def SEB64 : SignExtInReg<"seb", i8, GPR64Opnd>, SEB_FM<0x10, 0x20>; +def SEH64 : SignExtInReg<"seh", i16, GPR64Opnd>, SEB_FM<0x18, 0x20>; +} /// Count Leading -def DCLZ : CountLeading0<"dclz", CPU64RegsOpnd>, CLO_FM<0x24>; -def DCLO : CountLeading1<"dclo", CPU64RegsOpnd>, CLO_FM<0x25>; +def DCLZ : CountLeading0<"dclz", GPR64Opnd>, CLO_FM<0x24>; +def DCLO : CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>; /// Double Word Swap Bytes/HalfWords -def DSBH : SubwordSwap<"dsbh", CPU64RegsOpnd>, SEB_FM<2, 0x24>; -def DSHD : SubwordSwap<"dshd", CPU64RegsOpnd>, SEB_FM<5, 0x24>; +def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>; +def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>; -def LEA_ADDiu64 : EffectiveAddress<"daddiu", CPU64Regs, mem_ea_64>, LW_FM<0x19>; +def LEA_ADDiu64 : EffectiveAddress<"daddiu", GPR64Opnd, mem_ea_64>, LW_FM<0x19>; -} -let DecoderNamespace = "Mips64" in { -def RDHWR64 : ReadHardware<CPU64Regs, HW64RegsOpnd>, RDHWR_FM; +let isCodeGenOnly = 1 in +def RDHWR64 : ReadHardware<GPR64Opnd, HW64RegsOpnd>, RDHWR_FM; -def DEXT : ExtBase<"dext", CPU64RegsOpnd>, EXT_FM<3>; +def DEXT : ExtBase<"dext", GPR64Opnd>, EXT_FM<3>; let Pattern = []<dag> in { - def DEXTU : ExtBase<"dextu", CPU64RegsOpnd>, EXT_FM<2>; - def DEXTM : ExtBase<"dextm", CPU64RegsOpnd>, EXT_FM<1>; + def DEXTU : ExtBase<"dextu", GPR64Opnd>, EXT_FM<2>; + def DEXTM : ExtBase<"dextm", GPR64Opnd>, EXT_FM<1>; } -def DINS : InsBase<"dins", CPU64RegsOpnd>, EXT_FM<7>; +def DINS : InsBase<"dins", GPR64Opnd>, EXT_FM<7>; let Pattern = []<dag> in { - def DINSU : InsBase<"dinsu", CPU64RegsOpnd>, EXT_FM<6>; - def DINSM : InsBase<"dinsm", CPU64RegsOpnd>, EXT_FM<5>; + def DINSU : InsBase<"dinsu", GPR64Opnd>, EXT_FM<6>; + def DINSM : InsBase<"dinsm", GPR64Opnd>, EXT_FM<5>; } let isCodeGenOnly = 1, rs = 0, shamt = 0 in { - def DSLL64_32 : FR<0x00, 0x3c, (outs CPU64Regs:$rd), (ins CPURegs:$rt), - "dsll\t$rd, $rt, 32", [], IIAlu>; - def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt), - "sll\t$rd, $rt, 0", [], IIAlu>; - def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt), - "sll\t$rd, $rt, 0", [], IIAlu>; + def DSLL64_32 : FR<0x00, 0x3c, (outs GPR64:$rd), (ins GPR32:$rt), + "dsll\t$rd, $rt, 32", [], IIArith>; + def SLL64_32 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR32:$rt), + "sll\t$rd, $rt, 0", [], IIArith>; + def SLL64_64 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR64:$rt), + "sll\t$rd, $rt, 0", [], IIArith>; } } //===----------------------------------------------------------------------===// @@ -273,25 +280,25 @@ def : MipsPat<(MipsLo tglobaltlsaddr:$in), (DADDiu ZERO_64, tglobaltlsaddr:$in)>; def : MipsPat<(MipsLo texternalsym:$in), (DADDiu ZERO_64, texternalsym:$in)>; -def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaladdr:$lo)), - (DADDiu CPU64Regs:$hi, tglobaladdr:$lo)>; -def : MipsPat<(add CPU64Regs:$hi, (MipsLo tblockaddress:$lo)), - (DADDiu CPU64Regs:$hi, tblockaddress:$lo)>; -def : MipsPat<(add CPU64Regs:$hi, (MipsLo tjumptable:$lo)), - (DADDiu CPU64Regs:$hi, tjumptable:$lo)>; -def : MipsPat<(add CPU64Regs:$hi, (MipsLo tconstpool:$lo)), - (DADDiu CPU64Regs:$hi, tconstpool:$lo)>; -def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaltlsaddr:$lo)), - (DADDiu CPU64Regs:$hi, tglobaltlsaddr:$lo)>; - -def : WrapperPat<tglobaladdr, DADDiu, CPU64Regs>; -def : WrapperPat<tconstpool, DADDiu, CPU64Regs>; -def : WrapperPat<texternalsym, DADDiu, CPU64Regs>; 
-def : WrapperPat<tblockaddress, DADDiu, CPU64Regs>; -def : WrapperPat<tjumptable, DADDiu, CPU64Regs>; -def : WrapperPat<tglobaltlsaddr, DADDiu, CPU64Regs>; - -defm : BrcondPats<CPU64Regs, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64, +def : MipsPat<(add GPR64:$hi, (MipsLo tglobaladdr:$lo)), + (DADDiu GPR64:$hi, tglobaladdr:$lo)>; +def : MipsPat<(add GPR64:$hi, (MipsLo tblockaddress:$lo)), + (DADDiu GPR64:$hi, tblockaddress:$lo)>; +def : MipsPat<(add GPR64:$hi, (MipsLo tjumptable:$lo)), + (DADDiu GPR64:$hi, tjumptable:$lo)>; +def : MipsPat<(add GPR64:$hi, (MipsLo tconstpool:$lo)), + (DADDiu GPR64:$hi, tconstpool:$lo)>; +def : MipsPat<(add GPR64:$hi, (MipsLo tglobaltlsaddr:$lo)), + (DADDiu GPR64:$hi, tglobaltlsaddr:$lo)>; + +def : WrapperPat<tglobaladdr, DADDiu, GPR64>; +def : WrapperPat<tconstpool, DADDiu, GPR64>; +def : WrapperPat<texternalsym, DADDiu, GPR64>; +def : WrapperPat<tblockaddress, DADDiu, GPR64>; +def : WrapperPat<tjumptable, DADDiu, GPR64>; +def : WrapperPat<tglobaltlsaddr, DADDiu, GPR64>; + +defm : BrcondPats<GPR64, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64, ZERO_64>; def : MipsPat<(brcond (i32 (setlt i64:$lhs, 1)), bb:$dst), @@ -300,28 +307,28 @@ def : MipsPat<(brcond (i32 (setgt i64:$lhs, -1)), bb:$dst), (BGEZ64 i64:$lhs, bb:$dst)>; // setcc patterns -defm : SeteqPats<CPU64Regs, SLTiu64, XOR64, SLTu64, ZERO_64>; -defm : SetlePats<CPU64Regs, SLT64, SLTu64>; -defm : SetgtPats<CPU64Regs, SLT64, SLTu64>; -defm : SetgePats<CPU64Regs, SLT64, SLTu64>; -defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>; +defm : SeteqPats<GPR64, SLTiu64, XOR64, SLTu64, ZERO_64>; +defm : SetlePats<GPR64, SLT64, SLTu64>; +defm : SetgtPats<GPR64, SLT64, SLTu64>; +defm : SetgePats<GPR64, SLT64, SLTu64>; +defm : SetgeImmPats<GPR64, SLTi64, SLTiu64>; // truncate -def : MipsPat<(i32 (trunc CPU64Regs:$src)), - (SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>, +def : MipsPat<(i32 (trunc GPR64:$src)), + (SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>, Requires<[IsN64, HasStdEnc]>; // 32-to-64-bit extension -def : MipsPat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>; -def : MipsPat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>; -def : MipsPat<(i64 (sext CPURegs:$src)), (SLL64_32 CPURegs:$src)>; +def : MipsPat<(i64 (anyext GPR32:$src)), (SLL64_32 GPR32:$src)>; +def : MipsPat<(i64 (zext GPR32:$src)), (DSRL (DSLL64_32 GPR32:$src), 32)>; +def : MipsPat<(i64 (sext GPR32:$src)), (SLL64_32 GPR32:$src)>; // Sign extend in register -def : MipsPat<(i64 (sext_inreg CPU64Regs:$src, i32)), - (SLL64_64 CPU64Regs:$src)>; +def : MipsPat<(i64 (sext_inreg GPR64:$src, i32)), + (SLL64_64 GPR64:$src)>; // bswap MipsPattern -def : MipsPat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>; +def : MipsPat<(bswap GPR64:$rt), (DSHD (DSBH GPR64:$rt))>; // mflo/hi patterns. 
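Worth spelling out: the 32-to-64-bit extension patterns above lean on MIPS64 semantics. sll with a shift of 0 (SLL64_32/SLL64_64) sign-extends its 32-bit result into the full register, which is why one instruction covers sext, anyext, and sext_inreg, while zext has no single-instruction form and is built from a shift pair. The arithmetic of that zext pair, as plain C++:

#include <cstdint>

// dsll rd, rt, 32 then dsrl rd, rd, 32: the two logical shifts clear the
// upper word, zero-extending the low 32 bits without a 64-bit AND mask.
static uint64_t zext32to64(uint64_t Rt) {
  uint64_t Rd = Rt << 32; // DSLL64_32: low word moves to bits 63..32
  return Rd >> 32;        // DSRL ..., 32: back down, top half now zero
}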
def : MipsPat<(i64 (ExtractLOHI ACRegs128:$ac, imm:$lohi_idx)), @@ -331,71 +338,38 @@ def : MipsPat<(i64 (ExtractLOHI ACRegs128:$ac, imm:$lohi_idx)), // Instruction aliases //===----------------------------------------------------------------------===// def : InstAlias<"move $dst, $src", - (DADDu CPU64RegsOpnd:$dst, CPU64RegsOpnd:$src, ZERO_64), 1>, - Requires<[HasMips64]>; -def : InstAlias<"move $dst, $src", - (OR64 CPU64RegsOpnd:$dst, CPU64RegsOpnd:$src, ZERO_64), 1>, - Requires<[HasMips64]>; -def : InstAlias<"and $rs, $rt, $imm", - (DANDi CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm), - 1>, - Requires<[HasMips64]>; -def : InstAlias<"slt $rs, $rt, $imm", - (SLTi64 CPURegsOpnd:$rs, CPU64Regs:$rt, simm16_64:$imm), 1>, - Requires<[HasMips64]>; -def : InstAlias<"xor $rs, $rt, $imm", - (XORi64 CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm), - 1>, - Requires<[HasMips64]>; -def : InstAlias<"not $rt, $rs", - (NOR64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rs, ZERO_64), 1>, - Requires<[HasMips64]>; -def : InstAlias<"j $rs", (JR64 CPU64Regs:$rs), 0>, Requires<[HasMips64]>; -def : InstAlias<"jalr $rs", (JALR64 RA_64, CPU64Regs:$rs)>, + (DADDu GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>, Requires<[HasMips64]>; -def : InstAlias<"jal $rs", (JALR64 RA_64, CPU64Regs:$rs), 0>, - Requires<[HasMips64]>; -def : InstAlias<"jal $rd,$rs", (JALR64 CPU64Regs:$rd, CPU64Regs:$rs), 0>, - Requires<[HasMips64]>; def : InstAlias<"daddu $rs, $rt, $imm", - (DADDiu CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm), - 1>; + (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm), + 0>; def : InstAlias<"dadd $rs, $rt, $imm", - (DADDi CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm), - 1>; -def : InstAlias<"or $rs, $rt, $imm", - (ORi64 CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm), - 1>, Requires<[HasMips64]>; -def : InstAlias<"bnez $rs,$offset", - (BNE64 CPU64RegsOpnd:$rs, ZERO_64, brtarget:$offset), 1>, - Requires<[HasMips64]>; -def : InstAlias<"beqz $rs,$offset", - (BEQ64 CPU64RegsOpnd:$rs, ZERO_64, brtarget:$offset), 1>, - Requires<[HasMips64]>; + (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm), + 0>; /// Move between CPU and coprocessor registers let DecoderNamespace = "Mips64" in { -def DMFC0_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rt), - (ins CPU64RegsOpnd:$rd, uimm16:$sel), +def DMFC0_3OP64 : MFC3OP<(outs GPR64Opnd:$rt), + (ins GPR64Opnd:$rd, uimm16:$sel), "dmfc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 1>; -def DMTC0_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rd, uimm16:$sel), - (ins CPU64RegsOpnd:$rt), +def DMTC0_3OP64 : MFC3OP<(outs GPR64Opnd:$rd, uimm16:$sel), + (ins GPR64Opnd:$rt), "dmtc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 5>; -def DMFC2_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rt), - (ins CPU64RegsOpnd:$rd, uimm16:$sel), +def DMFC2_3OP64 : MFC3OP<(outs GPR64Opnd:$rt), + (ins GPR64Opnd:$rd, uimm16:$sel), "dmfc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 1>; -def DMTC2_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rd, uimm16:$sel), - (ins CPU64RegsOpnd:$rt), +def DMTC2_3OP64 : MFC3OP<(outs GPR64Opnd:$rd, uimm16:$sel), + (ins GPR64Opnd:$rt), "dmtc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 5>; } // Two operand (implicit 0 selector) versions: def : InstAlias<"dmfc0 $rt, $rd", - (DMFC0_3OP64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rd, 0), 0>; + (DMFC0_3OP64 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>; def : InstAlias<"dmtc0 $rt, $rd", - (DMTC0_3OP64 CPU64RegsOpnd:$rd, 0, CPU64RegsOpnd:$rt), 0>; + (DMTC0_3OP64 GPR64Opnd:$rd, 0, GPR64Opnd:$rt), 0>; def : InstAlias<"dmfc2 $rt, $rd", - (DMFC2_3OP64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rd, 0), 0>; + 
(DMFC2_3OP64 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>; def : InstAlias<"dmtc2 $rt, $rd", - (DMTC2_3OP64 CPU64RegsOpnd:$rd, 0, CPU64RegsOpnd:$rt), 0>; + (DMTC2_3OP64 GPR64Opnd:$rd, 0, GPR64Opnd:$rt), 0>; diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index 6e4feda..1dc3326 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -141,7 +141,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) { const MachineFrameInfo *MFI = MF->getFrameInfo(); const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); // size of stack area to which FP callee-saved regs are saved. - unsigned CPURegSize = Mips::CPURegsRegClass.getSize(); + unsigned CPURegSize = Mips::GPR32RegClass.getSize(); unsigned FGR32RegSize = Mips::FGR32RegClass.getSize(); unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize(); bool HasAFGR64Reg = false; @@ -151,7 +151,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) { // Set FPU Bitmask. for (i = 0; i != e; ++i) { unsigned Reg = CSI[i].getReg(); - if (Mips::CPURegsRegClass.contains(Reg)) + if (Mips::GPR32RegClass.contains(Reg)) break; unsigned RegNum = TM.getRegisterInfo()->getEncodingValue(Reg); @@ -557,6 +557,15 @@ printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O, void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) { // FIXME: Use SwitchSection. + // TODO: Need to add -mabicalls and -mno-abicalls flags. + // Currently we assume that -mabicalls is the default. + if (OutStreamer.hasRawTextSupport()) { + OutStreamer.EmitRawText(StringRef("\t.abicalls")); + Reloc::Model RM = Subtarget->getRelocationModel(); + if (RM == Reloc::Static && !Subtarget->hasMips64()) + OutStreamer.EmitRawText(StringRef("\t.option\tpic0")); + } + // Tell the assembler which ABI we are using if (OutStreamer.hasRawTextSupport()) OutStreamer.EmitRawText("\t.section .mdebug." + @@ -589,16 +598,6 @@ void MipsAsmPrinter::EmitEndOfAsmFile(Module &M) { MES->emitELFHeaderFlagsCG(*Subtarget); } -MachineLocation -MipsAsmPrinter::getDebugValueLocation(const MachineInstr *MI) const { - // Handles frame addresses emitted in MipsInstrInfo::emitFrameIndexDebugValue. - assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!"); - assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() && - "Unexpected MachineOperand types"); - return MachineLocation(MI->getOperand(0).getReg(), - MI->getOperand(1).getImm()); -} - void MipsAsmPrinter::PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS) { // TODO: implement diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h index dbdaf26..4d1d624 100644 --- a/lib/Target/Mips/MipsAsmPrinter.h +++ b/lib/Target/Mips/MipsAsmPrinter.h @@ -81,7 +81,6 @@ public: const char *Modifier = 0); void EmitStartOfAsmFile(Module &M); void EmitEndOfAsmFile(Module &M); - virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const; void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); }; } diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td index 42e4c99..39862b3 100644 --- a/lib/Target/Mips/MipsCondMov.td +++ b/lib/Target/Mips/MipsCondMov.td @@ -16,7 +16,7 @@ // MipsISelLowering::EmitInstrWithCustomInserter if target does not have // conditional move instructions. 
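One note on the MipsAsmPrinter hunk above: .abicalls and .option pic0 are plain assembler directives, so they go out through EmitRawText and only when the streamer has raw-text support (direct object emission skips them, which the in-source TODO acknowledges). A condensed sketch of that shape with a hypothetical free function; the streamer calls match the in-tree API used in the hunk.

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCStreamer.h"

// Mirror of the EmitStartOfAsmFile logic above: always declare abicalls,
// and drop back to non-PIC (.option pic0) only for static, 32-bit code.
static void emitAbiDirectives(llvm::MCStreamer &OS, bool IsStatic,
                              bool IsMips64) {
  if (!OS.hasRawTextSupport())
    return; // these directives have no object-file encoding here
  OS.EmitRawText(llvm::StringRef("\t.abicalls"));
  if (IsStatic && !IsMips64)
    OS.EmitRawText(llvm::StringRef("\t.option\tpic0"));
}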
// cond:int, data:int -class CMov_I_I_FT<string opstr, RegisterClass CRC, RegisterClass DRC, +class CMov_I_I_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC, InstrItinClass Itin> : InstSE<(outs DRC:$rd), (ins DRC:$rs, CRC:$rt, DRC:$F), !strconcat(opstr, "\t$rd, $rs, $rt"), [], Itin, FrmFR> { @@ -24,7 +24,7 @@ class CMov_I_I_FT<string opstr, RegisterClass CRC, RegisterClass DRC, } // cond:int, data:float -class CMov_I_F_FT<string opstr, RegisterClass CRC, RegisterClass DRC, +class CMov_I_F_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC, InstrItinClass Itin> : InstSE<(outs DRC:$fd), (ins DRC:$fs, CRC:$rt, DRC:$F), !strconcat(opstr, "\t$fd, $fs, $rt"), [], Itin, FrmFR> { @@ -32,22 +32,22 @@ class CMov_I_F_FT<string opstr, RegisterClass CRC, RegisterClass DRC, } // cond:float, data:int -class CMov_F_I_FT<string opstr, RegisterClass RC, InstrItinClass Itin, +class CMov_F_I_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, SDPatternOperator OpNode = null_frag> : - InstSE<(outs RC:$rd), (ins RC:$rs, RC:$F), - !strconcat(opstr, "\t$rd, $rs, $$fcc0"), - [(set RC:$rd, (OpNode RC:$rs, RC:$F))], Itin, FrmFR> { - let Uses = [FCR31]; + InstSE<(outs RC:$rd), (ins RC:$rs, FCCRegsOpnd:$fcc, RC:$F), + !strconcat(opstr, "\t$rd, $rs, $fcc"), + [(set RC:$rd, (OpNode RC:$rs, FCCRegsOpnd:$fcc, RC:$F))], + Itin, FrmFR> { let Constraints = "$F = $rd"; } // cond:float, data:float -class CMov_F_F_FT<string opstr, RegisterClass RC, InstrItinClass Itin, +class CMov_F_F_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, SDPatternOperator OpNode = null_frag> : - InstSE<(outs RC:$fd), (ins RC:$fs, RC:$F), - !strconcat(opstr, "\t$fd, $fs, $$fcc0"), - [(set RC:$fd, (OpNode RC:$fs, RC:$F))], Itin, FrmFR> { - let Uses = [FCR31]; + InstSE<(outs RC:$fd), (ins RC:$fs, FCCRegsOpnd:$fcc, RC:$F), + !strconcat(opstr, "\t$fd, $fs, $fcc"), + [(set RC:$fd, (OpNode RC:$fs, FCCRegsOpnd:$fcc, RC:$F))], + Itin, FrmFR> { let Constraints = "$F = $fd"; } @@ -103,151 +103,140 @@ multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst, } // Instantiation of instructions. 
-def MOVZ_I_I : CMov_I_I_FT<"movz", CPURegs, CPURegs, NoItinerary>, +def MOVZ_I_I : CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd, NoItinerary>, ADD_FM<0, 0xa>; -let Predicates = [HasStdEnc], - DecoderNamespace = "Mips64" in { - def MOVZ_I_I64 : CMov_I_I_FT<"movz", CPURegs, CPU64Regs, NoItinerary>, - ADD_FM<0, 0xa>; - def MOVZ_I64_I : CMov_I_I_FT<"movz", CPU64Regs, CPURegs, NoItinerary>, - ADD_FM<0, 0xa> { - let isCodeGenOnly = 1; - } - def MOVZ_I64_I64 : CMov_I_I_FT<"movz", CPU64Regs, CPU64Regs, NoItinerary>, - ADD_FM<0, 0xa> { - let isCodeGenOnly = 1; - } -} - -def MOVN_I_I : CMov_I_I_FT<"movn", CPURegs, CPURegs, NoItinerary>, - ADD_FM<0, 0xb>; -let Predicates = [HasStdEnc], - DecoderNamespace = "Mips64" in { - def MOVN_I_I64 : CMov_I_I_FT<"movn", CPURegs, CPU64Regs, NoItinerary>, - ADD_FM<0, 0xb>; - def MOVN_I64_I : CMov_I_I_FT<"movn", CPU64Regs, CPURegs, NoItinerary>, - ADD_FM<0, 0xb> { - let isCodeGenOnly = 1; - } - def MOVN_I64_I64 : CMov_I_I_FT<"movn", CPU64Regs, CPU64Regs, NoItinerary>, - ADD_FM<0, 0xb> { - let isCodeGenOnly = 1; - } -} - -def MOVZ_I_S : CMov_I_F_FT<"movz.s", CPURegs, FGR32, IIFmove>, - CMov_I_F_FM<18, 16>; -def MOVZ_I64_S : CMov_I_F_FT<"movz.s", CPU64Regs, FGR32, IIFmove>, - CMov_I_F_FM<18, 16>, Requires<[HasMips64, HasStdEnc]> { - let DecoderNamespace = "Mips64"; + +let Predicates = [HasStdEnc], isCodeGenOnly = 1 in { + def MOVZ_I_I64 : CMov_I_I_FT<"movz", GPR32Opnd, GPR64Opnd, + NoItinerary>, ADD_FM<0, 0xa>; + def MOVZ_I64_I : CMov_I_I_FT<"movz", GPR64Opnd, GPR32Opnd, + NoItinerary>, ADD_FM<0, 0xa>; + def MOVZ_I64_I64 : CMov_I_I_FT<"movz", GPR64Opnd, GPR64Opnd, + NoItinerary>, ADD_FM<0, 0xa>; } -def MOVN_I_S : CMov_I_F_FT<"movn.s", CPURegs, FGR32, IIFmove>, - CMov_I_F_FM<19, 16>; -def MOVN_I64_S : CMov_I_F_FT<"movn.s", CPU64Regs, FGR32, IIFmove>, - CMov_I_F_FM<19, 16>, Requires<[HasMips64, HasStdEnc]> { - let DecoderNamespace = "Mips64"; +def MOVN_I_I : CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd, + NoItinerary>, ADD_FM<0, 0xb>; + +let Predicates = [HasStdEnc], isCodeGenOnly = 1 in { + def MOVN_I_I64 : CMov_I_I_FT<"movn", GPR32Opnd, GPR64Opnd, + NoItinerary>, ADD_FM<0, 0xb>; + def MOVN_I64_I : CMov_I_I_FT<"movn", GPR64Opnd, GPR32Opnd, + NoItinerary>, ADD_FM<0, 0xb>; + def MOVN_I64_I64 : CMov_I_I_FT<"movn", GPR64Opnd, GPR64Opnd, + NoItinerary>, ADD_FM<0, 0xb>; } +def MOVZ_I_S : CMov_I_F_FT<"movz.s", GPR32Opnd, FGR32RegsOpnd, IIFmove>, + CMov_I_F_FM<18, 16>; + +let isCodeGenOnly = 1 in +def MOVZ_I64_S : CMov_I_F_FT<"movz.s", GPR64Opnd, FGR32RegsOpnd, IIFmove>, + CMov_I_F_FM<18, 16>, Requires<[HasMips64, HasStdEnc]>; + +def MOVN_I_S : CMov_I_F_FT<"movn.s", GPR32Opnd, FGR32RegsOpnd, IIFmove>, + CMov_I_F_FM<19, 16>; + +let isCodeGenOnly = 1 in +def MOVN_I64_S : CMov_I_F_FT<"movn.s", GPR64Opnd, FGR32RegsOpnd, IIFmove>, + CMov_I_F_FM<19, 16>, Requires<[HasMips64, HasStdEnc]>; + let Predicates = [NotFP64bit, HasStdEnc] in { - def MOVZ_I_D32 : CMov_I_F_FT<"movz.d", CPURegs, AFGR64, IIFmove>, + def MOVZ_I_D32 : CMov_I_F_FT<"movz.d", GPR32Opnd, AFGR64RegsOpnd, IIFmove>, CMov_I_F_FM<18, 17>; - def MOVN_I_D32 : CMov_I_F_FT<"movn.d", CPURegs, AFGR64, IIFmove>, + def MOVN_I_D32 : CMov_I_F_FT<"movn.d", GPR32Opnd, AFGR64RegsOpnd, IIFmove>, CMov_I_F_FM<19, 17>; } -let Predicates = [IsFP64bit, HasStdEnc], - DecoderNamespace = "Mips64" in { - def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", CPURegs, FGR64, IIFmove>, + +let Predicates = [IsFP64bit, HasStdEnc], isCodeGenOnly = 1 in { + def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", GPR32Opnd, FGR64RegsOpnd, IIFmove>, CMov_I_F_FM<18, 17>; - def MOVZ_I64_D64 : 
CMov_I_F_FT<"movz.d", CPU64Regs, FGR64, IIFmove>, - CMov_I_F_FM<18, 17> { - let isCodeGenOnly = 1; - } - def MOVN_I_D64 : CMov_I_F_FT<"movn.d", CPURegs, FGR64, IIFmove>, + def MOVZ_I64_D64 : CMov_I_F_FT<"movz.d", GPR64Opnd, FGR64RegsOpnd, + IIFmove>, CMov_I_F_FM<18, 17>; + def MOVN_I_D64 : CMov_I_F_FT<"movn.d", GPR32Opnd, FGR64RegsOpnd, IIFmove>, CMov_I_F_FM<19, 17>; - def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", CPU64Regs, FGR64, IIFmove>, - CMov_I_F_FM<19, 17> { - let isCodeGenOnly = 1; - } + def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", GPR64Opnd, FGR64RegsOpnd, + IIFmove>, CMov_I_F_FM<19, 17>; } -def MOVT_I : CMov_F_I_FT<"movt", CPURegs, IIAlu, MipsCMovFP_T>, CMov_F_I_FM<1>; -def MOVT_I64 : CMov_F_I_FT<"movt", CPU64Regs, IIAlu, MipsCMovFP_T>, - CMov_F_I_FM<1>, Requires<[HasMips64, HasStdEnc]> { - let DecoderNamespace = "Mips64"; -} +def MOVT_I : CMov_F_I_FT<"movt", GPR32Opnd, IIArith, MipsCMovFP_T>, + CMov_F_I_FM<1>; -def MOVF_I : CMov_F_I_FT<"movf", CPURegs, IIAlu, MipsCMovFP_F>, CMov_F_I_FM<0>; -def MOVF_I64 : CMov_F_I_FT<"movf", CPU64Regs, IIAlu, MipsCMovFP_F>, - CMov_F_I_FM<0>, Requires<[HasMips64, HasStdEnc]> { - let DecoderNamespace = "Mips64"; -} +let isCodeGenOnly = 1 in +def MOVT_I64 : CMov_F_I_FT<"movt", GPR64Opnd, IIArith, MipsCMovFP_T>, + CMov_F_I_FM<1>, Requires<[HasMips64, HasStdEnc]>; + +def MOVF_I : CMov_F_I_FT<"movf", GPR32Opnd, IIArith, MipsCMovFP_F>, + CMov_F_I_FM<0>; + +let isCodeGenOnly = 1 in +def MOVF_I64 : CMov_F_I_FT<"movf", GPR64Opnd, IIArith, MipsCMovFP_F>, + CMov_F_I_FM<0>, Requires<[HasMips64, HasStdEnc]>; -def MOVT_S : CMov_F_F_FT<"movt.s", FGR32, IIFmove, MipsCMovFP_T>, +def MOVT_S : CMov_F_F_FT<"movt.s", FGR32RegsOpnd, IIFmove, MipsCMovFP_T>, CMov_F_F_FM<16, 1>; -def MOVF_S : CMov_F_F_FT<"movf.s", FGR32, IIFmove, MipsCMovFP_F>, +def MOVF_S : CMov_F_F_FT<"movf.s", FGR32RegsOpnd, IIFmove, MipsCMovFP_F>, CMov_F_F_FM<16, 0>; let Predicates = [NotFP64bit, HasStdEnc] in { - def MOVT_D32 : CMov_F_F_FT<"movt.d", AFGR64, IIFmove, MipsCMovFP_T>, + def MOVT_D32 : CMov_F_F_FT<"movt.d", AFGR64RegsOpnd, IIFmove, MipsCMovFP_T>, CMov_F_F_FM<17, 1>; - def MOVF_D32 : CMov_F_F_FT<"movf.d", AFGR64, IIFmove, MipsCMovFP_F>, + def MOVF_D32 : CMov_F_F_FT<"movf.d", AFGR64RegsOpnd, IIFmove, MipsCMovFP_F>, CMov_F_F_FM<17, 0>; } -let Predicates = [IsFP64bit, HasStdEnc], - DecoderNamespace = "Mips64" in { - def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64, IIFmove, MipsCMovFP_T>, +let Predicates = [IsFP64bit, HasStdEnc], isCodeGenOnly = 1 in { + def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64RegsOpnd, IIFmove, MipsCMovFP_T>, CMov_F_F_FM<17, 1>; - def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64, IIFmove, MipsCMovFP_F>, + def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64RegsOpnd, IIFmove, MipsCMovFP_F>, CMov_F_F_FM<17, 0>; } // Instantiation of conditional move patterns. 
-defm : MovzPats0<CPURegs, CPURegs, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>; -defm : MovzPats1<CPURegs, CPURegs, MOVZ_I_I, XOR>; -defm : MovzPats2<CPURegs, CPURegs, MOVZ_I_I, XORi>; +defm : MovzPats0<GPR32, GPR32, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>; +defm : MovzPats1<GPR32, GPR32, MOVZ_I_I, XOR>; +defm : MovzPats2<GPR32, GPR32, MOVZ_I_I, XORi>; let Predicates = [HasMips64, HasStdEnc] in { - defm : MovzPats0<CPURegs, CPU64Regs, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>; - defm : MovzPats0<CPU64Regs, CPURegs, MOVZ_I_I, SLT64, SLTu64, SLTi64, + defm : MovzPats0<GPR32, GPR64, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>; + defm : MovzPats0<GPR64, GPR32, MOVZ_I_I, SLT64, SLTu64, SLTi64, SLTiu64>; - defm : MovzPats0<CPU64Regs, CPU64Regs, MOVZ_I_I64, SLT64, SLTu64, SLTi64, + defm : MovzPats0<GPR64, GPR64, MOVZ_I_I64, SLT64, SLTu64, SLTi64, SLTiu64>; - defm : MovzPats1<CPURegs, CPU64Regs, MOVZ_I_I64, XOR>; - defm : MovzPats1<CPU64Regs, CPURegs, MOVZ_I64_I, XOR64>; - defm : MovzPats1<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XOR64>; - defm : MovzPats2<CPURegs, CPU64Regs, MOVZ_I_I64, XORi>; - defm : MovzPats2<CPU64Regs, CPURegs, MOVZ_I64_I, XORi64>; - defm : MovzPats2<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XORi64>; + defm : MovzPats1<GPR32, GPR64, MOVZ_I_I64, XOR>; + defm : MovzPats1<GPR64, GPR32, MOVZ_I64_I, XOR64>; + defm : MovzPats1<GPR64, GPR64, MOVZ_I64_I64, XOR64>; + defm : MovzPats2<GPR32, GPR64, MOVZ_I_I64, XORi>; + defm : MovzPats2<GPR64, GPR32, MOVZ_I64_I, XORi64>; + defm : MovzPats2<GPR64, GPR64, MOVZ_I64_I64, XORi64>; } -defm : MovnPats<CPURegs, CPURegs, MOVN_I_I, XOR>; +defm : MovnPats<GPR32, GPR32, MOVN_I_I, XOR>; let Predicates = [HasMips64, HasStdEnc] in { - defm : MovnPats<CPURegs, CPU64Regs, MOVN_I_I64, XOR>; - defm : MovnPats<CPU64Regs, CPURegs, MOVN_I64_I, XOR64>; - defm : MovnPats<CPU64Regs, CPU64Regs, MOVN_I64_I64, XOR64>; + defm : MovnPats<GPR32, GPR64, MOVN_I_I64, XOR>; + defm : MovnPats<GPR64, GPR32, MOVN_I64_I, XOR64>; + defm : MovnPats<GPR64, GPR64, MOVN_I64_I64, XOR64>; } -defm : MovzPats0<CPURegs, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>; -defm : MovzPats1<CPURegs, FGR32, MOVZ_I_S, XOR>; -defm : MovnPats<CPURegs, FGR32, MOVN_I_S, XOR>; +defm : MovzPats0<GPR32, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>; +defm : MovzPats1<GPR32, FGR32, MOVZ_I_S, XOR>; +defm : MovnPats<GPR32, FGR32, MOVN_I_S, XOR>; let Predicates = [HasMips64, HasStdEnc] in { - defm : MovzPats0<CPU64Regs, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64, + defm : MovzPats0<GPR64, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64, SLTiu64>; - defm : MovzPats1<CPU64Regs, FGR32, MOVZ_I64_S, XOR64>; - defm : MovnPats<CPU64Regs, FGR32, MOVN_I64_S, XOR64>; + defm : MovzPats1<GPR64, FGR32, MOVZ_I64_S, XOR64>; + defm : MovnPats<GPR64, FGR32, MOVN_I64_S, XOR64>; } let Predicates = [NotFP64bit, HasStdEnc] in { - defm : MovzPats0<CPURegs, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>; - defm : MovzPats1<CPURegs, AFGR64, MOVZ_I_D32, XOR>; - defm : MovnPats<CPURegs, AFGR64, MOVN_I_D32, XOR>; + defm : MovzPats0<GPR32, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>; + defm : MovzPats1<GPR32, AFGR64, MOVZ_I_D32, XOR>; + defm : MovnPats<GPR32, AFGR64, MOVN_I_D32, XOR>; } let Predicates = [IsFP64bit, HasStdEnc] in { - defm : MovzPats0<CPURegs, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>; - defm : MovzPats0<CPU64Regs, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64, + defm : MovzPats0<GPR32, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>; + defm : MovzPats0<GPR64, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64, SLTiu64>; - defm : MovzPats1<CPURegs, FGR64, MOVZ_I_D64, XOR>; - defm : 
MovzPats1<CPU64Regs, FGR64, MOVZ_I64_D64, XOR64>; - defm : MovnPats<CPURegs, FGR64, MOVN_I_D64, XOR>; - defm : MovnPats<CPU64Regs, FGR64, MOVN_I64_D64, XOR64>; + defm : MovzPats1<GPR32, FGR64, MOVZ_I_D64, XOR>; + defm : MovzPats1<GPR64, FGR64, MOVZ_I64_D64, XOR64>; + defm : MovnPats<GPR32, FGR64, MOVN_I_D64, XOR>; + defm : MovnPats<GPR64, FGR64, MOVN_I64_D64, XOR64>; } diff --git a/lib/Target/Mips/MipsDSPInstrInfo.td b/lib/Target/Mips/MipsDSPInstrInfo.td index c12878a..526821a 100644 --- a/lib/Target/Mips/MipsDSPInstrInfo.td +++ b/lib/Target/Mips/MipsDSPInstrInfo.td @@ -328,9 +328,9 @@ class REPL_DESC_BASE<string instr_asm, SDPatternOperator OpNode, class SHLL_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin, RegisterClass RC> { dag OutOperandList = (outs RC:$rd); - dag InOperandList = (ins RC:$rt, CPURegs:$rs_sa); + dag InOperandList = (ins RC:$rt, GPR32:$rs_sa); string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa"); - list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs_sa))]; + list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, GPR32:$rs_sa))]; InstrItinClass Itinerary = itin; } @@ -347,11 +347,11 @@ class SHLL_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode, class LX_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { - dag OutOperandList = (outs CPURegs:$rd); - dag InOperandList = (ins CPURegs:$base, CPURegs:$index); + dag OutOperandList = (outs GPR32:$rd); + dag InOperandList = (ins GPR32:$base, GPR32:$index); string AsmString = !strconcat(instr_asm, "\t$rd, ${index}(${base})"); - list<dag> Pattern = [(set CPURegs:$rd, - (OpNode CPURegs:$base, CPURegs:$index))]; + list<dag> Pattern = [(set GPR32:$rd, + (OpNode GPR32:$base, GPR32:$index))]; InstrItinClass Itinerary = itin; bit mayLoad = 1; } @@ -368,26 +368,26 @@ class ADDUH_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode, class APPEND_DESC_BASE<string instr_asm, SDPatternOperator OpNode, SDPatternOperator ImmOp, InstrItinClass itin> { - dag OutOperandList = (outs CPURegs:$rt); - dag InOperandList = (ins CPURegs:$rs, shamt:$sa, CPURegs:$src); + dag OutOperandList = (outs GPR32:$rt); + dag InOperandList = (ins GPR32:$rs, shamt:$sa, GPR32:$src); string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa"); - list<dag> Pattern = [(set CPURegs:$rt, - (OpNode CPURegs:$src, CPURegs:$rs, ImmOp:$sa))]; + list<dag> Pattern = [(set GPR32:$rt, + (OpNode GPR32:$src, GPR32:$rs, ImmOp:$sa))]; InstrItinClass Itinerary = itin; string Constraints = "$src = $rt"; } class EXTR_W_TY1_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { - dag OutOperandList = (outs CPURegs:$rt); - dag InOperandList = (ins ACRegsDSP:$ac, CPURegs:$shift_rs); + dag OutOperandList = (outs GPR32:$rt); + dag InOperandList = (ins ACRegsDSP:$ac, GPR32:$shift_rs); string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs"); InstrItinClass Itinerary = itin; } class EXTR_W_TY1_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { - dag OutOperandList = (outs CPURegs:$rt); + dag OutOperandList = (outs GPR32:$rt); dag InOperandList = (ins ACRegsDSP:$ac, uimm16:$shift_rs); string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs"); InstrItinClass Itinerary = itin; @@ -404,55 +404,55 @@ class SHILO_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { class SHILO_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { dag OutOperandList = (outs ACRegsDSP:$ac); - dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin); + dag 
InOperandList = (ins GPR32:$rs, ACRegsDSP:$acin); string AsmString = !strconcat(instr_asm, "\t$ac, $rs"); list<dag> Pattern = [(set ACRegsDSP:$ac, - (OpNode CPURegs:$rs, ACRegsDSP:$acin))]; + (OpNode GPR32:$rs, ACRegsDSP:$acin))]; string Constraints = "$acin = $ac"; } class MTHLIP_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { dag OutOperandList = (outs ACRegsDSP:$ac); - dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin); + dag InOperandList = (ins GPR32:$rs, ACRegsDSP:$acin); string AsmString = !strconcat(instr_asm, "\t$rs, $ac"); list<dag> Pattern = [(set ACRegsDSP:$ac, - (OpNode CPURegs:$rs, ACRegsDSP:$acin))]; + (OpNode GPR32:$rs, ACRegsDSP:$acin))]; string Constraints = "$acin = $ac"; } class RDDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { - dag OutOperandList = (outs CPURegs:$rd); + dag OutOperandList = (outs GPR32:$rd); dag InOperandList = (ins uimm16:$mask); string AsmString = !strconcat(instr_asm, "\t$rd, $mask"); - list<dag> Pattern = [(set CPURegs:$rd, (OpNode immZExt10:$mask))]; + list<dag> Pattern = [(set GPR32:$rd, (OpNode immZExt10:$mask))]; InstrItinClass Itinerary = itin; } class WRDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { dag OutOperandList = (outs); - dag InOperandList = (ins CPURegs:$rs, uimm16:$mask); + dag InOperandList = (ins GPR32:$rs, uimm16:$mask); string AsmString = !strconcat(instr_asm, "\t$rs, $mask"); - list<dag> Pattern = [(OpNode CPURegs:$rs, immZExt10:$mask)]; + list<dag> Pattern = [(OpNode GPR32:$rs, immZExt10:$mask)]; InstrItinClass Itinerary = itin; } class DPA_W_PH_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { dag OutOperandList = (outs ACRegsDSP:$ac); - dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin); + dag InOperandList = (ins GPR32:$rs, GPR32:$rt, ACRegsDSP:$acin); string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); list<dag> Pattern = [(set ACRegsDSP:$ac, - (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))]; + (OpNode GPR32:$rs, GPR32:$rt, ACRegsDSP:$acin))]; string Constraints = "$acin = $ac"; } class MULT_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { dag OutOperandList = (outs ACRegsDSP:$ac); - dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt); + dag InOperandList = (ins GPR32:$rs, GPR32:$rt); string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); - list<dag> Pattern = [(set ACRegsDSP:$ac, (OpNode CPURegs:$rs, CPURegs:$rt))]; + list<dag> Pattern = [(set ACRegsDSP:$ac, (OpNode GPR32:$rs, GPR32:$rt))]; InstrItinClass Itinerary = itin; int AddedComplexity = 20; bit isCommutable = 1; @@ -461,17 +461,17 @@ class MULT_DESC_BASE<string instr_asm, SDPatternOperator OpNode, class MADD_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { dag OutOperandList = (outs ACRegsDSP:$ac); - dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin); + dag InOperandList = (ins GPR32:$rs, GPR32:$rt, ACRegsDSP:$acin); string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); list<dag> Pattern = [(set ACRegsDSP:$ac, - (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))]; + (OpNode GPR32:$rs, GPR32:$rt, ACRegsDSP:$acin))]; InstrItinClass Itinerary = itin; int AddedComplexity = 20; string Constraints = "$acin = $ac"; } class MFHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> { - dag OutOperandList = (outs CPURegs:$rd); + dag OutOperandList = (outs GPR32:$rd); dag InOperandList = (ins RC:$ac); string AsmString = !strconcat(instr_asm, "\t$rd, $ac"); 
InstrItinClass Itinerary = itin; @@ -479,13 +479,13 @@ class MFHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> { class MTHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> { dag OutOperandList = (outs RC:$ac); - dag InOperandList = (ins CPURegs:$rs); + dag InOperandList = (ins GPR32:$rs); string AsmString = !strconcat(instr_asm, "\t$rs, $ac"); InstrItinClass Itinerary = itin; } class BPOSGE32_PSEUDO_DESC_BASE<SDPatternOperator OpNode, InstrItinClass itin> : - MipsPseudo<(outs CPURegs:$dst), (ins), [(set CPURegs:$dst, (OpNode))]> { + MipsPseudo<(outs GPR32:$dst), (ins), [(set GPR32:$dst, (OpNode))]> { bit usesCustomInserter = 1; } @@ -501,10 +501,10 @@ class BPOSGE32_DESC_BASE<string instr_asm, InstrItinClass itin> { class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { - dag OutOperandList = (outs CPURegs:$rt); - dag InOperandList = (ins CPURegs:$src, CPURegs:$rs); + dag OutOperandList = (outs GPR32:$rt); + dag InOperandList = (ins GPR32:$src, GPR32:$rs); string AsmString = !strconcat(instr_asm, "\t$rt, $rs"); - list<dag> Pattern = [(set CPURegs:$rt, (OpNode CPURegs:$src, CPURegs:$rs))]; + list<dag> Pattern = [(set GPR32:$rt, (OpNode GPR32:$src, GPR32:$rs))]; InstrItinClass Itinerary = itin; string Constraints = "$src = $rt"; } @@ -547,26 +547,26 @@ class SUBQ_S_PH_DESC : ADDU_QB_DESC_BASE<"subq_s.ph", int_mips_subq_s_ph, Defs<[DSPOutFlag20]>; class ADDQ_S_W_DESC : ADDU_QB_DESC_BASE<"addq_s.w", int_mips_addq_s_w, - NoItinerary, CPURegs, CPURegs>, + NoItinerary, GPR32, GPR32>, IsCommutable, Defs<[DSPOutFlag20]>; class SUBQ_S_W_DESC : ADDU_QB_DESC_BASE<"subq_s.w", int_mips_subq_s_w, - NoItinerary, CPURegs, CPURegs>, + NoItinerary, GPR32, GPR32>, Defs<[DSPOutFlag20]>; class ADDSC_DESC : ADDU_QB_DESC_BASE<"addsc", null_frag, NoItinerary, - CPURegs, CPURegs>, IsCommutable, + GPR32, GPR32>, IsCommutable, Defs<[DSPCarry]>; class ADDWC_DESC : ADDU_QB_DESC_BASE<"addwc", null_frag, NoItinerary, - CPURegs, CPURegs>, + GPR32, GPR32>, IsCommutable, Uses<[DSPCarry]>, Defs<[DSPOutFlag20]>; class MODSUB_DESC : ADDU_QB_DESC_BASE<"modsub", int_mips_modsub, NoItinerary, - CPURegs, CPURegs>; + GPR32, GPR32>; class RADDU_W_QB_DESC : RADDU_W_QB_DESC_BASE<"raddu.w.qb", int_mips_raddu_w_qb, - NoItinerary, CPURegs, DSPRegs>; + NoItinerary, GPR32, DSPRegs>; // Absolute value class ABSQ_S_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.ph", int_mips_absq_s_ph, @@ -574,7 +574,7 @@ class ABSQ_S_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.ph", int_mips_absq_s_ph, Defs<[DSPOutFlag20]>; class ABSQ_S_W_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.w", int_mips_absq_s_w, - NoItinerary, CPURegs>, + NoItinerary, GPR32>, Defs<[DSPOutFlag20]>; // Precision reduce/expand @@ -584,12 +584,12 @@ class PRECRQ_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.qb.ph", class PRECRQ_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.ph.w", int_mips_precrq_ph_w, - NoItinerary, DSPRegs, CPURegs>; + NoItinerary, DSPRegs, GPR32>; class PRECRQ_RS_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq_rs.ph.w", int_mips_precrq_rs_ph_w, NoItinerary, DSPRegs, - CPURegs>, + GPR32>, Defs<[DSPOutFlag22]>; class PRECRQU_S_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrqu_s.qb.ph", @@ -600,11 +600,11 @@ class PRECRQU_S_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrqu_s.qb.ph", class PRECEQ_W_PHL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phl", int_mips_preceq_w_phl, - NoItinerary, CPURegs, DSPRegs>; + NoItinerary, GPR32, DSPRegs>; class PRECEQ_W_PHR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phr", int_mips_preceq_w_phr, - 
NoItinerary, CPURegs, DSPRegs>; + NoItinerary, GPR32, DSPRegs>; class PRECEQU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbl", int_mips_precequ_ph_qbl, @@ -682,18 +682,18 @@ class SHRAV_R_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.ph", int_mips_shra_r_ph, NoItinerary, DSPRegs>; class SHLL_S_W_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.w", int_mips_shll_s_w, - immZExt5, NoItinerary, CPURegs>, + immZExt5, NoItinerary, GPR32>, Defs<[DSPOutFlag22]>; class SHLLV_S_W_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.w", int_mips_shll_s_w, - NoItinerary, CPURegs>, + NoItinerary, GPR32>, Defs<[DSPOutFlag22]>; class SHRA_R_W_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.w", int_mips_shra_r_w, - immZExt5, NoItinerary, CPURegs>; + immZExt5, NoItinerary, GPR32>; class SHRAV_R_W_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.w", int_mips_shra_r_w, - NoItinerary, CPURegs>; + NoItinerary, GPR32>; // Multiplication class MULEU_S_PH_QBL_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbl", @@ -708,12 +708,12 @@ class MULEU_S_PH_QBR_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbr", class MULEQ_S_W_PHL_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phl", int_mips_muleq_s_w_phl, - NoItinerary, CPURegs, DSPRegs>, + NoItinerary, GPR32, DSPRegs>, IsCommutable, Defs<[DSPOutFlag21]>; class MULEQ_S_W_PHR_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phr", int_mips_muleq_s_w_phr, - NoItinerary, CPURegs, DSPRegs>, + NoItinerary, GPR32, DSPRegs>, IsCommutable, Defs<[DSPOutFlag21]>; class MULQ_RS_PH_DESC : ADDU_QB_DESC_BASE<"mulq_rs.ph", int_mips_mulq_rs_ph, @@ -786,16 +786,16 @@ class CMPU_LE_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.le.qb", class CMPGU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.eq.qb", int_mips_cmpgu_eq_qb, - NoItinerary, CPURegs, DSPRegs>, + NoItinerary, GPR32, DSPRegs>, IsCommutable; class CMPGU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.lt.qb", int_mips_cmpgu_lt_qb, - NoItinerary, CPURegs, DSPRegs>; + NoItinerary, GPR32, DSPRegs>; class CMPGU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.le.qb", int_mips_cmpgu_le_qb, - NoItinerary, CPURegs, DSPRegs>; + NoItinerary, GPR32, DSPRegs>; class CMP_EQ_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.eq.ph", int_mips_cmp_eq_ph, NoItinerary, DSPRegs>, @@ -811,7 +811,7 @@ class CMP_LE_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.le.ph", int_mips_cmp_le_ph, // Misc class BITREV_DESC : ABSQ_S_PH_R2_DESC_BASE<"bitrev", int_mips_bitrev, - NoItinerary, CPURegs>; + NoItinerary, GPR32>; class PACKRL_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"packrl.ph", int_mips_packrl_ph, NoItinerary, DSPRegs, DSPRegs>; @@ -823,10 +823,10 @@ class REPL_PH_DESC : REPL_DESC_BASE<"repl.ph", int_mips_repl_ph, immZExt10, NoItinerary, DSPRegs>; class REPLV_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.qb", int_mips_repl_qb, - NoItinerary, DSPRegs, CPURegs>; + NoItinerary, DSPRegs, GPR32>; class REPLV_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.ph", int_mips_repl_ph, - NoItinerary, DSPRegs, CPURegs>; + NoItinerary, DSPRegs, GPR32>; class PICK_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.qb", int_mips_pick_qb, NoItinerary, DSPRegs, DSPRegs>, @@ -945,31 +945,31 @@ class SUBQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"subqh_r.ph", int_mips_subqh_r_ph, NoItinerary, DSPRegs>; class ADDQH_W_DESC : ADDUH_QB_DESC_BASE<"addqh.w", int_mips_addqh_w, - NoItinerary, CPURegs>, IsCommutable; + NoItinerary, GPR32>, IsCommutable; class ADDQH_R_W_DESC : ADDUH_QB_DESC_BASE<"addqh_r.w", int_mips_addqh_r_w, - NoItinerary, CPURegs>, IsCommutable; + NoItinerary, GPR32>, IsCommutable; class SUBQH_W_DESC : ADDUH_QB_DESC_BASE<"subqh.w", int_mips_subqh_w, - NoItinerary, CPURegs>; + NoItinerary, GPR32>; class SUBQH_R_W_DESC : 
ADDUH_QB_DESC_BASE<"subqh_r.w", int_mips_subqh_r_w, - NoItinerary, CPURegs>; + NoItinerary, GPR32>; // Comparison class CMPGDU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.eq.qb", int_mips_cmpgdu_eq_qb, - NoItinerary, CPURegs, DSPRegs>, + NoItinerary, GPR32, DSPRegs>, IsCommutable, Defs<[DSPCCond]>; class CMPGDU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.lt.qb", int_mips_cmpgdu_lt_qb, - NoItinerary, CPURegs, DSPRegs>, + NoItinerary, GPR32, DSPRegs>, Defs<[DSPCCond]>; class CMPGDU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.le.qb", int_mips_cmpgdu_le_qb, - NoItinerary, CPURegs, DSPRegs>, + NoItinerary, GPR32, DSPRegs>, Defs<[DSPCCond]>; // Absolute @@ -987,11 +987,11 @@ class MUL_S_PH_DESC : ADDUH_QB_DESC_BASE<"mul_s.ph", int_mips_mul_s_ph, Defs<[DSPOutFlag21]>; class MULQ_S_W_DESC : ADDUH_QB_DESC_BASE<"mulq_s.w", int_mips_mulq_s_w, - NoItinerary, CPURegs>, IsCommutable, + NoItinerary, GPR32>, IsCommutable, Defs<[DSPOutFlag21]>; class MULQ_RS_W_DESC : ADDUH_QB_DESC_BASE<"mulq_rs.w", int_mips_mulq_rs_w, - NoItinerary, CPURegs>, IsCommutable, + NoItinerary, GPR32>, IsCommutable, Defs<[DSPOutFlag21]>; class MULQ_S_PH_DESC : ADDU_QB_DESC_BASE<"mulq_s.ph", int_mips_mulq_s_ph, @@ -1031,12 +1031,12 @@ class PRECR_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precr.qb.ph", class PRECR_SRA_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra.ph.w", int_mips_precr_sra_ph_w, NoItinerary, DSPRegs, - CPURegs>; + GPR32>; class PRECR_SRA_R_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra_r.ph.w", int_mips_precr_sra_r_ph_w, NoItinerary, DSPRegs, - CPURegs>; + GPR32>; // Shift class SHRA_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra.qb", null_frag, immZExt3, @@ -1242,8 +1242,8 @@ def PREPEND : PREPEND_ENC, PREPEND_DESC; // Pseudos. let isPseudo = 1 in { // Pseudo instructions for loading and storing accumulator registers. - defm LOAD_AC_DSP : LoadM<"load_ac_dsp", ACRegsDSP>; - defm STORE_AC_DSP : StoreM<"store_ac_dsp", ACRegsDSP>; + defm LOAD_AC_DSP : LoadM<"load_ac_dsp", ACRegsDSPOpnd>; + defm STORE_AC_DSP : StoreM<"store_ac_dsp", ACRegsDSPOpnd>; // Pseudos for loading and storing ccond field of DSP control register. defm LOAD_CCOND_DSP : LoadM<"load_ccond_dsp", DSPCC>; @@ -1279,19 +1279,19 @@ class BitconvertPat<ValueType DstVT, ValueType SrcVT, RegisterClass DstRC, DSPPat<(DstVT (bitconvert (SrcVT SrcRC:$src))), (COPY_TO_REGCLASS SrcRC:$src, DstRC)>; -def : BitconvertPat<i32, v2i16, CPURegs, DSPRegs>; -def : BitconvertPat<i32, v4i8, CPURegs, DSPRegs>; -def : BitconvertPat<v2i16, i32, DSPRegs, CPURegs>; -def : BitconvertPat<v4i8, i32, DSPRegs, CPURegs>; +def : BitconvertPat<i32, v2i16, GPR32, DSPRegs>; +def : BitconvertPat<i32, v4i8, GPR32, DSPRegs>; +def : BitconvertPat<v2i16, i32, DSPRegs, GPR32>; +def : BitconvertPat<v4i8, i32, DSPRegs, GPR32>; def : DSPPat<(v2i16 (load addr:$a)), (v2i16 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>; def : DSPPat<(v4i8 (load addr:$a)), (v4i8 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>; def : DSPPat<(store (v2i16 DSPRegs:$val), addr:$a), - (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>; + (SW (COPY_TO_REGCLASS DSPRegs:$val, GPR32), addr:$a)>; def : DSPPat<(store (v4i8 DSPRegs:$val), addr:$a), - (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>; + (SW (COPY_TO_REGCLASS DSPRegs:$val, GPR32), addr:$a)>; // Binary operations. class DSPBinPat<Instruction Inst, ValueType ValTy, SDPatternOperator Node, @@ -1384,8 +1384,8 @@ def : DSPSelectCCPatInv<PseudoCMPU_LE_QB, PseudoPICK_QB, v4i8, SETUGT>; // Extr patterns. 
class EXTR_W_TY1_R2_Pat<SDPatternOperator OpNode, Instruction Instr> : - DSPPat<(i32 (OpNode CPURegs:$rs, ACRegsDSP:$ac)), - (Instr ACRegsDSP:$ac, CPURegs:$rs)>; + DSPPat<(i32 (OpNode GPR32:$rs, ACRegsDSP:$ac)), + (Instr ACRegsDSP:$ac, GPR32:$rs)>; class EXTR_W_TY1_R1_Pat<SDPatternOperator OpNode, Instruction Instr> : DSPPat<(i32 (OpNode immZExt5:$shift, ACRegsDSP:$ac)), diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp index 928a43d..545a38d 100644 --- a/lib/Target/Mips/MipsDelaySlotFiller.cpp +++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -437,7 +437,7 @@ bool MemDefsUses::hasHazard_(const MachineInstr &MI) { // Check underlying object list. if (getUnderlyingObjects(MI, Objs)) { - for (SmallVector<const Value *, 4>::const_iterator I = Objs.begin(); + for (SmallVectorImpl<const Value *>::const_iterator I = Objs.begin(); I != Objs.end(); ++I) HasHazard |= updateDefsUses(*I, MI.mayStore()); @@ -473,7 +473,7 @@ getUnderlyingObjects(const MachineInstr &MI, SmallVector<Value *, 4> Objs; GetUnderlyingObjects(const_cast<Value *>(V), Objs); - for (SmallVector<Value*, 4>::iterator I = Objs.begin(), E = Objs.end(); + for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), E = Objs.end(); I != E; ++I) { if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(*I)) { if (PSV->isAliased(MFI)) diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index a1de174..0002a5f 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -57,7 +57,8 @@ bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { /// GOT address into a register. SDNode *MipsDAGToDAGISel::getGlobalBaseReg() { unsigned GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg(); - return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode(); + return CurDAG->getRegister(GlobalBaseReg, + getTargetLowering()->getPointerTy()).getNode(); } /// ComplexPattern used on MipsInstrInfo diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 6351073..a62e84f 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -346,11 +346,6 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::FNEG, MVT::f64, Expand); } - setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); - setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); setOperationAction(ISD::VAARG, MVT::Other, Expand); @@ -390,6 +385,8 @@ MipsTargetLowering(MipsTargetMachine &TM) setTruncStoreAction(MVT::i64, MVT::i32, Custom); } + setOperationAction(ISD::TRAP, MVT::Other, Legal); + setTargetDAGCombine(ISD::SDIVREM); setTargetDAGCombine(ISD::UDIVREM); setTargetDAGCombine(ISD::SELECT); @@ -524,9 +521,10 @@ static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, SDLoc DL) { ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2)); bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue()); + SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32); return DAG.getNode((invert ? 
MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL, - True.getValueType(), True, False, Cond); + True.getValueType(), True, FCC0, False, Cond); } static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG, @@ -1084,9 +1082,9 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI, BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper) .addReg(Mips::ZERO).addImm(MaskImm); BuildMI(BB, DL, TII->get(Mips::SLLV), Mask) - .addReg(ShiftAmt).addReg(MaskUpper); + .addReg(MaskUpper).addReg(ShiftAmt); BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask); - BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(ShiftAmt).addReg(Incr); + BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt); // atomic.load.binop // loopMBB: @@ -1147,7 +1145,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI, BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal1) .addReg(OldVal).addReg(Mask); BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes) - .addReg(ShiftAmt).addReg(MaskedOldVal1); + .addReg(MaskedOldVal1).addReg(ShiftAmt); BuildMI(BB, DL, TII->get(Mips::SLL), SllRes) .addReg(SrlRes).addImm(ShiftImm); BuildMI(BB, DL, TII->get(Mips::SRA), Dest) @@ -1334,16 +1332,16 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI, BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper) .addReg(Mips::ZERO).addImm(MaskImm); BuildMI(BB, DL, TII->get(Mips::SLLV), Mask) - .addReg(ShiftAmt).addReg(MaskUpper); + .addReg(MaskUpper).addReg(ShiftAmt); BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask); BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal) .addReg(CmpVal).addImm(MaskImm); BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal) - .addReg(ShiftAmt).addReg(MaskedCmpVal); + .addReg(MaskedCmpVal).addReg(ShiftAmt); BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal) .addReg(NewVal).addImm(MaskImm); BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal) - .addReg(ShiftAmt).addReg(MaskedNewVal); + .addReg(MaskedNewVal).addReg(ShiftAmt); // loop1MBB: // ll oldval,0(alignedaddr) @@ -1379,7 +1377,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI, int64_t ShiftImm = (Size == 1) ? 24 : 16; BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes) - .addReg(ShiftAmt).addReg(MaskedOldVal0); + .addReg(MaskedOldVal0).addReg(ShiftAmt); BuildMI(BB, DL, TII->get(Mips::SLL), SllRes) .addReg(SrlRes).addImm(ShiftImm); BuildMI(BB, DL, TII->get(Mips::SRA), Dest) @@ -1443,8 +1441,9 @@ lowerBRCOND(SDValue Op, SelectionDAG &DAG) const (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue(); unsigned Opc = invertFPCondCodeUser(CC) ?
Mips::BRANCH_F : Mips::BRANCH_T; SDValue BrCode = DAG.getConstant(Opc, MVT::i32); + SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32); return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode, - Dest, CondRes); + FCC0, Dest, CondRes); } SDValue MipsTargetLowering:: @@ -2328,9 +2327,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc DL = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &IsTailCall = CLI.IsTailCall; @@ -2620,9 +2619,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, if (RegVT == MVT::i32) RC = Subtarget->inMips16Mode()? &Mips::CPU16RegsRegClass : - &Mips::CPURegsRegClass; + &Mips::GPR32RegClass; else if (RegVT == MVT::i64) - RC = &Mips::CPU64RegsRegClass; + RC = &Mips::GPR64RegClass; else if (RegVT == MVT::f32) RC = &Mips::FGR32RegClass; else if (RegVT == MVT::f64) @@ -2885,7 +2884,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight( /// to an LLVM register class, return a register of 0 and the register class /// pointer. std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering:: -getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const +getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { @@ -2895,12 +2894,12 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) { if (Subtarget->inMips16Mode()) return std::make_pair(0U, &Mips::CPU16RegsRegClass); - return std::make_pair(0U, &Mips::CPURegsRegClass); + return std::make_pair(0U, &Mips::GPR32RegClass); } if (VT == MVT::i64 && !HasMips64) - return std::make_pair(0U, &Mips::CPURegsRegClass); + return std::make_pair(0U, &Mips::GPR32RegClass); if (VT == MVT::i64 && HasMips64) - return std::make_pair(0U, &Mips::CPU64RegsRegClass); + return std::make_pair(0U, &Mips::GPR64RegClass); // This will generate an error message return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0)); case 'f': @@ -2914,9 +2913,9 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const break; case 'c': // register suitable for indirect jump if (VT == MVT::i32) - return std::make_pair((unsigned)Mips::T9, &Mips::CPURegsRegClass); + return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass); assert(VT == MVT::i64 && "Unexpected type."); - return std::make_pair((unsigned)Mips::T9_64, &Mips::CPU64RegsRegClass); + return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass); case 'l': // register suitable for indirect jump if (VT == MVT::i32) return std::make_pair((unsigned)Mips::LO, &Mips::LORegsRegClass); @@ -3388,7 +3387,7 @@ copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains, void MipsTargetLowering:: passByValArg(SDValue Chain, SDLoc DL, std::deque< std::pair<unsigned, SDValue> > &RegsToPass, - SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr, + SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr, MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, const MipsCC &CC, const ByValArgInfo &ByVal, const ISD::ArgFlagsTy &Flags, bool isLittle) const { diff 
--git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index fe043ae..123a2a6 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -282,7 +282,7 @@ namespace llvm { /// Return pointer to array of integer argument registers. const uint16_t *intArgRegs() const; - typedef SmallVector<ByValArgInfo, 2>::const_iterator byval_iterator; + typedef SmallVectorImpl<ByValArgInfo>::const_iterator byval_iterator; byval_iterator byval_begin() const { return ByValArgs.begin(); } byval_iterator byval_end() const { return ByValArgs.end(); } @@ -386,7 +386,7 @@ namespace llvm { /// passByValArg - Pass a byval argument in registers or on stack. void passByValArg(SDValue Chain, SDLoc DL, std::deque< std::pair<unsigned, SDValue> > &RegsToPass, - SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr, + SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr, MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, const MipsCC &CC, const ByValArgInfo &ByVal, const ISD::ArgFlagsTy &Flags, bool isLittle) const; @@ -435,7 +435,7 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; + MVT VT) const; /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. If hasMemory is diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td index e2acf28..b992e77 100644 --- a/lib/Target/Mips/MipsInstrFPU.td +++ b/lib/Target/Mips/MipsInstrFPU.td @@ -24,12 +24,13 @@ //===----------------------------------------------------------------------===// // Floating Point Compare and Branch -def SDT_MipsFPBrcond : SDTypeProfile<0, 2, [SDTCisInt<0>, - SDTCisVT<1, OtherVT>]>; +def SDT_MipsFPBrcond : SDTypeProfile<0, 3, [SDTCisInt<0>, + SDTCisVT<1, i32>, + SDTCisVT<2, OtherVT>]>; def SDT_MipsFPCmp : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>, SDTCisFP<1>, SDTCisVT<2, i32>]>; -def SDT_MipsCMovFP : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, - SDTCisSameAs<1, 2>]>; +def SDT_MipsCMovFP : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisVT<2, i32>, + SDTCisSameAs<1, 3>]>; def SDT_MipsTruncIntFP : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>; def SDT_MipsBuildPairF64 : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>, @@ -88,7 +89,7 @@ def fpimm0neg : PatLeaf<(fpimm), [{ // Only S32 and D32 are supported right now. 
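// ---------------------------------------------------------------------------
// The hunks below (and throughout this patch) switch the FPU instruction
// classes from RegisterClass to RegisterOperand parameters. A minimal,
// self-contained sketch of what the wrapper buys, using hypothetical
// stand-in names rather than LLVM's; it runs with a bare
// `llvm-tblgen regoperand_sketch.td`.

class SketchRegClass<string name> {
  string Name = name;
}

// A RegisterOperand-style wrapper pairs a register class with operand-level
// hooks (print/parse/decode methods) instead of fixing them per instruction.
class SketchRegOperand<SketchRegClass rc, string printer> {
  SketchRegClass RegClass = rc;
  string PrintMethod = printer;
}

def GPR32Sketch     : SketchRegClass<"GPR32">;
def GPR32OpndSketch : SketchRegOperand<GPR32Sketch, "printGPR32Operand">;

// Instruction descriptions parameterized over the wrapper inherit its hooks,
// so retargeting the printer/parser means touching one def, not every user.
class SketchInst<string asm, SketchRegOperand RO> {
  string AsmString      = asm;
  string OperandPrinter = RO.PrintMethod;
}

def ADDSketch : SketchInst<"add\t$rd, $rs, $rt", GPR32OpndSketch>;
// ---------------------------------------------------------------------------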
//===----------------------------------------------------------------------===// -class ADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, bit IsComm, +class ADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, bit IsComm, SDPatternOperator OpNode= null_frag> : InstSE<(outs RC:$fd), (ins RC:$fs, RC:$ft), !strconcat(opstr, "\t$fd, $fs, $ft"), @@ -98,15 +99,15 @@ class ADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, bit IsComm, multiclass ADDS_M<string opstr, InstrItinClass Itin, bit IsComm, SDPatternOperator OpNode = null_frag> { - def _D32 : ADDS_FT<opstr, AFGR64, Itin, IsComm, OpNode>, + def _D32 : ADDS_FT<opstr, AFGR64RegsOpnd, Itin, IsComm, OpNode>, Requires<[NotFP64bit, HasStdEnc]>; - def _D64 : ADDS_FT<opstr, FGR64, Itin, IsComm, OpNode>, + def _D64 : ADDS_FT<opstr, FGR64RegsOpnd, Itin, IsComm, OpNode>, Requires<[IsFP64bit, HasStdEnc]> { string DecoderNamespace = "Mips64"; } } -class ABSS_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC, +class ABSS_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC, InstrItinClass Itin, SDPatternOperator OpNode= null_frag> : InstSE<(outs DstRC:$fd), (ins SrcRC:$fs), !strconcat(opstr, "\t$fd, $fs"), [(set DstRC:$fd, (OpNode SrcRC:$fs))], Itin, FrmFR>, @@ -114,44 +115,34 @@ class ABSS_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC, multiclass ABSS_M<string opstr, InstrItinClass Itin, SDPatternOperator OpNode= null_frag> { - def _D32 : ABSS_FT<opstr, AFGR64, AFGR64, Itin, OpNode>, + def _D32 : ABSS_FT<opstr, AFGR64RegsOpnd, AFGR64RegsOpnd, Itin, OpNode>, Requires<[NotFP64bit, HasStdEnc]>; - def _D64 : ABSS_FT<opstr, FGR64, FGR64, Itin, OpNode>, + def _D64 : ABSS_FT<opstr, FGR64RegsOpnd, FGR64RegsOpnd, Itin, OpNode>, Requires<[IsFP64bit, HasStdEnc]> { string DecoderNamespace = "Mips64"; } } multiclass ROUND_M<string opstr, InstrItinClass Itin> { - def _D32 : ABSS_FT<opstr, FGR32, AFGR64, Itin>, + def _D32 : ABSS_FT<opstr, FGR32RegsOpnd, AFGR64RegsOpnd, Itin>, Requires<[NotFP64bit, HasStdEnc]>; - def _D64 : ABSS_FT<opstr, FGR32, FGR64, Itin>, + def _D64 : ABSS_FT<opstr, FGR32RegsOpnd, FGR64RegsOpnd, Itin>, Requires<[IsFP64bit, HasStdEnc]> { let DecoderNamespace = "Mips64"; } } -class MFC1_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC, +class MFC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC, InstrItinClass Itin, SDPatternOperator OpNode= null_frag> : InstSE<(outs DstRC:$rt), (ins SrcRC:$fs), !strconcat(opstr, "\t$rt, $fs"), [(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR>; -class MTC1_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC, +class MTC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC, InstrItinClass Itin, SDPatternOperator OpNode= null_frag> : InstSE<(outs DstRC:$fs), (ins SrcRC:$rt), !strconcat(opstr, "\t$rt, $fs"), [(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>; -class MFC1_FT_CCR<string opstr, RegisterClass DstRC, RegisterOperand SrcRC, - InstrItinClass Itin, SDPatternOperator OpNode= null_frag> : - InstSE<(outs DstRC:$rt), (ins SrcRC:$fs), !strconcat(opstr, "\t$rt, $fs"), - [(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR>; - -class MTC1_FT_CCR<string opstr, RegisterOperand DstRC, RegisterClass SrcRC, - InstrItinClass Itin, SDPatternOperator OpNode= null_frag> : - InstSE<(outs DstRC:$fs), (ins SrcRC:$rt), !strconcat(opstr, "\t$rt, $fs"), - [(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>; - -class LW_FT<string opstr, RegisterClass RC, InstrItinClass Itin, +class LW_FT<string opstr, RegisterOperand RC, InstrItinClass 
Itin, Operand MemOpnd, SDPatternOperator OpNode= null_frag> : InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), [(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI> { @@ -159,7 +150,7 @@ class LW_FT<string opstr, RegisterClass RC, InstrItinClass Itin, let mayLoad = 1; } -class SW_FT<string opstr, RegisterClass RC, InstrItinClass Itin, +class SW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, Operand MemOpnd, SDPatternOperator OpNode= null_frag> : InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), [(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI> { @@ -167,20 +158,20 @@ class SW_FT<string opstr, RegisterClass RC, InstrItinClass Itin, let mayStore = 1; } -class MADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, +class MADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, SDPatternOperator OpNode = null_frag> : InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft), !strconcat(opstr, "\t$fd, $fr, $fs, $ft"), [(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))], Itin, FrmFR>; -class NMADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, +class NMADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, SDPatternOperator OpNode = null_frag> : InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft), !strconcat(opstr, "\t$fd, $fr, $fs, $ft"), [(set RC:$fd, (fsub fpimm0, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr)))], Itin, FrmFR>; -class LWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC, +class LWXC1_FT<string opstr, RegisterOperand DRC, RegisterOperand PRC, InstrItinClass Itin, SDPatternOperator OpNode = null_frag> : InstSE<(outs DRC:$fd), (ins PRC:$base, PRC:$index), !strconcat(opstr, "\t$fd, ${index}(${base})"), @@ -188,7 +179,7 @@ class LWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC, let AddedComplexity = 20; } -class SWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC, +class SWXC1_FT<string opstr, RegisterOperand DRC, RegisterOperand PRC, InstrItinClass Itin, SDPatternOperator OpNode = null_frag> : InstSE<(outs), (ins DRC:$fs, PRC:$base, PRC:$index), !strconcat(opstr, "\t$fs, ${index}(${base})"), @@ -198,13 +189,13 @@ class SWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC, class BC1F_FT<string opstr, InstrItinClass Itin, SDPatternOperator Op = null_frag> : - InstSE<(outs), (ins brtarget:$offset), !strconcat(opstr, "\t$offset"), - [(MipsFPBrcond Op, bb:$offset)], Itin, FrmFI> { + InstSE<(outs), (ins FCCRegsOpnd:$fcc, brtarget:$offset), + !strconcat(opstr, "\t$fcc, $offset"), + [(MipsFPBrcond Op, FCCRegsOpnd:$fcc, bb:$offset)], Itin, FrmFI> { let isBranch = 1; let isTerminator = 1; let hasDelaySlot = 1; let Defs = [AT]; - let Uses = [FCR31]; } class CEQS_FT<string typestr, RegisterClass RC, InstrItinClass Itin, @@ -212,17 +203,53 @@ class CEQS_FT<string typestr, RegisterClass RC, InstrItinClass Itin, InstSE<(outs), (ins RC:$fs, RC:$ft, condcode:$cond), !strconcat("c.$cond.", typestr, "\t$fs, $ft"), [(OpNode RC:$fs, RC:$ft, imm:$cond)], Itin, FrmFR> { - let Defs = [FCR31]; -} + let Defs = [FCC0]; + let isCodeGenOnly = 1; +} + +class C_COND_FT<string CondStr, string Typestr, RegisterOperand RC> : + InstSE<(outs), (ins RC:$fs, RC:$ft), + !strconcat("c.", CondStr, ".", Typestr, "\t$fs, $ft"), [], IIFcmp, + FrmFR>; + +multiclass C_COND_M<string TypeStr, RegisterOperand RC, bits<5> fmt> { + def C_F_#NAME : C_COND_FT<"f", TypeStr, RC>, C_COND_FM<fmt, 0>; + def C_UN_#NAME : C_COND_FT<"un", TypeStr, RC>, C_COND_FM<fmt, 1>; + def C_EQ_#NAME : C_COND_FT<"eq", TypeStr, 
RC>, C_COND_FM<fmt, 2>; + def C_UEQ_#NAME : C_COND_FT<"ueq", TypeStr, RC>, C_COND_FM<fmt, 3>; + def C_OLT_#NAME : C_COND_FT<"olt", TypeStr, RC>, C_COND_FM<fmt, 4>; + def C_ULT_#NAME : C_COND_FT<"ult", TypeStr, RC>, C_COND_FM<fmt, 5>; + def C_OLE_#NAME : C_COND_FT<"ole", TypeStr, RC>, C_COND_FM<fmt, 6>; + def C_ULE_#NAME : C_COND_FT<"ule", TypeStr, RC>, C_COND_FM<fmt, 7>; + def C_SF_#NAME : C_COND_FT<"sf", TypeStr, RC>, C_COND_FM<fmt, 8>; + def C_NGLE_#NAME : C_COND_FT<"ngle", TypeStr, RC>, C_COND_FM<fmt, 9>; + def C_SEQ_#NAME : C_COND_FT<"seq", TypeStr, RC>, C_COND_FM<fmt, 10>; + def C_NGL_#NAME : C_COND_FT<"ngl", TypeStr, RC>, C_COND_FM<fmt, 11>; + def C_LT_#NAME : C_COND_FT<"lt", TypeStr, RC>, C_COND_FM<fmt, 12>; + def C_NGE_#NAME : C_COND_FT<"nge", TypeStr, RC>, C_COND_FM<fmt, 13>; + def C_LE_#NAME : C_COND_FT<"le", TypeStr, RC>, C_COND_FM<fmt, 14>; + def C_NGT_#NAME : C_COND_FT<"ngt", TypeStr, RC>, C_COND_FM<fmt, 15>; +} + +defm S : C_COND_M<"s", FGR32RegsOpnd, 16>; +defm D32 : C_COND_M<"d", AFGR64RegsOpnd, 17>, + Requires<[NotFP64bit, HasStdEnc]>; +let DecoderNamespace = "Mips64" in +defm D64 : C_COND_M<"d", FGR64RegsOpnd, 17>, Requires<[IsFP64bit, HasStdEnc]>; //===----------------------------------------------------------------------===// // Floating Point Instructions //===----------------------------------------------------------------------===// -def ROUND_W_S : ABSS_FT<"round.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xc, 16>; -def TRUNC_W_S : ABSS_FT<"trunc.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xd, 16>; -def CEIL_W_S : ABSS_FT<"ceil.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xe, 16>; -def FLOOR_W_S : ABSS_FT<"floor.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xf, 16>; -def CVT_W_S : ABSS_FT<"cvt.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0x24, 16>; +def ROUND_W_S : ABSS_FT<"round.w.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0xc, 16>; +def TRUNC_W_S : ABSS_FT<"trunc.w.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0xd, 16>; +def CEIL_W_S : ABSS_FT<"ceil.w.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0xe, 16>; +def FLOOR_W_S : ABSS_FT<"floor.w.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0xf, 16>; +def CVT_W_S : ABSS_FT<"cvt.w.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x24, 16>; defm ROUND_W : ROUND_M<"round.w.d", IIFcvt>, ABSS_FM<0xc, 17>; defm TRUNC_W : ROUND_M<"trunc.w.d", IIFcvt>, ABSS_FM<0xd, 17>; @@ -231,54 +258,72 @@ defm FLOOR_W : ROUND_M<"floor.w.d", IIFcvt>, ABSS_FM<0xf, 17>; defm CVT_W : ROUND_M<"cvt.w.d", IIFcvt>, ABSS_FM<0x24, 17>; let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in { - def ROUND_L_S : ABSS_FT<"round.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x8, 16>; - def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64, FGR64, IIFcvt>, + def ROUND_L_S : ABSS_FT<"round.l.s", FGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x8, 16>; + def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64RegsOpnd, FGR64RegsOpnd, IIFcvt>, ABSS_FM<0x8, 17>; - def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x9, 16>; - def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64, FGR64, IIFcvt>, + def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x9, 16>; + def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64RegsOpnd, FGR64RegsOpnd, IIFcvt>, ABSS_FM<0x9, 17>; - def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0xa, 16>; - def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64, FGR64, IIFcvt>, ABSS_FM<0xa, 17>; - def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0xb, 16>; - def FLOOR_L_D64 : ABSS_FT<"floor.l.d", 
FGR64, FGR64, IIFcvt>, + def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0xa, 16>; + def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64RegsOpnd, FGR64RegsOpnd, IIFcvt>, + ABSS_FM<0xa, 17>; + def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0xb, 16>; + def FLOOR_L_D64 : ABSS_FT<"floor.l.d", FGR64RegsOpnd, FGR64RegsOpnd, IIFcvt>, ABSS_FM<0xb, 17>; } -def CVT_S_W : ABSS_FT<"cvt.s.w", FGR32, FGR32, IIFcvt>, ABSS_FM<0x20, 20>; -def CVT_L_S : ABSS_FT<"cvt.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x25, 16>; -def CVT_L_D64: ABSS_FT<"cvt.l.d", FGR64, FGR64, IIFcvt>, ABSS_FM<0x25, 17>; +def CVT_S_W : ABSS_FT<"cvt.s.w", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x20, 20>; +def CVT_L_S : ABSS_FT<"cvt.l.s", FGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x25, 16>; +def CVT_L_D64: ABSS_FT<"cvt.l.d", FGR64RegsOpnd, FGR64RegsOpnd, IIFcvt>, + ABSS_FM<0x25, 17>; let Predicates = [NotFP64bit, HasStdEnc] in { - def CVT_S_D32 : ABSS_FT<"cvt.s.d", FGR32, AFGR64, IIFcvt>, ABSS_FM<0x20, 17>; - def CVT_D32_W : ABSS_FT<"cvt.d.w", AFGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 20>; - def CVT_D32_S : ABSS_FT<"cvt.d.s", AFGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 16>; + def CVT_S_D32 : ABSS_FT<"cvt.s.d", FGR32RegsOpnd, AFGR64RegsOpnd, IIFcvt>, + ABSS_FM<0x20, 17>; + def CVT_D32_W : ABSS_FT<"cvt.d.w", AFGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x21, 20>; + def CVT_D32_S : ABSS_FT<"cvt.d.s", AFGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x21, 16>; } let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in { - def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32, FGR64, IIFcvt>, ABSS_FM<0x20, 17>; - def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32, FGR64, IIFcvt>, ABSS_FM<0x20, 21>; - def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 20>; - def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 16>; - def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64, FGR64, IIFcvt>, ABSS_FM<0x21, 21>; + def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32RegsOpnd, FGR64RegsOpnd, IIFcvt>, + ABSS_FM<0x20, 17>; + def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32RegsOpnd, FGR64RegsOpnd, IIFcvt>, + ABSS_FM<0x20, 21>; + def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x21, 20>; + def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64RegsOpnd, FGR32RegsOpnd, IIFcvt>, + ABSS_FM<0x21, 16>; + def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64RegsOpnd, FGR64RegsOpnd, IIFcvt>, + ABSS_FM<0x21, 21>; } let isPseudo = 1, isCodeGenOnly = 1 in { - def PseudoCVT_S_W : ABSS_FT<"", FGR32, CPURegs, IIFcvt>; - def PseudoCVT_D32_W : ABSS_FT<"", AFGR64, CPURegs, IIFcvt>; - def PseudoCVT_S_L : ABSS_FT<"", FGR64, CPU64Regs, IIFcvt>; - def PseudoCVT_D64_W : ABSS_FT<"", FGR64, CPURegs, IIFcvt>; - def PseudoCVT_D64_L : ABSS_FT<"", FGR64, CPU64Regs, IIFcvt>; + def PseudoCVT_S_W : ABSS_FT<"", FGR32RegsOpnd, GPR32Opnd, IIFcvt>; + def PseudoCVT_D32_W : ABSS_FT<"", AFGR64RegsOpnd, GPR32Opnd, IIFcvt>; + def PseudoCVT_S_L : ABSS_FT<"", FGR64RegsOpnd, GPR64Opnd, IIFcvt>; + def PseudoCVT_D64_W : ABSS_FT<"", FGR64RegsOpnd, GPR32Opnd, IIFcvt>; + def PseudoCVT_D64_L : ABSS_FT<"", FGR64RegsOpnd, GPR64Opnd, IIFcvt>; } let Predicates = [NoNaNsFPMath, HasStdEnc] in { - def FABS_S : ABSS_FT<"abs.s", FGR32, FGR32, IIFcvt, fabs>, ABSS_FM<0x5, 16>; - def FNEG_S : ABSS_FT<"neg.s", FGR32, FGR32, IIFcvt, fneg>, ABSS_FM<0x7, 16>; + def FABS_S : ABSS_FT<"abs.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt, fabs>, + ABSS_FM<0x5, 16>; + def FNEG_S : ABSS_FT<"neg.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFcvt, fneg>, + 
ABSS_FM<0x7, 16>; defm FABS : ABSS_M<"abs.d", IIFcvt, fabs>, ABSS_FM<0x5, 17>; defm FNEG : ABSS_M<"neg.d", IIFcvt, fneg>, ABSS_FM<0x7, 17>; } -def FSQRT_S : ABSS_FT<"sqrt.s", FGR32, FGR32, IIFsqrtSingle, fsqrt>, - ABSS_FM<0x4, 16>; +def FSQRT_S : ABSS_FT<"sqrt.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFsqrtSingle, + fsqrt>, ABSS_FM<0x4, 16>; defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>; // The odd-numbered registers are only referenced when doing loads, @@ -287,134 +332,166 @@ defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>; // regardless of register aliasing. /// Move Control Registers From/To CPU Registers -def CFC1 : MFC1_FT_CCR<"cfc1", CPURegs, CCROpnd, IIFmove>, MFC1_FM<2>; -def CTC1 : MTC1_FT_CCR<"ctc1", CCROpnd, CPURegs, IIFmove>, MFC1_FM<6>; -def MFC1 : MFC1_FT<"mfc1", CPURegs, FGR32, IIFmove, bitconvert>, MFC1_FM<0>; -def MTC1 : MTC1_FT<"mtc1", FGR32, CPURegs, IIFmove, bitconvert>, MFC1_FM<4>; -def DMFC1 : MFC1_FT<"dmfc1", CPU64Regs, FGR64, IIFmove, bitconvert>, MFC1_FM<1>; -def DMTC1 : MTC1_FT<"dmtc1", FGR64, CPU64Regs, IIFmove, bitconvert>, MFC1_FM<5>; - -def FMOV_S : ABSS_FT<"mov.s", FGR32, FGR32, IIFmove>, ABSS_FM<0x6, 16>; -def FMOV_D32 : ABSS_FT<"mov.d", AFGR64, AFGR64, IIFmove>, ABSS_FM<0x6, 17>, - Requires<[NotFP64bit, HasStdEnc]>; -def FMOV_D64 : ABSS_FT<"mov.d", FGR64, FGR64, IIFmove>, ABSS_FM<0x6, 17>, - Requires<[IsFP64bit, HasStdEnc]> { - let DecoderNamespace = "Mips64"; +def CFC1 : MFC1_FT<"cfc1", GPR32Opnd, CCROpnd, IIFmove>, MFC1_FM<2>; +def CTC1 : MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, IIFmove>, MFC1_FM<6>; +def MFC1 : MFC1_FT<"mfc1", GPR32Opnd, FGR32RegsOpnd, IIFmoveC1, bitconvert>, + MFC1_FM<0>; +def MTC1 : MTC1_FT<"mtc1", FGR32RegsOpnd, GPR32Opnd, IIFmoveC1, bitconvert>, + MFC1_FM<4>; +def DMFC1 : MFC1_FT<"dmfc1", GPR64Opnd, FGR64RegsOpnd, IIFmoveC1, + bitconvert>, MFC1_FM<1>; +def DMTC1 : MTC1_FT<"dmtc1", FGR64RegsOpnd, GPR64Opnd, IIFmoveC1, + bitconvert>, MFC1_FM<5>; + +def FMOV_S : ABSS_FT<"mov.s", FGR32RegsOpnd, FGR32RegsOpnd, IIFmove>, + ABSS_FM<0x6, 16>; +def FMOV_D32 : ABSS_FT<"mov.d", AFGR64RegsOpnd, AFGR64RegsOpnd, IIFmove>, + ABSS_FM<0x6, 17>, Requires<[NotFP64bit, HasStdEnc]>; +def FMOV_D64 : ABSS_FT<"mov.d", FGR64RegsOpnd, FGR64RegsOpnd, IIFmove>, + ABSS_FM<0x6, 17>, Requires<[IsFP64bit, HasStdEnc]> { + let DecoderNamespace = "Mips64"; } /// Floating Point Memory Instructions let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in { - def LWC1_P8 : LW_FT<"lwc1", FGR32, IILoad, mem64, load>, LW_FM<0x31>; - def SWC1_P8 : SW_FT<"swc1", FGR32, IIStore, mem64, store>, LW_FM<0x39>; - def LDC164_P8 : LW_FT<"ldc1", FGR64, IILoad, mem64, load>, LW_FM<0x35> { + def LWC1_P8 : LW_FT<"lwc1", FGR32RegsOpnd, IIFLoad, mem64, load>, + LW_FM<0x31>; + def SWC1_P8 : SW_FT<"swc1", FGR32RegsOpnd, IIFStore, mem64, store>, + LW_FM<0x39>; + def LDC164_P8 : LW_FT<"ldc1", FGR64RegsOpnd, IIFLoad, mem64, load>, + LW_FM<0x35> { let isCodeGenOnly =1; } - def SDC164_P8 : SW_FT<"sdc1", FGR64, IIStore, mem64, store>, LW_FM<0x3d> { + def SDC164_P8 : SW_FT<"sdc1", FGR64RegsOpnd, IIFStore, mem64, store>, + LW_FM<0x3d> { let isCodeGenOnly =1; } } let Predicates = [NotN64, HasStdEnc] in { - def LWC1 : LW_FT<"lwc1", FGR32, IILoad, mem, load>, LW_FM<0x31>; - def SWC1 : SW_FT<"swc1", FGR32, IIStore, mem, store>, LW_FM<0x39>; + def LWC1 : LW_FT<"lwc1", FGR32RegsOpnd, IIFLoad, mem, load>, LW_FM<0x31>; + def SWC1 : SW_FT<"swc1", FGR32RegsOpnd, IIFStore, mem, store>, LW_FM<0x39>; } let Predicates = [NotN64, HasMips64, HasStdEnc], 
DecoderNamespace = "Mips64" in { - def LDC164 : LW_FT<"ldc1", FGR64, IILoad, mem, load>, LW_FM<0x35>; - def SDC164 : SW_FT<"sdc1", FGR64, IIStore, mem, store>, LW_FM<0x3d>; + def LDC164 : LW_FT<"ldc1", FGR64RegsOpnd, IIFLoad, mem, load>, LW_FM<0x35>; + def SDC164 : SW_FT<"sdc1", FGR64RegsOpnd, IIFStore, mem, store>, LW_FM<0x3d>; } let Predicates = [NotN64, NotMips64, HasStdEnc] in { let isPseudo = 1, isCodeGenOnly = 1 in { - def PseudoLDC1 : LW_FT<"", AFGR64, IILoad, mem, load>; - def PseudoSDC1 : SW_FT<"", AFGR64, IIStore, mem, store>; + def PseudoLDC1 : LW_FT<"", AFGR64RegsOpnd, IIFLoad, mem, load>; + def PseudoSDC1 : SW_FT<"", AFGR64RegsOpnd, IIFStore, mem, store>; } - def LDC1 : LW_FT<"ldc1", AFGR64, IILoad, mem>, LW_FM<0x35>; - def SDC1 : SW_FT<"sdc1", AFGR64, IIStore, mem>, LW_FM<0x3d>; + def LDC1 : LW_FT<"ldc1", AFGR64RegsOpnd, IIFLoad, mem>, LW_FM<0x35>; + def SDC1 : SW_FT<"sdc1", AFGR64RegsOpnd, IIFStore, mem>, LW_FM<0x3d>; } // Indexed loads and stores. let Predicates = [HasFPIdx, HasStdEnc] in { - def LWXC1 : LWXC1_FT<"lwxc1", FGR32, CPURegs, IILoad, load>, LWXC1_FM<0>; - def SWXC1 : SWXC1_FT<"swxc1", FGR32, CPURegs, IIStore, store>, SWXC1_FM<8>; + def LWXC1 : LWXC1_FT<"lwxc1", FGR32RegsOpnd, GPR32Opnd, IIFLoad, load>, + LWXC1_FM<0>; + def SWXC1 : SWXC1_FT<"swxc1", FGR32RegsOpnd, GPR32Opnd, IIFStore, store>, + SWXC1_FM<8>; } let Predicates = [HasMips32r2, NotMips64, HasStdEnc] in { - def LDXC1 : LWXC1_FT<"ldxc1", AFGR64, CPURegs, IILoad, load>, LWXC1_FM<1>; - def SDXC1 : SWXC1_FT<"sdxc1", AFGR64, CPURegs, IIStore, store>, SWXC1_FM<9>; + def LDXC1 : LWXC1_FT<"ldxc1", AFGR64RegsOpnd, GPR32Opnd, IIFLoad, load>, + LWXC1_FM<1>; + def SDXC1 : SWXC1_FT<"sdxc1", AFGR64RegsOpnd, GPR32Opnd, IIFStore, store>, + SWXC1_FM<9>; } let Predicates = [HasMips64, NotN64, HasStdEnc], DecoderNamespace="Mips64" in { - def LDXC164 : LWXC1_FT<"ldxc1", FGR64, CPURegs, IILoad, load>, LWXC1_FM<1>; - def SDXC164 : SWXC1_FT<"sdxc1", FGR64, CPURegs, IIStore, store>, SWXC1_FM<9>; + def LDXC164 : LWXC1_FT<"ldxc1", FGR64RegsOpnd, GPR32Opnd, IIFLoad, load>, + LWXC1_FM<1>; + def SDXC164 : SWXC1_FT<"sdxc1", FGR64RegsOpnd, GPR32Opnd, IIFStore, store>, + SWXC1_FM<9>; } // n64 let Predicates = [IsN64, HasStdEnc], isCodeGenOnly=1 in { - def LWXC1_P8 : LWXC1_FT<"lwxc1", FGR32, CPU64Regs, IILoad, load>, LWXC1_FM<0>; - def LDXC164_P8 : LWXC1_FT<"ldxc1", FGR64, CPU64Regs, IILoad, load>, - LWXC1_FM<1>; - def SWXC1_P8 : SWXC1_FT<"swxc1", FGR32, CPU64Regs, IIStore, store>, - SWXC1_FM<8>; - def SDXC164_P8 : SWXC1_FT<"sdxc1", FGR64, CPU64Regs, IIStore, store>, - SWXC1_FM<9>; + def LWXC1_P8 : LWXC1_FT<"lwxc1", FGR32RegsOpnd, GPR64Opnd, IIFLoad, load>, + LWXC1_FM<0>; + def LDXC164_P8 : LWXC1_FT<"ldxc1", FGR64RegsOpnd, GPR64Opnd, IIFLoad, + load>, LWXC1_FM<1>; + def SWXC1_P8 : SWXC1_FT<"swxc1", FGR32RegsOpnd, GPR64Opnd, IIFStore, + store>, SWXC1_FM<8>; + def SDXC164_P8 : SWXC1_FT<"sdxc1", FGR64RegsOpnd, GPR64Opnd, IIFStore, + store>, SWXC1_FM<9>; } // Load/store doubleword indexed unaligned. 
let Predicates = [NotMips64, HasStdEnc] in { - def LUXC1 : LWXC1_FT<"luxc1", AFGR64, CPURegs, IILoad>, LWXC1_FM<0x5>; - def SUXC1 : SWXC1_FT<"suxc1", AFGR64, CPURegs, IIStore>, SWXC1_FM<0xd>; + def LUXC1 : LWXC1_FT<"luxc1", AFGR64RegsOpnd, GPR32Opnd, IIFLoad>, + LWXC1_FM<0x5>; + def SUXC1 : SWXC1_FT<"suxc1", AFGR64RegsOpnd, GPR32Opnd, IIFStore>, + SWXC1_FM<0xd>; } let Predicates = [HasMips64, HasStdEnc], DecoderNamespace="Mips64" in { - def LUXC164 : LWXC1_FT<"luxc1", FGR64, CPURegs, IILoad>, LWXC1_FM<0x5>; - def SUXC164 : SWXC1_FT<"suxc1", FGR64, CPURegs, IIStore>, SWXC1_FM<0xd>; + def LUXC164 : LWXC1_FT<"luxc1", FGR64RegsOpnd, GPR32Opnd, IIFLoad>, + LWXC1_FM<0x5>; + def SUXC164 : SWXC1_FT<"suxc1", FGR64RegsOpnd, GPR32Opnd, IIFStore>, + SWXC1_FM<0xd>; } /// Floating-point Arithmetic -def FADD_S : ADDS_FT<"add.s", FGR32, IIFadd, 1, fadd>, ADDS_FM<0x00, 16>; -defm FADD : ADDS_M<"add.d", IIFadd, 1, fadd>, ADDS_FM<0x00, 17>; -def FDIV_S : ADDS_FT<"div.s", FGR32, IIFdivSingle, 0, fdiv>, ADDS_FM<0x03, 16>; -defm FDIV : ADDS_M<"div.d", IIFdivDouble, 0, fdiv>, ADDS_FM<0x03, 17>; -def FMUL_S : ADDS_FT<"mul.s", FGR32, IIFmulSingle, 1, fmul>, ADDS_FM<0x02, 16>; -defm FMUL : ADDS_M<"mul.d", IIFmulDouble, 1, fmul>, ADDS_FM<0x02, 17>; -def FSUB_S : ADDS_FT<"sub.s", FGR32, IIFadd, 0, fsub>, ADDS_FM<0x01, 16>; -defm FSUB : ADDS_M<"sub.d", IIFadd, 0, fsub>, ADDS_FM<0x01, 17>; +def FADD_S : ADDS_FT<"add.s", FGR32RegsOpnd, IIFadd, 1, fadd>, + ADDS_FM<0x00, 16>; +defm FADD : ADDS_M<"add.d", IIFadd, 1, fadd>, ADDS_FM<0x00, 17>; +def FDIV_S : ADDS_FT<"div.s", FGR32RegsOpnd, IIFdivSingle, 0, fdiv>, + ADDS_FM<0x03, 16>; +defm FDIV : ADDS_M<"div.d", IIFdivDouble, 0, fdiv>, ADDS_FM<0x03, 17>; +def FMUL_S : ADDS_FT<"mul.s", FGR32RegsOpnd, IIFmulSingle, 1, fmul>, + ADDS_FM<0x02, 16>; +defm FMUL : ADDS_M<"mul.d", IIFmulDouble, 1, fmul>, ADDS_FM<0x02, 17>; +def FSUB_S : ADDS_FT<"sub.s", FGR32RegsOpnd, IIFadd, 0, fsub>, + ADDS_FM<0x01, 16>; +defm FSUB : ADDS_M<"sub.d", IIFadd, 0, fsub>, ADDS_FM<0x01, 17>; let Predicates = [HasMips32r2, HasStdEnc] in { - def MADD_S : MADDS_FT<"madd.s", FGR32, IIFmulSingle, fadd>, MADDS_FM<4, 0>; - def MSUB_S : MADDS_FT<"msub.s", FGR32, IIFmulSingle, fsub>, MADDS_FM<5, 0>; + def MADD_S : MADDS_FT<"madd.s", FGR32RegsOpnd, IIFmulSingle, fadd>, + MADDS_FM<4, 0>; + def MSUB_S : MADDS_FT<"msub.s", FGR32RegsOpnd, IIFmulSingle, fsub>, + MADDS_FM<5, 0>; } let Predicates = [HasMips32r2, NoNaNsFPMath, HasStdEnc] in { - def NMADD_S : NMADDS_FT<"nmadd.s", FGR32, IIFmulSingle, fadd>, MADDS_FM<6, 0>; - def NMSUB_S : NMADDS_FT<"nmsub.s", FGR32, IIFmulSingle, fsub>, MADDS_FM<7, 0>; + def NMADD_S : NMADDS_FT<"nmadd.s", FGR32RegsOpnd, IIFmulSingle, fadd>, + MADDS_FM<6, 0>; + def NMSUB_S : NMADDS_FT<"nmsub.s", FGR32RegsOpnd, IIFmulSingle, fsub>, + MADDS_FM<7, 0>; } let Predicates = [HasMips32r2, NotFP64bit, HasStdEnc] in { - def MADD_D32 : MADDS_FT<"madd.d", AFGR64, IIFmulDouble, fadd>, MADDS_FM<4, 1>; - def MSUB_D32 : MADDS_FT<"msub.d", AFGR64, IIFmulDouble, fsub>, MADDS_FM<5, 1>; + def MADD_D32 : MADDS_FT<"madd.d", AFGR64RegsOpnd, IIFmulDouble, fadd>, + MADDS_FM<4, 1>; + def MSUB_D32 : MADDS_FT<"msub.d", AFGR64RegsOpnd, IIFmulDouble, fsub>, + MADDS_FM<5, 1>; } let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath, HasStdEnc] in { - def NMADD_D32 : NMADDS_FT<"nmadd.d", AFGR64, IIFmulDouble, fadd>, + def NMADD_D32 : NMADDS_FT<"nmadd.d", AFGR64RegsOpnd, IIFmulDouble, fadd>, MADDS_FM<6, 1>; - def NMSUB_D32 : NMADDS_FT<"nmsub.d", AFGR64, IIFmulDouble, fsub>, + def NMSUB_D32 : NMADDS_FT<"nmsub.d",
AFGR64RegsOpnd, IIFmulDouble, fsub>, MADDS_FM<7, 1>; } let Predicates = [HasMips32r2, IsFP64bit, HasStdEnc], isCodeGenOnly=1 in { - def MADD_D64 : MADDS_FT<"madd.d", FGR64, IIFmulDouble, fadd>, MADDS_FM<4, 1>; - def MSUB_D64 : MADDS_FT<"msub.d", FGR64, IIFmulDouble, fsub>, MADDS_FM<5, 1>; + def MADD_D64 : MADDS_FT<"madd.d", FGR64RegsOpnd, IIFmulDouble, fadd>, + MADDS_FM<4, 1>; + def MSUB_D64 : MADDS_FT<"msub.d", FGR64RegsOpnd, IIFmulDouble, fsub>, + MADDS_FM<5, 1>; } let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc], isCodeGenOnly=1 in { - def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64, IIFmulDouble, fadd>, + def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64RegsOpnd, IIFmulDouble, fadd>, MADDS_FM<6, 1>; - def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64, IIFmulDouble, fsub>, + def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64RegsOpnd, IIFmulDouble, fsub>, MADDS_FM<7, 1>; } @@ -426,10 +503,9 @@ let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc], def MIPS_BRANCH_F : PatLeaf<(i32 0)>; def MIPS_BRANCH_T : PatLeaf<(i32 1)>; -let DecoderMethod = "DecodeBC1" in { def BC1F : BC1F_FT<"bc1f", IIBranch, MIPS_BRANCH_F>, BC1F_FM<0, 0>; def BC1T : BC1F_FT<"bc1t", IIBranch, MIPS_BRANCH_T>, BC1F_FM<0, 1>; -} + //===----------------------------------------------------------------------===// // Floating Point Flag Conditions //===----------------------------------------------------------------------===// @@ -463,22 +539,29 @@ def FCMP_D64 : CEQS_FT<"d", FGR64, IIFcmp, MipsFPCmp>, CEQS_FM<17>, //===----------------------------------------------------------------------===// // Floating Point Pseudo-Instructions //===----------------------------------------------------------------------===// -def MOVCCRToCCR : PseudoSE<(outs CCR:$dst), (ins CCROpnd:$src), []>; // This pseudo instr gets expanded into 2 mtc1 instrs after register // allocation. def BuildPairF64 : - PseudoSE<(outs AFGR64:$dst), - (ins CPURegs:$lo, CPURegs:$hi), - [(set AFGR64:$dst, (MipsBuildPairF64 CPURegs:$lo, CPURegs:$hi))]>; + PseudoSE<(outs AFGR64RegsOpnd:$dst), + (ins GPR32Opnd:$lo, GPR32Opnd:$hi), + [(set AFGR64RegsOpnd:$dst, + (MipsBuildPairF64 GPR32Opnd:$lo, GPR32Opnd:$hi))]>; // This pseudo instr gets expanded into 2 mfc1 instrs after register // allocation. // if n is 0, lower part of src is extracted. // if n is 1, higher part of src is extracted. def ExtractElementF64 : - PseudoSE<(outs CPURegs:$dst), (ins AFGR64:$src, i32imm:$n), - [(set CPURegs:$dst, (MipsExtractElementF64 AFGR64:$src, imm:$n))]>; + PseudoSE<(outs GPR32Opnd:$dst), (ins AFGR64RegsOpnd:$src, i32imm:$n), + [(set GPR32Opnd:$dst, + (MipsExtractElementF64 AFGR64RegsOpnd:$src, imm:$n))]>; + +//===----------------------------------------------------------------------===// +// InstAliases. 
+//===----------------------------------------------------------------------===// +def : InstAlias<"bc1t $offset", (BC1T FCC0, brtarget:$offset)>; +def : InstAlias<"bc1f $offset", (BC1F FCC0, brtarget:$offset)>; //===----------------------------------------------------------------------===// // Floating Point Patterns @@ -486,34 +569,44 @@ def ExtractElementF64 : def : MipsPat<(f32 fpimm0), (MTC1 ZERO)>; def : MipsPat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>; -def : MipsPat<(f32 (sint_to_fp CPURegs:$src)), (PseudoCVT_S_W CPURegs:$src)>; -def : MipsPat<(MipsTruncIntFP FGR32:$src), (TRUNC_W_S FGR32:$src)>; +def : MipsPat<(f32 (sint_to_fp GPR32Opnd:$src)), + (PseudoCVT_S_W GPR32Opnd:$src)>; +def : MipsPat<(MipsTruncIntFP FGR32RegsOpnd:$src), + (TRUNC_W_S FGR32RegsOpnd:$src)>; let Predicates = [NotFP64bit, HasStdEnc] in { - def : MipsPat<(f64 (sint_to_fp CPURegs:$src)), - (PseudoCVT_D32_W CPURegs:$src)>; - def : MipsPat<(MipsTruncIntFP AFGR64:$src), (TRUNC_W_D32 AFGR64:$src)>; - def : MipsPat<(f32 (fround AFGR64:$src)), (CVT_S_D32 AFGR64:$src)>; - def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D32_S FGR32:$src)>; + def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)), + (PseudoCVT_D32_W GPR32Opnd:$src)>; + def : MipsPat<(MipsTruncIntFP AFGR64RegsOpnd:$src), + (TRUNC_W_D32 AFGR64RegsOpnd:$src)>; + def : MipsPat<(f32 (fround AFGR64RegsOpnd:$src)), + (CVT_S_D32 AFGR64RegsOpnd:$src)>; + def : MipsPat<(f64 (fextend FGR32RegsOpnd:$src)), + (CVT_D32_S FGR32RegsOpnd:$src)>; } let Predicates = [IsFP64bit, HasStdEnc] in { def : MipsPat<(f64 fpimm0), (DMTC1 ZERO_64)>; def : MipsPat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>; - def : MipsPat<(f64 (sint_to_fp CPURegs:$src)), - (PseudoCVT_D64_W CPURegs:$src)>; - def : MipsPat<(f32 (sint_to_fp CPU64Regs:$src)), - (EXTRACT_SUBREG (PseudoCVT_S_L CPU64Regs:$src), sub_32)>; - def : MipsPat<(f64 (sint_to_fp CPU64Regs:$src)), - (PseudoCVT_D64_L CPU64Regs:$src)>; - - def : MipsPat<(MipsTruncIntFP FGR64:$src), (TRUNC_W_D64 FGR64:$src)>; - def : MipsPat<(MipsTruncIntFP FGR32:$src), (TRUNC_L_S FGR32:$src)>; - def : MipsPat<(MipsTruncIntFP FGR64:$src), (TRUNC_L_D64 FGR64:$src)>; - - def : MipsPat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>; - def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>; + def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)), + (PseudoCVT_D64_W GPR32Opnd:$src)>; + def : MipsPat<(f32 (sint_to_fp GPR64Opnd:$src)), + (EXTRACT_SUBREG (PseudoCVT_S_L GPR64Opnd:$src), sub_32)>; + def : MipsPat<(f64 (sint_to_fp GPR64Opnd:$src)), + (PseudoCVT_D64_L GPR64Opnd:$src)>; + + def : MipsPat<(MipsTruncIntFP FGR64RegsOpnd:$src), + (TRUNC_W_D64 FGR64RegsOpnd:$src)>; + def : MipsPat<(MipsTruncIntFP FGR32RegsOpnd:$src), + (TRUNC_L_S FGR32RegsOpnd:$src)>; + def : MipsPat<(MipsTruncIntFP FGR64RegsOpnd:$src), + (TRUNC_L_D64 FGR64RegsOpnd:$src)>; + + def : MipsPat<(f32 (fround FGR64RegsOpnd:$src)), + (CVT_S_D64 FGR64RegsOpnd:$src)>; + def : MipsPat<(f64 (fextend FGR32RegsOpnd:$src)), + (CVT_D64_S FGR32RegsOpnd:$src)>; } // Patterns for loads/stores with a reg+imm operand. 
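// ---------------------------------------------------------------------------
// The `def : InstAlias<...>` and `def : MipsPat<...>` lines above rely on
// TableGen's anonymous records: `def :` with no name defines an unnamed
// record that the tblgen backends collect as pure data. A self-contained
// sketch with hypothetical stand-in classes, runnable with a bare
// `llvm-tblgen pat_alias_sketch.td`:

class SketchPat<string matched, string result> {
  string MatchedDag = matched;
  string ResultDag  = result;
}

// One anonymous record per rewrite, in the style of the MipsPat defs above.
def : SketchPat<"(f32 (sint_to_fp GPR32:$src))", "(PseudoCVT_S_W GPR32:$src)">;
def : SketchPat<"(MipsTruncIntFP FGR32:$src)", "(TRUNC_W_S FGR32:$src)">;

// The bc1t/bc1f aliases work the same way: the short one-operand spelling is
// recorded once and expanded to the two-operand form with FCC0 filled in.
class SketchAlias<string asm, string expansion> {
  string AsmText   = asm;
  string Expansion = expansion;
}

def : SketchAlias<"bc1t $offset", "(BC1T FCC0, brtarget:$offset)">;
// ---------------------------------------------------------------------------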
diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td index 14cfcf9..1322784 100644 --- a/lib/Target/Mips/MipsInstrFormats.td +++ b/lib/Target/Mips/MipsInstrFormats.td @@ -400,17 +400,6 @@ class JALR_FM { let Inst{5-0} = 9; } -class BAL_FM { - bits<16> offset; - - bits<32> Inst; - - let Inst{31-26} = 1; - let Inst{25-21} = 0; - let Inst{20-16} = 0x11; - let Inst{15-0} = offset; -} - class BGEZAL_FM<bits<5> funct> { bits<5> rs; bits<16> offset; @@ -491,6 +480,47 @@ class TEQ_FM<bits<6> funct> { } //===----------------------------------------------------------------------===// +// System calls format <op|code_|funct> +//===----------------------------------------------------------------------===// + +class SYS_FM<bits<6> funct> +{ + bits<20> code_; + bits<32> Inst; + let Inst{31-26} = 0x0; + let Inst{25-6} = code_; + let Inst{5-0} = funct; +} + +//===----------------------------------------------------------------------===// +// Break instruction format <op|code_1|funct> +//===----------------------------------------------------------------------===// + +class BRK_FM<bits<6> funct> +{ + bits<10> code_1; + bits<10> code_2; + bits<32> Inst; + let Inst{31-26} = 0x0; + let Inst{25-16} = code_1; + let Inst{15-6} = code_2; + let Inst{5-0} = funct; +} + +//===----------------------------------------------------------------------===// +// Exception return format <Cop0|1|0|funct> +//===----------------------------------------------------------------------===// + +class ER_FM<bits<6> funct> +{ + bits<32> Inst; + let Inst{31-26} = 0x10; + let Inst{25} = 1; + let Inst{24-6} = 0; + let Inst{5-0} = funct; +} + +//===----------------------------------------------------------------------===// // // FLOATING POINT INSTRUCTION FORMATS // @@ -623,13 +653,14 @@ class SWXC1_FM<bits<6> funct> { } class BC1F_FM<bit nd, bit tf> { + bits<3> fcc; bits<16> offset; bits<32> Inst; let Inst{31-26} = 0x11; let Inst{25-21} = 0x8; - let Inst{20-18} = 0; // cc + let Inst{20-18} = fcc; let Inst{17} = nd; let Inst{16} = tf; let Inst{15-0} = offset; @@ -651,6 +682,10 @@ class CEQS_FM<bits<5> fmt> { let Inst{3-0} = cond; } +class C_COND_FM<bits<5> fmt, bits<4> c> : CEQS_FM<fmt> { + let cond = c; +} + class CMov_I_F_FM<bits<6> funct, bits<5> fmt> { bits<5> fd; bits<5> fs; @@ -669,12 +704,13 @@ class CMov_I_F_FM<bits<6> funct, bits<5> fmt> { class CMov_F_I_FM<bit tf> { bits<5> rd; bits<5> rs; + bits<3> fcc; bits<32> Inst; let Inst{31-26} = 0; let Inst{25-21} = rs; - let Inst{20-18} = 0; // cc + let Inst{20-18} = fcc; let Inst{17} = 0; let Inst{16} = tf; let Inst{15-11} = rd; @@ -685,12 +721,13 @@ class CMov_F_I_FM<bit tf> { class CMov_F_F_FM<bits<5> fmt, bit tf> { bits<5> fd; bits<5> fs; + bits<3> fcc; bits<32> Inst; let Inst{31-26} = 0x11; let Inst{25-21} = fmt; - let Inst{20-18} = 0; // cc + let Inst{20-18} = fcc; let Inst{17} = 0; let Inst{16} = tf; let Inst{15-11} = fs; diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp index 3144dae..eae05a3 100644 --- a/lib/Target/Mips/MipsInstrInfo.cpp +++ b/lib/Target/Mips/MipsInstrInfo.cpp @@ -61,15 +61,6 @@ MachineMemOperand *MipsInstrInfo::GetMemOperand(MachineBasicBlock &MBB, int FI, MFI.getObjectSize(FI), Align); } -MachineInstr* -MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx, - uint64_t Offset, const MDNode *MDPtr, - DebugLoc DL) const { - MachineInstrBuilder MIB = BuildMI(MF, DL, get(Mips::DBG_VALUE)) - .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr); - return &*MIB; -} - 
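// ---------------------------------------------------------------------------
// The SYS_FM/BRK_FM/ER_FM classes added in the MipsInstrFormats.td hunks
// above build encodings by assigning into bit slices of a 32-bit Inst field.
// A self-contained sketch with hypothetical names, runnable with a bare
// `llvm-tblgen enc_sketch.td`:

class SketchSysFM<bits<6> funct> {
  bits<20> code_;          // operand bits, filled in per instruction
  bits<32> Inst;           // the complete 32-bit encoding
  let Inst{31-26} = 0x0;   // opcode field: SPECIAL
  let Inst{25-6}  = code_; // 20-bit code field
  let Inst{5-0}   = funct; // function field selects syscall/break/...
}

// Fixing the template argument and the operand bits yields a fully resolved
// encoding that tblgen can emit.
def SyscallSketch : SketchSysFM<0x0c> {
  let code_ = 0;
}
// ---------------------------------------------------------------------------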
//===----------------------------------------------------------------------===// // Branch Analysis //===----------------------------------------------------------------------===// diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h index 0f075ec..b6480ef 100644 --- a/lib/Target/Mips/MipsInstrInfo.h +++ b/lib/Target/Mips/MipsInstrInfo.h @@ -67,11 +67,6 @@ public: bool AllowModify, SmallVectorImpl<MachineInstr*> &BranchInstrs) const; - virtual MachineInstr* emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const; - /// Insert nop instruction when hazard condition is found virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const; diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td index dc3e4be..b9e8895 100644 --- a/lib/Target/Mips/MipsInstrInfo.td +++ b/lib/Target/Mips/MipsInstrInfo.td @@ -250,6 +250,12 @@ def simm16 : Operand<i32> { def simm20 : Operand<i32> { } +def uimm20 : Operand<i32> { +} + +def uimm10 : Operand<i32> { +} + def simm16_64 : Operand<i64>; def shamt : Operand<i32>; @@ -266,7 +272,7 @@ def MipsMemAsmOperand : AsmOperandClass { // Address operand def mem : Operand<i32> { let PrintMethod = "printMemOperand"; - let MIOperandInfo = (ops CPURegs, simm16); + let MIOperandInfo = (ops GPR32, simm16); let EncoderMethod = "getMemEncoding"; let ParserMatchClass = MipsMemAsmOperand; let OperandType = "OPERAND_MEMORY"; @@ -274,7 +280,7 @@ def mem : Operand<i32> { def mem64 : Operand<i64> { let PrintMethod = "printMemOperand"; - let MIOperandInfo = (ops CPU64Regs, simm16_64); + let MIOperandInfo = (ops GPR64, simm16_64); let EncoderMethod = "getMemEncoding"; let ParserMatchClass = MipsMemAsmOperand; let OperandType = "OPERAND_MEMORY"; @@ -282,14 +288,14 @@ def mem64 : Operand<i64> { def mem_ea : Operand<i32> { let PrintMethod = "printMemOperandEA"; - let MIOperandInfo = (ops CPURegs, simm16); + let MIOperandInfo = (ops GPR32, simm16); let EncoderMethod = "getMemEncoding"; let OperandType = "OPERAND_MEMORY"; } def mem_ea_64 : Operand<i64> { let PrintMethod = "printMemOperandEA"; - let MIOperandInfo = (ops CPU64Regs, simm16_64); + let MIOperandInfo = (ops GPR64, simm16_64); let EncoderMethod = "getMemEncoding"; let OperandType = "OPERAND_MEMORY"; } @@ -384,51 +390,52 @@ class ArithLogicR<string opstr, RegisterOperand RO, bit isComm = 0, // Arithmetic and logical instructions with 2 register operands. 
class ArithLogicI<string opstr, Operand Od, RegisterOperand RO, + InstrItinClass Itin = NoItinerary, SDPatternOperator imm_type = null_frag, SDPatternOperator OpNode = null_frag> : InstSE<(outs RO:$rt), (ins RO:$rs, Od:$imm16), !strconcat(opstr, "\t$rt, $rs, $imm16"), [(set RO:$rt, (OpNode RO:$rs, imm_type:$imm16))], - IIAlu, FrmI, opstr> { + Itin, FrmI, opstr> { let isReMaterializable = 1; let TwoOperandAliasConstraint = "$rs = $rt"; } // Arithmetic Multiply ADD/SUB class MArithR<string opstr, bit isComm = 0> : - InstSE<(outs), (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt), - !strconcat(opstr, "\t$rs, $rt"), [], IIImul, FrmR> { + InstSE<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt), + !strconcat(opstr, "\t$rs, $rt"), [], IIImult, FrmR> { let Defs = [HI, LO]; let Uses = [HI, LO]; let isCommutable = isComm; } // Logical -class LogicNOR<string opstr, RegisterOperand RC>: - InstSE<(outs RC:$rd), (ins RC:$rs, RC:$rt), +class LogicNOR<string opstr, RegisterOperand RO>: + InstSE<(outs RO:$rd), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$rd, $rs, $rt"), - [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu, FrmR, opstr> { + [(set RO:$rd, (not (or RO:$rs, RO:$rt)))], IIArith, FrmR, opstr> { let isCommutable = 1; } // Shifts class shift_rotate_imm<string opstr, Operand ImmOpnd, - RegisterOperand RC, SDPatternOperator OpNode = null_frag, + RegisterOperand RO, SDPatternOperator OpNode = null_frag, SDPatternOperator PF = null_frag> : - InstSE<(outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt), + InstSE<(outs RO:$rd), (ins RO:$rt, ImmOpnd:$shamt), !strconcat(opstr, "\t$rd, $rt, $shamt"), - [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu, FrmR, opstr>; + [(set RO:$rd, (OpNode RO:$rt, PF:$shamt))], IIArith, FrmR, opstr>; -class shift_rotate_reg<string opstr, RegisterOperand RC, +class shift_rotate_reg<string opstr, RegisterOperand RO, SDPatternOperator OpNode = null_frag>: - InstSE<(outs RC:$rd), (ins CPURegsOpnd:$rs, RC:$rt), + InstSE<(outs RO:$rd), (ins RO:$rt, GPR32Opnd:$rs), !strconcat(opstr, "\t$rd, $rt, $rs"), - [(set RC:$rd, (OpNode RC:$rt, CPURegsOpnd:$rs))], IIAlu, FrmR, opstr>; + [(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs))], IIArith, FrmR, opstr>; // Load Upper Immediate -class LoadUpper<string opstr, RegisterClass RC, Operand Imm>: - InstSE<(outs RC:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"), - [], IIAlu, FrmI>, IsAsCheapAsAMove { +class LoadUpper<string opstr, RegisterOperand RO, Operand Imm>: + InstSE<(outs RO:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"), + [], IIArith, FrmI>, IsAsCheapAsAMove { let neverHasSideEffects = 1; let isReMaterializable = 1; } @@ -442,43 +449,47 @@ class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern, } // Memory Load/Store -class Load<string opstr, SDPatternOperator OpNode, RegisterClass RC, - Operand MemOpnd, ComplexPattern Addr, string ofsuffix> : - InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), - [(set RC:$rt, (OpNode Addr:$addr))], NoItinerary, FrmI, +class Load<string opstr, SDPatternOperator OpNode, DAGOperand RO, + InstrItinClass Itin, Operand MemOpnd, ComplexPattern Addr, + string ofsuffix> : + InstSE<(outs RO:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), + [(set RO:$rt, (OpNode Addr:$addr))], NoItinerary, FrmI, !strconcat(opstr, ofsuffix)> { let DecoderMethod = "DecodeMem"; let canFoldAsLoad = 1; let mayLoad = 1; } -class Store<string opstr, SDPatternOperator OpNode, RegisterClass RC, - Operand MemOpnd, ComplexPattern Addr, string ofsuffix> : - InstSE<(outs), (ins RC:$rt, 
MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), - [(OpNode RC:$rt, Addr:$addr)], NoItinerary, FrmI, +class Store<string opstr, SDPatternOperator OpNode, DAGOperand RO, + InstrItinClass Itin, Operand MemOpnd, ComplexPattern Addr, + string ofsuffix> : + InstSE<(outs), (ins RO:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), + [(OpNode RO:$rt, Addr:$addr)], NoItinerary, FrmI, !strconcat(opstr, ofsuffix)> { let DecoderMethod = "DecodeMem"; let mayStore = 1; } -multiclass LoadM<string opstr, RegisterClass RC, +multiclass LoadM<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag, + InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> { - def NAME : Load<opstr, OpNode, RC, mem, Addr, "">, + def NAME : Load<opstr, OpNode, RO, Itin, mem, Addr, "">, Requires<[NotN64, HasStdEnc]>; - def _P8 : Load<opstr, OpNode, RC, mem64, Addr, "_p8">, + def _P8 : Load<opstr, OpNode, RO, Itin, mem64, Addr, "_p8">, Requires<[IsN64, HasStdEnc]> { let DecoderNamespace = "Mips64"; let isCodeGenOnly = 1; } } -multiclass StoreM<string opstr, RegisterClass RC, +multiclass StoreM<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag, + InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> { - def NAME : Store<opstr, OpNode, RC, mem, Addr, "">, + def NAME : Store<opstr, OpNode, RO, Itin, mem, Addr, "">, Requires<[NotN64, HasStdEnc]>; - def _P8 : Store<opstr, OpNode, RC, mem64, Addr, "_p8">, + def _P8 : Store<opstr, OpNode, RO, Itin, mem64, Addr, "_p8">, Requires<[IsN64, HasStdEnc]> { let DecoderNamespace = "Mips64"; let isCodeGenOnly = 1; @@ -487,36 +498,36 @@ multiclass StoreM<string opstr, RegisterClass RC, // Load/Store Left/Right let canFoldAsLoad = 1 in -class LoadLeftRight<string opstr, SDNode OpNode, RegisterClass RC, +class LoadLeftRight<string opstr, SDNode OpNode, RegisterOperand RO, Operand MemOpnd> : - InstSE<(outs RC:$rt), (ins MemOpnd:$addr, RC:$src), + InstSE<(outs RO:$rt), (ins MemOpnd:$addr, RO:$src), !strconcat(opstr, "\t$rt, $addr"), - [(set RC:$rt, (OpNode addr:$addr, RC:$src))], NoItinerary, FrmI> { + [(set RO:$rt, (OpNode addr:$addr, RO:$src))], NoItinerary, FrmI> { let DecoderMethod = "DecodeMem"; string Constraints = "$src = $rt"; } -class StoreLeftRight<string opstr, SDNode OpNode, RegisterClass RC, +class StoreLeftRight<string opstr, SDNode OpNode, RegisterOperand RO, Operand MemOpnd>: - InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), - [(OpNode RC:$rt, addr:$addr)], NoItinerary, FrmI> { + InstSE<(outs), (ins RO:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), + [(OpNode RO:$rt, addr:$addr)], NoItinerary, FrmI> { let DecoderMethod = "DecodeMem"; } -multiclass LoadLeftRightM<string opstr, SDNode OpNode, RegisterClass RC> { - def NAME : LoadLeftRight<opstr, OpNode, RC, mem>, +multiclass LoadLeftRightM<string opstr, SDNode OpNode, RegisterOperand RO> { + def NAME : LoadLeftRight<opstr, OpNode, RO, mem>, Requires<[NotN64, HasStdEnc]>; - def _P8 : LoadLeftRight<opstr, OpNode, RC, mem64>, + def _P8 : LoadLeftRight<opstr, OpNode, RO, mem64>, Requires<[IsN64, HasStdEnc]> { let DecoderNamespace = "Mips64"; let isCodeGenOnly = 1; } } -multiclass StoreLeftRightM<string opstr, SDNode OpNode, RegisterClass RC> { - def NAME : StoreLeftRight<opstr, OpNode, RC, mem>, +multiclass StoreLeftRightM<string opstr, SDNode OpNode, RegisterOperand RO> { + def NAME : StoreLeftRight<opstr, OpNode, RO, mem>, Requires<[NotN64, HasStdEnc]>; - def _P8 : StoreLeftRight<opstr, OpNode, RC, mem64>, + def _P8 : StoreLeftRight<opstr, OpNode, 
RO, mem64>, Requires<[IsN64, HasStdEnc]> { let DecoderNamespace = "Mips64"; let isCodeGenOnly = 1; @@ -524,10 +535,10 @@ multiclass StoreLeftRightM<string opstr, SDNode OpNode, RegisterClass RC> { } // Conditional Branch -class CBranch<string opstr, PatFrag cond_op, RegisterOperand RC> : - InstSE<(outs), (ins RC:$rs, RC:$rt, brtarget:$offset), +class CBranch<string opstr, PatFrag cond_op, RegisterOperand RO> : + InstSE<(outs), (ins RO:$rs, RO:$rt, brtarget:$offset), !strconcat(opstr, "\t$rs, $rt, $offset"), - [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$offset)], IIBranch, + [(brcond (i32 (cond_op RO:$rs, RO:$rt)), bb:$offset)], IIBranch, FrmI> { let isBranch = 1; let isTerminator = 1; @@ -535,10 +546,10 @@ class CBranch<string opstr, PatFrag cond_op, RegisterOperand RC> : let Defs = [AT]; } -class CBranchZero<string opstr, PatFrag cond_op, RegisterOperand RC> : - InstSE<(outs), (ins RC:$rs, brtarget:$offset), +class CBranchZero<string opstr, PatFrag cond_op, RegisterOperand RO> : + InstSE<(outs), (ins RO:$rs, brtarget:$offset), !strconcat(opstr, "\t$rs, $offset"), - [(brcond (i32 (cond_op RC:$rs, 0)), bb:$offset)], IIBranch, FrmI> { + [(brcond (i32 (cond_op RO:$rs, 0)), bb:$offset)], IIBranch, FrmI> { let isBranch = 1; let isTerminator = 1; let hasDelaySlot = 1; @@ -546,18 +557,18 @@ class CBranchZero<string opstr, PatFrag cond_op, RegisterOperand RC> : } // SetCC -class SetCC_R<string opstr, PatFrag cond_op, RegisterClass RC> : - InstSE<(outs CPURegsOpnd:$rd), (ins RC:$rs, RC:$rt), +class SetCC_R<string opstr, PatFrag cond_op, RegisterOperand RO> : + InstSE<(outs GPR32Opnd:$rd), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$rd, $rs, $rt"), - [(set CPURegsOpnd:$rd, (cond_op RC:$rs, RC:$rt))], - IIAlu, FrmR, opstr>; + [(set GPR32Opnd:$rd, (cond_op RO:$rs, RO:$rt))], + IIslt, FrmR, opstr>; class SetCC_I<string opstr, PatFrag cond_op, Operand Od, PatLeaf imm_type, - RegisterClass RC>: - InstSE<(outs CPURegsOpnd:$rt), (ins RC:$rs, Od:$imm16), + RegisterOperand RO>: + InstSE<(outs GPR32Opnd:$rt), (ins RO:$rs, Od:$imm16), !strconcat(opstr, "\t$rt, $rs, $imm16"), - [(set CPURegsOpnd:$rt, (cond_op RC:$rs, imm_type:$imm16))], - IIAlu, FrmI, opstr>; + [(set GPR32Opnd:$rt, (cond_op RO:$rs, imm_type:$imm16))], + IIslt, FrmI, opstr>; // Jump class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator, @@ -585,17 +596,17 @@ class UncondBranch<string opstr> : // Base class for indirect branch and return instruction classes. 
let isTerminator=1, isBarrier=1, hasDelaySlot = 1 in -class JumpFR<RegisterClass RC, SDPatternOperator operator = null_frag>: - InstSE<(outs), (ins RC:$rs), "jr\t$rs", [(operator RC:$rs)], IIBranch, FrmR>; +class JumpFR<RegisterOperand RO, SDPatternOperator operator = null_frag>: + InstSE<(outs), (ins RO:$rs), "jr\t$rs", [(operator RO:$rs)], IIBranch, FrmR>; // Indirect branch -class IndirectBranch<RegisterClass RC>: JumpFR<RC, brind> { +class IndirectBranch<RegisterOperand RO>: JumpFR<RO, brind> { let isBranch = 1; let isIndirectBranch = 1; } // Return instruction -class RetBase<RegisterClass RC>: JumpFR<RC> { +class RetBase<RegisterOperand RO>: JumpFR<RO> { let isReturn = 1; let isCodeGenOnly = 1; let hasCtrlDep = 1; @@ -610,13 +621,13 @@ let isCall=1, hasDelaySlot=1, Defs = [RA] in { let DecoderMethod = "DecodeJumpTarget"; } - class JumpLinkRegPseudo<RegisterClass RC, Instruction JALRInst, - Register RetReg>: - PseudoSE<(outs), (ins RC:$rs), [(MipsJmpLink RC:$rs)], IIBranch>, - PseudoInstExpansion<(JALRInst RetReg, RC:$rs)>; + class JumpLinkRegPseudo<RegisterOperand RO, Instruction JALRInst, + Register RetReg, RegisterOperand ResRO = RO>: + PseudoSE<(outs), (ins RO:$rs), [(MipsJmpLink RO:$rs)], IIBranch>, + PseudoInstExpansion<(JALRInst RetReg, ResRO:$rs)>; - class JumpLinkReg<string opstr, RegisterClass RC>: - InstSE<(outs RC:$rd), (ins RC:$rs), !strconcat(opstr, "\t$rd, $rs"), + class JumpLinkReg<string opstr, RegisterOperand RO>: + InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"), [], IIBranch, FrmR>; class BGEZAL_FT<string opstr, RegisterOperand RO> : @@ -625,8 +636,9 @@ let isCall=1, hasDelaySlot=1, Defs = [RA] in { } -class BAL_FT : - InstSE<(outs), (ins brtarget:$offset), "bal\t$offset", [], IIBranch, FrmI> { +class BAL_BR_Pseudo<Instruction RealInst> : + PseudoSE<(outs), (ins brtarget:$offset), [], IIBranch>, + PseudoInstExpansion<(RealInst ZERO, brtarget:$offset)> { let isBranch = 1; let isTerminator = 1; let isBarrier = 1; @@ -634,6 +646,20 @@ class BAL_FT : let Defs = [RA]; } +// Syscall +class SYS_FT<string opstr> : + InstSE<(outs), (ins uimm20:$code_), + !strconcat(opstr, "\t$code_"), [], NoItinerary, FrmI>; +// Break +class BRK_FT<string opstr> : + InstSE<(outs), (ins uimm10:$code_1, uimm10:$code_2), + !strconcat(opstr, "\t$code_1, $code_2"), [], NoItinerary, FrmOther>; + +// (D)Eret +class ER_FT<string opstr> : + InstSE<(outs), (ins), + opstr, [], NoItinerary, FrmOther>; + // Sync let hasSideEffects = 1 in class SYNC_FT : @@ -673,11 +699,11 @@ class MultDivPseudo<Instruction RealInst, RegisterClass R0, RegisterOperand R1, // operands. 
class MAddSubPseudo<Instruction RealInst, SDPatternOperator OpNode> : PseudoSE<(outs ACRegs:$ac), - (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin), + (ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACRegs:$acin), [(set ACRegs:$ac, - (OpNode CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin))], - IIImul>, - PseudoInstExpansion<(RealInst CPURegsOpnd:$rs, CPURegsOpnd:$rt)> { + (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACRegs:$acin))], + IIImult>, + PseudoInstExpansion<(RealInst GPR32Opnd:$rs, GPR32Opnd:$rt)> { string Constraints = "$acin = $ac"; } @@ -689,21 +715,21 @@ class Div<string opstr, InstrItinClass itin, RegisterOperand RO, } // Move from Hi/Lo -class MoveFromLOHI<string opstr, RegisterClass RC, list<Register> UseRegs>: - InstSE<(outs RC:$rd), (ins), !strconcat(opstr, "\t$rd"), [], IIHiLo, FrmR> { +class MoveFromLOHI<string opstr, RegisterOperand RO, list<Register> UseRegs>: + InstSE<(outs RO:$rd), (ins), !strconcat(opstr, "\t$rd"), [], IIHiLo, FrmR> { let Uses = UseRegs; let neverHasSideEffects = 1; } -class MoveToLOHI<string opstr, RegisterClass RC, list<Register> DefRegs>: - InstSE<(outs), (ins RC:$rs), !strconcat(opstr, "\t$rs"), [], IIHiLo, FrmR> { +class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>: + InstSE<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"), [], IIHiLo, FrmR> { let Defs = DefRegs; let neverHasSideEffects = 1; } -class EffectiveAddress<string opstr, RegisterClass RC, Operand Mem> : - InstSE<(outs RC:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"), - [(set RC:$rt, addr:$addr)], NoItinerary, FrmI> { +class EffectiveAddress<string opstr, RegisterOperand RO, Operand Mem> : + InstSE<(outs RO:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"), + [(set RO:$rt, addr:$addr)], NoItinerary, FrmI> { let isCodeGenOnly = 1; let DecoderMethod = "DecodeMem"; } @@ -711,19 +737,19 @@ class EffectiveAddress<string opstr, RegisterClass RC, Operand Mem> : // Count Leading Ones/Zeros in Word class CountLeading0<string opstr, RegisterOperand RO>: InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"), - [(set RO:$rd, (ctlz RO:$rs))], IIAlu, FrmR>, + [(set RO:$rd, (ctlz RO:$rs))], IIArith, FrmR>, Requires<[HasBitCount, HasStdEnc]>; class CountLeading1<string opstr, RegisterOperand RO>: InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"), - [(set RO:$rd, (ctlz (not RO:$rs)))], IIAlu, FrmR>, + [(set RO:$rd, (ctlz (not RO:$rs)))], IIArith, FrmR>, Requires<[HasBitCount, HasStdEnc]>; // Sign Extend in Register. 
-class SignExtInReg<string opstr, ValueType vt, RegisterClass RC> : - InstSE<(outs RC:$rd), (ins RC:$rt), !strconcat(opstr, "\t$rd, $rt"), - [(set RC:$rd, (sext_inreg RC:$rt, vt))], NoItinerary, FrmR> { +class SignExtInReg<string opstr, ValueType vt, RegisterOperand RO> : + InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"), + [(set RO:$rd, (sext_inreg RO:$rt, vt))], IIseb, FrmR> { let Predicates = [HasSEInReg, HasStdEnc]; } @@ -736,9 +762,9 @@ class SubwordSwap<string opstr, RegisterOperand RO>: } // Read Hardware -class ReadHardware<RegisterClass CPURegClass, RegisterOperand RO> : - InstSE<(outs CPURegClass:$rt), (ins RO:$rd), "rdhwr\t$rt, $rd", [], - IIAlu, FrmR>; +class ReadHardware<RegisterOperand CPURegOperand, RegisterOperand RO> : + InstSE<(outs CPURegOperand:$rt), (ins RO:$rd), "rdhwr\t$rt, $rd", [], + IIArith, FrmR>; // Ext and Ins class ExtBase<string opstr, RegisterOperand RO>: @@ -764,11 +790,8 @@ class Atomic2Ops<PatFrag Op, RegisterClass DRC, RegisterClass PRC> : [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>; multiclass Atomic2Ops32<PatFrag Op> { - def NAME : Atomic2Ops<Op, CPURegs, CPURegs>, Requires<[NotN64, HasStdEnc]>; - def _P8 : Atomic2Ops<Op, CPURegs, CPU64Regs>, - Requires<[IsN64, HasStdEnc]> { - let DecoderNamespace = "Mips64"; - } + def NAME : Atomic2Ops<Op, GPR32, GPR32>, Requires<[NotN64, HasStdEnc]>; + def _P8 : Atomic2Ops<Op, GPR32, GPR64>, Requires<[IsN64, HasStdEnc]>; } // Atomic Compare & Swap. @@ -777,12 +800,10 @@ class AtomicCmpSwap<PatFrag Op, RegisterClass DRC, RegisterClass PRC> : [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>; multiclass AtomicCmpSwap32<PatFrag Op> { - def NAME : AtomicCmpSwap<Op, CPURegs, CPURegs>, + def NAME : AtomicCmpSwap<Op, GPR32, GPR32>, Requires<[NotN64, HasStdEnc]>; - def _P8 : AtomicCmpSwap<Op, CPURegs, CPU64Regs>, - Requires<[IsN64, HasStdEnc]> { - let DecoderNamespace = "Mips64"; - } + def _P8 : AtomicCmpSwap<Op, GPR32, GPR64>, + Requires<[IsN64, HasStdEnc]>; } class LLBase<string opstr, RegisterOperand RO, Operand Mem> : @@ -803,6 +824,11 @@ class SCBase<string opstr, RegisterOperand RO, Operand Mem> : class MFC3OP<dag outs, dag ins, string asmstr> : InstSE<outs, ins, asmstr, [], NoItinerary, FrmFR>; +let isBarrier = 1, isTerminator = 1, isCodeGenOnly = 1 in +def TRAP : InstSE<(outs), (ins), "break", [(trap)], NoItinerary, FrmOther> { + let Inst = 0x0000000d; +} + //===----------------------------------------------------------------------===// // Pseudo instructions //===----------------------------------------------------------------------===// @@ -848,9 +874,9 @@ let usesCustomInserter = 1 in { } /// Pseudo instructions for loading and storing accumulator registers. 
-let isPseudo = 1 in { - defm LOAD_AC64 : LoadM<"load_ac64", ACRegs>; - defm STORE_AC64 : StoreM<"store_ac64", ACRegs>; +let isPseudo = 1, isCodeGenOnly = 1 in { + defm LOAD_AC64 : LoadM<"", ACRegs>; + defm STORE_AC64 : StoreM<"", ACRegs>; } //===----------------------------------------------------------------------===// @@ -861,114 +887,125 @@ let isPseudo = 1 in { //===----------------------------------------------------------------------===// /// Arithmetic Instructions (ALU Immediate) -def ADDiu : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd, immSExt16, add>, +def ADDiu : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd, IIArith, immSExt16, + add>, ADDI_FM<0x9>, IsAsCheapAsAMove; -def ADDi : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>, ADDI_FM<0x8>; -def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>, +def ADDi : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>, ADDI_FM<0x8>; +def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>, SLTI_FM<0xa>; -def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>, +def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>, SLTI_FM<0xb>; -def ANDi : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>, +def ANDi : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd, IILogic, immZExt16, + and>, ADDI_FM<0xc>; -def ORi : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>, +def ORi : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd, IILogic, immZExt16, + or>, ADDI_FM<0xd>; -def XORi : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>, +def XORi : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd, IILogic, immZExt16, + xor>, ADDI_FM<0xe>; -def LUi : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM; +def LUi : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM; /// Arithmetic Instructions (3-Operand, R-Type) -def ADDu : MMRel, ArithLogicR<"addu", CPURegsOpnd, 1, IIAlu, add>, +def ADDu : MMRel, ArithLogicR<"addu", GPR32Opnd, 1, IIArith, add>, ADD_FM<0, 0x21>; -def SUBu : MMRel, ArithLogicR<"subu", CPURegsOpnd, 0, IIAlu, sub>, +def SUBu : MMRel, ArithLogicR<"subu", GPR32Opnd, 0, IIArith, sub>, ADD_FM<0, 0x23>; -def MUL : MMRel, ArithLogicR<"mul", CPURegsOpnd, 1, IIImul, mul>, +def MUL : MMRel, ArithLogicR<"mul", GPR32Opnd, 1, IIImul, mul>, ADD_FM<0x1c, 2>; -def ADD : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM<0, 0x20>; -def SUB : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM<0, 0x22>; -def SLT : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM<0, 0x2a>; -def SLTu : MMRel, SetCC_R<"sltu", setult, CPURegs>, ADD_FM<0, 0x2b>; -def AND : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>, +def ADD : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM<0, 0x20>; +def SUB : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM<0, 0x22>; +def SLT : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM<0, 0x2a>; +def SLTu : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>, ADD_FM<0, 0x2b>; +def AND : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IILogic, and>, ADD_FM<0, 0x24>; -def OR : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>, +def OR : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IILogic, or>, ADD_FM<0, 0x25>; -def XOR : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>, +def XOR : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IILogic, xor>, ADD_FM<0, 0x26>; -def NOR : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM<0, 0x27>; +def NOR : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM<0, 0x27>; /// Shift Instructions -def SLL : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd, shl, immZExt5>, +def SLL : MMRel, 
shift_rotate_imm<"sll", shamt, GPR32Opnd, shl, immZExt5>, SRA_FM<0, 0>; -def SRL : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd, srl, immZExt5>, +def SRL : MMRel, shift_rotate_imm<"srl", shamt, GPR32Opnd, srl, immZExt5>, SRA_FM<2, 0>; -def SRA : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd, sra, immZExt5>, +def SRA : MMRel, shift_rotate_imm<"sra", shamt, GPR32Opnd, sra, immZExt5>, SRA_FM<3, 0>; -def SLLV : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd, shl>, SRLV_FM<4, 0>; -def SRLV : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd, srl>, SRLV_FM<6, 0>; -def SRAV : MMRel, shift_rotate_reg<"srav", CPURegsOpnd, sra>, SRLV_FM<7, 0>; +def SLLV : MMRel, shift_rotate_reg<"sllv", GPR32Opnd, shl>, SRLV_FM<4, 0>; +def SRLV : MMRel, shift_rotate_reg<"srlv", GPR32Opnd, srl>, SRLV_FM<6, 0>; +def SRAV : MMRel, shift_rotate_reg<"srav", GPR32Opnd, sra>, SRLV_FM<7, 0>; // Rotate Instructions let Predicates = [HasMips32r2, HasStdEnc] in { - def ROTR : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd, rotr, + def ROTR : MMRel, shift_rotate_imm<"rotr", shamt, GPR32Opnd, rotr, immZExt5>, SRA_FM<2, 1>; - def ROTRV : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd, rotr>, + def ROTRV : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, rotr>, SRLV_FM<6, 1>; } /// Load and Store Instructions /// aligned -defm LB : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM<0x20>; -defm LBu : LoadM<"lbu", CPURegs, zextloadi8, addrDefault>, MMRel, LW_FM<0x24>; -defm LH : LoadM<"lh", CPURegs, sextloadi16, addrDefault>, MMRel, LW_FM<0x21>; -defm LHu : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM<0x25>; -defm LW : LoadM<"lw", CPURegs, load, addrDefault>, MMRel, LW_FM<0x23>; -defm SB : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM<0x28>; -defm SH : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM<0x29>; -defm SW : StoreM<"sw", CPURegs, store>, MMRel, LW_FM<0x2b>; +defm LB : LoadM<"lb", GPR32Opnd, sextloadi8, IILoad>, MMRel, LW_FM<0x20>; +defm LBu : LoadM<"lbu", GPR32Opnd, zextloadi8, IILoad, addrDefault>, MMRel, + LW_FM<0x24>; +defm LH : LoadM<"lh", GPR32Opnd, sextloadi16, IILoad, addrDefault>, MMRel, + LW_FM<0x21>; +defm LHu : LoadM<"lhu", GPR32Opnd, zextloadi16, IILoad>, MMRel, LW_FM<0x25>; +defm LW : LoadM<"lw", GPR32Opnd, load, IILoad, addrDefault>, MMRel, LW_FM<0x23>; +defm SB : StoreM<"sb", GPR32Opnd, truncstorei8, IIStore>, MMRel, LW_FM<0x28>; +defm SH : StoreM<"sh", GPR32Opnd, truncstorei16, IIStore>, MMRel, LW_FM<0x29>; +defm SW : StoreM<"sw", GPR32Opnd, store, IIStore>, MMRel, LW_FM<0x2b>; /// load/store left/right -defm LWL : LoadLeftRightM<"lwl", MipsLWL, CPURegs>, LW_FM<0x22>; -defm LWR : LoadLeftRightM<"lwr", MipsLWR, CPURegs>, LW_FM<0x26>; -defm SWL : StoreLeftRightM<"swl", MipsSWL, CPURegs>, LW_FM<0x2a>; -defm SWR : StoreLeftRightM<"swr", MipsSWR, CPURegs>, LW_FM<0x2e>; +defm LWL : LoadLeftRightM<"lwl", MipsLWL, GPR32Opnd>, LW_FM<0x22>; +defm LWR : LoadLeftRightM<"lwr", MipsLWR, GPR32Opnd>, LW_FM<0x26>; +defm SWL : StoreLeftRightM<"swl", MipsSWL, GPR32Opnd>, LW_FM<0x2a>; +defm SWR : StoreLeftRightM<"swr", MipsSWR, GPR32Opnd>, LW_FM<0x2e>; def SYNC : SYNC_FT, SYNC_FM; -def TEQ : TEQ_FT<"teq", CPURegsOpnd>, TEQ_FM<0x34>; +def TEQ : TEQ_FT<"teq", GPR32Opnd>, TEQ_FM<0x34>; + +def BREAK : BRK_FT<"break">, BRK_FM<0xd>; +def SYSCALL : SYS_FT<"syscall">, SYS_FM<0xc>; + +def ERET : ER_FT<"eret">, ER_FM<0x18>; +def DERET : ER_FT<"deret">, ER_FM<0x1f>; /// Load-linked, Store-conditional let Predicates = [NotN64, HasStdEnc] in { - def LL : LLBase<"ll", CPURegsOpnd, mem>, LW_FM<0x30>; - def SC : SCBase<"sc", CPURegsOpnd, 
mem>, LW_FM<0x38>; + def LL : LLBase<"ll", GPR32Opnd, mem>, LW_FM<0x30>; + def SC : SCBase<"sc", GPR32Opnd, mem>, LW_FM<0x38>; } let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in { - def LL_P8 : LLBase<"ll", CPURegsOpnd, mem64>, LW_FM<0x30>; - def SC_P8 : SCBase<"sc", CPURegsOpnd, mem64>, LW_FM<0x38>; + def LL_P8 : LLBase<"ll", GPR32Opnd, mem64>, LW_FM<0x30>; + def SC_P8 : SCBase<"sc", GPR32Opnd, mem64>, LW_FM<0x38>; } /// Jump and Branch Instructions def J : JumpFJ<jmptarget, "j", br, bb>, FJ<2>, Requires<[RelocStatic, HasStdEnc]>, IsBranch; -def JR : IndirectBranch<CPURegs>, MTLO_FM<8>; +def JR : IndirectBranch<GPR32Opnd>, MTLO_FM<8>; def B : UncondBranch<"b">, B_FM; -def BEQ : CBranch<"beq", seteq, CPURegsOpnd>, BEQ_FM<4>; -def BNE : CBranch<"bne", setne, CPURegsOpnd>, BEQ_FM<5>; -def BGEZ : CBranchZero<"bgez", setge, CPURegsOpnd>, BGEZ_FM<1, 1>; -def BGTZ : CBranchZero<"bgtz", setgt, CPURegsOpnd>, BGEZ_FM<7, 0>; -def BLEZ : CBranchZero<"blez", setle, CPURegsOpnd>, BGEZ_FM<6, 0>; -def BLTZ : CBranchZero<"bltz", setlt, CPURegsOpnd>, BGEZ_FM<1, 0>; - -def BAL_BR: BAL_FT, BAL_FM; +def BEQ : CBranch<"beq", seteq, GPR32Opnd>, BEQ_FM<4>; +def BNE : CBranch<"bne", setne, GPR32Opnd>, BEQ_FM<5>; +def BGEZ : CBranchZero<"bgez", setge, GPR32Opnd>, BGEZ_FM<1, 1>; +def BGTZ : CBranchZero<"bgtz", setgt, GPR32Opnd>, BGEZ_FM<7, 0>; +def BLEZ : CBranchZero<"blez", setle, GPR32Opnd>, BGEZ_FM<6, 0>; +def BLTZ : CBranchZero<"bltz", setlt, GPR32Opnd>, BGEZ_FM<1, 0>; def JAL : JumpLink<"jal">, FJ<3>; -def JALR : JumpLinkReg<"jalr", CPURegs>, JALR_FM; -def JALRPseudo : JumpLinkRegPseudo<CPURegs, JALR, RA>; -def BGEZAL : BGEZAL_FT<"bgezal", CPURegsOpnd>, BGEZAL_FM<0x11>; -def BLTZAL : BGEZAL_FT<"bltzal", CPURegsOpnd>, BGEZAL_FM<0x10>; +def JALR : JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM; +def JALRPseudo : JumpLinkRegPseudo<GPR32Opnd, JALR, RA>; +def BGEZAL : BGEZAL_FT<"bgezal", GPR32Opnd>, BGEZAL_FM<0x11>; +def BLTZAL : BGEZAL_FT<"bltzal", GPR32Opnd>, BGEZAL_FM<0x10>; +def BAL_BR : BAL_BR_Pseudo<BGEZAL>; def TAILCALL : JumpFJ<calltarget, "j", MipsTailCall, imm>, FJ<2>, IsTailCall; -def TAILCALL_R : JumpFR<CPURegs, MipsTailCall>, MTLO_FM<8>, IsTailCall; +def TAILCALL_R : JumpFR<GPR32Opnd, MipsTailCall>, MTLO_FM<8>, IsTailCall; -def RET : RetBase<CPURegs>, MTLO_FM<8>; +def RET : RetBase<GPR32Opnd>, MTLO_FM<8>; // Exception handling related node and instructions. // The conversion sequence is: @@ -984,42 +1021,42 @@ def MIPSehret : SDNode<"MipsISD::EH_RETURN", SDT_MipsEHRET, [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; let Uses = [V0, V1], isTerminator = 1, isReturn = 1, isBarrier = 1 in { - def MIPSeh_return32 : MipsPseudo<(outs), (ins CPURegs:$spoff, CPURegs:$dst), - [(MIPSehret CPURegs:$spoff, CPURegs:$dst)]>; - def MIPSeh_return64 : MipsPseudo<(outs), (ins CPU64Regs:$spoff, - CPU64Regs:$dst), - [(MIPSehret CPU64Regs:$spoff, CPU64Regs:$dst)]>; + def MIPSeh_return32 : MipsPseudo<(outs), (ins GPR32:$spoff, GPR32:$dst), + [(MIPSehret GPR32:$spoff, GPR32:$dst)]>; + def MIPSeh_return64 : MipsPseudo<(outs), (ins GPR64:$spoff, + GPR64:$dst), + [(MIPSehret GPR64:$spoff, GPR64:$dst)]>; } /// Multiply and Divide Instructions. 
-def MULT : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>, +def MULT : MMRel, Mult<"mult", IIImult, GPR32Opnd, [HI, LO]>, MULT_FM<0, 0x18>; -def MULTu : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>, +def MULTu : MMRel, Mult<"multu", IIImult, GPR32Opnd, [HI, LO]>, MULT_FM<0, 0x19>; -def PseudoMULT : MultDivPseudo<MULT, ACRegs, CPURegsOpnd, MipsMult, IIImul>; -def PseudoMULTu : MultDivPseudo<MULTu, ACRegs, CPURegsOpnd, MipsMultu, IIImul>; -def SDIV : Div<"div", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1a>; -def UDIV : Div<"divu", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1b>; -def PseudoSDIV : MultDivPseudo<SDIV, ACRegs, CPURegsOpnd, MipsDivRem, IIIdiv, +def PseudoMULT : MultDivPseudo<MULT, ACRegs, GPR32Opnd, MipsMult, IIImult>; +def PseudoMULTu : MultDivPseudo<MULTu, ACRegs, GPR32Opnd, MipsMultu, IIImult>; +def SDIV : Div<"div", IIIdiv, GPR32Opnd, [HI, LO]>, MULT_FM<0, 0x1a>; +def UDIV : Div<"divu", IIIdiv, GPR32Opnd, [HI, LO]>, MULT_FM<0, 0x1b>; +def PseudoSDIV : MultDivPseudo<SDIV, ACRegs, GPR32Opnd, MipsDivRem, IIIdiv, 0, 1, 1>; -def PseudoUDIV : MultDivPseudo<UDIV, ACRegs, CPURegsOpnd, MipsDivRemU, IIIdiv, +def PseudoUDIV : MultDivPseudo<UDIV, ACRegs, GPR32Opnd, MipsDivRemU, IIIdiv, 0, 1, 1>; -def MTHI : MoveToLOHI<"mthi", CPURegs, [HI]>, MTLO_FM<0x11>; -def MTLO : MoveToLOHI<"mtlo", CPURegs, [LO]>, MTLO_FM<0x13>; -def MFHI : MoveFromLOHI<"mfhi", CPURegs, [HI]>, MFLO_FM<0x10>; -def MFLO : MoveFromLOHI<"mflo", CPURegs, [LO]>, MFLO_FM<0x12>; +def MTHI : MoveToLOHI<"mthi", GPR32Opnd, [HI]>, MTLO_FM<0x11>; +def MTLO : MoveToLOHI<"mtlo", GPR32Opnd, [LO]>, MTLO_FM<0x13>; +def MFHI : MoveFromLOHI<"mfhi", GPR32Opnd, [HI]>, MFLO_FM<0x10>; +def MFLO : MoveFromLOHI<"mflo", GPR32Opnd, [LO]>, MFLO_FM<0x12>; /// Sign Ext In Register Instructions. -def SEB : SignExtInReg<"seb", i8, CPURegs>, SEB_FM<0x10, 0x20>; -def SEH : SignExtInReg<"seh", i16, CPURegs>, SEB_FM<0x18, 0x20>; +def SEB : SignExtInReg<"seb", i8, GPR32Opnd>, SEB_FM<0x10, 0x20>; +def SEH : SignExtInReg<"seh", i16, GPR32Opnd>, SEB_FM<0x18, 0x20>; /// Count Leading -def CLZ : CountLeading0<"clz", CPURegsOpnd>, CLO_FM<0x20>; -def CLO : CountLeading1<"clo", CPURegsOpnd>, CLO_FM<0x21>; +def CLZ : CountLeading0<"clz", GPR32Opnd>, CLO_FM<0x20>; +def CLO : CountLeading1<"clo", GPR32Opnd>, CLO_FM<0x21>; /// Word Swap Bytes Within Halfwords -def WSBH : SubwordSwap<"wsbh", CPURegsOpnd>, SEB_FM<2, 0x20>; +def WSBH : SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM<2, 0x20>; /// No operation. def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>; @@ -1028,7 +1065,7 @@ def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>; // instructions. The same does not happen for stack address copies, so an // add op with mem ComplexPattern is used and the stack address copy // can be matched. 
It's similar to Sparc LEA_ADDRi -def LEA_ADDiu : EffectiveAddress<"addiu", CPURegs, mem_ea>, LW_FM<9>; +def LEA_ADDiu : EffectiveAddress<"addiu", GPR32Opnd, mem_ea>, LW_FM<9>; // MADD*/MSUB* def MADD : MArithR<"madd", 1>, MULT_FM<0x1c, 0>; @@ -1040,79 +1077,74 @@ def PseudoMADDU : MAddSubPseudo<MADDU, MipsMAddu>; def PseudoMSUB : MAddSubPseudo<MSUB, MipsMSub>; def PseudoMSUBU : MAddSubPseudo<MSUBU, MipsMSubu>; -def RDHWR : ReadHardware<CPURegs, HWRegsOpnd>, RDHWR_FM; +def RDHWR : ReadHardware<GPR32Opnd, HWRegsOpnd>, RDHWR_FM; -def EXT : ExtBase<"ext", CPURegsOpnd>, EXT_FM<0>; -def INS : InsBase<"ins", CPURegsOpnd>, EXT_FM<4>; +def EXT : ExtBase<"ext", GPR32Opnd>, EXT_FM<0>; +def INS : InsBase<"ins", GPR32Opnd>, EXT_FM<4>; /// Move Control Registers From/To CPU Registers -def MFC0_3OP : MFC3OP<(outs CPURegsOpnd:$rt), - (ins CPURegsOpnd:$rd, uimm16:$sel), +def MFC0_3OP : MFC3OP<(outs GPR32Opnd:$rt), + (ins GPR32Opnd:$rd, uimm16:$sel), "mfc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 0>; -def MTC0_3OP : MFC3OP<(outs CPURegsOpnd:$rd, uimm16:$sel), - (ins CPURegsOpnd:$rt), +def MTC0_3OP : MFC3OP<(outs GPR32Opnd:$rd, uimm16:$sel), + (ins GPR32Opnd:$rt), "mtc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 4>; -def MFC2_3OP : MFC3OP<(outs CPURegsOpnd:$rt), - (ins CPURegsOpnd:$rd, uimm16:$sel), +def MFC2_3OP : MFC3OP<(outs GPR32Opnd:$rt), + (ins GPR32Opnd:$rd, uimm16:$sel), "mfc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 0>; -def MTC2_3OP : MFC3OP<(outs CPURegsOpnd:$rd, uimm16:$sel), - (ins CPURegsOpnd:$rt), +def MTC2_3OP : MFC3OP<(outs GPR32Opnd:$rd, uimm16:$sel), + (ins GPR32Opnd:$rt), "mtc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 4>; //===----------------------------------------------------------------------===// // Instruction aliases //===----------------------------------------------------------------------===// def : InstAlias<"move $dst, $src", - (ADDu CPURegsOpnd:$dst, CPURegsOpnd:$src,ZERO), 1>, - Requires<[NotMips64]>; -def : InstAlias<"move $dst, $src", - (OR CPURegsOpnd:$dst, CPURegsOpnd:$src,ZERO), 1>, + (ADDu GPR32Opnd:$dst, GPR32Opnd:$src,ZERO), 1>, Requires<[NotMips64]>; -def : InstAlias<"bal $offset", (BGEZAL RA, brtarget:$offset), 1>; +def : InstAlias<"bal $offset", (BGEZAL ZERO, brtarget:$offset), 0>; def : InstAlias<"addu $rs, $rt, $imm", - (ADDiu CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>; + (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>; def : InstAlias<"add $rs, $rt, $imm", - (ADDi CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>; + (ADDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>; def : InstAlias<"and $rs, $rt, $imm", - (ANDi CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>; -def : InstAlias<"j $rs", (JR CPURegs:$rs), 0>, - Requires<[NotMips64]>; -def : InstAlias<"jalr $rs", (JALR RA, CPURegs:$rs)>, Requires<[NotMips64]>; -def : InstAlias<"jal $rs", (JALR RA, CPURegs:$rs), 0>, Requires<[NotMips64]>; -def : InstAlias<"jal $rd,$rs", (JALR CPURegs:$rd, CPURegs:$rs), 0>, - Requires<[NotMips64]>; + (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>; +def : InstAlias<"j $rs", (JR GPR32Opnd:$rs), 0>; +def : InstAlias<"jalr $rs", (JALR RA, GPR32Opnd:$rs), 0>; +def : InstAlias<"jal $rs", (JALR RA, GPR32Opnd:$rs), 0>; +def : InstAlias<"jal $rd,$rs", (JALR GPR32Opnd:$rd, GPR32Opnd:$rs), 0>; def : InstAlias<"not $rt, $rs", - (NOR CPURegsOpnd:$rt, CPURegsOpnd:$rs, ZERO), 1>; + (NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>; def : InstAlias<"neg $rt, $rs", - (SUB CPURegsOpnd:$rt, ZERO, CPURegsOpnd:$rs), 1>; + (SUB GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>; def : InstAlias<"negu $rt, $rs", - (SUBu 
CPURegsOpnd:$rt, ZERO, CPURegsOpnd:$rs), 1>; + (SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>; def : InstAlias<"slt $rs, $rt, $imm", - (SLTi CPURegsOpnd:$rs, CPURegs:$rt, simm16:$imm), 0>; + (SLTi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>; def : InstAlias<"xor $rs, $rt, $imm", - (XORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>, - Requires<[NotMips64]>; + (XORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>; def : InstAlias<"or $rs, $rt, $imm", - (ORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>, - Requires<[NotMips64]>; + (ORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>; def : InstAlias<"nop", (SLL ZERO, ZERO, 0), 1>; def : InstAlias<"mfc0 $rt, $rd", - (MFC0_3OP CPURegsOpnd:$rt, CPURegsOpnd:$rd, 0), 0>; + (MFC0_3OP GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>; def : InstAlias<"mtc0 $rt, $rd", - (MTC0_3OP CPURegsOpnd:$rd, 0, CPURegsOpnd:$rt), 0>; + (MTC0_3OP GPR32Opnd:$rd, 0, GPR32Opnd:$rt), 0>; def : InstAlias<"mfc2 $rt, $rd", - (MFC2_3OP CPURegsOpnd:$rt, CPURegsOpnd:$rd, 0), 0>; + (MFC2_3OP GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>; def : InstAlias<"mtc2 $rt, $rd", - (MTC2_3OP CPURegsOpnd:$rd, 0, CPURegsOpnd:$rt), 0>; + (MTC2_3OP GPR32Opnd:$rd, 0, GPR32Opnd:$rt), 0>; def : InstAlias<"bnez $rs,$offset", - (BNE CPURegsOpnd:$rs, ZERO, brtarget:$offset), 1>, - Requires<[NotMips64]>; + (BNE GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>; def : InstAlias<"beqz $rs,$offset", - (BEQ CPURegsOpnd:$rs, ZERO, brtarget:$offset), 1>, - Requires<[NotMips64]>; + (BEQ GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>; +def : InstAlias<"syscall", (SYSCALL 0), 1>; + +def : InstAlias<"break $imm", (BREAK uimm10:$imm, 0), 1>; +def : InstAlias<"break", (BREAK 0, 0), 1>; //===----------------------------------------------------------------------===// // Assembler Pseudo Instructions //===----------------------------------------------------------------------===// @@ -1120,17 +1152,17 @@ def : InstAlias<"beqz $rs,$offset", class LoadImm32< string instr_asm, Operand Od, RegisterOperand RO> : MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32), !strconcat(instr_asm, "\t$rt, $imm32")> ; -def LoadImm32Reg : LoadImm32<"li", shamt,CPURegsOpnd>; +def LoadImm32Reg : LoadImm32<"li", shamt,GPR32Opnd>; class LoadAddress<string instr_asm, Operand MemOpnd, RegisterOperand RO> : MipsAsmPseudoInst<(outs RO:$rt), (ins MemOpnd:$addr), !strconcat(instr_asm, "\t$rt, $addr")> ; -def LoadAddr32Reg : LoadAddress<"la", mem, CPURegsOpnd>; +def LoadAddr32Reg : LoadAddress<"la", mem, GPR32Opnd>; class LoadAddressImm<string instr_asm, Operand Od, RegisterOperand RO> : MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32), !strconcat(instr_asm, "\t$rt, $imm32")> ; -def LoadAddr32Imm : LoadAddressImm<"la", shamt,CPURegsOpnd>; +def LoadAddr32Imm : LoadAddressImm<"la", shamt,GPR32Opnd>; @@ -1158,13 +1190,13 @@ def : MipsPat<(i32 imm:$imm), (ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>; // Carry MipsPatterns -def : MipsPat<(subc CPURegs:$lhs, CPURegs:$rhs), - (SUBu CPURegs:$lhs, CPURegs:$rhs)>; +def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs), + (SUBu GPR32:$lhs, GPR32:$rhs)>; let Predicates = [HasStdEnc, NotDSP] in { - def : MipsPat<(addc CPURegs:$lhs, CPURegs:$rhs), - (ADDu CPURegs:$lhs, CPURegs:$rhs)>; - def : MipsPat<(addc CPURegs:$src, immSExt16:$imm), - (ADDiu CPURegs:$src, imm:$imm)>; + def : MipsPat<(addc GPR32:$lhs, GPR32:$rhs), + (ADDu GPR32:$lhs, GPR32:$rhs)>; + def : MipsPat<(addc GPR32:$src, immSExt16:$imm), + (ADDiu GPR32:$src, imm:$imm)>; } // Call @@ -1172,8 +1204,8 @@ def : MipsPat<(MipsJmpLink (i32 tglobaladdr:$dst)), (JAL 
tglobaladdr:$dst)>; def : MipsPat<(MipsJmpLink (i32 texternalsym:$dst)), (JAL texternalsym:$dst)>; -//def : MipsPat<(MipsJmpLink CPURegs:$dst), -// (JALR CPURegs:$dst)>; +//def : MipsPat<(MipsJmpLink GPR32:$dst), +// (JALR GPR32:$dst)>; // Tail call def : MipsPat<(MipsTailCall (iPTR tglobaladdr:$dst)), @@ -1195,38 +1227,38 @@ def : MipsPat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>; def : MipsPat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>; def : MipsPat<(MipsLo texternalsym:$in), (ADDiu ZERO, texternalsym:$in)>; -def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)), - (ADDiu CPURegs:$hi, tglobaladdr:$lo)>; -def : MipsPat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)), - (ADDiu CPURegs:$hi, tblockaddress:$lo)>; -def : MipsPat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)), - (ADDiu CPURegs:$hi, tjumptable:$lo)>; -def : MipsPat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)), - (ADDiu CPURegs:$hi, tconstpool:$lo)>; -def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)), - (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>; +def : MipsPat<(add GPR32:$hi, (MipsLo tglobaladdr:$lo)), + (ADDiu GPR32:$hi, tglobaladdr:$lo)>; +def : MipsPat<(add GPR32:$hi, (MipsLo tblockaddress:$lo)), + (ADDiu GPR32:$hi, tblockaddress:$lo)>; +def : MipsPat<(add GPR32:$hi, (MipsLo tjumptable:$lo)), + (ADDiu GPR32:$hi, tjumptable:$lo)>; +def : MipsPat<(add GPR32:$hi, (MipsLo tconstpool:$lo)), + (ADDiu GPR32:$hi, tconstpool:$lo)>; +def : MipsPat<(add GPR32:$hi, (MipsLo tglobaltlsaddr:$lo)), + (ADDiu GPR32:$hi, tglobaltlsaddr:$lo)>; // gp_rel relocs -def : MipsPat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)), - (ADDiu CPURegs:$gp, tglobaladdr:$in)>; -def : MipsPat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)), - (ADDiu CPURegs:$gp, tconstpool:$in)>; +def : MipsPat<(add GPR32:$gp, (MipsGPRel tglobaladdr:$in)), + (ADDiu GPR32:$gp, tglobaladdr:$in)>; +def : MipsPat<(add GPR32:$gp, (MipsGPRel tconstpool:$in)), + (ADDiu GPR32:$gp, tconstpool:$in)>; // wrapper_pic class WrapperPat<SDNode node, Instruction ADDiuOp, RegisterClass RC>: MipsPat<(MipsWrapper RC:$gp, node:$in), (ADDiuOp RC:$gp, node:$in)>; -def : WrapperPat<tglobaladdr, ADDiu, CPURegs>; -def : WrapperPat<tconstpool, ADDiu, CPURegs>; -def : WrapperPat<texternalsym, ADDiu, CPURegs>; -def : WrapperPat<tblockaddress, ADDiu, CPURegs>; -def : WrapperPat<tjumptable, ADDiu, CPURegs>; -def : WrapperPat<tglobaltlsaddr, ADDiu, CPURegs>; +def : WrapperPat<tglobaladdr, ADDiu, GPR32>; +def : WrapperPat<tconstpool, ADDiu, GPR32>; +def : WrapperPat<texternalsym, ADDiu, GPR32>; +def : WrapperPat<tblockaddress, ADDiu, GPR32>; +def : WrapperPat<tjumptable, ADDiu, GPR32>; +def : WrapperPat<tglobaltlsaddr, ADDiu, GPR32>; // Mips does not have "not", so we expand our way -def : MipsPat<(not CPURegs:$in), - (NOR CPURegsOpnd:$in, ZERO)>; +def : MipsPat<(not GPR32:$in), + (NOR GPR32Opnd:$in, ZERO)>; // extended loads let Predicates = [NotN64, HasStdEnc] in { @@ -1279,7 +1311,7 @@ def : MipsPat<(brcond RC:$cond, bb:$dst), (BNEOp RC:$cond, ZEROReg, bb:$dst)>; } -defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>; +defm : BrcondPats<GPR32, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>; def : MipsPat<(brcond (i32 (setlt i32:$lhs, 1)), bb:$dst), (BLEZ i32:$lhs, bb:$dst)>; @@ -1328,14 +1360,14 @@ multiclass SetgeImmPats<RegisterClass RC, Instruction SLTiOp, (XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>; } -defm : SeteqPats<CPURegs, SLTiu, XOR, SLTu, ZERO>; -defm : SetlePats<CPURegs, SLT, SLTu>; -defm : SetgtPats<CPURegs, SLT, SLTu>; -defm : SetgePats<CPURegs, 
SLT, SLTu>; -defm : SetgeImmPats<CPURegs, SLTi, SLTiu>; +defm : SeteqPats<GPR32, SLTiu, XOR, SLTu, ZERO>; +defm : SetlePats<GPR32, SLT, SLTu>; +defm : SetgtPats<GPR32, SLT, SLTu>; +defm : SetgePats<GPR32, SLT, SLTu>; +defm : SetgeImmPats<GPR32, SLTi, SLTiu>; // bswap pattern -def : MipsPat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>; +def : MipsPat<(bswap GPR32:$rt), (ROTR (WSBH GPR32:$rt), 16)>; // mflo/hi patterns. def : MipsPat<(i32 (ExtractLOHI ACRegs:$ac, imm:$lohi_idx)), diff --git a/lib/Target/Mips/MipsJITInfo.cpp b/lib/Target/Mips/MipsJITInfo.cpp index 1b2a325..d76cb1d 100644 --- a/lib/Target/Mips/MipsJITInfo.cpp +++ b/lib/Target/Mips/MipsJITInfo.cpp @@ -218,9 +218,9 @@ void *MipsJITInfo::emitFunctionStub(const Function *F, void *Fn, Hi++; int Lo = (int)(EmittedAddr & 0xffff); - // lui t9, %hi(EmittedAddr) - // addiu t9, t9, %lo(EmittedAddr) - // jalr t8, t9 + // lui $t9, %hi(EmittedAddr) + // addiu $t9, $t9, %lo(EmittedAddr) + // jalr $t8, $t9 // nop if (IsLittleEndian) { JCE.emitWordLE(0xf << 26 | 25 << 16 | Hi); diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp index 073daba..971176e 100644 --- a/lib/Target/Mips/MipsLongBranch.cpp +++ b/lib/Target/Mips/MipsLongBranch.cpp @@ -420,7 +420,7 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) { MF = &F; initMBBInfo(); - SmallVector<MBBInfo, 16>::iterator I, E = MBBInfos.end(); + SmallVectorImpl<MBBInfo>::iterator I, E = MBBInfos.end(); bool EverMadeChange = false, MadeChange = true; while (MadeChange) { diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp index 59b23f7..a7299d7 100644 --- a/lib/Target/Mips/MipsMachineFunction.cpp +++ b/lib/Target/Mips/MipsMachineFunction.cpp @@ -38,8 +38,8 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() { RC=(const TargetRegisterClass*)&Mips::CPU16RegsRegClass; else RC = ST.isABI_N64() ? - (const TargetRegisterClass*)&Mips::CPU64RegsRegClass : - (const TargetRegisterClass*)&Mips::CPURegsRegClass; + (const TargetRegisterClass*)&Mips::GPR64RegClass : + (const TargetRegisterClass*)&Mips::GPR32RegClass; return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC); } @@ -60,7 +60,7 @@ void MipsFunctionInfo::createEhDataRegsFI() { for (int I = 0; I < 4; ++I) { const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>(); const TargetRegisterClass *RC = ST.isABI_N64() ? 
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + &Mips::GPR64RegClass : &Mips::GPR32RegClass; EhDataRegFI[I] = MF.getFrameInfo()->CreateStackObject(RC->getSize(), RC->getAlignment(), false); diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp index ae25e45..0b5fc33 100644 --- a/lib/Target/Mips/MipsRegisterInfo.cpp +++ b/lib/Target/Mips/MipsRegisterInfo.cpp @@ -54,8 +54,8 @@ MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, switch (RC->getID()) { default: return 0; - case Mips::CPURegsRegClassID: - case Mips::CPU64RegsRegClassID: + case Mips::GPR32RegClassID: + case Mips::GPR64RegClassID: case Mips::DSPRegsRegClassID: { const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); return 28 - TFI->hasFP(MF); @@ -106,22 +106,22 @@ const uint32_t *MipsRegisterInfo::getMips16RetHelperMask() { BitVector MipsRegisterInfo:: getReservedRegs(const MachineFunction &MF) const { - static const uint16_t ReservedCPURegs[] = { + static const uint16_t ReservedGPR32[] = { Mips::ZERO, Mips::K0, Mips::K1, Mips::SP }; - static const uint16_t ReservedCPU64Regs[] = { + static const uint16_t ReservedGPR64[] = { Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64 }; BitVector Reserved(getNumRegs()); typedef TargetRegisterClass::const_iterator RegIter; - for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I) - Reserved.set(ReservedCPURegs[I]); + for (unsigned I = 0; I < array_lengthof(ReservedGPR32); ++I) + Reserved.set(ReservedGPR32[I]); - for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I) - Reserved.set(ReservedCPU64Regs[I]); + for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I) + Reserved.set(ReservedGPR64[I]); if (Subtarget.hasMips64()) { // Reserve all registers in AFGR64. @@ -159,6 +159,8 @@ getReservedRegs(const MachineFunction &MF) const { if (Subtarget.inMips16Mode()) { Reserved.set(Mips::RA); Reserved.set(Mips::RA_64); + Reserved.set(Mips::T0); + Reserved.set(Mips::T1); } // Reserve GP if small section is used. 
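A note on the MipsRegisterInfo.td diff that follows: it collapses eighty hand-written FPU register definitions into three TableGen foreach loops built from the !shl, !add and !cast bang operators. The C++ sketch below is illustrative only, not part of the patch; it simply evaluates the same arithmetic to show what each loop iteration expands to, so the printed tuples can be compared against the deleted explicit definitions (for example, D1 pairs F2 and F3, and F5 and D5_64 both get DWARF number 37).

#include <cstdio>

int main() {
  // foreach I = 0-31 in def F#I : FPR<I, "f"#I>, DwarfRegNum<[!add(I, 32)]>;
  for (int I = 0; I < 32; ++I)
    printf("F%d : FPR<%d, \"f%d\">, DwarfRegNum<[%d]>\n", I, I, I, I + 32);
  // foreach I = 0-15 in def D#I : AFPR<!shl(I, 1), ...>; pairs F(2I) and F(2I+1).
  for (int I = 0; I < 16; ++I)
    printf("D%d : AFPR<%d, \"f%d\", [F%d, F%d]>\n",
           I, I << 1, I << 1, I << 1, (I << 1) + 1);
  // foreach I = 0-31 in def D#I#_64 : AFPR64<I, "f"#I, [F#I]>, DwarfRegNum<[!add(I, 32)]>;
  for (int I = 0; I < 32; ++I)
    printf("D%d_64 : AFPR64<%d, \"f%d\", [F%d]>, DwarfRegNum<[%d]>\n",
           I, I, I, I, I + 32);
  return 0;
}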
diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td index ad6912c..c72c30d 100644 --- a/lib/Target/Mips/MipsRegisterInfo.td +++ b/lib/Target/Mips/MipsRegisterInfo.td @@ -147,91 +147,20 @@ let Namespace = "Mips" in { def RA_64 : Mips64GPRReg< 31, "ra", [RA]>, DwarfRegNum<[31]>; /// Mips Single precision FPU Registers - def F0 : FPR< 0, "f0">, DwarfRegNum<[32]>; - def F1 : FPR< 1, "f1">, DwarfRegNum<[33]>; - def F2 : FPR< 2, "f2">, DwarfRegNum<[34]>; - def F3 : FPR< 3, "f3">, DwarfRegNum<[35]>; - def F4 : FPR< 4, "f4">, DwarfRegNum<[36]>; - def F5 : FPR< 5, "f5">, DwarfRegNum<[37]>; - def F6 : FPR< 6, "f6">, DwarfRegNum<[38]>; - def F7 : FPR< 7, "f7">, DwarfRegNum<[39]>; - def F8 : FPR< 8, "f8">, DwarfRegNum<[40]>; - def F9 : FPR< 9, "f9">, DwarfRegNum<[41]>; - def F10 : FPR<10, "f10">, DwarfRegNum<[42]>; - def F11 : FPR<11, "f11">, DwarfRegNum<[43]>; - def F12 : FPR<12, "f12">, DwarfRegNum<[44]>; - def F13 : FPR<13, "f13">, DwarfRegNum<[45]>; - def F14 : FPR<14, "f14">, DwarfRegNum<[46]>; - def F15 : FPR<15, "f15">, DwarfRegNum<[47]>; - def F16 : FPR<16, "f16">, DwarfRegNum<[48]>; - def F17 : FPR<17, "f17">, DwarfRegNum<[49]>; - def F18 : FPR<18, "f18">, DwarfRegNum<[50]>; - def F19 : FPR<19, "f19">, DwarfRegNum<[51]>; - def F20 : FPR<20, "f20">, DwarfRegNum<[52]>; - def F21 : FPR<21, "f21">, DwarfRegNum<[53]>; - def F22 : FPR<22, "f22">, DwarfRegNum<[54]>; - def F23 : FPR<23, "f23">, DwarfRegNum<[55]>; - def F24 : FPR<24, "f24">, DwarfRegNum<[56]>; - def F25 : FPR<25, "f25">, DwarfRegNum<[57]>; - def F26 : FPR<26, "f26">, DwarfRegNum<[58]>; - def F27 : FPR<27, "f27">, DwarfRegNum<[59]>; - def F28 : FPR<28, "f28">, DwarfRegNum<[60]>; - def F29 : FPR<29, "f29">, DwarfRegNum<[61]>; - def F30 : FPR<30, "f30">, DwarfRegNum<[62]>; - def F31 : FPR<31, "f31">, DwarfRegNum<[63]>; + foreach I = 0-31 in + def F#I : FPR<I, "f"#I>, DwarfRegNum<[!add(I, 32)]>; /// Mips Double precision FPU Registers (aliased /// with the single precision to hold 64 bit values) - def D0 : AFPR< 0, "f0", [F0, F1]>; - def D1 : AFPR< 2, "f2", [F2, F3]>; - def D2 : AFPR< 4, "f4", [F4, F5]>; - def D3 : AFPR< 6, "f6", [F6, F7]>; - def D4 : AFPR< 8, "f8", [F8, F9]>; - def D5 : AFPR<10, "f10", [F10, F11]>; - def D6 : AFPR<12, "f12", [F12, F13]>; - def D7 : AFPR<14, "f14", [F14, F15]>; - def D8 : AFPR<16, "f16", [F16, F17]>; - def D9 : AFPR<18, "f18", [F18, F19]>; - def D10 : AFPR<20, "f20", [F20, F21]>; - def D11 : AFPR<22, "f22", [F22, F23]>; - def D12 : AFPR<24, "f24", [F24, F25]>; - def D13 : AFPR<26, "f26", [F26, F27]>; - def D14 : AFPR<28, "f28", [F28, F29]>; - def D15 : AFPR<30, "f30", [F30, F31]>; + foreach I = 0-15 in + def D#I : AFPR<!shl(I, 1), "f"#!shl(I, 1), + [!cast<FPR>("F"#!shl(I, 1)), + !cast<FPR>("F"#!add(!shl(I, 1), 1))]>; /// Mips Double precision FPU Registers in MFP64 mode. 
- def D0_64 : AFPR64<0, "f0", [F0]>, DwarfRegNum<[32]>; - def D1_64 : AFPR64<1, "f1", [F1]>, DwarfRegNum<[33]>; - def D2_64 : AFPR64<2, "f2", [F2]>, DwarfRegNum<[34]>; - def D3_64 : AFPR64<3, "f3", [F3]>, DwarfRegNum<[35]>; - def D4_64 : AFPR64<4, "f4", [F4]>, DwarfRegNum<[36]>; - def D5_64 : AFPR64<5, "f5", [F5]>, DwarfRegNum<[37]>; - def D6_64 : AFPR64<6, "f6", [F6]>, DwarfRegNum<[38]>; - def D7_64 : AFPR64<7, "f7", [F7]>, DwarfRegNum<[39]>; - def D8_64 : AFPR64<8, "f8", [F8]>, DwarfRegNum<[40]>; - def D9_64 : AFPR64<9, "f9", [F9]>, DwarfRegNum<[41]>; - def D10_64 : AFPR64<10, "f10", [F10]>, DwarfRegNum<[42]>; - def D11_64 : AFPR64<11, "f11", [F11]>, DwarfRegNum<[43]>; - def D12_64 : AFPR64<12, "f12", [F12]>, DwarfRegNum<[44]>; - def D13_64 : AFPR64<13, "f13", [F13]>, DwarfRegNum<[45]>; - def D14_64 : AFPR64<14, "f14", [F14]>, DwarfRegNum<[46]>; - def D15_64 : AFPR64<15, "f15", [F15]>, DwarfRegNum<[47]>; - def D16_64 : AFPR64<16, "f16", [F16]>, DwarfRegNum<[48]>; - def D17_64 : AFPR64<17, "f17", [F17]>, DwarfRegNum<[49]>; - def D18_64 : AFPR64<18, "f18", [F18]>, DwarfRegNum<[50]>; - def D19_64 : AFPR64<19, "f19", [F19]>, DwarfRegNum<[51]>; - def D20_64 : AFPR64<20, "f20", [F20]>, DwarfRegNum<[52]>; - def D21_64 : AFPR64<21, "f21", [F21]>, DwarfRegNum<[53]>; - def D22_64 : AFPR64<22, "f22", [F22]>, DwarfRegNum<[54]>; - def D23_64 : AFPR64<23, "f23", [F23]>, DwarfRegNum<[55]>; - def D24_64 : AFPR64<24, "f24", [F24]>, DwarfRegNum<[56]>; - def D25_64 : AFPR64<25, "f25", [F25]>, DwarfRegNum<[57]>; - def D26_64 : AFPR64<26, "f26", [F26]>, DwarfRegNum<[58]>; - def D27_64 : AFPR64<27, "f27", [F27]>, DwarfRegNum<[59]>; - def D28_64 : AFPR64<28, "f28", [F28]>, DwarfRegNum<[60]>; - def D29_64 : AFPR64<29, "f29", [F29]>, DwarfRegNum<[61]>; - def D30_64 : AFPR64<30, "f30", [F30]>, DwarfRegNum<[62]>; - def D31_64 : AFPR64<31, "f31", [F31]>, DwarfRegNum<[63]>; + foreach I = 0-31 in + def D#I#_64 : AFPR64<I, "f"#I, [!cast<FPR>("F"#I)]>, + DwarfRegNum<[!add(I, 32)]>; // Hi/Lo registers def HI : Register<"ac0">, DwarfRegNum<[64]>; @@ -248,11 +177,13 @@ let Namespace = "Mips" in { def LO64 : RegisterWithSubRegs<"lo", [LO]>; } - // Status flags register - def FCR31 : Register<"31">; + // FP control registers. + foreach I = 0-31 in + def FCR#I : MipsReg<#I, ""#I>; - // fcc0 register - def FCC0 : MipsReg<0, "fcc0">; + // FP condition code registers. 
+ foreach I = 0-7 in + def FCC#I : MipsReg<#I, "fcc"#I>; // PC register def PC : Register<"pc">; @@ -292,7 +223,7 @@ let Namespace = "Mips" in { // Register Classes //===----------------------------------------------------------------------===// -class CPURegsClass<list<ValueType> regTypes> : +class GPR32Class<list<ValueType> regTypes> : RegisterClass<"Mips", regTypes, 32, (add // Reserved ZERO, AT, @@ -307,10 +238,10 @@ class CPURegsClass<list<ValueType> regTypes> : // Reserved K0, K1, GP, SP, FP, RA)>; -def CPURegs : CPURegsClass<[i32]>; -def DSPRegs : CPURegsClass<[v4i8, v2i16]>; +def GPR32 : GPR32Class<[i32]>; +def DSPRegs : GPR32Class<[v4i8, v2i16]>; -def CPU64Regs : RegisterClass<"Mips", [i64], 64, (add +def GPR64 : RegisterClass<"Mips", [i64], 64, (add // Reserved ZERO_64, AT_64, // Return Values and Arguments @@ -330,6 +261,13 @@ def CPU16Regs : RegisterClass<"Mips", [i32], 32, (add // Callee save S0, S1)>; +def CPU16RegsPlusSP : RegisterClass<"Mips", [i32], 32, (add + // Return Values and Arguments + V0, V1, A0, A1, A2, A3, + // Callee save + S0, S1, + SP)>; + def CPURAReg : RegisterClass<"Mips", [i32], 32, (add RA)>, Unallocatable; def CPUSPReg : RegisterClass<"Mips", [i32], 32, (add SP)>, Unallocatable; @@ -357,8 +295,13 @@ def AFGR64 : RegisterClass<"Mips", [f64], 64, (add def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)>; -// Condition Register for floating point operations -def CCR : RegisterClass<"Mips", [i32], 32, (add FCR31,FCC0)>, Unallocatable; +// FP control registers. +def CCR : RegisterClass<"Mips", [i32], 32, (sequence "FCR%u", 0, 31)>, + Unallocatable; + +// FP condition code registers. +def FCC : RegisterClass<"Mips", [i32], 32, (sequence "FCC%u", 0, 7)>, + Unallocatable; // Hi/Lo Registers def LORegs : RegisterClass<"Mips", [i32], 32, (add LO)>; @@ -388,47 +331,96 @@ def ACRegsDSP : RegisterClass<"Mips", [untyped], 64, (sequence "AC%u", 0, 3)> { def DSPCC : RegisterClass<"Mips", [v4i8, v2i16], 32, (add DSPCCond)>; // Register Operands. 
-def CPURegsAsmOperand : AsmOperandClass { - let Name = "CPURegsAsm"; - let ParserMethod = "parseCPURegs"; + +class MipsAsmRegOperand : AsmOperandClass { + let RenderMethod = "addRegAsmOperands"; +} +def GPR32AsmOperand : MipsAsmRegOperand { + let Name = "GPR32Asm"; + let ParserMethod = "parseGPR32"; } -def CPU64RegsAsmOperand : AsmOperandClass { - let Name = "CPU64RegsAsm"; - let ParserMethod = "parseCPU64Regs"; +def GPR64AsmOperand : MipsAsmRegOperand { + let Name = "GPR64Asm"; + let ParserMethod = "parseGPR64"; } -def CCRAsmOperand : AsmOperandClass { +def ACRegsDSPAsmOperand : MipsAsmRegOperand { + let Name = "ACRegsDSPAsm"; + let ParserMethod = "parseACRegsDSP"; +} + +def CCRAsmOperand : MipsAsmRegOperand { let Name = "CCRAsm"; let ParserMethod = "parseCCRRegs"; } -def CPURegsOpnd : RegisterOperand<CPURegs, "printCPURegs"> { - let ParserMatchClass = CPURegsAsmOperand; +def AFGR64AsmOperand : MipsAsmRegOperand { + let Name = "AFGR64Asm"; + let ParserMethod = "parseAFGR64Regs"; +} + +def FGR64AsmOperand : MipsAsmRegOperand { + let Name = "FGR64Asm"; + let ParserMethod = "parseFGR64Regs"; } -def CPU64RegsOpnd : RegisterOperand<CPU64Regs, "printCPURegs"> { - let ParserMatchClass = CPU64RegsAsmOperand; +def FGR32AsmOperand : MipsAsmRegOperand { + let Name = "FGR32Asm"; + let ParserMethod = "parseFGR32Regs"; } -def CCROpnd : RegisterOperand<CCR, "printCPURegs"> { +def FCCRegsAsmOperand : MipsAsmRegOperand { + let Name = "FCCRegsAsm"; + let ParserMethod = "parseFCCRegs"; +} + +def GPR32Opnd : RegisterOperand<GPR32> { + let ParserMatchClass = GPR32AsmOperand; +} + +def GPR64Opnd : RegisterOperand<GPR64> { + let ParserMatchClass = GPR64AsmOperand; +} + +def CCROpnd : RegisterOperand<CCR> { let ParserMatchClass = CCRAsmOperand; } -def HWRegsAsmOperand : AsmOperandClass { +def HWRegsAsmOperand : MipsAsmRegOperand { let Name = "HWRegsAsm"; let ParserMethod = "parseHWRegs"; } -def HW64RegsAsmOperand : AsmOperandClass { +def HW64RegsAsmOperand : MipsAsmRegOperand { let Name = "HW64RegsAsm"; let ParserMethod = "parseHW64Regs"; } -def HWRegsOpnd : RegisterOperand<HWRegs, "printCPURegs"> { +def HWRegsOpnd : RegisterOperand<HWRegs> { let ParserMatchClass = HWRegsAsmOperand; } -def HW64RegsOpnd : RegisterOperand<HWRegs64, "printCPURegs"> { +def HW64RegsOpnd : RegisterOperand<HWRegs64> { let ParserMatchClass = HW64RegsAsmOperand; } + +def AFGR64RegsOpnd : RegisterOperand<AFGR64> { + let ParserMatchClass = AFGR64AsmOperand; +} + +def FGR64RegsOpnd : RegisterOperand<FGR64> { + let ParserMatchClass = FGR64AsmOperand; +} + +def FGR32RegsOpnd : RegisterOperand<FGR32> { + let ParserMatchClass = FGR32AsmOperand; +} + +def FCCRegsOpnd : RegisterOperand<FCC> { + let ParserMatchClass = FCCRegsAsmOperand; +} + +def ACRegsDSPOpnd : RegisterOperand<ACRegsDSP> { + let ParserMatchClass = ACRegsDSPAsmOperand; +} diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp index 91ffb94..d9e0fa4 100644 --- a/lib/Target/Mips/MipsSEFrameLowering.cpp +++ b/lib/Target/Mips/MipsSEFrameLowering.cpp @@ -285,7 +285,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const { if (StackSize == 0 && !MFI->adjustsStack()) return; MachineModuleInfo &MMI = MF.getMMI(); - const MCRegisterInfo &MRI = MMI.getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); MachineLocation DstML, SrcML; // Adjust stack. @@ -321,9 +321,9 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const { // one for each of the paired single precision registers. 
if (Mips::AFGR64RegClass.contains(Reg)) { unsigned Reg0 = - MRI.getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_fpeven), true); + MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_fpeven), true); unsigned Reg1 = - MRI.getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_fpodd), true); + MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_fpodd), true); if (!STI.isLittle()) std::swap(Reg0, Reg1); @@ -333,16 +333,16 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const { MMI.addFrameInst( MCCFIInstruction::createOffset(CSLabel, Reg1, Offset + 4)); } else { - // Reg is either in CPURegs or FGR32. + // Reg is either in GPR32 or FGR32. MMI.addFrameInst(MCCFIInstruction::createOffset( - CSLabel, MRI.getDwarfRegNum(Reg, 1), Offset)); + CSLabel, MRI->getDwarfRegNum(Reg, 1), Offset)); } } } if (MipsFI->callsEhReturn()) { const TargetRegisterClass *RC = STI.isABI_N64() ? - &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + &Mips::GPR64RegClass : &Mips::GPR32RegClass; // Insert instructions that spill eh data registers. for (int I = 0; I < 4; ++I) { @@ -358,7 +358,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const { TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel2); for (int I = 0; I < 4; ++I) { int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I)); - unsigned Reg = MRI.getDwarfRegNum(ehDataReg(I), true); + unsigned Reg = MRI->getDwarfRegNum(ehDataReg(I), true); MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel2, Reg, Offset)); } } @@ -373,7 +373,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const { BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel); MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister( - SetFPLabel, MRI.getDwarfRegNum(FP, true))); + SetFPLabel, MRI->getDwarfRegNum(FP, true))); } } @@ -408,7 +408,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF, if (MipsFI->callsEhReturn()) { const TargetRegisterClass *RC = STI.isABI_N64() ? - &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + &Mips::GPR64RegClass : &Mips::GPR32RegClass; // Find first instruction that restores a callee-saved register. MachineBasicBlock::iterator I = MBBI; @@ -516,7 +516,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // The spill slot should be half the size of the accumulator. If target is // mips64, it should be 64-bit, otherwise it should be 32-bit. const TargetRegisterClass *RC = STI.hasMips64() ? - &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + &Mips::GPR64RegClass : &Mips::GPR32RegClass; int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(), RC->getAlignment(), false); RS->addScavengingFrameIndex(FI); @@ -530,7 +530,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF, return; const TargetRegisterClass *RC = STI.isABI_N64() ? 
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + &Mips::GPR64RegClass : &Mips::GPR32RegClass; int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(), RC->getAlignment(), false); RS->addScavengingFrameIndex(FI); diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index 7684bec..3b6480a 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -119,9 +119,9 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) { const TargetRegisterClass *RC; if (Subtarget.isABI_N64()) - RC = (const TargetRegisterClass*)&Mips::CPU64RegsRegClass; + RC = (const TargetRegisterClass*)&Mips::GPR64RegClass; else - RC = (const TargetRegisterClass*)&Mips::CPURegsRegClass; + RC = (const TargetRegisterClass*)&Mips::GPR32RegClass; V0 = RegInfo.createVirtualRegister(RC); V1 = RegInfo.createVirtualRegister(RC); @@ -402,7 +402,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) { } case MipsISD::ThreadPointer: { - EVT PtrVT = TLI->getPointerTy(); + EVT PtrVT = getTargetLowering()->getPointerTy(); unsigned RdhwrOpc, SrcReg, DestReg; if (PtrVT == MVT::i32) { diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index f640ecc..a0aacb5 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -31,10 +31,10 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM) clearRegisterClasses(); - addRegisterClass(MVT::i32, &Mips::CPURegsRegClass); + addRegisterClass(MVT::i32, &Mips::GPR32RegClass); if (HasMips64) - addRegisterClass(MVT::i64, &Mips::CPU64RegsRegClass); + addRegisterClass(MVT::i64, &Mips::GPR64RegClass); if (Subtarget->hasDSP()) { MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8}; @@ -53,6 +53,20 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::BITCAST, VecTys[i], Legal); } + // Expand all truncating stores and extending loads. + unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE; + + for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) { + for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1) + setTruncStoreAction((MVT::SimpleValueType)VT0, + (MVT::SimpleValueType)VT1, Expand); + + setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand); + setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand); + setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand); + } + setTargetDAGCombine(ISD::SHL); setTargetDAGCombine(ISD::SRA); setTargetDAGCombine(ISD::SRL); @@ -99,6 +113,7 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM) setTargetDAGCombine(ISD::ADDE); setTargetDAGCombine(ISD::SUBE); + setTargetDAGCombine(ISD::MUL); computeRegisterProperties(); } @@ -320,6 +335,57 @@ static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } +static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT, + EVT ShiftTy, SelectionDAG &DAG) { + // Clear the upper (64 - VT.sizeInBits) bits. + C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits()); + + // Return 0. + if (C == 0) + return DAG.getConstant(0, VT); + + // Return x. + if (C == 1) + return X; + + // If c is power of 2, return (shl x, log2(c)). + if (isPowerOf2_64(C)) + return DAG.getNode(ISD::SHL, DL, VT, X, + DAG.getConstant(Log2_64(C), ShiftTy)); + + unsigned Log2Ceil = Log2_64_Ceil(C); + uint64_t Floor = 1LL << Log2_64(C); + uint64_t Ceil = Log2Ceil == 64 ? 
0LL : 1LL << Log2Ceil; + + // If |c - floor_c| <= |c - ceil_c|, + // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))), + // return (add constMult(x, floor_c), constMult(x, c - floor_c)). + if (C - Floor <= Ceil - C) { + SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG); + SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG); + return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1); + } + + // If |c - floor_c| > |c - ceil_c|, + // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)). + SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG); + SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG); + return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1); +} + +static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG, + const TargetLowering::DAGCombinerInfo &DCI, + const MipsSETargetLowering *TL) { + EVT VT = N->getValueType(0); + + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) + if (!VT.isVector()) + return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N), + VT, TL->getScalarShiftAmountTy(VT), DAG); + + return SDValue(N, 0); +} + static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty, SelectionDAG &DAG, const MipsSubtarget *Subtarget) { @@ -432,6 +498,8 @@ MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { return performADDECombine(N, DAG, DCI, Subtarget); case ISD::SUBE: return performSUBECombine(N, DAG, DCI, Subtarget); + case ISD::MUL: + return performMULCombine(N, DAG, DCI, this); case ISD::SHL: return performSHLCombine(N, DAG, DCI, Subtarget); case ISD::SRA: @@ -701,7 +769,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); - const TargetRegisterClass *RC = &Mips::CPURegsRegClass; + const TargetRegisterClass *RC = &Mips::GPR32RegClass; DebugLoc DL = MI->getDebugLoc(); const BasicBlock *LLVM_BB = BB->getBasicBlock(); MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB)); diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp index e2a33dd..9521043 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -94,9 +94,9 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, bool KillSrc) const { unsigned Opc = 0, ZeroReg = 0; - if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg. - if (Mips::CPURegsRegClass.contains(SrcReg)) - Opc = Mips::OR, ZeroReg = Mips::ZERO; + if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg. + if (Mips::GPR32RegClass.contains(SrcReg)) + Opc = Mips::ADDu, ZeroReg = Mips::ZERO; else if (Mips::CCRRegClass.contains(SrcReg)) Opc = Mips::CFC1; else if (Mips::FGR32RegClass.contains(SrcReg)) @@ -115,7 +115,7 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, return; } } - else if (Mips::CPURegsRegClass.contains(SrcReg)) { // Copy from CPU Reg. + else if (Mips::GPR32RegClass.contains(SrcReg)) { // Copy from CPU Reg. if (Mips::CCRRegClass.contains(DestReg)) Opc = Mips::CTC1; else if (Mips::FGR32RegClass.contains(DestReg)) @@ -141,11 +141,9 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, Opc = Mips::FMOV_D32; else if (Mips::FGR64RegClass.contains(DestReg, SrcReg)) Opc = Mips::FMOV_D64; - else if (Mips::CCRRegClass.contains(DestReg, SrcReg)) - Opc = Mips::MOVCCRToCCR; - else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg. 
- if (Mips::CPU64RegsRegClass.contains(SrcReg)) - Opc = Mips::OR64, ZeroReg = Mips::ZERO_64; + else if (Mips::GPR64RegClass.contains(DestReg)) { // Copy to CPU64 Reg. + if (Mips::GPR64RegClass.contains(SrcReg)) + Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64; else if (Mips::HIRegs64RegClass.contains(SrcReg)) Opc = Mips::MFHI64, SrcReg = 0; else if (Mips::LORegs64RegClass.contains(SrcReg)) @@ -153,7 +151,7 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, else if (Mips::FGR64RegClass.contains(SrcReg)) Opc = Mips::DMFC1; } - else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg. + else if (Mips::GPR64RegClass.contains(SrcReg)) { // Copy from CPU64 Reg. if (Mips::HIRegs64RegClass.contains(DestReg)) Opc = Mips::MTHI64, DestReg = 0; else if (Mips::LORegs64RegClass.contains(DestReg)) @@ -187,9 +185,9 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned Opc = 0; - if (Mips::CPURegsRegClass.hasSubClassEq(RC)) + if (Mips::GPR32RegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::SW_P8 : Mips::SW; - else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC)) + else if (Mips::GPR64RegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::SD_P8 : Mips::SD; else if (Mips::ACRegsRegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::STORE_AC64_P8 : Mips::STORE_AC64; @@ -220,9 +218,9 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad); unsigned Opc = 0; - if (Mips::CPURegsRegClass.hasSubClassEq(RC)) + if (Mips::GPR32RegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::LW_P8 : Mips::LW; - else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC)) + else if (Mips::GPR64RegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::LD_P8 : Mips::LD; else if (Mips::ACRegsRegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::LOAD_AC64_P8 : Mips::LOAD_AC64; @@ -342,7 +340,7 @@ MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB, unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi; unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO; const TargetRegisterClass *RC = STI.isABI_N64() ? - &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + &Mips::GPR64RegClass : &Mips::GPR32RegClass; bool LastInstrIsADDiu = NewImm; const MipsAnalyzeImmediate::InstSeq &Seq = @@ -513,7 +511,6 @@ void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB, // indirect jump to TargetReg const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>(); unsigned ADDU = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu; - unsigned OR = STI.isABI_N64() ? Mips::OR64 : Mips::OR; unsigned JR = STI.isABI_N64() ? Mips::JR64 : Mips::JR; unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP; unsigned RA = STI.isABI_N64() ? 
Mips::RA_64 : Mips::RA; @@ -522,13 +519,13 @@ void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB, unsigned OffsetReg = I->getOperand(0).getReg(); unsigned TargetReg = I->getOperand(1).getReg(); - // or $ra, $v0, $zero + // addu $ra, $v0, $zero // addu $sp, $sp, $v1 // jr $ra if (TM.getRelocationModel() == Reloc::PIC_) - BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), T9) + BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), T9) .addReg(TargetReg).addReg(ZERO); - BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), RA) + BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), RA) .addReg(TargetReg).addReg(ZERO); BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), SP) .addReg(SP).addReg(OffsetReg); diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp index 9763f85..286a2e2 100644 --- a/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -56,10 +56,10 @@ requiresFrameIndexScavenging(const MachineFunction &MF) const { const TargetRegisterClass * MipsSERegisterInfo::intRegClass(unsigned Size) const { if (Size == 4) - return &Mips::CPURegsRegClass; + return &Mips::GPR32RegClass; assert(Size == 8); - return &Mips::CPU64RegsRegClass; + return &Mips::GPR64RegClass; } void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, diff --git a/lib/Target/Mips/MipsSchedule.td b/lib/Target/Mips/MipsSchedule.td index 1add02f..2779064 100644 --- a/lib/Target/Mips/MipsSchedule.td +++ b/lib/Target/Mips/MipsSchedule.td @@ -17,13 +17,18 @@ def IMULDIV : FuncUnit; // Instruction Itinerary classes used for Mips //===----------------------------------------------------------------------===// def IIAlu : InstrItinClass; +def IIArith : InstrItinClass; +def IILogic : InstrItinClass; def IILoad : InstrItinClass; def IIStore : InstrItinClass; def IIXfer : InstrItinClass; def IIBranch : InstrItinClass; def IIHiLo : InstrItinClass; def IIImul : InstrItinClass; +def IIImult : InstrItinClass; def IIIdiv : InstrItinClass; +def IIseb : InstrItinClass; +def IIslt : InstrItinClass; def IIFcvt : InstrItinClass; def IIFmove : InstrItinClass; def IIFcmp : InstrItinClass; @@ -35,6 +40,9 @@ def IIFdivDouble : InstrItinClass; def IIFsqrtSingle : InstrItinClass; def IIFsqrtDouble : InstrItinClass; def IIFrecipFsqrtStep : InstrItinClass; +def IIFLoad : InstrItinClass; +def IIFStore : InstrItinClass; +def IIFmoveC1 : InstrItinClass; def IIPseudo : InstrItinClass; //===----------------------------------------------------------------------===// @@ -42,6 +50,8 @@ def IIPseudo : InstrItinClass; //===----------------------------------------------------------------------===// def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [ InstrItinData<IIAlu , [InstrStage<1, [ALU]>]>, + InstrItinData<IIArith , [InstrStage<1, [ALU]>]>, + InstrItinData<IILogic , [InstrStage<1, [ALU]>]>, InstrItinData<IILoad , [InstrStage<3, [ALU]>]>, InstrItinData<IIStore , [InstrStage<1, [ALU]>]>, InstrItinData<IIXfer , [InstrStage<2, [ALU]>]>, @@ -59,5 +69,8 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [ InstrItinData<IIFdivDouble , [InstrStage<36, [ALU]>]>, InstrItinData<IIFsqrtSingle , [InstrStage<54, [ALU]>]>, InstrItinData<IIFsqrtDouble , [InstrStage<12, [ALU]>]>, - InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]> + InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]>, + InstrItinData<IIFLoad , [InstrStage<3, [ALU]>]>, + InstrItinData<IIFStore , [InstrStage<1, 
[ALU]>]>, + InstrItinData<IIFmoveC1 , [InstrStage<2, [ALU]>]> ]>; diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp index 259e68d..541e2ca 100644 --- a/lib/Target/Mips/MipsSubtarget.cpp +++ b/lib/Target/Mips/MipsSubtarget.cpp @@ -104,7 +104,7 @@ MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel, Mode = TargetSubtargetInfo::ANTIDEP_NONE; CriticalPathRCs.clear(); CriticalPathRCs.push_back(hasMips64() ? - &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass); + &Mips::GPR64RegClass : &Mips::GPR32RegClass); return OptLevel >= CodeGenOpt::Aggressive; } diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h index ef7568a..bfb13bb 100644 --- a/lib/Target/Mips/MipsSubtarget.h +++ b/lib/Target/Mips/MipsSubtarget.h @@ -194,6 +194,7 @@ public: bool hasBitCount() const { return HasBitCount; } bool hasFPIdx() const { return HasFPIdx; } + const InstrItineraryData &getInstrItineraryData() const { return InstrItins; } bool allowMixed16_32() const { return inMips16ModeDefault() | AllowMixed16_32;} diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index 9af2f1b..ced6a09 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -70,8 +70,8 @@ MipsTargetMachine(const Target &T, StringRef TT, "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32-S64")), InstrInfo(MipsInstrInfo::create(*this)), FrameLowering(MipsFrameLowering::create(*this, Subtarget)), - TLInfo(MipsTargetLowering::create(*this)), - TSInfo(*this), JITInfo() { + TLInfo(MipsTargetLowering::create(*this)), TSInfo(*this), + InstrItins(Subtarget.getInstrItineraryData()), JITInfo() { initAsmInfo(); } diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h index ee55708..5a9a11d 100644 --- a/lib/Target/Mips/MipsTargetMachine.h +++ b/lib/Target/Mips/MipsTargetMachine.h @@ -44,6 +44,7 @@ class MipsTargetMachine : public LLVMTargetMachine { OwningPtr<const MipsFrameLowering> FrameLoweringSE; OwningPtr<const MipsTargetLowering> TLInfoSE; MipsSelectionDAGInfo TSInfo; + const InstrItineraryData &InstrItins; MipsJITInfo JITInfo; public: @@ -65,6 +66,11 @@ public: { return &Subtarget; } virtual const DataLayout *getDataLayout() const { return &DL;} + + virtual const InstrItineraryData *getInstrItineraryData() const { + return Subtarget.inMips16Mode() ? 0 : &InstrItins; + } + virtual MipsJITInfo *getJITInfo() { return &JITInfo; } diff --git a/lib/Target/NVPTX/CMakeLists.txt b/lib/Target/NVPTX/CMakeLists.txt index a8293da..4f1324c 100644 --- a/lib/Target/NVPTX/CMakeLists.txt +++ b/lib/Target/NVPTX/CMakeLists.txt @@ -25,11 +25,12 @@ set(NVPTXCodeGen_sources NVVMReflect.cpp NVPTXGenericToNVVM.cpp NVPTXPrologEpilogPass.cpp + NVPTXMCExpr.cpp ) add_llvm_target(NVPTXCodeGen ${NVPTXCodeGen_sources}) -add_dependencies(LLVMNVPTXCodeGen intrinsics_gen) +add_dependencies(LLVMNVPTXCodeGen NVPTXCommonTableGen intrinsics_gen) add_subdirectory(TargetInfo) add_subdirectory(InstPrinter) diff --git a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp index 10051c7..c7b8aa4 100644 --- a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp +++ b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp @@ -1 +1,279 @@ -// Placeholder +//===-- NVPTXInstPrinter.cpp - PTX assembly instruction printing ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// Print MCInst instructions to .ptx format. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "asm-printer" +#include "InstPrinter/NVPTXInstPrinter.h" +#include "NVPTX.h" +#include "MCTargetDesc/NVPTXBaseInfo.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FormattedStream.h" +#include <cctype> +using namespace llvm; + +#include "NVPTXGenAsmWriter.inc" + + +NVPTXInstPrinter::NVPTXInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI) + : MCInstPrinter(MAI, MII, MRI) { + setAvailableFeatures(STI.getFeatureBits()); +} + +void NVPTXInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const { + // Decode the virtual register + // Must be kept in sync with NVPTXAsmPrinter::encodeVirtualRegister + unsigned RCId = (RegNo >> 28); + switch (RCId) { + default: report_fatal_error("Bad virtual register encoding"); + case 0: + // This is actually a physical register, so defer to the autogenerated + // register printer + OS << getRegisterName(RegNo); + return; + case 1: + OS << "%p"; + break; + case 2: + OS << "%rs"; + break; + case 3: + OS << "%r"; + break; + case 4: + OS << "%rl"; + break; + case 5: + OS << "%f"; + break; + case 6: + OS << "%fl"; + break; + } + + unsigned VReg = RegNo & 0x0FFFFFFF; + OS << VReg; +} + +void NVPTXInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, + StringRef Annot) { + printInstruction(MI, OS); + + // Next always print the annotation. + printAnnotation(OS, Annot); +} + +void NVPTXInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + const MCOperand &Op = MI->getOperand(OpNo); + if (Op.isReg()) { + unsigned Reg = Op.getReg(); + printRegName(O, Reg); + } else if (Op.isImm()) { + O << markup("<imm:") << formatImm(Op.getImm()) << markup(">"); + } else { + assert(Op.isExpr() && "Unknown operand kind in printOperand"); + O << *Op.getExpr(); + } +} + +void NVPTXInstPrinter::printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O, + const char *Modifier) { + const MCOperand &MO = MI->getOperand(OpNum); + int64_t Imm = MO.getImm(); + + if (strcmp(Modifier, "ftz") == 0) { + // FTZ flag + if (Imm & NVPTX::PTXCvtMode::FTZ_FLAG) + O << ".ftz"; + } else if (strcmp(Modifier, "sat") == 0) { + // SAT flag + if (Imm & NVPTX::PTXCvtMode::SAT_FLAG) + O << ".sat"; + } else if (strcmp(Modifier, "base") == 0) { + // Default operand + switch (Imm & NVPTX::PTXCvtMode::BASE_MASK) { + default: + return; + case NVPTX::PTXCvtMode::NONE: + break; + case NVPTX::PTXCvtMode::RNI: + O << ".rni"; + break; + case NVPTX::PTXCvtMode::RZI: + O << ".rzi"; + break; + case NVPTX::PTXCvtMode::RMI: + O << ".rmi"; + break; + case NVPTX::PTXCvtMode::RPI: + O << ".rpi"; + break; + case NVPTX::PTXCvtMode::RN: + O << ".rn"; + break; + case NVPTX::PTXCvtMode::RZ: + O << ".rz"; + break; + case NVPTX::PTXCvtMode::RM: + O << ".rm"; + break; + case NVPTX::PTXCvtMode::RP: + O << ".rp"; + break; + } + } else { + llvm_unreachable("Invalid conversion modifier"); + } +} + +void NVPTXInstPrinter::printCmpMode(const MCInst *MI, int OpNum, raw_ostream &O, + const char *Modifier) { + const MCOperand &MO = MI->getOperand(OpNum); + int64_t Imm = MO.getImm(); + + if (strcmp(Modifier, "ftz") == 0) { + // FTZ flag + if (Imm & 
NVPTX::PTXCmpMode::FTZ_FLAG) + O << ".ftz"; + } else if (strcmp(Modifier, "base") == 0) { + switch (Imm & NVPTX::PTXCmpMode::BASE_MASK) { + default: + return; + case NVPTX::PTXCmpMode::EQ: + O << ".eq"; + break; + case NVPTX::PTXCmpMode::NE: + O << ".ne"; + break; + case NVPTX::PTXCmpMode::LT: + O << ".lt"; + break; + case NVPTX::PTXCmpMode::LE: + O << ".le"; + break; + case NVPTX::PTXCmpMode::GT: + O << ".gt"; + break; + case NVPTX::PTXCmpMode::GE: + O << ".ge"; + break; + case NVPTX::PTXCmpMode::LO: + O << ".lo"; + break; + case NVPTX::PTXCmpMode::LS: + O << ".ls"; + break; + case NVPTX::PTXCmpMode::HI: + O << ".hi"; + break; + case NVPTX::PTXCmpMode::HS: + O << ".hs"; + break; + case NVPTX::PTXCmpMode::EQU: + O << ".equ"; + break; + case NVPTX::PTXCmpMode::NEU: + O << ".neu"; + break; + case NVPTX::PTXCmpMode::LTU: + O << ".ltu"; + break; + case NVPTX::PTXCmpMode::LEU: + O << ".leu"; + break; + case NVPTX::PTXCmpMode::GTU: + O << ".gtu"; + break; + case NVPTX::PTXCmpMode::GEU: + O << ".geu"; + break; + case NVPTX::PTXCmpMode::NUM: + O << ".num"; + break; + case NVPTX::PTXCmpMode::NotANumber: + O << ".nan"; + break; + } + } else { + llvm_unreachable("Empty Modifier"); + } +} + +void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum, + raw_ostream &O, const char *Modifier) { + if (Modifier) { + const MCOperand &MO = MI->getOperand(OpNum); + int Imm = (int) MO.getImm(); + if (!strcmp(Modifier, "volatile")) { + if (Imm) + O << ".volatile"; + } else if (!strcmp(Modifier, "addsp")) { + switch (Imm) { + case NVPTX::PTXLdStInstCode::GLOBAL: + O << ".global"; + break; + case NVPTX::PTXLdStInstCode::SHARED: + O << ".shared"; + break; + case NVPTX::PTXLdStInstCode::LOCAL: + O << ".local"; + break; + case NVPTX::PTXLdStInstCode::PARAM: + O << ".param"; + break; + case NVPTX::PTXLdStInstCode::CONSTANT: + O << ".const"; + break; + case NVPTX::PTXLdStInstCode::GENERIC: + break; + default: + llvm_unreachable("Wrong Address Space"); + } + } else if (!strcmp(Modifier, "sign")) { + if (Imm == NVPTX::PTXLdStInstCode::Signed) + O << "s"; + else if (Imm == NVPTX::PTXLdStInstCode::Unsigned) + O << "u"; + else + O << "f"; + } else if (!strcmp(Modifier, "vec")) { + if (Imm == NVPTX::PTXLdStInstCode::V2) + O << ".v2"; + else if (Imm == NVPTX::PTXLdStInstCode::V4) + O << ".v4"; + } else + llvm_unreachable("Unknown Modifier"); + } else + llvm_unreachable("Empty Modifier"); +} + +void NVPTXInstPrinter::printMemOperand(const MCInst *MI, int OpNum, + raw_ostream &O, const char *Modifier) { + printOperand(MI, OpNum, O); + + if (Modifier && !strcmp(Modifier, "add")) { + O << ", "; + printOperand(MI, OpNum + 1, O); + } else { + if (MI->getOperand(OpNum + 1).isImm() && + MI->getOperand(OpNum + 1).getImm() == 0) + return; // don't print ',0' or '+0' + O << "+"; + printOperand(MI, OpNum + 1, O); + } +} diff --git a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h new file mode 100644 index 0000000..e0f44da --- /dev/null +++ b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h @@ -0,0 +1,52 @@ +//= NVPTXInstPrinter.h - Convert NVPTX MCInst to assembly syntax --*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class prints an NVPTX MCInst to .ptx file syntax. 
+// +//===----------------------------------------------------------------------===// + +#ifndef NVPTX_INST_PRINTER_H +#define NVPTX_INST_PRINTER_H + +#include "llvm/MC/MCInstPrinter.h" +#include "llvm/Support/raw_ostream.h" + +namespace llvm { + +class MCOperand; +class MCSubtargetInfo; + +class NVPTXInstPrinter : public MCInstPrinter { +public: + NVPTXInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, + const MCRegisterInfo &MRI, const MCSubtargetInfo &STI); + + virtual void printRegName(raw_ostream &OS, unsigned RegNo) const; + virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot); + + // Autogenerated by tblgen. + void printInstruction(const MCInst *MI, raw_ostream &O); + static const char *getRegisterName(unsigned RegNo); + // End + + void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O, + const char *Modifier = 0); + void printCmpMode(const MCInst *MI, int OpNum, raw_ostream &O, + const char *Modifier = 0); + void printLdStCode(const MCInst *MI, int OpNum, + raw_ostream &O, const char *Modifier = 0); + void printMemOperand(const MCInst *MI, int OpNum, + raw_ostream &O, const char *Modifier = 0); + +}; + +} + +#endif diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp index ccd2970..871bac9 100644 --- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp +++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp @@ -13,6 +13,7 @@ #include "NVPTXMCTargetDesc.h" #include "NVPTXMCAsmInfo.h" +#include "InstPrinter/NVPTXInstPrinter.h" #include "llvm/MC/MCCodeGenInfo.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegisterInfo.h" @@ -57,6 +58,17 @@ static MCCodeGenInfo *createNVPTXMCCodeGenInfo( return X; } +static MCInstPrinter *createNVPTXMCInstPrinter(const Target &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI) { + if (SyntaxVariant == 0) + return new NVPTXInstPrinter(MAI, MII, MRI, STI); + return 0; +} + // Force static initialization. extern "C" void LLVMInitializeNVPTXTargetMC() { // Register the MC asm info. @@ -85,4 +97,9 @@ extern "C" void LLVMInitializeNVPTXTargetMC() { TargetRegistry::RegisterMCSubtargetInfo(TheNVPTXTarget64, createNVPTXMCSubtargetInfo); + // Register the MCInstPrinter. 
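// Editor's sketch (assumed usage, not shown in this commit): once the two factories below are registered, clients reach the printer through the generic registry hook instead of constructing NVPTXInstPrinter directly:
//   const Target &T = ...;  // TheNVPTXTarget32 or TheNVPTXTarget64
//   MCInstPrinter *IP =
//       T.createMCInstPrinter(/*SyntaxVariant=*/0, MAI, MII, MRI, STI);
//   // the factory returns 0 for any syntax variant other than 0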
+ TargetRegistry::RegisterMCInstPrinter(TheNVPTXTarget32, + createNVPTXMCInstPrinter); + TargetRegistry::RegisterMCInstPrinter(TheNVPTXTarget64, + createNVPTXMCInstPrinter); } diff --git a/lib/Target/NVPTX/ManagedStringPool.h b/lib/Target/NVPTX/ManagedStringPool.h index d6c79b5..f9fb059 100644 --- a/lib/Target/NVPTX/ManagedStringPool.h +++ b/lib/Target/NVPTX/ManagedStringPool.h @@ -29,7 +29,7 @@ class ManagedStringPool { public: ManagedStringPool() {} ~ManagedStringPool() { - SmallVector<std::string *, 8>::iterator Current = Pool.begin(); + SmallVectorImpl<std::string *>::iterator Current = Pool.begin(); while (Current != Pool.end()) { delete *Current; Current++; diff --git a/lib/Target/NVPTX/NVPTX.h b/lib/Target/NVPTX/NVPTX.h index 179dc27..490b49d 100644 --- a/lib/Target/NVPTX/NVPTX.h +++ b/lib/Target/NVPTX/NVPTX.h @@ -61,9 +61,6 @@ inline static const char *NVPTXCondCodeToString(NVPTXCC::CondCodes CC) { FunctionPass * createNVPTXISelDag(NVPTXTargetMachine &TM, llvm::CodeGenOpt::Level OptLevel); -FunctionPass *createLowerStructArgsPass(NVPTXTargetMachine &); -FunctionPass *createNVPTXReMatPass(NVPTXTargetMachine &); -FunctionPass *createNVPTXReMatBlockPass(NVPTXTargetMachine &); ModulePass *createGenericToNVVMPass(); ModulePass *createNVVMReflectPass(); ModulePass *createNVVMReflectPass(const StringMap<int>& Mapping); @@ -77,8 +74,7 @@ extern Target TheNVPTXTarget64; namespace NVPTX { enum DrvInterface { NVCL, - CUDA, - TEST + CUDA }; // A field inside TSFlags needs a shift and a mask. The usage is @@ -132,6 +128,53 @@ enum VecType { V4 = 4 }; } + +/// PTXCvtMode - Conversion code enumeration +namespace PTXCvtMode { +enum CvtMode { + NONE = 0, + RNI, + RZI, + RMI, + RPI, + RN, + RZ, + RM, + RP, + + BASE_MASK = 0x0F, + FTZ_FLAG = 0x10, + SAT_FLAG = 0x20 +}; +} + +/// PTXCmpMode - Comparison mode enumeration +namespace PTXCmpMode { +enum CmpMode { + EQ = 0, + NE, + LT, + LE, + GT, + GE, + LO, + LS, + HI, + HS, + EQU, + NEU, + LTU, + LEU, + GTU, + GEU, + NUM, + // NAN is a MACRO + NotANumber, + + BASE_MASK = 0xFF, + FTZ_FLAG = 0x100 +}; +} } } // end namespace llvm; diff --git a/lib/Target/NVPTX/NVPTX.td b/lib/Target/NVPTX/NVPTX.td index d78b4e8..6183a75 100644 --- a/lib/Target/NVPTX/NVPTX.td +++ b/lib/Target/NVPTX/NVPTX.td @@ -57,6 +57,12 @@ def : Proc<"sm_35", [SM35]>; def NVPTXInstrInfo : InstrInfo { } +def NVPTXAsmWriter : AsmWriter { + bit isMCAsmWriter = 1; + string AsmWriterClassName = "InstPrinter"; +} + def NVPTX : Target { let InstructionSet = NVPTXInstrInfo; + let AssemblyWriters = [NVPTXAsmWriter]; } diff --git a/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp b/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp index 0f792ec..1f37696 100644 --- a/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp +++ b/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp @@ -37,7 +37,7 @@ bool NVPTXAllocaHoisting::runOnFunction(Function &function) { } char NVPTXAllocaHoisting::ID = 1; -RegisterPass<NVPTXAllocaHoisting> +static RegisterPass<NVPTXAllocaHoisting> X("alloca-hoisting", "Hoisting alloca instructions in non-entry " "blocks to the entry block"); diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index ff73931..a2b9bec 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -16,7 +16,7 @@ #include "MCTargetDesc/NVPTXMCAsmInfo.h" #include "NVPTX.h" #include "NVPTXInstrInfo.h" -#include "NVPTXNumRegisters.h" +#include "NVPTXMCExpr.h" #include "NVPTXRegisterInfo.h" #include "NVPTXTargetMachine.h" #include "NVPTXUtilities.h" @@ -47,8 +47,6 @@ #include 
<sstream> using namespace llvm; -#include "NVPTXGenAsmWriter.inc" - bool RegAllocNilUsed = true; #define DEPOTNAME "__local_depot" @@ -279,8 +277,10 @@ void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) { const LLVMContext &ctx = MF->getFunction()->getContext(); DIScope Scope(curLoc.getScope(ctx)); - if (!Scope.Verify()) - return; + assert((!Scope || Scope.isScope()) && + "Scope of a DebugLoc should be null or a DIScope."); + if (!Scope) + return; StringRef fileName(Scope.getFilename()); StringRef dirName(Scope.getDirectory()); @@ -308,8 +308,107 @@ void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) { raw_svector_ostream OS(Str); if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) emitLineNumberAsDotLoc(*MI); - printInstruction(MI, OS); - OutStreamer.EmitRawText(OS.str()); + + MCInst Inst; + lowerToMCInst(MI, Inst); + OutStreamer.EmitInstruction(Inst); +} + +void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) { + OutMI.setOpcode(MI->getOpcode()); + + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + + MCOperand MCOp; + if (lowerOperand(MO, MCOp)) + OutMI.addOperand(MCOp); + } +} + +bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO, + MCOperand &MCOp) { + switch (MO.getType()) { + default: llvm_unreachable("unknown operand type"); + case MachineOperand::MO_Register: + MCOp = MCOperand::CreateReg(encodeVirtualRegister(MO.getReg())); + break; + case MachineOperand::MO_Immediate: + MCOp = MCOperand::CreateImm(MO.getImm()); + break; + case MachineOperand::MO_MachineBasicBlock: + MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create( + MO.getMBB()->getSymbol(), OutContext)); + break; + case MachineOperand::MO_ExternalSymbol: + MCOp = GetSymbolRef(MO, GetExternalSymbolSymbol(MO.getSymbolName())); + break; + case MachineOperand::MO_GlobalAddress: + MCOp = GetSymbolRef(MO, Mang->getSymbol(MO.getGlobal())); + break; + case MachineOperand::MO_FPImmediate: { + const ConstantFP *Cnt = MO.getFPImm(); + APFloat Val = Cnt->getValueAPF(); + + switch (Cnt->getType()->getTypeID()) { + default: report_fatal_error("Unsupported FP type"); break; + case Type::FloatTyID: + MCOp = MCOperand::CreateExpr( + NVPTXFloatMCExpr::CreateConstantFPSingle(Val, OutContext)); + break; + case Type::DoubleTyID: + MCOp = MCOperand::CreateExpr( + NVPTXFloatMCExpr::CreateConstantFPDouble(Val, OutContext)); + break; + } + break; + } + } + return true; +} + +unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) { + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + const TargetRegisterClass *RC = MRI->getRegClass(Reg); + + DenseMap<unsigned, unsigned> &RegMap = VRegMapping[RC]; + unsigned RegNum = RegMap[Reg]; + + // Encode the register class in the upper 4 bits + // Must be kept in sync with NVPTXInstPrinter::printRegName + unsigned Ret = 0; + if (RC == &NVPTX::Int1RegsRegClass) { + Ret = (1 << 28); + } else if (RC == &NVPTX::Int16RegsRegClass) { + Ret = (2 << 28); + } else if (RC == &NVPTX::Int32RegsRegClass) { + Ret = (3 << 28); + } else if (RC == &NVPTX::Int64RegsRegClass) { + Ret = (4 << 28); + } else if (RC == &NVPTX::Float32RegsRegClass) { + Ret = (5 << 28); + } else if (RC == &NVPTX::Float64RegsRegClass) { + Ret = (6 << 28); + } else { + report_fatal_error("Bad register class"); + } + + // Insert the vreg number + Ret |= (RegNum & 0x0FFFFFFF); + return Ret; + } else { + // Some special-use registers are actually physical registers. 
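// (For instance the frame pseudo-registers NVPTX::VRFrame and NVPTX::VRDepot referenced elsewhere in this patch; an editor's example, as the patch does not enumerate them here.)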
+ // Encode this as the register class ID of 0 and the real register ID. + return Reg & 0x0FFFFFFF; + } +} + +MCOperand NVPTXAsmPrinter::GetSymbolRef(const MachineOperand &MO, + const MCSymbol *Symbol) { + const MCExpr *Expr; + Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, + OutContext); + return MCOperand::CreateExpr(Expr); } void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) { @@ -551,145 +650,7 @@ void NVPTXAsmPrinter::printVecModifiedImmediate( llvm_unreachable("Unknown Modifier on immediate operand"); } -void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum, - raw_ostream &O, const char *Modifier) { - const MachineOperand &MO = MI->getOperand(opNum); - switch (MO.getType()) { - case MachineOperand::MO_Register: - if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) { - if (MO.getReg() == NVPTX::VRDepot) - O << DEPOTNAME << getFunctionNumber(); - else - O << getRegisterName(MO.getReg()); - } else { - if (!Modifier) - emitVirtualRegister(MO.getReg(), false, O); - else { - if (strcmp(Modifier, "vecfull") == 0) - emitVirtualRegister(MO.getReg(), true, O); - else - llvm_unreachable( - "Don't know how to handle the modifier on virtual register."); - } - } - return; - - case MachineOperand::MO_Immediate: - if (!Modifier) - O << MO.getImm(); - else if (strstr(Modifier, "vec") == Modifier) - printVecModifiedImmediate(MO, Modifier, O); - else - llvm_unreachable( - "Don't know how to handle modifier on immediate operand"); - return; - - case MachineOperand::MO_FPImmediate: - printFPConstant(MO.getFPImm(), O); - break; - - case MachineOperand::MO_GlobalAddress: - O << *Mang->getSymbol(MO.getGlobal()); - break; - - case MachineOperand::MO_ExternalSymbol: { - const char *symbname = MO.getSymbolName(); - if (strstr(symbname, ".PARAM") == symbname) { - unsigned index; - sscanf(symbname + 6, "%u[];", &index); - printParamName(index, O); - } else if (strstr(symbname, ".HLPPARAM") == symbname) { - unsigned index; - sscanf(symbname + 9, "%u[];", &index); - O << *CurrentFnSym << "_param_" << index << "_offset"; - } else - O << symbname; - break; - } - - case MachineOperand::MO_MachineBasicBlock: - O << *MO.getMBB()->getSymbol(); - return; - - default: - llvm_unreachable("Operand type not supported."); - } -} - -void NVPTXAsmPrinter::printImplicitDef(const MachineInstr *MI, - raw_ostream &O) const { -#ifndef __OPTIMIZE__ - O << "\t// Implicit def :"; - //printOperand(MI, 0); - O << "\n"; -#endif -} - -void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum, - raw_ostream &O, const char *Modifier) { - printOperand(MI, opNum, O); - - if (Modifier && !strcmp(Modifier, "add")) { - O << ", "; - printOperand(MI, opNum + 1, O); - } else { - if (MI->getOperand(opNum + 1).isImm() && - MI->getOperand(opNum + 1).getImm() == 0) - return; // don't print ',0' or '+0' - O << "+"; - printOperand(MI, opNum + 1, O); - } -} -void NVPTXAsmPrinter::printLdStCode(const MachineInstr *MI, int opNum, - raw_ostream &O, const char *Modifier) { - if (Modifier) { - const MachineOperand &MO = MI->getOperand(opNum); - int Imm = (int) MO.getImm(); - if (!strcmp(Modifier, "volatile")) { - if (Imm) - O << ".volatile"; - } else if (!strcmp(Modifier, "addsp")) { - switch (Imm) { - case NVPTX::PTXLdStInstCode::GLOBAL: - O << ".global"; - break; - case NVPTX::PTXLdStInstCode::SHARED: - O << ".shared"; - break; - case NVPTX::PTXLdStInstCode::LOCAL: - O << ".local"; - break; - case NVPTX::PTXLdStInstCode::PARAM: - O << ".param"; - break; - case 
NVPTX::PTXLdStInstCode::CONSTANT: - O << ".const"; - break; - case NVPTX::PTXLdStInstCode::GENERIC: - if (!nvptxSubtarget.hasGenericLdSt()) - O << ".global"; - break; - default: - llvm_unreachable("Wrong Address Space"); - } - } else if (!strcmp(Modifier, "sign")) { - if (Imm == NVPTX::PTXLdStInstCode::Signed) - O << "s"; - else if (Imm == NVPTX::PTXLdStInstCode::Unsigned) - O << "u"; - else - O << "f"; - } else if (!strcmp(Modifier, "vec")) { - if (Imm == NVPTX::PTXLdStInstCode::V2) - O << ".v2"; - else if (Imm == NVPTX::PTXLdStInstCode::V4) - O << ".v4"; - } else - llvm_unreachable("Unknown Modifier"); - } else - llvm_unreachable("Empty Modifier"); -} void NVPTXAsmPrinter::emitDeclaration(const Function *F, raw_ostream &O) { @@ -918,6 +879,16 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) { // Already commented out //bool Result = AsmPrinter::doInitialization(M); + // Emit module-level inline asm if it exists. + if (!M.getModuleInlineAsm().empty()) { + OutStreamer.AddComment("Start of file scope inline assembly"); + OutStreamer.AddBlankLine(); + OutStreamer.EmitRawText(StringRef(M.getModuleInlineAsm())); + OutStreamer.AddBlankLine(); + OutStreamer.AddComment("End of file scope inline assembly"); + OutStreamer.AddBlankLine(); + } + if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) recordAndEmitFilenames(M); @@ -1965,41 +1936,6 @@ bool NVPTXAsmPrinter::isImageType(const Type *Ty) { return false; } -/// PrintAsmOperand - Print out an operand for an inline asm expression. -/// -bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, - const char *ExtraCode, raw_ostream &O) { - if (ExtraCode && ExtraCode[0]) { - if (ExtraCode[1] != 0) - return true; // Unknown modifier. - - switch (ExtraCode[0]) { - default: - // See if this is a generic print operand - return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O); - case 'r': - break; - } - } - - printOperand(MI, OpNo, O); - - return false; -} - -bool NVPTXAsmPrinter::PrintAsmMemoryOperand( - const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant, - const char *ExtraCode, raw_ostream &O) { - if (ExtraCode && ExtraCode[0]) - return true; // Unknown modifier - - O << '['; - printMemOperand(MI, OpNo, O); - O << ']'; - - return false; -} bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) { switch (MI.getOpcode()) { @@ -2014,7 +1950,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) { case NVPTX::CallArgI32: case NVPTX::CallArgI32imm: case NVPTX::CallArgI64: - case NVPTX::CallArgI8: case NVPTX::CallArgParam: case NVPTX::CallVoidInst: case NVPTX::CallVoidInstReg: @@ -2032,10 +1967,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) { case NVPTX::StoreParamI32: case NVPTX::StoreParamI64: case NVPTX::StoreParamI8: - case NVPTX::StoreParamS32I8: - case NVPTX::StoreParamU32I8: - case NVPTX::StoreParamS32I16: - case NVPTX::StoreParamU32I16: case NVPTX::StoreRetvalF32: case NVPTX::StoreRetvalF64: case NVPTX::StoreRetvalI16: @@ -2048,7 +1979,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) { case NVPTX::LastCallArgI32: case NVPTX::LastCallArgI32imm: case NVPTX::LastCallArgI64: - case NVPTX::LastCallArgI8: case NVPTX::LastCallArgParam: case NVPTX::LoadParamMemF32: case NVPTX::LoadParamMemF64: @@ -2056,12 +1986,6 @@ bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) { case NVPTX::LoadParamMemI32: case NVPTX::LoadParamMemI64: case NVPTX::LoadParamMemI8: - case NVPTX::LoadParamRegF32: - case NVPTX::LoadParamRegF64: - case NVPTX::LoadParamRegI16: - case 
NVPTX::LoadParamRegI32: - case NVPTX::LoadParamRegI64: - case NVPTX::LoadParamRegI8: case NVPTX::PrototypeInst: case NVPTX::DBG_VALUE: return true; diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.h b/lib/Target/NVPTX/NVPTXAsmPrinter.h index 55f2943..27bfa54 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.h +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.h @@ -190,14 +190,14 @@ private: void EmitFunctionBodyEnd(); void EmitInstruction(const MachineInstr *); + void lowerToMCInst(const MachineInstr *MI, MCInst &OutMI); + bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp); + MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol); + unsigned encodeVirtualRegister(unsigned Reg); void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const {} void printGlobalVariable(const GlobalVariable *GVar); - void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O, - const char *Modifier = 0); - void printLdStCode(const MachineInstr *MI, int opNum, raw_ostream &O, - const char *Modifier = 0); void printVecModifiedImmediate(const MachineOperand &MO, const char *Modifier, raw_ostream &O); void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O, @@ -220,12 +220,6 @@ private: void setAndEmitFunctionVirtualRegisters(const MachineFunction &MF); void emitFunctionTempData(const MachineFunction &MF, unsigned &FrameSize); bool isImageType(const Type *Ty); - bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, - raw_ostream &); - bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, - raw_ostream &); void printReturnValStr(const Function *, raw_ostream &O); void printReturnValStr(const MachineFunction &MF, raw_ostream &O); diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/lib/Target/NVPTX/NVPTXFrameLowering.cpp index 6533da5..9030584 100644 --- a/lib/Target/NVPTX/NVPTXFrameLowering.cpp +++ b/lib/Target/NVPTX/NVPTXFrameLowering.cpp @@ -20,6 +20,7 @@ #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/MC/MachineLocation.h" #include "llvm/Target/TargetInstrInfo.h" @@ -36,30 +37,24 @@ void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const { // in the BB, so giving it no debug location. 
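// Editor's sketch of the PTX this prologue produces (assuming the 64-bit target and function number 0; "__local_depot" is DEPOTNAME from the NVPTX asm printer):
//   mov.u64         %SPL, __local_depot0;
//   cvta.local.u64  %SP, %SPL;
// The 32-bit path below has the same shape with .u32 conversions and the 32-bit depot-address and cvta opcodes.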
DebugLoc dl = DebugLoc(); - if (tm.getSubtargetImpl()->hasGenericLdSt()) { - // mov %SPL, %depot; - // cvta.local %SP, %SPL; - if (is64bit) { - MachineInstr *MI = BuildMI( - MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64), - NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal); - BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::IMOV64rr), - NVPTX::VRFrameLocal).addReg(NVPTX::VRDepot); - } else { - MachineInstr *MI = BuildMI( - MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes), - NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal); - BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::IMOV32rr), - NVPTX::VRFrameLocal).addReg(NVPTX::VRDepot); - } + MachineRegisterInfo &MRI = MF.getRegInfo(); + + // mov %SPL, %depot; + // cvta.local %SP, %SPL; + if (is64bit) { + unsigned LocalReg = MRI.createVirtualRegister(&NVPTX::Int64RegsRegClass); + MachineInstr *MI = BuildMI( + MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64), + NVPTX::VRFrame).addReg(LocalReg); + BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR_64), + LocalReg).addImm(MF.getFunctionNumber()); } else { - // mov %SP, %depot; - if (is64bit) - BuildMI(MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::IMOV64rr), - NVPTX::VRFrame).addReg(NVPTX::VRDepot); - else - BuildMI(MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::IMOV32rr), - NVPTX::VRFrame).addReg(NVPTX::VRDepot); + unsigned LocalReg = MRI.createVirtualRegister(&NVPTX::Int32RegsRegClass); + MachineInstr *MI = BuildMI( + MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes), + NVPTX::VRFrame).addReg(LocalReg); + BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR), + LocalReg).addImm(MF.getFunctionNumber()); } } } diff --git a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp index 1077c46..9f92a5b 100644 --- a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp +++ b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp @@ -384,7 +384,7 @@ void GenericToNVVM::remapNamedMDNode(Module *M, NamedMDNode *N) { // Replace the old operands with the new operands. N->dropAllReferences(); - for (SmallVector<MDNode *, 16>::iterator I = NewOperands.begin(), + for (SmallVectorImpl<MDNode *>::iterator I = NewOperands.begin(), E = NewOperands.end(); I != E; ++I) { N->addOperand(*I); diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index ac6dbb9..ba85e35 100644 --- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -25,11 +25,6 @@ using namespace llvm; -static cl::opt<bool> UseFMADInstruction( - "nvptx-mad-enable", cl::ZeroOrMore, - cl::desc("NVPTX Specific: Enable generating FMAD instructions"), - cl::init(false)); - static cl::opt<int> FMAContractLevel("nvptx-fma-level", cl::ZeroOrMore, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" @@ -47,6 +42,12 @@ UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true)); +static cl::opt<bool> +FtzEnabled("nvptx-f32ftz", cl::ZeroOrMore, + cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."), + cl::init(false)); + + /// createNVPTXISelDag - This pass converts a legalized DAG into a /// NVPTX-specific DAG, ready for instruction scheduling. 
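/// A usage sketch (editor's assumption; the caller is not part of this diff):
///   // inside the target's pass configuration
///   addPass(createNVPTXISelDag(getNVPTXTargetMachine(), getOptLevel()));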
FunctionPass *llvm::createNVPTXISelDag(NVPTXTargetMachine &TM, @@ -58,12 +59,7 @@ NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm, CodeGenOpt::Level OptLevel) : SelectionDAGISel(tm, OptLevel), Subtarget(tm.getSubtarget<NVPTXSubtarget>()) { - // Always do fma.f32 fpcontract if the target supports the instruction. - // Always do fma.f64 fpcontract if the target supports the instruction. - // Do mad.f32 is nvptx-mad-enable is specified and the target does not - // support fma.f32. - doFMADF32 = (OptLevel > 0) && UseFMADInstruction && !Subtarget.hasFMAF32(); doFMAF32 = (OptLevel > 0) && Subtarget.hasFMAF32() && (FMAContractLevel >= 1); doFMAF64 = (OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel >= 1); doFMAF32AGG = @@ -71,20 +67,51 @@ NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm, doFMAF64AGG = (OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel == 2); - allowFMA = (FMAContractLevel >= 1) || UseFMADInstruction; - - UseF32FTZ = false; + allowFMA = (FMAContractLevel >= 1); doMulWide = (OptLevel > 0); +} - // Decide how to translate f32 div - do_DIVF32_PREC = UsePrecDivF32; - // Decide how to translate f32 sqrt - do_SQRTF32_PREC = UsePrecSqrtF32; - // sm less than sm_20 does not support div.rnd. Use div.full. - if (do_DIVF32_PREC == 2 && !Subtarget.reqPTX20()) - do_DIVF32_PREC = 1; +int NVPTXDAGToDAGISel::getDivF32Level() const { + if (UsePrecDivF32.getNumOccurrences() > 0) { + // If nvptx-prec-div32=N is used on the command-line, always honor it + return UsePrecDivF32; + } else { + // Otherwise, use div.approx if fast math is enabled + if (TM.Options.UnsafeFPMath) + return 0; + else + return 2; + } +} +bool NVPTXDAGToDAGISel::usePrecSqrtF32() const { + if (UsePrecSqrtF32.getNumOccurrences() > 0) { + // If nvptx-prec-sqrtf32 is used on the command-line, always honor it + return UsePrecSqrtF32; + } else { + // Otherwise, use sqrt.approx if fast math is enabled + if (TM.Options.UnsafeFPMath) + return false; + else + return true; + } +} + +bool NVPTXDAGToDAGISel::useF32FTZ() const { + if (FtzEnabled.getNumOccurrences() > 0) { + // If nvptx-f32ftz is used on the command-line, always honor it + return FtzEnabled; + } else { + const Function *F = MF->getFunction(); + // Otherwise, check for an nvptx-f32ftz attribute on the function + if (F->hasFnAttribute("nvptx-f32ftz")) + return (F->getAttributes().getAttribute(AttributeSet::FunctionIndex, + "nvptx-f32ftz") + .getValueAsString() == "true"); + else + return false; + } } /// Select - Select instructions not customized! 
Used for @@ -116,6 +143,23 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) { case NVPTXISD::StoreV4: ResNode = SelectStoreVector(N); break; + case NVPTXISD::LoadParam: + case NVPTXISD::LoadParamV2: + case NVPTXISD::LoadParamV4: + ResNode = SelectLoadParam(N); + break; + case NVPTXISD::StoreRetval: + case NVPTXISD::StoreRetvalV2: + case NVPTXISD::StoreRetvalV4: + ResNode = SelectStoreRetval(N); + break; + case NVPTXISD::StoreParam: + case NVPTXISD::StoreParamV2: + case NVPTXISD::StoreParamV4: + case NVPTXISD::StoreParamS32: + case NVPTXISD::StoreParamU32: + ResNode = SelectStoreParam(N); + break; default: break; } @@ -770,192 +814,476 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { unsigned Opcode; SDLoc DL(N); SDNode *LD; + MemSDNode *Mem = cast<MemSDNode>(N); + SDValue Base, Offset, Addr; - EVT RetVT = N->getValueType(0); + EVT EltVT = Mem->getMemoryVT().getVectorElementType(); - // Select opcode - if (Subtarget.is64Bit()) { + if (SelectDirectAddr(Op1, Addr)) { switch (N->getOpcode()) { default: return NULL; case NVPTXISD::LDGV2: - switch (RetVT.getSimpleVT().SimpleTy) { + switch (EltVT.getSimpleVT().SimpleTy) { default: return NULL; case MVT::i8: - Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_avar; break; case MVT::i16: - Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_avar; break; case MVT::i32: - Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_avar; break; case MVT::i64: - Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_avar; break; case MVT::f32: - Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_avar; break; case MVT::f64: - Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_avar; break; } break; - case NVPTXISD::LDGV4: - switch (RetVT.getSimpleVT().SimpleTy) { + case NVPTXISD::LDUV2: + switch (EltVT.getSimpleVT().SimpleTy) { default: return NULL; case MVT::i8: - Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_avar; break; case MVT::i16: - Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_avar; break; case MVT::i32: - Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_avar; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_avar; break; case MVT::f32: - Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_avar; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_avar; break; } break; - case NVPTXISD::LDUV2: - switch (RetVT.getSimpleVT().SimpleTy) { + case NVPTXISD::LDGV4: + switch (EltVT.getSimpleVT().SimpleTy) { default: return NULL; case MVT::i8: - Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_avar; break; case MVT::i16: - Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_avar; break; case MVT::i32: - Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_64; - break; - case MVT::i64: - Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_avar; break; case MVT::f32: - Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_64; - break; - case MVT::f64: - Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_64; + Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_avar; break; } break; case NVPTXISD::LDUV4: - switch (RetVT.getSimpleVT().SimpleTy) { + switch (EltVT.getSimpleVT().SimpleTy) { default: return NULL; case MVT::i8: - 
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_avar; break; case MVT::i16: - Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_avar; break; case MVT::i32: - Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_avar; break; case MVT::f32: - Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_64; + Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_avar; break; } break; } - } else { - switch (N->getOpcode()) { - default: - return NULL; - case NVPTXISD::LDGV2: - switch (RetVT.getSimpleVT().SimpleTy) { + + SDValue Ops[] = { Addr, Chain }; + LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), + ArrayRef<SDValue>(Ops, 2)); + } else if (Subtarget.is64Bit() + ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset) + : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) { + if (Subtarget.is64Bit()) { + switch (N->getOpcode()) { default: return NULL; - case MVT::i8: - Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_32; - break; - case MVT::i16: - Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_32; - break; - case MVT::i32: - Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_32; + case NVPTXISD::LDGV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_ari64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_ari64; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_ari64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_ari64; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_ari64; + break; + } break; - case MVT::i64: - Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_32; + case NVPTXISD::LDUV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_ari64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_ari64; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_ari64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_ari64; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_ari64; + break; + } break; - case MVT::f32: - Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_32; + case NVPTXISD::LDGV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_ari64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_ari64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_ari64; + break; + } break; - case MVT::f64: - Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_32; + case NVPTXISD::LDUV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_ari64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_ari64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_ari64; + break; + } break; } - break; - case NVPTXISD::LDGV4: - switch (RetVT.getSimpleVT().SimpleTy) { + } else { + switch (N->getOpcode()) { default: return NULL; - case MVT::i8: - Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_32; + case NVPTXISD::LDGV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = 
NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_ari32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_ari32; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_ari32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_ari32; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_ari32; + break; + } break; - case MVT::i16: - Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_32; + case NVPTXISD::LDUV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_ari32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_ari32; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_ari32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_ari32; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_ari32; + break; + } break; - case MVT::i32: - Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_32; + case NVPTXISD::LDGV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_ari32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_ari32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_ari32; + break; + } break; - case MVT::f32: - Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_32; + case NVPTXISD::LDUV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_ari32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_ari32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_ari32; + break; + } break; } - break; - case NVPTXISD::LDUV2: - switch (RetVT.getSimpleVT().SimpleTy) { + } + + SDValue Ops[] = { Base, Offset, Chain }; + + LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), + ArrayRef<SDValue>(Ops, 3)); + } else { + if (Subtarget.is64Bit()) { + switch (N->getOpcode()) { default: return NULL; - case MVT::i8: - Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_32; - break; - case MVT::i16: - Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_32; - break; - case MVT::i32: - Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_32; + case NVPTXISD::LDGV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_areg64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_areg64; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_areg64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_areg64; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_areg64; + break; + } break; - case MVT::i64: - Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_32; + case NVPTXISD::LDUV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_areg64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_areg64; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_areg64; + break; + case MVT::f32: + Opcode = 
NVPTX::INT_PTX_LDU_G_v2f32_ELE_areg64; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_areg64; + break; + } break; - case MVT::f32: - Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_32; + case NVPTXISD::LDGV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_areg64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_areg64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_areg64; + break; + } break; - case MVT::f64: - Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_32; + case NVPTXISD::LDUV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_areg64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_areg64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_areg64; + break; + } break; } - break; - case NVPTXISD::LDUV4: - switch (RetVT.getSimpleVT().SimpleTy) { + } else { + switch (N->getOpcode()) { default: return NULL; - case MVT::i8: - Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_32; + case NVPTXISD::LDGV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_areg32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_areg32; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_areg32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_areg32; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_areg32; + break; + } break; - case MVT::i16: - Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_32; + case NVPTXISD::LDUV2: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_areg32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_areg32; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_areg32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_areg32; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_areg32; + break; + } break; - case MVT::i32: - Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_32; + case NVPTXISD::LDGV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_areg32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_areg32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_areg32; + break; + } break; - case MVT::f32: - Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_32; + case NVPTXISD::LDUV4: + switch (EltVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_areg32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_areg32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_areg32; + break; + } break; } - break; } - } - SDValue Ops[] = { Op1, Chain }; - LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops); + SDValue Ops[] = { Op1, Chain }; + LD = 
CurDAG->getMachineNode(Opcode, DL, N->getVTList(), + ArrayRef<SDValue>(Ops, 2)); + } MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand(); @@ -1571,6 +1899,414 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { return ST; } +SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) { + SDValue Chain = Node->getOperand(0); + SDValue Offset = Node->getOperand(2); + SDValue Flag = Node->getOperand(3); + SDLoc DL(Node); + MemSDNode *Mem = cast<MemSDNode>(Node); + + unsigned VecSize; + switch (Node->getOpcode()) { + default: + return NULL; + case NVPTXISD::LoadParam: + VecSize = 1; + break; + case NVPTXISD::LoadParamV2: + VecSize = 2; + break; + case NVPTXISD::LoadParamV4: + VecSize = 4; + break; + } + + EVT EltVT = Node->getValueType(0); + EVT MemVT = Mem->getMemoryVT(); + + unsigned Opc = 0; + + switch (VecSize) { + default: + return NULL; + case 1: + switch (MemVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opc = NVPTX::LoadParamMemI8; + break; + case MVT::i8: + Opc = NVPTX::LoadParamMemI8; + break; + case MVT::i16: + Opc = NVPTX::LoadParamMemI16; + break; + case MVT::i32: + Opc = NVPTX::LoadParamMemI32; + break; + case MVT::i64: + Opc = NVPTX::LoadParamMemI64; + break; + case MVT::f32: + Opc = NVPTX::LoadParamMemF32; + break; + case MVT::f64: + Opc = NVPTX::LoadParamMemF64; + break; + } + break; + case 2: + switch (MemVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opc = NVPTX::LoadParamMemV2I8; + break; + case MVT::i8: + Opc = NVPTX::LoadParamMemV2I8; + break; + case MVT::i16: + Opc = NVPTX::LoadParamMemV2I16; + break; + case MVT::i32: + Opc = NVPTX::LoadParamMemV2I32; + break; + case MVT::i64: + Opc = NVPTX::LoadParamMemV2I64; + break; + case MVT::f32: + Opc = NVPTX::LoadParamMemV2F32; + break; + case MVT::f64: + Opc = NVPTX::LoadParamMemV2F64; + break; + } + break; + case 4: + switch (MemVT.getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opc = NVPTX::LoadParamMemV4I8; + break; + case MVT::i8: + Opc = NVPTX::LoadParamMemV4I8; + break; + case MVT::i16: + Opc = NVPTX::LoadParamMemV4I16; + break; + case MVT::i32: + Opc = NVPTX::LoadParamMemV4I32; + break; + case MVT::f32: + Opc = NVPTX::LoadParamMemV4F32; + break; + } + break; + } + + SDVTList VTs; + if (VecSize == 1) { + VTs = CurDAG->getVTList(EltVT, MVT::Other, MVT::Glue); + } else if (VecSize == 2) { + VTs = CurDAG->getVTList(EltVT, EltVT, MVT::Other, MVT::Glue); + } else { + EVT EVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other, MVT::Glue }; + VTs = CurDAG->getVTList(&EVTs[0], 5); + } + + unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue(); + + SmallVector<SDValue, 2> Ops; + Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32)); + Ops.push_back(Chain); + Ops.push_back(Flag); + + SDNode *Ret = + CurDAG->getMachineNode(Opc, DL, VTs, Ops); + return Ret; +} + +SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) { + SDLoc DL(N); + SDValue Chain = N->getOperand(0); + SDValue Offset = N->getOperand(1); + unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue(); + MemSDNode *Mem = cast<MemSDNode>(N); + + // How many elements do we have? 
+ unsigned NumElts = 1; + switch (N->getOpcode()) { + default: + return NULL; + case NVPTXISD::StoreRetval: + NumElts = 1; + break; + case NVPTXISD::StoreRetvalV2: + NumElts = 2; + break; + case NVPTXISD::StoreRetvalV4: + NumElts = 4; + break; + } + + // Build vector of operands + SmallVector<SDValue, 6> Ops; + for (unsigned i = 0; i < NumElts; ++i) + Ops.push_back(N->getOperand(i + 2)); + Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32)); + Ops.push_back(Chain); + + // Determine target opcode + // If we have an i1, use an 8-bit store. The lowering code in + // NVPTXISelLowering will have already emitted an upcast. + unsigned Opcode = 0; + switch (NumElts) { + default: + return NULL; + case 1: + switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opcode = NVPTX::StoreRetvalI8; + break; + case MVT::i8: + Opcode = NVPTX::StoreRetvalI8; + break; + case MVT::i16: + Opcode = NVPTX::StoreRetvalI16; + break; + case MVT::i32: + Opcode = NVPTX::StoreRetvalI32; + break; + case MVT::i64: + Opcode = NVPTX::StoreRetvalI64; + break; + case MVT::f32: + Opcode = NVPTX::StoreRetvalF32; + break; + case MVT::f64: + Opcode = NVPTX::StoreRetvalF64; + break; + } + break; + case 2: + switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opcode = NVPTX::StoreRetvalV2I8; + break; + case MVT::i8: + Opcode = NVPTX::StoreRetvalV2I8; + break; + case MVT::i16: + Opcode = NVPTX::StoreRetvalV2I16; + break; + case MVT::i32: + Opcode = NVPTX::StoreRetvalV2I32; + break; + case MVT::i64: + Opcode = NVPTX::StoreRetvalV2I64; + break; + case MVT::f32: + Opcode = NVPTX::StoreRetvalV2F32; + break; + case MVT::f64: + Opcode = NVPTX::StoreRetvalV2F64; + break; + } + break; + case 4: + switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opcode = NVPTX::StoreRetvalV4I8; + break; + case MVT::i8: + Opcode = NVPTX::StoreRetvalV4I8; + break; + case MVT::i16: + Opcode = NVPTX::StoreRetvalV4I16; + break; + case MVT::i32: + Opcode = NVPTX::StoreRetvalV4I32; + break; + case MVT::f32: + Opcode = NVPTX::StoreRetvalV4F32; + break; + } + break; + } + + SDNode *Ret = + CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops); + MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); + MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand(); + cast<MachineSDNode>(Ret)->setMemRefs(MemRefs0, MemRefs0 + 1); + + return Ret; +} + +SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) { + SDLoc DL(N); + SDValue Chain = N->getOperand(0); + SDValue Param = N->getOperand(1); + unsigned ParamVal = cast<ConstantSDNode>(Param)->getZExtValue(); + SDValue Offset = N->getOperand(2); + unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue(); + MemSDNode *Mem = cast<MemSDNode>(N); + SDValue Flag = N->getOperand(N->getNumOperands() - 1); + + // How many elements do we have? 
+ unsigned NumElts = 1; + switch (N->getOpcode()) { + default: + return NULL; + case NVPTXISD::StoreParamU32: + case NVPTXISD::StoreParamS32: + case NVPTXISD::StoreParam: + NumElts = 1; + break; + case NVPTXISD::StoreParamV2: + NumElts = 2; + break; + case NVPTXISD::StoreParamV4: + NumElts = 4; + break; + } + + // Build vector of operands + SmallVector<SDValue, 8> Ops; + for (unsigned i = 0; i < NumElts; ++i) + Ops.push_back(N->getOperand(i + 3)); + Ops.push_back(CurDAG->getTargetConstant(ParamVal, MVT::i32)); + Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32)); + Ops.push_back(Chain); + Ops.push_back(Flag); + + // Determine target opcode + // If we have an i1, use an 8-bit store. The lowering code in + // NVPTXISelLowering will have already emitted an upcast. + unsigned Opcode = 0; + switch (N->getOpcode()) { + default: + switch (NumElts) { + default: + return NULL; + case 1: + switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opcode = NVPTX::StoreParamI8; + break; + case MVT::i8: + Opcode = NVPTX::StoreParamI8; + break; + case MVT::i16: + Opcode = NVPTX::StoreParamI16; + break; + case MVT::i32: + Opcode = NVPTX::StoreParamI32; + break; + case MVT::i64: + Opcode = NVPTX::StoreParamI64; + break; + case MVT::f32: + Opcode = NVPTX::StoreParamF32; + break; + case MVT::f64: + Opcode = NVPTX::StoreParamF64; + break; + } + break; + case 2: + switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opcode = NVPTX::StoreParamV2I8; + break; + case MVT::i8: + Opcode = NVPTX::StoreParamV2I8; + break; + case MVT::i16: + Opcode = NVPTX::StoreParamV2I16; + break; + case MVT::i32: + Opcode = NVPTX::StoreParamV2I32; + break; + case MVT::i64: + Opcode = NVPTX::StoreParamV2I64; + break; + case MVT::f32: + Opcode = NVPTX::StoreParamV2F32; + break; + case MVT::f64: + Opcode = NVPTX::StoreParamV2F64; + break; + } + break; + case 4: + switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { + default: + return NULL; + case MVT::i1: + Opcode = NVPTX::StoreParamV4I8; + break; + case MVT::i8: + Opcode = NVPTX::StoreParamV4I8; + break; + case MVT::i16: + Opcode = NVPTX::StoreParamV4I16; + break; + case MVT::i32: + Opcode = NVPTX::StoreParamV4I32; + break; + case MVT::f32: + Opcode = NVPTX::StoreParamV4F32; + break; + } + break; + } + break; + // Special case: if we have a sign-extend/zero-extend node, insert the + // conversion instruction first, and use that as the value operand to + // the selected StoreParam node. + case NVPTXISD::StoreParamU32: { + Opcode = NVPTX::StoreParamI32; + SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, + MVT::i32); + SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_u32_u16, DL, + MVT::i32, Ops[0], CvtNone); + Ops[0] = SDValue(Cvt, 0); + break; + } + case NVPTXISD::StoreParamS32: { + Opcode = NVPTX::StoreParamI32; + SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, + MVT::i32); + SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_s32_s16, DL, + MVT::i32, Ops[0], CvtNone); + Ops[0] = SDValue(Cvt, 0); + break; + } + } + + SDVTList RetVTs = CurDAG->getVTList(MVT::Other, MVT::Glue); + SDNode *Ret = + CurDAG->getMachineNode(Opcode, DL, RetVTs, Ops); + MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); + MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand(); + cast<MachineSDNode>(Ret)->setMemRefs(MemRefs0, MemRefs0 + 1); + + return Ret; +} + // SelectDirectAddr - Match a direct address for DAG. 
// A direct address could be a globaladdress or externalsymbol. bool NVPTXDAGToDAGISel::SelectDirectAddr(SDValue N, SDValue &Address) { diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h index ed16d44..d961e50 100644 --- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h +++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h @@ -28,38 +28,22 @@ class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel { // If true, generate corresponding FPCONTRACT. This is // language dependent (i.e. CUDA and OpenCL works differently). - bool doFMADF32; bool doFMAF64; bool doFMAF32; bool doFMAF64AGG; bool doFMAF32AGG; bool allowFMA; - // 0: use div.approx - // 1: use div.full - // 2: For sm_20 and later, ieee-compliant div.rnd.f32 can be generated; - // Otherwise, use div.full - int do_DIVF32_PREC; - - // If true, generate sqrt.rn, else generate sqrt.approx. If FTZ - // is true, then generate the corresponding FTZ version. - bool do_SQRTF32_PREC; - - // If true, add .ftz to f32 instructions. - // This is only meaningful for sm_20 and later, as the default - // is not ftz. - // For sm earlier than sm_20, f32 denorms are always ftz by the - // hardware. - // We always add the .ftz modifier regardless of the sm value - // when Use32FTZ is true. - bool UseF32FTZ; - // If true, generate mul.wide from sext and mul bool doMulWide; + int getDivF32Level() const; + bool usePrecSqrtF32() const; + bool useF32FTZ() const; + public: explicit NVPTXDAGToDAGISel(NVPTXTargetMachine &tm, - CodeGenOpt::Level OptLevel); + CodeGenOpt::Level OptLevel); // Pass Name virtual const char *getPassName() const { @@ -80,7 +64,10 @@ private: SDNode *SelectLDGLDUVector(SDNode *N); SDNode *SelectStore(SDNode *N); SDNode *SelectStoreVector(SDNode *N); - + SDNode *SelectLoadParam(SDNode *N); + SDNode *SelectStoreRetval(SDNode *N); + SDNode *SelectStoreParam(SDNode *N); + inline SDValue getI32Imm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, MVT::i32); } diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp index 6cc850e..828242d 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -51,6 +51,8 @@ static bool IsPTXVectorType(MVT VT) { switch (VT.SimpleTy) { default: return false; + case MVT::v2i1: + case MVT::v4i1: case MVT::v2i8: case MVT::v4i8: case MVT::v2i16: @@ -65,6 +67,37 @@ static bool IsPTXVectorType(MVT VT) { } } +/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive +/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors +/// into their primitive components. +/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the +/// same number of types as the Ins/Outs arrays in LowerFormalArguments, +/// LowerCall, and LowerReturn. 
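// An illustration of the difference (hypothetical type, not from this patch): for %t = type { <2 x float>, i32 }, ComputeValueVTs yields { v2f32, i32 } at byte offsets { 0, 8 }, while ComputePTXValueVTs yields the scalarized { f32, f32, i32 } at byte offsets { 0, 4, 8 }.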
+static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty, + SmallVectorImpl<EVT> &ValueVTs, + SmallVectorImpl<uint64_t> *Offsets = 0, + uint64_t StartingOffset = 0) { + SmallVector<EVT, 16> TempVTs; + SmallVector<uint64_t, 16> TempOffsets; + + ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset); + for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) { + EVT VT = TempVTs[i]; + uint64_t Off = TempOffsets[i]; + if (VT.isVector()) + for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) { + ValueVTs.push_back(VT.getVectorElementType()); + if (Offsets) + Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize()); + } + else { + ValueVTs.push_back(VT); + if (Offsets) + Offsets->push_back(Off); + } + } +} + // NVPTXTargetLowering Constructor. NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM), @@ -90,7 +123,6 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) setSchedulingPreference(Sched::Source); addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass); - addRegisterClass(MVT::i8, &NVPTX::Int8RegsRegClass); addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass); addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass); addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass); @@ -106,10 +138,12 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) setOperationAction(ISD::BR_CC, MVT::i16, Expand); setOperationAction(ISD::BR_CC, MVT::i32, Expand); setOperationAction(ISD::BR_CC, MVT::i64, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); + // Some SIGN_EXTEND_INREG can be done using the cvt instruction. + // For others we will expand to a SHL/SRA pair.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); if (nvptxSubtarget.hasROT64()) { @@ -170,6 +204,9 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) // TRAP can be lowered to PTX trap setOperationAction(ISD::TRAP, MVT::Other, Legal); + setOperationAction(ISD::ADDC, MVT::i64, Expand); + setOperationAction(ISD::ADDE, MVT::i64, Expand); + // Register custom handling for vector loads/stores for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { @@ -181,6 +218,25 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) } } + // Custom handling for i8 intrinsics + setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); + + setOperationAction(ISD::CTLZ, MVT::i16, Legal); + setOperationAction(ISD::CTLZ, MVT::i32, Legal); + setOperationAction(ISD::CTLZ, MVT::i64, Legal); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal); + setOperationAction(ISD::CTTZ, MVT::i16, Expand); + setOperationAction(ISD::CTTZ, MVT::i32, Expand); + setOperationAction(ISD::CTTZ, MVT::i64, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); + setOperationAction(ISD::CTPOP, MVT::i16, Legal); + setOperationAction(ISD::CTPOP, MVT::i32, Legal); + setOperationAction(ISD::CTPOP, MVT::i64, Legal); + // Now deduce the information based on the above mentioned // actions computeRegisterProperties(); @@ -196,8 +252,6 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { return "NVPTXISD::RET_FLAG"; case NVPTXISD::Wrapper: return "NVPTXISD::Wrapper"; - case NVPTXISD::NVBuiltin: - return "NVPTXISD::NVBuiltin"; case NVPTXISD::DeclareParam: return "NVPTXISD::DeclareParam"; case NVPTXISD::DeclareScalarParam: @@ -210,14 +264,20 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { return "NVPTXISD::PrintCall"; case NVPTXISD::LoadParam: return "NVPTXISD::LoadParam"; + case NVPTXISD::LoadParamV2: + return "NVPTXISD::LoadParamV2"; + case NVPTXISD::LoadParamV4: + return "NVPTXISD::LoadParamV4"; case NVPTXISD::StoreParam: return "NVPTXISD::StoreParam"; + case NVPTXISD::StoreParamV2: + return "NVPTXISD::StoreParamV2"; + case NVPTXISD::StoreParamV4: + return "NVPTXISD::StoreParamV4"; case NVPTXISD::StoreParamS32: return "NVPTXISD::StoreParamS32"; case NVPTXISD::StoreParamU32: return "NVPTXISD::StoreParamU32"; - case NVPTXISD::MoveToParam: - return "NVPTXISD::MoveToParam"; case NVPTXISD::CallArgBegin: return "NVPTXISD::CallArgBegin"; case NVPTXISD::CallArg: @@ -236,12 +296,12 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { return "NVPTXISD::Prototype"; case NVPTXISD::MoveParam: return "NVPTXISD::MoveParam"; - case NVPTXISD::MoveRetval: - return "NVPTXISD::MoveRetval"; - case NVPTXISD::MoveToRetval: - return "NVPTXISD::MoveToRetval"; case NVPTXISD::StoreRetval: return "NVPTXISD::StoreRetval"; + case NVPTXISD::StoreRetvalV2: + return "NVPTXISD::StoreRetvalV2"; + case NVPTXISD::StoreRetvalV4: + return "NVPTXISD::StoreRetvalV4"; case NVPTXISD::PseudoUseParam: return 
"NVPTXISD::PseudoUseParam"; case NVPTXISD::RETURN: @@ -281,83 +341,62 @@ NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op); } -std::string NVPTXTargetLowering::getPrototype( - Type *retTy, const ArgListTy &Args, - const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment) const { +std::string +NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args, + const SmallVectorImpl<ISD::OutputArg> &Outs, + unsigned retAlignment, + const ImmutableCallSite *CS) const { bool isABI = (nvptxSubtarget.getSmVersion() >= 20); + assert(isABI && "Non-ABI compilation is not supported"); + if (!isABI) + return ""; std::stringstream O; O << "prototype_" << uniqueCallSite << " : .callprototype "; - if (retTy->getTypeID() == Type::VoidTyID) + if (retTy->getTypeID() == Type::VoidTyID) { O << "()"; - else { + } else { O << "("; - if (isABI) { - if (retTy->isPrimitiveType() || retTy->isIntegerTy()) { - unsigned size = 0; - if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) { - size = ITy->getBitWidth(); - if (size < 32) - size = 32; - } else { - assert(retTy->isFloatingPointTy() && - "Floating point type expected here"); - size = retTy->getPrimitiveSizeInBits(); - } - - O << ".param .b" << size << " _"; - } else if (isa<PointerType>(retTy)) - O << ".param .b" << getPointerTy().getSizeInBits() << " _"; - else { - if ((retTy->getTypeID() == Type::StructTyID) || - isa<VectorType>(retTy)) { - SmallVector<EVT, 16> vtparts; - ComputeValueVTs(*this, retTy, vtparts); - unsigned totalsz = 0; - for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { - unsigned elems = 1; - EVT elemtype = vtparts[i]; - if (vtparts[i].isVector()) { - elems = vtparts[i].getVectorNumElements(); - elemtype = vtparts[i].getVectorElementType(); - } - for (unsigned j = 0, je = elems; j != je; ++j) { - unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 8)) - sz = 8; - totalsz += sz / 8; - } - } - O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]"; - } else { - assert(false && "Unknown return type"); - } + if (retTy->isPrimitiveType() || retTy->isIntegerTy()) { + unsigned size = 0; + if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) { + size = ITy->getBitWidth(); + if (size < 32) + size = 32; + } else { + assert(retTy->isFloatingPointTy() && + "Floating point type expected here"); + size = retTy->getPrimitiveSizeInBits(); } - } else { - SmallVector<EVT, 16> vtparts; - ComputeValueVTs(*this, retTy, vtparts); - unsigned idx = 0; - for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { - unsigned elems = 1; - EVT elemtype = vtparts[i]; - if (vtparts[i].isVector()) { - elems = vtparts[i].getVectorNumElements(); - elemtype = vtparts[i].getVectorElementType(); - } - for (unsigned j = 0, je = elems; j != je; ++j) { - unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) - sz = 32; - O << ".reg .b" << sz << " _"; - if (j < je - 1) - O << ", "; - ++idx; + O << ".param .b" << size << " _"; + } else if (isa<PointerType>(retTy)) { + O << ".param .b" << getPointerTy().getSizeInBits() << " _"; + } else { + if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) { + SmallVector<EVT, 16> vtparts; + ComputeValueVTs(*this, retTy, vtparts); + unsigned totalsz = 0; + for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { + unsigned elems = 1; + EVT elemtype = vtparts[i]; + if (vtparts[i].isVector()) { + elems = vtparts[i].getVectorNumElements(); + elemtype = 
vtparts[i].getVectorElementType(); + } + // TODO: no need to loop + for (unsigned j = 0, je = elems; j != je; ++j) { + unsigned sz = elemtype.getSizeInBits(); + if (elemtype.isInteger() && (sz < 8)) + sz = 8; + totalsz += sz / 8; + } } - if (i < e - 1) - O << ", "; + O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]"; + } else { + assert(false && "Unknown return type"); } } O << ") "; @@ -367,14 +406,38 @@ std::string NVPTXTargetLowering::getPrototype( bool first = true; MVT thePointerTy = getPointerTy(); - for (unsigned i = 0, e = Args.size(); i != e; ++i) { - const Type *Ty = Args[i].Ty; + unsigned OIdx = 0; + for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) { + Type *Ty = Args[i].Ty; if (!first) { O << ", "; } first = false; - if (Outs[i].Flags.isByVal() == false) { + if (Outs[OIdx].Flags.isByVal() == false) { + if (Ty->isAggregateType() || Ty->isVectorTy()) { + unsigned align = 0; + const CallInst *CallI = cast<CallInst>(CS->getInstruction()); + const DataLayout *TD = getDataLayout(); + // +1 because index 0 is reserved for return type alignment + if (!llvm::getAlign(*CallI, i + 1, align)) + align = TD->getABITypeAlignment(Ty); + unsigned sz = TD->getTypeAllocSize(Ty); + O << ".param .align " << align << " .b8 "; + O << "_"; + O << "[" << sz << "]"; + // update the index for Outs + SmallVector<EVT, 16> vtparts; + ComputeValueVTs(*this, Ty, vtparts); + if (unsigned len = vtparts.size()) + OIdx += len - 1; + continue; + } + // i8 types in IR will be i16 types in SDAG + assert((getValueType(Ty) == Outs[OIdx].VT || + (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) && + "type mismatch between callee prototype and arguments"); + // scalar type unsigned sz = 0; if (isa<IntegerType>(Ty)) { sz = cast<IntegerType>(Ty)->getBitWidth(); @@ -384,10 +447,7 @@ std::string NVPTXTargetLowering::getPrototype( sz = thePointerTy.getSizeInBits(); else sz = Ty->getPrimitiveSizeInBits(); - if (isABI) - O << ".param .b" << sz << " "; - else - O << ".reg .b" << sz << " "; + O << ".param .b" << sz << " "; O << "_"; continue; } @@ -395,50 +455,47 @@ std::string NVPTXTargetLowering::getPrototype( assert(PTy && "Param with byval attribute should be a pointer type"); Type *ETy = PTy->getElementType(); - if (isABI) { - unsigned align = Outs[i].Flags.getByValAlign(); - unsigned sz = getDataLayout()->getTypeAllocSize(ETy); - O << ".param .align " << align << " .b8 "; - O << "_"; - O << "[" << sz << "]"; - continue; - } else { - SmallVector<EVT, 16> vtparts; - ComputeValueVTs(*this, ETy, vtparts); - for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { - unsigned elems = 1; - EVT elemtype = vtparts[i]; - if (vtparts[i].isVector()) { - elems = vtparts[i].getVectorNumElements(); - elemtype = vtparts[i].getVectorElementType(); - } - - for (unsigned j = 0, je = elems; j != je; ++j) { - unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) - sz = 32; - O << ".reg .b" << sz << " "; - O << "_"; - if (j < je - 1) - O << ", "; - } - if (i < e - 1) - O << ", "; - } - continue; - } + unsigned align = Outs[OIdx].Flags.getByValAlign(); + unsigned sz = getDataLayout()->getTypeAllocSize(ETy); + O << ".param .align " << align << " .b8 "; + O << "_"; + O << "[" << sz << "]"; } O << ");"; return O.str(); } +unsigned +NVPTXTargetLowering::getArgumentAlignment(SDValue Callee, + const ImmutableCallSite *CS, + Type *Ty, + unsigned Idx) const { + const DataLayout *TD = getDataLayout(); + unsigned align = 0; + GlobalAddressSDNode *Func = 
dyn_cast<GlobalAddressSDNode>(Callee.getNode()); + + if (Func) { // direct call + assert(CS->getCalledFunction() && + "direct call cannot find callee"); + if (!llvm::getAlign(*(CS->getCalledFunction()), Idx, align)) + align = TD->getABITypeAlignment(Ty); + } + else { // indirect call + const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction()); + if (!llvm::getAlign(*CallI, Idx, align)) + align = TD->getABITypeAlignment(Ty); + } + + return align; +} + SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -447,54 +504,258 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite *CS = CLI.CS; bool isABI = (nvptxSubtarget.getSmVersion() >= 20); + assert(isABI && "Non-ABI compilation is not supported"); + if (!isABI) + return Chain; + const DataLayout *TD = getDataLayout(); + MachineFunction &MF = DAG.getMachineFunction(); + const Function *F = MF.getFunction(); SDValue tempChain = Chain; - Chain = DAG.getCALLSEQ_START(Chain, - DAG.getIntPtrConstant(uniqueCallSite, true), - dl); + Chain = + DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true), + dl); SDValue InFlag = Chain.getValue(1); - assert((Outs.size() == Args.size()) && - "Unexpected number of arguments to function call"); unsigned paramCount = 0; + // Args.size() and Outs.size() need not match. + // Outs.size() will be larger + // * if there is an aggregate argument with multiple fields (each field + // showing up separately in Outs) + // * if there is a vector argument with more than typical vector-length + // elements (generally if more than 4) where each vector element is + // individually present in Outs. + // So a different index should be used for indexing into Outs/OutVals. + // See similar issue in LowerFormalArguments. 
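// A hypothetical example of that mismatch: for a call such as
//   call void @f({ i32, float } %a, float %b)
// Args.size() is 2 but Outs carries three entries (one per scalar
// leaf: i32 and f32 from %a, then f32 from %b), which is why OIdx is
// advanced independently of i below.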
+ unsigned OIdx = 0; // Declare the .params or .reg need to pass values // to the function - for (unsigned i = 0, e = Outs.size(); i != e; ++i) { - EVT VT = Outs[i].VT; + for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) { + EVT VT = Outs[OIdx].VT; + Type *Ty = Args[i].Ty; + + if (Outs[OIdx].Flags.isByVal() == false) { + if (Ty->isAggregateType()) { + // aggregate + SmallVector<EVT, 16> vtparts; + ComputeValueVTs(*this, Ty, vtparts); + + unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1); + // declare .param .align <align> .b8 .param<n>[<size>]; + unsigned sz = TD->getTypeAllocSize(Ty); + SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); + SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32), + DAG.getConstant(paramCount, MVT::i32), + DAG.getConstant(sz, MVT::i32), InFlag }; + Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs, + DeclareParamOps, 5); + InFlag = Chain.getValue(1); + unsigned curOffset = 0; + for (unsigned j = 0, je = vtparts.size(); j != je; ++j) { + unsigned elems = 1; + EVT elemtype = vtparts[j]; + if (vtparts[j].isVector()) { + elems = vtparts[j].getVectorNumElements(); + elemtype = vtparts[j].getVectorElementType(); + } + for (unsigned k = 0, ke = elems; k != ke; ++k) { + unsigned sz = elemtype.getSizeInBits(); + if (elemtype.isInteger() && (sz < 8)) + sz = 8; + SDValue StVal = OutVals[OIdx]; + if (elemtype.getSizeInBits() < 16) { + StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal); + } + SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); + SDValue CopyParamOps[] = { Chain, + DAG.getConstant(paramCount, MVT::i32), + DAG.getConstant(curOffset, MVT::i32), + StVal, InFlag }; + Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, + CopyParamVTs, &CopyParamOps[0], 5, + elemtype, MachinePointerInfo()); + InFlag = Chain.getValue(1); + curOffset += sz / 8; + ++OIdx; + } + } + if (vtparts.size() > 0) + --OIdx; + ++paramCount; + continue; + } + if (Ty->isVectorTy()) { + EVT ObjectVT = getValueType(Ty); + unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1); + // declare .param .align <align> .b8 .param<n>[<size>]; + unsigned sz = TD->getTypeAllocSize(Ty); + SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); + SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32), + DAG.getConstant(paramCount, MVT::i32), + DAG.getConstant(sz, MVT::i32), InFlag }; + Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs, + DeclareParamOps, 5); + InFlag = Chain.getValue(1); + unsigned NumElts = ObjectVT.getVectorNumElements(); + EVT EltVT = ObjectVT.getVectorElementType(); + EVT MemVT = EltVT; + bool NeedExtend = false; + if (EltVT.getSizeInBits() < 16) { + NeedExtend = true; + EltVT = MVT::i16; + } + + // V1 store + if (NumElts == 1) { + SDValue Elt = OutVals[OIdx++]; + if (NeedExtend) + Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt); + + SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); + SDValue CopyParamOps[] = { Chain, + DAG.getConstant(paramCount, MVT::i32), + DAG.getConstant(0, MVT::i32), Elt, + InFlag }; + Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, + CopyParamVTs, &CopyParamOps[0], 5, + MemVT, MachinePointerInfo()); + InFlag = Chain.getValue(1); + } else if (NumElts == 2) { + SDValue Elt0 = OutVals[OIdx++]; + SDValue Elt1 = OutVals[OIdx++]; + if (NeedExtend) { + Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0); + Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1); + } + + SDVTList CopyParamVTs 
= DAG.getVTList(MVT::Other, MVT::Glue); + SDValue CopyParamOps[] = { Chain, + DAG.getConstant(paramCount, MVT::i32), + DAG.getConstant(0, MVT::i32), Elt0, Elt1, + InFlag }; + Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl, + CopyParamVTs, &CopyParamOps[0], 6, + MemVT, MachinePointerInfo()); + InFlag = Chain.getValue(1); + } else { + unsigned curOffset = 0; + // V4 stores + // We have at least 4 elements (<3 x Ty> expands to 4 elements) and + // the + // vector will be expanded to a power of 2 elements, so we know we can + // always round up to the next multiple of 4 when creating the vector + // stores. + // e.g. 4 elem => 1 st.v4 + // 6 elem => 2 st.v4 + // 8 elem => 2 st.v4 + // 11 elem => 3 st.v4 + unsigned VecSize = 4; + if (EltVT.getSizeInBits() == 64) + VecSize = 2; + + // This is potentially only part of a vector, so assume all elements + // are packed together. + unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize; + + for (unsigned i = 0; i < NumElts; i += VecSize) { + // Get values + SDValue StoreVal; + SmallVector<SDValue, 8> Ops; + Ops.push_back(Chain); + Ops.push_back(DAG.getConstant(paramCount, MVT::i32)); + Ops.push_back(DAG.getConstant(curOffset, MVT::i32)); + + unsigned Opc = NVPTXISD::StoreParamV2; + + StoreVal = OutVals[OIdx++]; + if (NeedExtend) + StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal); + Ops.push_back(StoreVal); + + if (i + 1 < NumElts) { + StoreVal = OutVals[OIdx++]; + if (NeedExtend) + StoreVal = + DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal); + } else { + StoreVal = DAG.getUNDEF(EltVT); + } + Ops.push_back(StoreVal); + + if (VecSize == 4) { + Opc = NVPTXISD::StoreParamV4; + if (i + 2 < NumElts) { + StoreVal = OutVals[OIdx++]; + if (NeedExtend) + StoreVal = + DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal); + } else { + StoreVal = DAG.getUNDEF(EltVT); + } + Ops.push_back(StoreVal); + + if (i + 3 < NumElts) { + StoreVal = OutVals[OIdx++]; + if (NeedExtend) + StoreVal = + DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal); + } else { + StoreVal = DAG.getUNDEF(EltVT); + } + Ops.push_back(StoreVal); + } + + Ops.push_back(InFlag); - if (Outs[i].Flags.isByVal() == false) { + SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); + Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, &Ops[0], + Ops.size(), MemVT, + MachinePointerInfo()); + InFlag = Chain.getValue(1); + curOffset += PerStoreOffset; + } + } + ++paramCount; + --OIdx; + continue; + } // Plain scalar // for ABI, declare .param .b<size> .param<n>; - // for nonABI, declare .reg .b<size> .param<n>; - unsigned isReg = 1; - if (isABI) - isReg = 0; unsigned sz = VT.getSizeInBits(); - if (VT.isInteger() && (sz < 32)) - sz = 32; + bool needExtend = false; + if (VT.isInteger()) { + if (sz < 16) + needExtend = true; + if (sz < 32) + sz = 32; + } SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue DeclareParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32), - DAG.getConstant(isReg, MVT::i32), InFlag }; + DAG.getConstant(0, MVT::i32), InFlag }; Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs, DeclareParamOps, 5); InFlag = Chain.getValue(1); + SDValue OutV = OutVals[OIdx]; + if (needExtend) { + // zext/sext i1 to i16 + unsigned opc = ISD::ZERO_EXTEND; + if (Outs[OIdx].Flags.isSExt()) + opc = ISD::SIGN_EXTEND; + OutV = DAG.getNode(opc, dl, MVT::i16, OutV); + } SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue CopyParamOps[] = { Chain, 
DAG.getConstant(paramCount, MVT::i32), - DAG.getConstant(0, MVT::i32), OutVals[i], - InFlag }; + DAG.getConstant(0, MVT::i32), OutV, InFlag }; unsigned opcode = NVPTXISD::StoreParam; - if (isReg) - opcode = NVPTXISD::MoveToParam; - else { - if (Outs[i].Flags.isZExt()) - opcode = NVPTXISD::StoreParamU32; - else if (Outs[i].Flags.isSExt()) - opcode = NVPTXISD::StoreParamS32; - } - Chain = DAG.getNode(opcode, dl, CopyParamVTs, CopyParamOps, 5); + if (Outs[OIdx].Flags.isZExt()) + opcode = NVPTXISD::StoreParamU32; + else if (Outs[OIdx].Flags.isSExt()) + opcode = NVPTXISD::StoreParamS32; + Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps, 5, + VT, MachinePointerInfo()); InFlag = Chain.getValue(1); ++paramCount; @@ -506,55 +767,20 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, assert(PTy && "Type of a byval parameter should be pointer"); ComputeValueVTs(*this, PTy->getElementType(), vtparts); - if (isABI) { - // declare .param .align 16 .b8 .param<n>[<size>]; - unsigned sz = Outs[i].Flags.getByValSize(); - SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); - // The ByValAlign in the Outs[i].Flags is alway set at this point, so we - // don't need to - // worry about natural alignment or not. See TargetLowering::LowerCallTo() - SDValue DeclareParamOps[] = { - Chain, DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32), - DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32), - InFlag - }; - Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs, - DeclareParamOps, 5); - InFlag = Chain.getValue(1); - unsigned curOffset = 0; - for (unsigned j = 0, je = vtparts.size(); j != je; ++j) { - unsigned elems = 1; - EVT elemtype = vtparts[j]; - if (vtparts[j].isVector()) { - elems = vtparts[j].getVectorNumElements(); - elemtype = vtparts[j].getVectorElementType(); - } - for (unsigned k = 0, ke = elems; k != ke; ++k) { - unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 8)) - sz = 8; - SDValue srcAddr = - DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i], - DAG.getConstant(curOffset, getPointerTy())); - SDValue theVal = - DAG.getLoad(elemtype, dl, tempChain, srcAddr, - MachinePointerInfo(), false, false, false, 0); - SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue CopyParamOps[] = { Chain, - DAG.getConstant(paramCount, MVT::i32), - DAG.getConstant(curOffset, MVT::i32), - theVal, InFlag }; - Chain = DAG.getNode(NVPTXISD::StoreParam, dl, CopyParamVTs, - CopyParamOps, 5); - InFlag = Chain.getValue(1); - curOffset += sz / 8; - } - } - ++paramCount; - continue; - } - // Non-abi, struct or vector - // Declare a bunch or .reg .b<size> .param<n> + // declare .param .align <align> .b8 .param<n>[<size>]; + unsigned sz = Outs[OIdx].Flags.getByValSize(); + SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); + // The ByValAlign in the Outs[OIdx].Flags is always set at this point, + // so we don't need to worry about natural alignment or not. + // See TargetLowering::LowerCallTo().
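// As a concrete (hypothetical) shape of the declaration emitted below:
// a byval argument covering 16 bytes at 4-byte alignment becomes roughly
//   .param .align 4 .b8 param0[16];
// and the loop that follows copies each scalar leaf into that param
// space with a StoreParam at its byte offset.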
+ SDValue DeclareParamOps[] = { + Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32), + DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32), + InFlag + }; + Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs, + DeclareParamOps, 5); + InFlag = Chain.getValue(1); unsigned curOffset = 0; for (unsigned j = 0, je = vtparts.size(); j != je; ++j) { unsigned elems = 1; @@ -565,107 +791,66 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } for (unsigned k = 0, ke = elems; k != ke; ++k) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) - sz = 32; - SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue DeclareParamOps[] = { Chain, - DAG.getConstant(paramCount, MVT::i32), - DAG.getConstant(sz, MVT::i32), - DAG.getConstant(1, MVT::i32), InFlag }; - Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs, - DeclareParamOps, 5); - InFlag = Chain.getValue(1); + if (elemtype.isInteger() && (sz < 8)) + sz = 8; SDValue srcAddr = - DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i], + DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx], DAG.getConstant(curOffset, getPointerTy())); - SDValue theVal = - DAG.getLoad(elemtype, dl, tempChain, srcAddr, MachinePointerInfo(), - false, false, false, 0); + SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr, + MachinePointerInfo(), false, false, false, + 0); + if (elemtype.getSizeInBits() < 16) { + theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal); + } SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32), - DAG.getConstant(0, MVT::i32), theVal, + DAG.getConstant(curOffset, MVT::i32), theVal, InFlag }; - Chain = DAG.getNode(NVPTXISD::MoveToParam, dl, CopyParamVTs, - CopyParamOps, 5); + Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs, + CopyParamOps, 5, elemtype, + MachinePointerInfo()); + InFlag = Chain.getValue(1); - ++paramCount; + curOffset += sz / 8; } } + ++paramCount; } GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode()); unsigned retAlignment = 0; // Handle Result - unsigned retCount = 0; if (Ins.size() > 0) { SmallVector<EVT, 16> resvtparts; ComputeValueVTs(*this, retTy, resvtparts); - // Declare one .param .align 16 .b8 func_retval0[<size>] for ABI or - // individual .reg .b<size> func_retval<0..> for non ABI - unsigned resultsz = 0; - for (unsigned i = 0, e = resvtparts.size(); i != e; ++i) { - unsigned elems = 1; - EVT elemtype = resvtparts[i]; - if (resvtparts[i].isVector()) { - elems = resvtparts[i].getVectorNumElements(); - elemtype = resvtparts[i].getVectorElementType(); - } - for (unsigned j = 0, je = elems; j != je; ++j) { - unsigned sz = elemtype.getSizeInBits(); - if (isABI == false) { - if (elemtype.isInteger() && (sz < 32)) - sz = 32; - } else { - if (elemtype.isInteger() && (sz < 8)) - sz = 8; - } - if (isABI == false) { - SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue DeclareRetOps[] = { Chain, DAG.getConstant(2, MVT::i32), - DAG.getConstant(sz, MVT::i32), - DAG.getConstant(retCount, MVT::i32), - InFlag }; - Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs, - DeclareRetOps, 5); - InFlag = Chain.getValue(1); - ++retCount; - } - resultsz += sz; - } - } - if (isABI) { - if (retTy->isPrimitiveType() || retTy->isIntegerTy() || - retTy->isPointerTy()) { - // Scalar needs to be at least 32bit wide - if (resultsz < 32) - 
resultsz = 32; - SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32), - DAG.getConstant(resultsz, MVT::i32), - DAG.getConstant(0, MVT::i32), InFlag }; - Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs, - DeclareRetOps, 5); - InFlag = Chain.getValue(1); - } else { - if (Func) { // direct call - if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment)) - retAlignment = getDataLayout()->getABITypeAlignment(retTy); - } else { // indirect call - const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction()); - if (!llvm::getAlign(*CallI, 0, retAlignment)) - retAlignment = getDataLayout()->getABITypeAlignment(retTy); - } - SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue DeclareRetOps[] = { Chain, - DAG.getConstant(retAlignment, MVT::i32), - DAG.getConstant(resultsz / 8, MVT::i32), - DAG.getConstant(0, MVT::i32), InFlag }; - Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs, - DeclareRetOps, 5); - InFlag = Chain.getValue(1); - } + // Declare + // .param .align 16 .b8 retval0[<size-in-bytes>], or + // .param .b<size-in-bits> retval0 + unsigned resultsz = TD->getTypeAllocSizeInBits(retTy); + if (retTy->isPrimitiveType() || retTy->isIntegerTy() || + retTy->isPointerTy()) { + // Scalar needs to be at least 32bit wide + if (resultsz < 32) + resultsz = 32; + SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); + SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32), + DAG.getConstant(resultsz, MVT::i32), + DAG.getConstant(0, MVT::i32), InFlag }; + Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs, + DeclareRetOps, 5); + InFlag = Chain.getValue(1); + } else { + retAlignment = getArgumentAlignment(Callee, CS, retTy, 0); + SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); + SDValue DeclareRetOps[] = { Chain, + DAG.getConstant(retAlignment, MVT::i32), + DAG.getConstant(resultsz / 8, MVT::i32), + DAG.getConstant(0, MVT::i32), InFlag }; + Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs, + DeclareRetOps, 5); + InFlag = Chain.getValue(1); } } @@ -678,7 +863,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // The prototype is embedded in a string and put as the operand for an // INLINEASM SDNode. SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue); - std::string proto_string = getPrototype(retTy, Args, Outs, retAlignment); + std::string proto_string = + getPrototype(retTy, Args, Outs, retAlignment, CS); const char *asmstr = nvTM->getManagedStrPool() ->getManagedString(proto_string.c_str())->c_str(); SDValue InlineAsmOps[] = { @@ -691,9 +877,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Op to just print "call" SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue PrintCallOps[] = { - Chain, - DAG.getConstant(isABI ? ((Ins.size() == 0) ? 0 : 1) : retCount, MVT::i32), - InFlag + Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag }; Chain = DAG.getNode(Func ? 
(NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall), dl, PrintCallVTs, PrintCallOps, 3); @@ -741,59 +925,180 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Generate loads from param memory/moves from registers for result if (Ins.size() > 0) { - if (isABI) { - unsigned resoffset = 0; - for (unsigned i = 0, e = Ins.size(); i != e; ++i) { - unsigned sz = Ins[i].VT.getSizeInBits(); - if (Ins[i].VT.isInteger() && (sz < 8)) - sz = 8; - EVT LoadRetVTs[] = { Ins[i].VT, MVT::Other, MVT::Glue }; - SDValue LoadRetOps[] = { Chain, DAG.getConstant(1, MVT::i32), - DAG.getConstant(resoffset, MVT::i32), InFlag }; - SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, LoadRetVTs, - LoadRetOps, array_lengthof(LoadRetOps)); + unsigned resoffset = 0; + if (retTy && retTy->isVectorTy()) { + EVT ObjectVT = getValueType(retTy); + unsigned NumElts = ObjectVT.getVectorNumElements(); + EVT EltVT = ObjectVT.getVectorElementType(); + assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(), + ObjectVT) == NumElts && + "Vector was not scalarized"); + unsigned sz = EltVT.getSizeInBits(); + bool needTruncate = sz < 16 ? true : false; + + if (NumElts == 1) { + // Just a simple load + std::vector<EVT> LoadRetVTs; + if (needTruncate) { + // If loading i1 result, generate + // load i16 + // trunc i16 to i1 + LoadRetVTs.push_back(MVT::i16); + } else + LoadRetVTs.push_back(EltVT); + LoadRetVTs.push_back(MVT::Other); + LoadRetVTs.push_back(MVT::Glue); + std::vector<SDValue> LoadRetOps; + LoadRetOps.push_back(Chain); + LoadRetOps.push_back(DAG.getConstant(1, MVT::i32)); + LoadRetOps.push_back(DAG.getConstant(0, MVT::i32)); + LoadRetOps.push_back(InFlag); + SDValue retval = DAG.getMemIntrinsicNode( + NVPTXISD::LoadParam, dl, + DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0], + LoadRetOps.size(), EltVT, MachinePointerInfo()); Chain = retval.getValue(1); InFlag = retval.getValue(2); - InVals.push_back(retval); - resoffset += sz / 8; + SDValue Ret0 = retval; + if (needTruncate) + Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0); + InVals.push_back(Ret0); + } else if (NumElts == 2) { + // LoadV2 + std::vector<EVT> LoadRetVTs; + if (needTruncate) { + // If loading i1 result, generate + // load i16 + // trunc i16 to i1 + LoadRetVTs.push_back(MVT::i16); + LoadRetVTs.push_back(MVT::i16); + } else { + LoadRetVTs.push_back(EltVT); + LoadRetVTs.push_back(EltVT); + } + LoadRetVTs.push_back(MVT::Other); + LoadRetVTs.push_back(MVT::Glue); + std::vector<SDValue> LoadRetOps; + LoadRetOps.push_back(Chain); + LoadRetOps.push_back(DAG.getConstant(1, MVT::i32)); + LoadRetOps.push_back(DAG.getConstant(0, MVT::i32)); + LoadRetOps.push_back(InFlag); + SDValue retval = DAG.getMemIntrinsicNode( + NVPTXISD::LoadParamV2, dl, + DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0], + LoadRetOps.size(), EltVT, MachinePointerInfo()); + Chain = retval.getValue(2); + InFlag = retval.getValue(3); + SDValue Ret0 = retval.getValue(0); + SDValue Ret1 = retval.getValue(1); + if (needTruncate) { + Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0); + InVals.push_back(Ret0); + Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1); + InVals.push_back(Ret1); + } else { + InVals.push_back(Ret0); + InVals.push_back(Ret1); + } + } else { + // Split into N LoadV4 + unsigned Ofst = 0; + unsigned VecSize = 4; + unsigned Opc = NVPTXISD::LoadParamV4; + if (EltVT.getSizeInBits() == 64) { + VecSize = 2; + Opc = NVPTXISD::LoadParamV2; + } + EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize); + 
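// For illustration: an <8 x float> result is read back as two
// LoadParamV4 nodes at byte offsets 0 and 16 of the return param,
// while a <3 x float> result takes a single LoadParamV4 whose surplus
// lane is dropped by the i + j >= NumElts check below.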
for (unsigned i = 0; i < NumElts; i += VecSize) { + SmallVector<EVT, 8> LoadRetVTs; + if (needTruncate) { + // If loading i1 result, generate + // load i16 + // trunc i16 to i1 + for (unsigned j = 0; j < VecSize; ++j) + LoadRetVTs.push_back(MVT::i16); + } else { + for (unsigned j = 0; j < VecSize; ++j) + LoadRetVTs.push_back(EltVT); + } + LoadRetVTs.push_back(MVT::Other); + LoadRetVTs.push_back(MVT::Glue); + SmallVector<SDValue, 4> LoadRetOps; + LoadRetOps.push_back(Chain); + LoadRetOps.push_back(DAG.getConstant(1, MVT::i32)); + LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32)); + LoadRetOps.push_back(InFlag); + SDValue retval = DAG.getMemIntrinsicNode( + Opc, dl, DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), + &LoadRetOps[0], LoadRetOps.size(), EltVT, MachinePointerInfo()); + if (VecSize == 2) { + Chain = retval.getValue(2); + InFlag = retval.getValue(3); + } else { + Chain = retval.getValue(4); + InFlag = retval.getValue(5); + } + + for (unsigned j = 0; j < VecSize; ++j) { + if (i + j >= NumElts) + break; + SDValue Elt = retval.getValue(j); + if (needTruncate) + Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt); + InVals.push_back(Elt); + } + Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext())); + } } } else { - SmallVector<EVT, 16> resvtparts; - ComputeValueVTs(*this, retTy, resvtparts); - - assert(Ins.size() == resvtparts.size() && - "Unexpected number of return values in non-ABI case"); - unsigned paramNum = 0; + SmallVector<EVT, 16> VTs; + ComputePTXValueVTs(*this, retTy, VTs); + assert(VTs.size() == Ins.size() && "Bad value decomposition"); for (unsigned i = 0, e = Ins.size(); i != e; ++i) { - assert(EVT(Ins[i].VT) == resvtparts[i] && - "Unexpected EVT type in non-ABI case"); - unsigned numelems = 1; - EVT elemtype = Ins[i].VT; - if (Ins[i].VT.isVector()) { - numelems = Ins[i].VT.getVectorNumElements(); - elemtype = Ins[i].VT.getVectorElementType(); - } - std::vector<SDValue> tempRetVals; - for (unsigned j = 0; j < numelems; ++j) { - EVT MoveRetVTs[] = { elemtype, MVT::Other, MVT::Glue }; - SDValue MoveRetOps[] = { Chain, DAG.getConstant(0, MVT::i32), - DAG.getConstant(paramNum, MVT::i32), - InFlag }; - SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, MoveRetVTs, - MoveRetOps, array_lengthof(MoveRetOps)); - Chain = retval.getValue(1); - InFlag = retval.getValue(2); - tempRetVals.push_back(retval); - ++paramNum; - } - if (Ins[i].VT.isVector()) - InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, Ins[i].VT, - &tempRetVals[0], tempRetVals.size())); - else - InVals.push_back(tempRetVals[0]); + unsigned sz = VTs[i].getSizeInBits(); + bool needTruncate = sz < 8 ? true : false; + if (VTs[i].isInteger() && (sz < 8)) + sz = 8; + + SmallVector<EVT, 4> LoadRetVTs; + EVT TheLoadType = VTs[i]; + if (retTy->isIntegerTy() && + TD->getTypeAllocSizeInBits(retTy) < 32) { + // This is for integer types only, and specifically not for + // aggregates. 
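// e.g. for an i1 return value: the return param was declared above as
// .param .b32 retval0 (scalar returns are widened to at least 32 bits),
// so the value is loaded back as an i32 here and then truncated to the
// type the caller expects.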
+ LoadRetVTs.push_back(MVT::i32); + TheLoadType = MVT::i32; + } else if (sz < 16) { + // If loading i1/i8 result, generate + // load i8 (-> i16) + // trunc i16 to i1/i8 + LoadRetVTs.push_back(MVT::i16); + } else + LoadRetVTs.push_back(Ins[i].VT); + LoadRetVTs.push_back(MVT::Other); + LoadRetVTs.push_back(MVT::Glue); + + SmallVector<SDValue, 4> LoadRetOps; + LoadRetOps.push_back(Chain); + LoadRetOps.push_back(DAG.getConstant(1, MVT::i32)); + LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32)); + LoadRetOps.push_back(InFlag); + SDValue retval = DAG.getMemIntrinsicNode( + NVPTXISD::LoadParam, dl, + DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0], + LoadRetOps.size(), TheLoadType, MachinePointerInfo()); + Chain = retval.getValue(1); + InFlag = retval.getValue(2); + SDValue Ret0 = retval.getValue(0); + if (needTruncate) + Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0); + InVals.push_back(Ret0); + resoffset += sz / 8; } } } + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true), DAG.getIntPtrConstant(uniqueCallSite + 1, true), InFlag, dl); @@ -862,8 +1167,8 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { // v = ld i1* addr // => -// v1 = ld i8* addr -// v = trunc v1 to i1 +// v1 = ld i8* addr (-> i16) +// v = trunc i16 to i1 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const { SDNode *Node = Op.getNode(); LoadSDNode *LD = cast<LoadSDNode>(Node); @@ -872,7 +1177,7 @@ SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const { assert(Node->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only"); SDValue newLD = - DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(), + DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(), LD->getAlignment()); SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD); @@ -956,8 +1261,6 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val, DAG.getIntPtrConstant(i)); if (NeedExt) - // ANY_EXTEND is correct here since the store will only look at the - // lower-order bits anyway. 
ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal); Ops.push_back(ExtVal); } @@ -982,8 +1285,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { // st i1 v, addr // => -// v1 = zxt v to i8 -// st i8, addr +// v1 = zxt v to i16 +// st.u8 i16, addr SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const { SDNode *Node = Op.getNode(); SDLoc dl(Node); @@ -995,9 +1298,10 @@ SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const { unsigned Alignment = ST->getAlignment(); bool isVolatile = ST->isVolatile(); bool isNonTemporal = ST->isNonTemporal(); - Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Tmp3); - SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), - isVolatile, isNonTemporal, Alignment); + Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3); + SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, + ST->getPointerInfo(), MVT::i8, isNonTemporal, + isVolatile, Alignment); return Result; } @@ -1012,7 +1316,15 @@ SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, SDValue NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const { - return getExtSymb(DAG, ".PARAM", idx, v); + std::string ParamSym; + raw_string_ostream ParamStr(ParamSym); + + ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx; + ParamStr.flush(); + + std::string *SavedStr = + nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str()); + return DAG.getTargetExternalSymbol(SavedStr->c_str(), v); } SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) { @@ -1054,12 +1366,16 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( const Function *F = MF.getFunction(); const AttributeSet &PAL = F->getAttributes(); + const TargetLowering *TLI = nvTM->getTargetLowering(); SDValue Root = DAG.getRoot(); std::vector<SDValue> OutChains; bool isKernel = llvm::isKernelFunction(*F); bool isABI = (nvptxSubtarget.getSmVersion() >= 20); + assert(isABI && "Non-ABI compilation is not supported"); + if (!isABI) + return Chain; std::vector<Type *> argTypes; std::vector<const Argument *> theArgs; @@ -1068,15 +1384,20 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( theArgs.push_back(I); argTypes.push_back(I->getType()); } - //assert(argTypes.size() == Ins.size() && - // "Ins types and function types did not match"); + // argTypes.size() (or theArgs.size()) and Ins.size() need not match. + // Ins.size() will be larger + // * if there is an aggregate argument with multiple fields (each field + // showing up separately in Ins) + // * if there is a vector argument with more than typical vector-length + // elements (generally if more than 4) where each vector element is + // individually present in Ins. + // So a different index should be used for indexing into Ins. + // See similar issue in LowerCall. + unsigned InsIdx = 0; int idx = 0; - for (unsigned i = 0, e = argTypes.size(); i != e; ++i, ++idx) { + for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) { Type *Ty = argTypes[i]; - EVT ObjectVT = getValueType(Ty); - //assert(ObjectVT == Ins[i].VT && - // "Ins type did not match function type"); // If the kernel argument is image*_t or sampler_t, convert it to // a i32 constant holding the parameter position. 
This can later @@ -1092,142 +1413,248 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( if (theArgs[i]->use_empty()) { // argument is dead - if (ObjectVT.isVector()) { - EVT EltVT = ObjectVT.getVectorElementType(); - unsigned NumElts = ObjectVT.getVectorNumElements(); - for (unsigned vi = 0; vi < NumElts; ++vi) { - InVals.push_back(DAG.getNode(ISD::UNDEF, dl, EltVT)); + if (Ty->isAggregateType()) { + SmallVector<EVT, 16> vtparts; + + ComputePTXValueVTs(*this, Ty, vtparts); + assert(vtparts.size() > 0 && "empty aggregate type not expected"); + for (unsigned parti = 0, parte = vtparts.size(); parti != parte; + ++parti) { + EVT partVT = vtparts[parti]; + InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT)); + ++InsIdx; } - } else { - InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT)); + if (vtparts.size() > 0) + --InsIdx; + continue; + } + if (Ty->isVectorTy()) { + EVT ObjectVT = getValueType(Ty); + unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT); + for (unsigned parti = 0; parti < NumRegs; ++parti) { + InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT)); + ++InsIdx; + } + if (NumRegs > 0) + --InsIdx; + continue; } + InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT)); continue; } // In the following cases, assign a node order of "idx+1" - // to newly created nodes. The SDNOdes for params have to + // to newly created nodes. The SDNodes for params have to // appear in the same order as their order of appearance // in the original function. "idx+1" holds that order. if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) { - if (ObjectVT.isVector()) { + if (Ty->isAggregateType()) { + SmallVector<EVT, 16> vtparts; + SmallVector<uint64_t, 16> offsets; + + // NOTE: Here, we lose the ability to issue vector loads for vectors + // that are a part of a struct. This should be investigated in the + // future. + ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0); + assert(vtparts.size() > 0 && "empty aggregate type not expected"); + bool aggregateIsPacked = false; + if (StructType *STy = llvm::dyn_cast<StructType>(Ty)) + aggregateIsPacked = STy->isPacked(); + + SDValue Arg = getParamSymbol(DAG, idx, getPointerTy()); + for (unsigned parti = 0, parte = vtparts.size(); parti != parte; + ++parti) { + EVT partVT = vtparts[parti]; + Value *srcValue = Constant::getNullValue( + PointerType::get(partVT.getTypeForEVT(F->getContext()), + llvm::ADDRESS_SPACE_PARAM)); + SDValue srcAddr = + DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, + DAG.getConstant(offsets[parti], getPointerTy())); + unsigned partAlign = + aggregateIsPacked ? 1 + : TD->getABITypeAlignment( + partVT.getTypeForEVT(F->getContext())); + SDValue p; + if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) { + ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ? 
+ ISD::SEXTLOAD : ISD::ZEXTLOAD; + p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr, + MachinePointerInfo(srcValue), partVT, false, + false, partAlign); + } else { + p = DAG.getLoad(partVT, dl, Root, srcAddr, + MachinePointerInfo(srcValue), false, false, false, + partAlign); + } + if (p.getNode()) + p.getNode()->setIROrder(idx + 1); + InVals.push_back(p); + ++InsIdx; + } + if (vtparts.size() > 0) + --InsIdx; + continue; + } + if (Ty->isVectorTy()) { + EVT ObjectVT = getValueType(Ty); + SDValue Arg = getParamSymbol(DAG, idx, getPointerTy()); unsigned NumElts = ObjectVT.getVectorNumElements(); + assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts && + "Vector was not scalarized"); + unsigned Ofst = 0; EVT EltVT = ObjectVT.getVectorElementType(); - unsigned Offset = 0; - for (unsigned vi = 0; vi < NumElts; ++vi) { - SDValue A = getParamSymbol(DAG, idx, getPointerTy()); - SDValue B = DAG.getIntPtrConstant(Offset); - SDValue Addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), - //getParamSymbol(DAG, idx, EltVT), - //DAG.getConstant(Offset, getPointerTy())); - A, B); + + // V1 load + // f32 = load ... + if (NumElts == 1) { + // We only have one element, so just directly load it Value *SrcValue = Constant::getNullValue(PointerType::get( EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM)); - SDValue Ld = DAG.getLoad( - EltVT, dl, Root, Addr, MachinePointerInfo(SrcValue), false, false, - false, + SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, + DAG.getConstant(Ofst, getPointerTy())); + SDValue P = DAG.getLoad( + EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false, + false, true, TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext()))); - Offset += EltVT.getStoreSizeInBits() / 8; - InVals.push_back(Ld); + if (P.getNode()) + P.getNode()->setIROrder(idx + 1); + + if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) + P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P); + InVals.push_back(P); + Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext())); + ++InsIdx; + } else if (NumElts == 2) { + // V2 load + // f32,f32 = load ... + EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2); + Value *SrcValue = Constant::getNullValue(PointerType::get( + VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM)); + SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, + DAG.getConstant(Ofst, getPointerTy())); + SDValue P = DAG.getLoad( + VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false, + false, true, + TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext()))); + if (P.getNode()) + P.getNode()->setIROrder(idx + 1); + + SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P, + DAG.getIntPtrConstant(0)); + SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P, + DAG.getIntPtrConstant(1)); + + if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) { + Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0); + Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1); + } + + InVals.push_back(Elt0); + InVals.push_back(Elt1); + Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext())); + InsIdx += 2; + } else { + // V4 loads + // We have at least 4 elements (<3 x Ty> expands to 4 elements) and + // the + // vector will be expanded to a power of 2 elements, so we know we can + // always round up to the next multiple of 4 when creating the vector + // loads. + // e.g. 
4 elem => 1 ld.v4 + // 6 elem => 2 ld.v4 + // 8 elem => 2 ld.v4 + // 11 elem => 3 ld.v4 + unsigned VecSize = 4; + if (EltVT.getSizeInBits() == 64) { + VecSize = 2; + } + EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize); + for (unsigned i = 0; i < NumElts; i += VecSize) { + Value *SrcValue = Constant::getNullValue( + PointerType::get(VecVT.getTypeForEVT(F->getContext()), + llvm::ADDRESS_SPACE_PARAM)); + SDValue SrcAddr = + DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, + DAG.getConstant(Ofst, getPointerTy())); + SDValue P = DAG.getLoad( + VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false, + false, true, + TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext()))); + if (P.getNode()) + P.getNode()->setIROrder(idx + 1); + + for (unsigned j = 0; j < VecSize; ++j) { + if (i + j >= NumElts) + break; + SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P, + DAG.getIntPtrConstant(j)); + if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) + Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt); + InVals.push_back(Elt); + } + Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext())); + } + InsIdx += VecSize; } + + if (NumElts > 0) + --InsIdx; continue; } - // A plain scalar. - if (isABI || isKernel) { - // If ABI, load from the param symbol - SDValue Arg = getParamSymbol(DAG, idx); - // Conjure up a value that we can get the address space from. - // FIXME: Using a constant here is a hack. - Value *srcValue = Constant::getNullValue( - PointerType::get(ObjectVT.getTypeForEVT(F->getContext()), - llvm::ADDRESS_SPACE_PARAM)); - SDValue p = DAG.getLoad( - ObjectVT, dl, Root, Arg, MachinePointerInfo(srcValue), false, false, - false, - TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext()))); - if (p.getNode()) - p.getNode()->setIROrder(idx + 1); - InVals.push_back(p); + EVT ObjectVT = getValueType(Ty); + // If ABI, load from the param symbol + SDValue Arg = getParamSymbol(DAG, idx, getPointerTy()); + Value *srcValue = Constant::getNullValue(PointerType::get( + ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM)); + SDValue p; + if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) { + ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ? + ISD::SEXTLOAD : ISD::ZEXTLOAD; + p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg, + MachinePointerInfo(srcValue), ObjectVT, false, false, + TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext()))); } else { - // If no ABI, just move the param symbol - SDValue Arg = getParamSymbol(DAG, idx, ObjectVT); - SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg); - if (p.getNode()) - p.getNode()->setIROrder(idx + 1); - InVals.push_back(p); + p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg, + MachinePointerInfo(srcValue), false, false, false, + TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext()))); } + if (p.getNode()) + p.getNode()->setIROrder(idx + 1); + InVals.push_back(p); continue; } // Param has ByVal attribute - if (isABI || isKernel) { - // Return MoveParam(param symbol). - // Ideally, the param symbol can be returned directly, - // but when SDNode builder decides to use it in a CopyToReg(), - // machine instruction fails because TargetExternalSymbol - // (not lowered) is target dependent, and CopyToReg assumes - // the source is lowered. 
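          // Illustrative sketch (hypothetical kernel): for
          //   %pair = type { i32, i32 }
          //   define void @kern(%pair* byval %p)
          // the parameter symbol is wrapped before any use,
          //   MoveParam(TargetExternalSymbol "kern_param_0")
          // so a later CopyToReg only ever sees an already-lowered source.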
- SDValue Arg = getParamSymbol(DAG, idx, getPointerTy()); - SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg); - if (p.getNode()) - p.getNode()->setIROrder(idx + 1); - if (isKernel) - InVals.push_back(p); - else { - SDValue p2 = DAG.getNode( - ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT, - DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p); - InVals.push_back(p2); - } - } else { - // Have to move a set of param symbols to registers and - // store them locally and return the local pointer in InVals - const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]); - assert(elemPtrType && "Byval parameter should be a pointer type"); - Type *elemType = elemPtrType->getElementType(); - // Compute the constituent parts - SmallVector<EVT, 16> vtparts; - SmallVector<uint64_t, 16> offsets; - ComputeValueVTs(*this, elemType, vtparts, &offsets, 0); - unsigned totalsize = 0; - for (unsigned j = 0, je = vtparts.size(); j != je; ++j) - totalsize += vtparts[j].getStoreSizeInBits(); - SDValue localcopy = DAG.getFrameIndex( - MF.getFrameInfo()->CreateStackObject(totalsize / 8, 16, false), - getPointerTy()); - unsigned sizesofar = 0; - std::vector<SDValue> theChains; - for (unsigned j = 0, je = vtparts.size(); j != je; ++j) { - unsigned numElems = 1; - if (vtparts[j].isVector()) - numElems = vtparts[j].getVectorNumElements(); - for (unsigned k = 0, ke = numElems; k != ke; ++k) { - EVT tmpvt = vtparts[j]; - if (tmpvt.isVector()) - tmpvt = tmpvt.getVectorElementType(); - SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt, - getParamSymbol(DAG, idx, tmpvt)); - SDValue addr = - DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy, - DAG.getConstant(sizesofar, getPointerTy())); - theChains.push_back(DAG.getStore( - Chain, dl, arg, addr, MachinePointerInfo(), false, false, 0)); - sizesofar += tmpvt.getStoreSizeInBits() / 8; - ++idx; - } - } - --idx; - Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &theChains[0], - theChains.size()); - InVals.push_back(localcopy); + // Return MoveParam(param symbol). + // Ideally, the param symbol can be returned directly, + // but when SDNode builder decides to use it in a CopyToReg(), + // machine instruction fails because TargetExternalSymbol + // (not lowered) is target dependent, and CopyToReg assumes + // the source is lowered. + EVT ObjectVT = getValueType(Ty); + assert(ObjectVT == Ins[InsIdx].VT && + "Ins type did not match function type"); + SDValue Arg = getParamSymbol(DAG, idx, getPointerTy()); + SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg); + if (p.getNode()) + p.getNode()->setIROrder(idx + 1); + if (isKernel) + InVals.push_back(p); + else { + SDValue p2 = DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT, + DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p); + InVals.push_back(p2); } } // Clang will check explicit VarArg and issue error if any. However, Clang // will let code with - // implicit var arg like f() pass. + // implicit var arg like f() pass. See bug 617733. // We treat this case as if the arg list is empty. 
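  // Illustrative sketch: an unprototyped declaration such as
  //   void f();      /* implicit vararg in C */
  //   f(1, 2);
  // passes the Clang check, whereas an explicit prototype like
  //   void g(int, ...);
  // is rejected up front, so no vararg lowering is needed here.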
- //if (F.isVarArg()) { + // if (F.isVarArg()) { // assert(0 && "VarArg not supported yet!"); //} @@ -1238,43 +1665,185 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( return Chain; } -SDValue NVPTXTargetLowering::LowerReturn( - SDValue Chain, CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, SDLoc dl, - SelectionDAG &DAG) const { + +SDValue +NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + SDLoc dl, SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + const Function *F = MF.getFunction(); + Type *RetTy = F->getReturnType(); + const DataLayout *TD = getDataLayout(); bool isABI = (nvptxSubtarget.getSmVersion() >= 20); + assert(isABI && "Non-ABI compilation is not supported"); + if (!isABI) + return Chain; + + if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) { + // If we have a vector type, the OutVals array will be the scalarized + // components and we have combine them into 1 or more vector stores. + unsigned NumElts = VTy->getNumElements(); + assert(NumElts == Outs.size() && "Bad scalarization of return value"); + + // const_cast can be removed in later LLVM versions + EVT EltVT = getValueType(RetTy).getVectorElementType(); + bool NeedExtend = false; + if (EltVT.getSizeInBits() < 16) + NeedExtend = true; + + // V1 store + if (NumElts == 1) { + SDValue StoreVal = OutVals[0]; + // We only have one element, so just directly store it + if (NeedExtend) + StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal); + SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal }; + Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl, + DAG.getVTList(MVT::Other), &Ops[0], 3, + EltVT, MachinePointerInfo()); + + } else if (NumElts == 2) { + // V2 store + SDValue StoreVal0 = OutVals[0]; + SDValue StoreVal1 = OutVals[1]; + + if (NeedExtend) { + StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0); + StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1); + } - unsigned sizesofar = 0; - unsigned idx = 0; - for (unsigned i = 0, e = Outs.size(); i != e; ++i) { - SDValue theVal = OutVals[i]; - EVT theValType = theVal.getValueType(); - unsigned numElems = 1; - if (theValType.isVector()) - numElems = theValType.getVectorNumElements(); - for (unsigned j = 0, je = numElems; j != je; ++j) { - SDValue tmpval = theVal; - if (theValType.isVector()) - tmpval = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, - theValType.getVectorElementType(), tmpval, - DAG.getIntPtrConstant(j)); - Chain = DAG.getNode( - isABI ? NVPTXISD::StoreRetval : NVPTXISD::MoveToRetval, dl, - MVT::Other, Chain, DAG.getConstant(isABI ? sizesofar : idx, MVT::i32), - tmpval); - if (theValType.isVector()) - sizesofar += theValType.getVectorElementType().getStoreSizeInBits() / 8; - else - sizesofar += theValType.getStoreSizeInBits() / 8; - ++idx; + SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0, + StoreVal1 }; + Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl, + DAG.getVTList(MVT::Other), &Ops[0], 4, + EltVT, MachinePointerInfo()); + } else { + // V4 stores + // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the + // vector will be expanded to a power of 2 elements, so we know we can + // always round up to the next multiple of 4 when creating the vector + // stores. + // e.g. 
4 elem => 1 st.v4 + // 6 elem => 2 st.v4 + // 8 elem => 2 st.v4 + // 11 elem => 3 st.v4 + + unsigned VecSize = 4; + if (OutVals[0].getValueType().getSizeInBits() == 64) + VecSize = 2; + + unsigned Offset = 0; + + EVT VecVT = + EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize); + unsigned PerStoreOffset = + TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext())); + + for (unsigned i = 0; i < NumElts; i += VecSize) { + // Get values + SDValue StoreVal; + SmallVector<SDValue, 8> Ops; + Ops.push_back(Chain); + Ops.push_back(DAG.getConstant(Offset, MVT::i32)); + unsigned Opc = NVPTXISD::StoreRetvalV2; + EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType(); + + StoreVal = OutVals[i]; + if (NeedExtend) + StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal); + Ops.push_back(StoreVal); + + if (i + 1 < NumElts) { + StoreVal = OutVals[i + 1]; + if (NeedExtend) + StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal); + } else { + StoreVal = DAG.getUNDEF(ExtendedVT); + } + Ops.push_back(StoreVal); + + if (VecSize == 4) { + Opc = NVPTXISD::StoreRetvalV4; + if (i + 2 < NumElts) { + StoreVal = OutVals[i + 2]; + if (NeedExtend) + StoreVal = + DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal); + } else { + StoreVal = DAG.getUNDEF(ExtendedVT); + } + Ops.push_back(StoreVal); + + if (i + 3 < NumElts) { + StoreVal = OutVals[i + 3]; + if (NeedExtend) + StoreVal = + DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal); + } else { + StoreVal = DAG.getUNDEF(ExtendedVT); + } + Ops.push_back(StoreVal); + } + + // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size()); + Chain = + DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), &Ops[0], + Ops.size(), EltVT, MachinePointerInfo()); + Offset += PerStoreOffset; + } + } + } else { + SmallVector<EVT, 16> ValVTs; + // const_cast is necessary since we are still using an LLVM version from + // before the type system re-write. + ComputePTXValueVTs(*this, RetTy, ValVTs); + assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition"); + + unsigned SizeSoFar = 0; + for (unsigned i = 0, e = Outs.size(); i != e; ++i) { + SDValue theVal = OutVals[i]; + EVT TheValType = theVal.getValueType(); + unsigned numElems = 1; + if (TheValType.isVector()) + numElems = TheValType.getVectorNumElements(); + for (unsigned j = 0, je = numElems; j != je; ++j) { + SDValue TmpVal = theVal; + if (TheValType.isVector()) + TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, + TheValType.getVectorElementType(), TmpVal, + DAG.getIntPtrConstant(j)); + EVT TheStoreType = ValVTs[i]; + if (RetTy->isIntegerTy() && + TD->getTypeAllocSizeInBits(RetTy) < 32) { + // The following zero-extension is for integer types only, and + // specifically not for aggregates. 
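          // Illustrative sketch (hypothetical i8 return): for
          //   define i8 @f()
          // the value is zero-extended and stored into a full 32-bit return
          // param slot, e.g.
          //   st.param.b32 [func_retval0+0], %r1;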
+ TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal); + TheStoreType = MVT::i32; + } + else if (TmpVal.getValueType().getSizeInBits() < 16) + TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal); + + SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal }; + Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl, + DAG.getVTList(MVT::Other), &Ops[0], + 3, TheStoreType, + MachinePointerInfo()); + if(TheValType.isVector()) + SizeSoFar += + TheStoreType.getVectorElementType().getStoreSizeInBits() / 8; + else + SizeSoFar += TheStoreType.getStoreSizeInBits()/8; + } } } return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain); } + void NVPTXTargetLowering::LowerAsmOperandForConstraint( SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, SelectionDAG &DAG) const { @@ -1338,9 +1907,9 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( Info.opc = ISD::INTRINSIC_W_CHAIN; if (Intrinsic == Intrinsic::nvvm_ldu_global_i) - Info.memVT = MVT::i32; + Info.memVT = getValueType(I.getType()); else if (Intrinsic == Intrinsic::nvvm_ldu_global_p) - Info.memVT = getPointerTy(); + Info.memVT = getValueType(I.getType()); else Info.memVT = MVT::f32; Info.ptrVal = I.getArgOperand(0); @@ -1421,11 +1990,11 @@ NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const { std::pair<unsigned, const TargetRegisterClass *> NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { case 'c': - return std::make_pair(0U, &NVPTX::Int8RegsRegClass); + return std::make_pair(0U, &NVPTX::Int16RegsRegClass); case 'h': return std::make_pair(0U, &NVPTX::Int16RegsRegClass); case 'r': @@ -1565,7 +2134,8 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, unsigned NumElts = ResVT.getVectorNumElements(); EVT EltVT = ResVT.getVectorElementType(); - // Since LDU/LDG are target nodes, we cannot rely on DAG type legalization. + // Since LDU/LDG are target nodes, we cannot rely on DAG type + // legalization. // Therefore, we must ensure the type is legal. For i1 and i8, we set the // loaded type to i16 and propogate the "real" type as the memory type. 
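  // Illustrative sketch (hypothetical i8 element type): the result of an
  //   ldu.global.v4.u8
  // is produced in 16-bit registers with MVT::i8 recorded as the memory
  // type, and each lane is truncated back to the "real" type afterwards.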
bool NeedTrunc = false; @@ -1624,7 +2194,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, OtherOps.push_back(Chain); // Chain // Skip operand 1 (intrinsic ID) - // Others + // Others for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) OtherOps.push_back(N->getOperand(i)); @@ -1672,7 +2242,8 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0], Ops.size(), MVT::i8, MemSD->getMemOperand()); - Results.push_back(NewLD.getValue(0)); + Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, + NewLD.getValue(0))); Results.push_back(NewLD.getValue(1)); } } diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h index d3ed63a..3418437 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/lib/Target/NVPTX/NVPTXISelLowering.h @@ -29,17 +29,11 @@ enum NodeType { CALL, RET_FLAG, LOAD_PARAM, - NVBuiltin, DeclareParam, DeclareScalarParam, DeclareRetParam, DeclareRet, DeclareScalarRet, - LoadParam, - StoreParam, - StoreParamS32, // to sext and store a <32bit value, not used currently - StoreParamU32, // to zext and store a <32bit value, not used currently - MoveToParam, PrintCall, PrintCallUni, CallArgBegin, @@ -51,9 +45,6 @@ enum NodeType { CallSymbol, Prototype, MoveParam, - MoveRetval, - MoveToRetval, - StoreRetval, PseudoUseParam, RETURN, CallSeqBegin, @@ -67,7 +58,18 @@ enum NodeType { LDUV2, // LDU.v2 LDUV4, // LDU.v4 StoreV2, - StoreV4 + StoreV4, + LoadParam, + LoadParamV2, + LoadParamV4, + StoreParam, + StoreParamV2, + StoreParamV4, + StoreParamS32, // to sext and store a <32bit value, not used currently + StoreParamU32, // to zext and store a <32bit value, not used currently + StoreRetval, + StoreRetvalV2, + StoreRetvalV4 }; } @@ -108,7 +110,7 @@ public: ConstraintType getConstraintType(const std::string &Constraint) const; std::pair<unsigned, const TargetRegisterClass *> - getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const; + getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const; virtual SDValue LowerFormalArguments( SDValue Chain, CallingConv::ID CallConv, bool isVarArg, @@ -120,7 +122,8 @@ public: std::string getPrototype(Type *, const ArgListTy &, const SmallVectorImpl<ISD::OutputArg> &, - unsigned retAlignment) const; + unsigned retAlignment, + const ImmutableCallSite *CS) const; virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, @@ -144,7 +147,7 @@ private: SDValue getExtSymb(SelectionDAG &DAG, const char *name, int idx, EVT = MVT::i32) const; - SDValue getParamSymbol(SelectionDAG &DAG, int idx, EVT = MVT::i32) const; + SDValue getParamSymbol(SelectionDAG &DAG, int idx, EVT) const; SDValue getParamHelpSymbol(SelectionDAG &DAG, int idx); SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; @@ -158,6 +161,9 @@ private: virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const; + + unsigned getArgumentAlignment(SDValue Callee, const ImmutableCallSite *CS, + Type *Ty, unsigned Idx) const; }; } // namespace llvm diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp index 52be287..b406aa9 100644 --- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp +++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp @@ -27,7 +27,7 @@ using namespace llvm; // FIXME: Add the subtarget support on this constructor. 
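// Note (illustrative): with the Int8Regs class removed, i8 values are
// carried in 16-bit registers, so a copy that used to target Int8Regs now
// takes the Int16Regs path in copyPhysReg below and prints as, e.g.,
//   mov.u16 %rs1, %rs2;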
NVPTXInstrInfo::NVPTXInstrInfo(NVPTXTargetMachine &tm) - : NVPTXGenInstrInfo(), TM(tm), RegInfo(*this, *TM.getSubtargetImpl()) {} + : NVPTXGenInstrInfo(), TM(tm), RegInfo(*TM.getSubtargetImpl()) {} void NVPTXInstrInfo::copyPhysReg( MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, @@ -51,9 +51,6 @@ void NVPTXInstrInfo::copyPhysReg( else if (DestRC == &NVPTX::Int16RegsRegClass) BuildMI(MBB, I, DL, get(NVPTX::IMOV16rr), DestReg) .addReg(SrcReg, getKillRegState(KillSrc)); - else if (DestRC == &NVPTX::Int8RegsRegClass) - BuildMI(MBB, I, DL, get(NVPTX::IMOV8rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); else if (DestRC == &NVPTX::Int64RegsRegClass) BuildMI(MBB, I, DL, get(NVPTX::IMOV64rr), DestReg) .addReg(SrcReg, getKillRegState(KillSrc)); diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.td b/lib/Target/NVPTX/NVPTXInstrInfo.td index da6dd39..3e430bf 100644 --- a/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -32,6 +32,86 @@ def isVecOther : VecInstTypeEnum<15>; def brtarget : Operand<OtherVT>; +// CVT conversion modes +// These must match the enum in NVPTX.h +def CvtNONE : PatLeaf<(i32 0x0)>; +def CvtRNI : PatLeaf<(i32 0x1)>; +def CvtRZI : PatLeaf<(i32 0x2)>; +def CvtRMI : PatLeaf<(i32 0x3)>; +def CvtRPI : PatLeaf<(i32 0x4)>; +def CvtRN : PatLeaf<(i32 0x5)>; +def CvtRZ : PatLeaf<(i32 0x6)>; +def CvtRM : PatLeaf<(i32 0x7)>; +def CvtRP : PatLeaf<(i32 0x8)>; + +def CvtNONE_FTZ : PatLeaf<(i32 0x10)>; +def CvtRNI_FTZ : PatLeaf<(i32 0x11)>; +def CvtRZI_FTZ : PatLeaf<(i32 0x12)>; +def CvtRMI_FTZ : PatLeaf<(i32 0x13)>; +def CvtRPI_FTZ : PatLeaf<(i32 0x14)>; +def CvtRN_FTZ : PatLeaf<(i32 0x15)>; +def CvtRZ_FTZ : PatLeaf<(i32 0x16)>; +def CvtRM_FTZ : PatLeaf<(i32 0x17)>; +def CvtRP_FTZ : PatLeaf<(i32 0x18)>; + +def CvtSAT : PatLeaf<(i32 0x20)>; +def CvtSAT_FTZ : PatLeaf<(i32 0x30)>; + +def CvtMode : Operand<i32> { + let PrintMethod = "printCvtMode"; +} + +// Compare modes +// These must match the enum in NVPTX.h +def CmpEQ : PatLeaf<(i32 0)>; +def CmpNE : PatLeaf<(i32 1)>; +def CmpLT : PatLeaf<(i32 2)>; +def CmpLE : PatLeaf<(i32 3)>; +def CmpGT : PatLeaf<(i32 4)>; +def CmpGE : PatLeaf<(i32 5)>; +def CmpLO : PatLeaf<(i32 6)>; +def CmpLS : PatLeaf<(i32 7)>; +def CmpHI : PatLeaf<(i32 8)>; +def CmpHS : PatLeaf<(i32 9)>; +def CmpEQU : PatLeaf<(i32 10)>; +def CmpNEU : PatLeaf<(i32 11)>; +def CmpLTU : PatLeaf<(i32 12)>; +def CmpLEU : PatLeaf<(i32 13)>; +def CmpGTU : PatLeaf<(i32 14)>; +def CmpGEU : PatLeaf<(i32 15)>; +def CmpNUM : PatLeaf<(i32 16)>; +def CmpNAN : PatLeaf<(i32 17)>; + +def CmpEQ_FTZ : PatLeaf<(i32 0x100)>; +def CmpNE_FTZ : PatLeaf<(i32 0x101)>; +def CmpLT_FTZ : PatLeaf<(i32 0x102)>; +def CmpLE_FTZ : PatLeaf<(i32 0x103)>; +def CmpGT_FTZ : PatLeaf<(i32 0x104)>; +def CmpGE_FTZ : PatLeaf<(i32 0x105)>; +def CmpLO_FTZ : PatLeaf<(i32 0x106)>; +def CmpLS_FTZ : PatLeaf<(i32 0x107)>; +def CmpHI_FTZ : PatLeaf<(i32 0x108)>; +def CmpHS_FTZ : PatLeaf<(i32 0x109)>; +def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>; +def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>; +def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>; +def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>; +def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>; +def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>; +def CmpNUM_FTZ : PatLeaf<(i32 0x110)>; +def CmpNAN_FTZ : PatLeaf<(i32 0x111)>; + +def CmpMode : Operand<i32> { + let PrintMethod = "printCmpMode"; +} + +def F32ConstZero : Operand<f32>, PatLeaf<(f32 fpimm)>, SDNodeXForm<fpimm, [{ + return CurDAG->getTargetConstantFP(0.0, MVT::f32); + }]>; +def F32ConstOne : Operand<f32>, PatLeaf<(f32 fpimm)>, 
SDNodeXForm<fpimm, [{ + return CurDAG->getTargetConstantFP(1.0, MVT::f32); + }]>; + //===----------------------------------------------------------------------===// // NVPTX Instruction Predicate Definitions //===----------------------------------------------------------------------===// @@ -56,127 +136,31 @@ def hasLDG : Predicate<"Subtarget.hasLDG()">; def hasLDU : Predicate<"Subtarget.hasLDU()">; def hasGenericLdSt : Predicate<"Subtarget.hasGenericLdSt()">; -def doF32FTZ : Predicate<"UseF32FTZ">; +def doF32FTZ : Predicate<"useF32FTZ()">; +def doNoF32FTZ : Predicate<"!useF32FTZ()">; def doFMAF32 : Predicate<"doFMAF32">; -def doFMAF32_ftz : Predicate<"(doFMAF32 && UseF32FTZ)">; +def doFMAF32_ftz : Predicate<"(doFMAF32 && useF32FTZ())">; def doFMAF32AGG : Predicate<"doFMAF32AGG">; -def doFMAF32AGG_ftz : Predicate<"(doFMAF32AGG && UseF32FTZ)">; +def doFMAF32AGG_ftz : Predicate<"(doFMAF32AGG && useF32FTZ())">; def doFMAF64 : Predicate<"doFMAF64">; def doFMAF64AGG : Predicate<"doFMAF64AGG">; -def doFMADF32 : Predicate<"doFMADF32">; -def doFMADF32_ftz : Predicate<"(doFMADF32 && UseF32FTZ)">; def doMulWide : Predicate<"doMulWide">; def allowFMA : Predicate<"allowFMA">; -def allowFMA_ftz : Predicate<"(allowFMA && UseF32FTZ)">; +def allowFMA_ftz : Predicate<"(allowFMA && useF32FTZ())">; -def do_DIVF32_APPROX : Predicate<"do_DIVF32_PREC==0">; -def do_DIVF32_FULL : Predicate<"do_DIVF32_PREC==1">; +def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">; +def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">; -def do_SQRTF32_APPROX : Predicate<"do_SQRTF32_PREC==0">; -def do_SQRTF32_RN : Predicate<"do_SQRTF32_PREC==1">; +def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">; +def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">; def hasHWROT32 : Predicate<"Subtarget.hasHWROT32()">; def true : Predicate<"1">; -//===----------------------------------------------------------------------===// -// Special Handling for 8-bit Operands and Operations -// -// PTX supports 8-bit signed and unsigned types, but does not support 8-bit -// operations (like add, shift, etc) except for ld/st/cvt. SASS does not have -// 8-bit registers. -// -// PTX ld, st and cvt instructions permit source and destination data operands -// to be wider than the instruction-type size, so that narrow values may be -// loaded, stored, and converted using regular-width registers. -// -// So in PTX generation, we -// - always use 16-bit registers in place in 8-bit registers. -// (8-bit variables should stay as 8-bit as they represent memory layout.) -// - for the following 8-bit operations, we sign-ext/zero-ext the 8-bit values -// before operation -// . div -// . rem -// . neg (sign) -// . set, setp -// . shr -// -// We are patching the operations by inserting the cvt instructions in the -// asm strings of the affected instructions. -// -// Since vector operations, except for ld/st, are eventually elementized. We -// do not need to special-hand the vector 8-bit operations. 
-// -// -//===----------------------------------------------------------------------===// - -// Generate string block like -// { -// .reg .s16 %temp1; -// .reg .s16 %temp2; -// cvt.s16.s8 %temp1, %a; -// cvt.s16.s8 %temp2, %b; -// opc.s16 %dst, %temp1, %temp2; -// } -// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8 -class Handle_i8rr<string OpcStr, string TypeStr, string CVTStr> { - string s = !strconcat("{{\n\t", - !strconcat(".reg .", !strconcat(TypeStr, - !strconcat(" \t%temp1;\n\t", - !strconcat(".reg .", !strconcat(TypeStr, - !strconcat(" \t%temp2;\n\t", - !strconcat(CVTStr, !strconcat(" \t%temp1, $a;\n\t", - !strconcat(CVTStr, !strconcat(" \t%temp2, $b;\n\t", - !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}")))))))))))); -} - -// Generate string block like -// { -// .reg .s16 %temp1; -// .reg .s16 %temp2; -// cvt.s16.s8 %temp1, %a; -// mov.b16 %temp2, %b; -// cvt.s16.s8 %temp2, %temp2; -// opc.s16 %dst, %temp1, %temp2; -// } -// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8 -class Handle_i8ri<string OpcStr, string TypeStr, string CVTStr> { - string s = !strconcat("{{\n\t", - !strconcat(".reg .", !strconcat(TypeStr, - !strconcat(" \t%temp1;\n\t", - !strconcat(".reg .", - !strconcat(TypeStr, !strconcat(" \t%temp2;\n\t", - !strconcat(CVTStr, !strconcat(" \t%temp1, $a;\n\t", - !strconcat("mov.b16 \t%temp2, $b;\n\t", - !strconcat(CVTStr, !strconcat(" \t%temp2, %temp2;\n\t", - !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}"))))))))))))); -} - -// Generate string block like -// { -// .reg .s16 %temp1; -// .reg .s16 %temp2; -// mov.b16 %temp1, %b; -// cvt.s16.s8 %temp1, %temp1; -// cvt.s16.s8 %temp2, %a; -// opc.s16 %dst, %temp1, %temp2; -// } -// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8 -class Handle_i8ir<string OpcStr, string TypeStr, string CVTStr> { - string s = !strconcat("{{\n\t", - !strconcat(".reg .", !strconcat(TypeStr, - !strconcat(" \t%temp1;\n\t", - !strconcat(".reg .", !strconcat(TypeStr, - !strconcat(" \t%temp2;\n\t", - !strconcat("mov.b16 \t%temp1, $a;\n\t", - !strconcat(CVTStr, !strconcat(" \t%temp1, %temp1;\n\t", - !strconcat(CVTStr, !strconcat(" \t%temp2, $b;\n\t", - !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}"))))))))))))); -} - //===----------------------------------------------------------------------===// // Some Common Instruction Class Templates @@ -204,66 +188,6 @@ multiclass I3<string OpcStr, SDNode OpNode> { def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b), !strconcat(OpcStr, "16 \t$dst, $a, $b;"), [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>; - def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>; - def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, (imm):$b))]>; -} - -multiclass I3_i8<string OpcStr, SDNode OpNode, string TypeStr, string CVTStr> { - def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b), - !strconcat(OpcStr, "64 \t$dst, $a, $b;"), - [(set Int64Regs:$dst, (OpNode Int64Regs:$a, - Int64Regs:$b))]>; - def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b), - !strconcat(OpcStr, "64 \t$dst, $a, $b;"), - [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>; - def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b), - !strconcat(OpcStr, "32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode 
Int32Regs:$a, - Int32Regs:$b))]>; - def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b), - !strconcat(OpcStr, "32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>; - def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int16Regs:$dst, (OpNode Int16Regs:$a, - Int16Regs:$b))]>; - def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>; - def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - Handle_i8rr<OpcStr, TypeStr, CVTStr>.s, - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>; - def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b), - Handle_i8ri<OpcStr, TypeStr, CVTStr>.s, - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, (imm):$b))]>; -} - -multiclass I3_noi8<string OpcStr, SDNode OpNode> { - def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b), - !strconcat(OpcStr, "64 \t$dst, $a, $b;"), - [(set Int64Regs:$dst, (OpNode Int64Regs:$a, - Int64Regs:$b))]>; - def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b), - !strconcat(OpcStr, "64 \t$dst, $a, $b;"), - [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>; - def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b), - !strconcat(OpcStr, "32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int32Regs:$a, - Int32Regs:$b))]>; - def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b), - !strconcat(OpcStr, "32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>; - def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int16Regs:$dst, (OpNode Int16Regs:$a, - Int16Regs:$b))]>; - def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>; } multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> { @@ -369,6 +293,90 @@ multiclass F2<string OpcStr, SDNode OpNode> { //===----------------------------------------------------------------------===// //----------------------------------- +// General Type Conversion +//----------------------------------- + +let neverHasSideEffects = 1 in { +// Generate a cvt to the given type from all possible types. +// Each instance takes a CvtMode immediate that defines the conversion mode to +// use. It can be CvtNONE to omit a conversion mode. 
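// For example (illustrative), the defms below produce records such as
// CVT_f32_s16, which with mode CvtRN prints as
//   cvt.rn.f32.s16 %f1, %rs1;
// and CVT_u32_f32, which with mode CvtRZI_FTZ prints as
//   cvt.rzi.ftz.u32.f32 %r1, %f1;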
+multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> { + def _s16 : NVPTXInst<(outs RC:$dst), + (ins Int16Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".s16\t$dst, $src;"), + []>; + def _u16 : NVPTXInst<(outs RC:$dst), + (ins Int16Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".u16\t$dst, $src;"), + []>; + def _f16 : NVPTXInst<(outs RC:$dst), + (ins Int16Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".f16\t$dst, $src;"), + []>; + def _s32 : NVPTXInst<(outs RC:$dst), + (ins Int32Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".s32\t$dst, $src;"), + []>; + def _u32 : NVPTXInst<(outs RC:$dst), + (ins Int32Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".u32\t$dst, $src;"), + []>; + def _s64 : NVPTXInst<(outs RC:$dst), + (ins Int64Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".s64\t$dst, $src;"), + []>; + def _u64 : NVPTXInst<(outs RC:$dst), + (ins Int64Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".u64\t$dst, $src;"), + []>; + def _f32 : NVPTXInst<(outs RC:$dst), + (ins Float32Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".f32\t$dst, $src;"), + []>; + def _f64 : NVPTXInst<(outs RC:$dst), + (ins Float64Regs:$src, CvtMode:$mode), + !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.", + FromName, ".f64\t$dst, $src;"), + []>; +} + +// Generate a cvt to all possible types. +defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>; +defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>; +defm CVT_f16 : CVT_FROM_ALL<"f16", Int16Regs>; +defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>; +defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>; +defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>; +defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>; +defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>; +defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>; + +// This set of cvt is different from the above. The type of the source +// and target are the same. 
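// For example (illustrative), CVT_INREG_s32_s8 sign-extends the low 8 bits
// of a 32-bit value without leaving the 32-bit register class:
//   cvt.s32.s8 %r1, %r2;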
+// +def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src), + "cvt.s16.s8 \t$dst, $src;", []>; +def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src), + "cvt.s32.s8 \t$dst, $src;", []>; +def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src), + "cvt.s32.s16 \t$dst, $src;", []>; +def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src), + "cvt.s64.s8 \t$dst, $src;", []>; +def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src), + "cvt.s64.s16 \t$dst, $src;", []>; +def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src), + "cvt.s64.s32 \t$dst, $src;", []>; +} + +//----------------------------------- // Integer Arithmetic //----------------------------------- @@ -522,81 +530,17 @@ def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)), defm MULT : I3<"mul.lo.s", mul>; -defm MULTHS : I3_noi8<"mul.hi.s", mulhs>; -defm MULTHU : I3_noi8<"mul.hi.u", mulhu>; -def MULTHSi8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - !strconcat("{{ \n\t", - !strconcat(".reg \t.s16 temp1; \n\t", - !strconcat(".reg \t.s16 temp2; \n\t", - !strconcat("cvt.s16.s8 \ttemp1, $a; \n\t", - !strconcat("cvt.s16.s8 \ttemp2, $b; \n\t", - !strconcat("mul.lo.s16 \t$dst, temp1, temp2; \n\t", - !strconcat("shr.s16 \t$dst, $dst, 8; \n\t", - !strconcat("}}", "")))))))), - [(set Int8Regs:$dst, (mulhs Int8Regs:$a, Int8Regs:$b))]>; -def MULTHSi8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b), - !strconcat("{{ \n\t", - !strconcat(".reg \t.s16 temp1; \n\t", - !strconcat(".reg \t.s16 temp2; \n\t", - !strconcat("cvt.s16.s8 \ttemp1, $a; \n\t", - !strconcat("mov.b16 \ttemp2, $b; \n\t", - !strconcat("cvt.s16.s8 \ttemp2, temp2; \n\t", - !strconcat("mul.lo.s16 \t$dst, temp1, temp2; \n\t", - !strconcat("shr.s16 \t$dst, $dst, 8; \n\t", - !strconcat("}}", ""))))))))), - [(set Int8Regs:$dst, (mulhs Int8Regs:$a, imm:$b))]>; -def MULTHUi8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - !strconcat("{{ \n\t", - !strconcat(".reg \t.u16 temp1; \n\t", - !strconcat(".reg \t.u16 temp2; \n\t", - !strconcat("cvt.u16.u8 \ttemp1, $a; \n\t", - !strconcat("cvt.u16.u8 \ttemp2, $b; \n\t", - !strconcat("mul.lo.u16 \t$dst, temp1, temp2; \n\t", - !strconcat("shr.u16 \t$dst, $dst, 8; \n\t", - !strconcat("}}", "")))))))), - [(set Int8Regs:$dst, (mulhu Int8Regs:$a, Int8Regs:$b))]>; -def MULTHUi8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b), - !strconcat("{{ \n\t", - !strconcat(".reg \t.u16 temp1; \n\t", - !strconcat(".reg \t.u16 temp2; \n\t", - !strconcat("cvt.u16.u8 \ttemp1, $a; \n\t", - !strconcat("mov.b16 \ttemp2, $b; \n\t", - !strconcat("cvt.u16.u8 \ttemp2, temp2; \n\t", - !strconcat("mul.lo.u16 \t$dst, temp1, temp2; \n\t", - !strconcat("shr.u16 \t$dst, $dst, 8; \n\t", - !strconcat("}}", ""))))))))), - [(set Int8Regs:$dst, (mulhu Int8Regs:$a, imm:$b))]>; - - -defm SDIV : I3_i8<"div.s", sdiv, "s16", "cvt.s16.s8">; -defm UDIV : I3_i8<"div.u", udiv, "u16", "cvt.u16.u8">; - -defm SREM : I3_i8<"rem.s", srem, "s16", "cvt.s16.s8">; +defm MULTHS : I3<"mul.hi.s", mulhs>; +defm MULTHU : I3<"mul.hi.u", mulhu>; + +defm SDIV : I3<"div.s", sdiv>; +defm UDIV : I3<"div.u", udiv>; + +defm SREM : I3<"rem.s", srem>; // The ri version will not be selected as DAGCombiner::visitSREM will lower it. -defm UREM : I3_i8<"rem.u", urem, "u16", "cvt.u16.u8">; +defm UREM : I3<"rem.u", urem>; // The ri version will not be selected as DAGCombiner::visitUREM will lower it. 
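// For example (illustrative), (urem x, 8) is combined to (and x, 7) before
// selection, and other constant divisors are expanded through mul/sub, so
// the ri forms above go unmatched.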
-def MAD8rrr : NVPTXInst<(outs Int8Regs:$dst), - (ins Int8Regs:$a, Int8Regs:$b, Int8Regs:$c), - "mad.lo.s16 \t$dst, $a, $b, $c;", - [(set Int8Regs:$dst, (add (mul Int8Regs:$a, Int8Regs:$b), - Int8Regs:$c))]>; -def MAD8rri : NVPTXInst<(outs Int8Regs:$dst), - (ins Int8Regs:$a, Int8Regs:$b, i8imm:$c), - "mad.lo.s16 \t$dst, $a, $b, $c;", - [(set Int8Regs:$dst, (add (mul Int8Regs:$a, Int8Regs:$b), - imm:$c))]>; -def MAD8rir : NVPTXInst<(outs Int8Regs:$dst), - (ins Int8Regs:$a, i8imm:$b, Int8Regs:$c), - "mad.lo.s16 \t$dst, $a, $b, $c;", - [(set Int8Regs:$dst, (add (mul Int8Regs:$a, imm:$b), - Int8Regs:$c))]>; -def MAD8rii : NVPTXInst<(outs Int8Regs:$dst), - (ins Int8Regs:$a, i8imm:$b, i8imm:$c), - "mad.lo.s16 \t$dst, $a, $b, $c;", - [(set Int8Regs:$dst, (add (mul Int8Regs:$a, imm:$b), - imm:$c))]>; - def MAD16rrr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c), "mad.lo.s16 \t$dst, $a, $b, $c;", @@ -661,10 +605,6 @@ def MAD64rii : NVPTXInst<(outs Int64Regs:$dst), (mul Int64Regs:$a, imm:$b), imm:$c))]>; -def INEG8 : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src), - !strconcat("cvt.s16.s8 \t$dst, $src;\n\t", - "neg.s16 \t$dst, $dst;"), - [(set Int8Regs:$dst, (ineg Int8Regs:$src))]>; def INEG16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src), "neg.s16 \t$dst, $src;", [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>; @@ -842,6 +782,16 @@ def FDIV32ri_prec : NVPTXInst<(outs Float32Regs:$dst), (fdiv Float32Regs:$a, fpimm:$b))]>, Requires<[reqPTX20]>; +// +// F32 rsqrt +// + +def RSQRTF32approx1r : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$b), + "rsqrt.approx.f32 \t$dst, $b;", []>; + +def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_f Float32Regs:$b)), + (RSQRTF32approx1r Float32Regs:$b)>, + Requires<[do_DIVF32_FULL, do_SQRTF32_APPROX, doNoF32FTZ]>; multiclass FPCONTRACT32<string OpcStr, Predicate Pred> { def rrr : NVPTXInst<(outs Float32Regs:$dst), @@ -912,8 +862,6 @@ multiclass FPCONTRACT64<string OpcStr, Predicate Pred> { // If we reverse the order of the following two lines, then rrr2 rule will be // generated for FMA32, but not for rrr. // Therefore, we manually write the rrr2 rule in FPCONTRACT32. -defm FMAD32_ftz : FPCONTRACT32<"mad.ftz.f32", doFMADF32_ftz>; -defm FMAD32 : FPCONTRACT32<"mad.f32", doFMADF32>; defm FMA32_ftz : FPCONTRACT32<"fma.rn.ftz.f32", doFMAF32_ftz>; defm FMA32 : FPCONTRACT32<"fma.rn.f32", doFMAF32>; defm FMA64 : FPCONTRACT64<"fma.rn.f64", doFMAF64>; @@ -952,8 +900,6 @@ multiclass FPCONTRACT64_SUB_PAT<NVPTXInst Inst, Predicate Pred> { defm FMAF32ext_ftz : FPCONTRACT32_SUB_PAT<FMA32_ftzrrr, doFMAF32AGG_ftz>; defm FMAF32ext : FPCONTRACT32_SUB_PAT<FMA32rrr, doFMAF32AGG>; -defm FMADF32ext_ftz : FPCONTRACT32_SUB_PAT_MAD<FMAD32_ftzrrr, doFMADF32_ftz>; -defm FMADF32ext : FPCONTRACT32_SUB_PAT_MAD<FMAD32rrr, doFMADF32>; defm FMAF64ext : FPCONTRACT64_SUB_PAT<FMA64rrr, doFMAF64AGG>; def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src), @@ -963,6 +909,41 @@ def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src), "cos.approx.f32 \t$dst, $src;", [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>; +// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)) +// e.g. 
"poor man's fmod()" + +// frem - f32 FTZ +def : Pat<(frem Float32Regs:$x, Float32Regs:$y), + (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32 + (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ), + Float32Regs:$y))>, + Requires<[doF32FTZ]>; +def : Pat<(frem Float32Regs:$x, fpimm:$y), + (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32 + (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ), + fpimm:$y))>, + Requires<[doF32FTZ]>; + +// frem - f32 +def : Pat<(frem Float32Regs:$x, Float32Regs:$y), + (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32 + (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI), + Float32Regs:$y))>; +def : Pat<(frem Float32Regs:$x, fpimm:$y), + (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32 + (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI), + fpimm:$y))>; + +// frem - f64 +def : Pat<(frem Float64Regs:$x, Float64Regs:$y), + (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64 + (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI), + Float64Regs:$y))>; +def : Pat<(frem Float64Regs:$x, fpimm:$y), + (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64 + (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI), + fpimm:$y))>; + //----------------------------------- // Logical Arithmetic //----------------------------------- @@ -974,12 +955,6 @@ multiclass LOG_FORMAT<string OpcStr, SDNode OpNode> { def b1ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b), !strconcat(OpcStr, ".pred \t$dst, $a, $b;"), [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>; - def b8rr: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>; - def b8ri: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b), - !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>; def b16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b), !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"), [(set Int16Regs:$dst, (OpNode Int16Regs:$a, @@ -1010,9 +985,6 @@ defm XOR : LOG_FORMAT<"xor", xor>; def NOT1: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src), "not.pred \t$dst, $src;", [(set Int1Regs:$dst, (not Int1Regs:$src))]>; -def NOT8: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src), - "not.b16 \t$dst, $src;", - [(set Int8Regs:$dst, (not Int8Regs:$src))]>; def NOT16: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src), "not.b16 \t$dst, $src;", [(set Int16Regs:$dst, (not Int16Regs:$src))]>; @@ -1056,21 +1028,13 @@ multiclass LSHIFT_FORMAT<string OpcStr, SDNode OpNode> { !strconcat(OpcStr, "16 \t$dst, $a, $b;"), [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>; - def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int32Regs:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, - Int32Regs:$b))]>; - def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i32imm:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, - (i32 imm:$b)))]>; } defm SHL : LSHIFT_FORMAT<"shl.b", shl>; // For shifts, the second src operand must be 32-bit value // Need to add cvt for the 8-bits. 
-multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode, string CVTStr> { +multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode> { def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b), !strconcat(OpcStr, "64 \t$dst, $a, $b;"), @@ -1102,20 +1066,10 @@ multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode, string CVTStr> { !strconcat(OpcStr, "16 \t$dst, $a, $b;"), [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>; - def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int32Regs:$b), - !strconcat(CVTStr, !strconcat(" \t$dst, $a;\n\t", - !strconcat(OpcStr, "16 \t$dst, $dst, $b;"))), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, - Int32Regs:$b))]>; - def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i32imm:$b), - !strconcat(CVTStr, !strconcat(" \t$dst, $a;\n\t", - !strconcat(OpcStr, "16 \t$dst, $dst, $b;"))), - [(set Int8Regs:$dst, (OpNode Int8Regs:$a, - (i32 imm:$b)))]>; } -defm SRA : RSHIFT_FORMAT<"shr.s", sra, "cvt.s16.s8">; -defm SRL : RSHIFT_FORMAT<"shr.u", srl, "cvt.u16.u8">; +defm SRA : RSHIFT_FORMAT<"shr.s", sra>; +defm SRL : RSHIFT_FORMAT<"shr.u", srl>; // 32bit def ROT32imm_sw : NVPTXInst<(outs Int32Regs:$dst), @@ -1213,6 +1167,120 @@ def ROTR64reg_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, //----------------------------------- +// General Comparison +//----------------------------------- + +// General setp instructions +multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> { + def rr : NVPTXInst<(outs Int1Regs:$dst), + (ins RC:$a, RC:$b, CmpMode:$cmp), + !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"), + []>; + def ri : NVPTXInst<(outs Int1Regs:$dst), + (ins RC:$a, ImmCls:$b, CmpMode:$cmp), + !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"), + []>; + def ir : NVPTXInst<(outs Int1Regs:$dst), + (ins ImmCls:$a, RC:$b, CmpMode:$cmp), + !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"), + []>; +} + +defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>; +defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>; +defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>; +defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>; +defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>; +defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>; +defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>; +defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>; +defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>; +defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>; +defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>; + +// General set instructions +multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> { + def rr : NVPTXInst<(outs Int32Regs:$dst), + (ins RC:$a, RC:$b, CmpMode:$cmp), + !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>; + def ri : NVPTXInst<(outs Int32Regs:$dst), + (ins RC:$a, ImmCls:$b, CmpMode:$cmp), + !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>; + def ir : NVPTXInst<(outs Int32Regs:$dst), + (ins ImmCls:$a, RC:$b, CmpMode:$cmp), + !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>; +} + +defm SET_b16 : SET<"b16", Int16Regs, i16imm>; +defm SET_s16 : SET<"s16", Int16Regs, i16imm>; +defm SET_u16 : SET<"u16", Int16Regs, i16imm>; +defm SET_b32 : SET<"b32", Int32Regs, i32imm>; +defm SET_s32 : SET<"s32", Int32Regs, i32imm>; +defm SET_u32 : SET<"u32", Int32Regs, i32imm>; +defm SET_b64 : SET<"b64", Int64Regs, i64imm>; +defm SET_s64 : SET<"s64", Int64Regs, i64imm>; +defm SET_u64 : SET<"u64", Int64Regs, i64imm>; +defm SET_f32 : SET<"f32", Float32Regs, f32imm>; +defm SET_f64 : SET<"f64", 
Float64Regs, f64imm>; + +//----------------------------------- +// General Selection +//----------------------------------- + +// General selp instructions +multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> { + def rr : NVPTXInst<(outs RC:$dst), + (ins RC:$a, RC:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>; + def ri : NVPTXInst<(outs RC:$dst), + (ins RC:$a, ImmCls:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>; + def ir : NVPTXInst<(outs RC:$dst), + (ins ImmCls:$a, RC:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>; + def ii : NVPTXInst<(outs RC:$dst), + (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>; +} + +multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls, + SDNode ImmNode> { + def rr : NVPTXInst<(outs RC:$dst), + (ins RC:$a, RC:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), + [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>; + def ri : NVPTXInst<(outs RC:$dst), + (ins RC:$a, ImmCls:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), + [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>; + def ir : NVPTXInst<(outs RC:$dst), + (ins ImmCls:$a, RC:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), + [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>; + def ii : NVPTXInst<(outs RC:$dst), + (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p), + !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), + [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>; +} + +defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>; +defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>; +defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>; +defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>; +defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>; +defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>; +defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>; +defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>; +defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>; +defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>; +defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>; + +// Special select for predicate operands +def : Pat<(i1 (select Int1Regs:$p, Int1Regs:$a, Int1Regs:$b)), + (ORb1rr (ANDb1rr Int1Regs:$p, Int1Regs:$a), + (ANDb1rr (NOT1 Int1Regs:$p), Int1Regs:$b))>; + +//----------------------------------- // Data Movement (Load / Store, Move) //----------------------------------- @@ -1253,12 +1321,19 @@ def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a), "mov.u64 \t$dst, $a;", [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>; +// Get pointer to local stack +def MOV_DEPOT_ADDR + : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num), + "mov.u32 \t$d, __local_depot$num;", []>; +def MOV_DEPOT_ADDR_64 + : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num), + "mov.u64 \t$d, __local_depot$num;", []>; + + // copyPhysreg is hard-coded in NVPTXInstrInfo.cpp let IsSimpleMove=1 in { def IMOV1rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss), "mov.pred \t$dst, $sss;", []>; -def IMOV8rr: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$sss), - "mov.u16 \t$dst, $sss;", []>; def IMOV16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss), "mov.u16 \t$dst, $sss;", []>; def IMOV32rr: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss), @@ -1274,9 +1349,6 @@ def FMOV64rr: NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src), def IMOV1ri: 
NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src), "mov.pred \t$dst, $src;", [(set Int1Regs:$dst, imm:$src)]>; -def IMOV8ri: NVPTXInst<(outs Int8Regs:$dst), (ins i8imm:$src), - "mov.u16 \t$dst, $src;", - [(set Int8Regs:$dst, imm:$src)]>; def IMOV16ri: NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src), "mov.u16 \t$dst, $src;", [(set Int16Regs:$dst, imm:$src)]>; @@ -1308,440 +1380,194 @@ def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr), // Comparison and Selection //----------------------------------- -// Generate string block like -// { -// .reg .pred p; -// setp.gt.s16 p, %a, %b; -// selp.s16 %dst, -1, 0, p; -// } -// when OpcStr=setp.gt.s sz1=16 sz2=16 d=%dst a=%a b=%b -class Set_Str<string OpcStr, string sz1, string sz2, string d, string a, - string b> { - string t1 = "{{\n\t.reg .pred p;\n\t"; - string t2 = !strconcat(t1 , OpcStr); - string t3 = !strconcat(t2 , sz1); - string t4 = !strconcat(t3 , " \tp, "); - string t5 = !strconcat(t4 , a); - string t6 = !strconcat(t5 , ", "); - string t7 = !strconcat(t6 , b); - string t8 = !strconcat(t7 , ";\n\tselp.s"); - string t9 = !strconcat(t8 , sz2); - string t10 = !strconcat(t9, " \t"); - string t11 = !strconcat(t10, d); - string s = !strconcat(t11, ", -1, 0, p;\n\t}}"); +multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode, + Instruction setp_16rr, + Instruction setp_16ri, + Instruction setp_16ir, + Instruction setp_32rr, + Instruction setp_32ri, + Instruction setp_32ir, + Instruction setp_64rr, + Instruction setp_64ri, + Instruction setp_64ir, + Instruction set_16rr, + Instruction set_16ri, + Instruction set_16ir, + Instruction set_32rr, + Instruction set_32ri, + Instruction set_32ir, + Instruction set_64rr, + Instruction set_64ri, + Instruction set_64ir> { + // i16 -> pred + def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)), + (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>; + def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)), + (setp_16ri Int16Regs:$a, imm:$b, Mode)>; + def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)), + (setp_16ir imm:$a, Int16Regs:$b, Mode)>; + // i32 -> pred + def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)), + (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>; + def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)), + (setp_32ri Int32Regs:$a, imm:$b, Mode)>; + def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)), + (setp_32ir imm:$a, Int32Regs:$b, Mode)>; + // i64 -> pred + def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)), + (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>; + def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)), + (setp_64ri Int64Regs:$a, imm:$b, Mode)>; + def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)), + (setp_64ir imm:$a, Int64Regs:$b, Mode)>; + + // i16 -> i32 + def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)), + (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>; + def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)), + (set_16ri Int16Regs:$a, imm:$b, Mode)>; + def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)), + (set_16ir imm:$a, Int16Regs:$b, Mode)>; + // i32 -> i32 + def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)), + (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>; + def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)), + (set_32ri Int32Regs:$a, imm:$b, Mode)>; + def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)), + (set_32ir imm:$a, Int32Regs:$b, Mode)>; + // i64 -> i32 + def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)), + (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>; + def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)), + (set_64ri Int64Regs:$a, imm:$b, Mode)>; + def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)), + (set_64ir imm:$a, 
Int64Regs:$b, Mode)>; } -// Generate string block like -// { -// .reg .pred p; -// .reg .s16 %temp1; -// .reg .s16 %temp2; -// cvt.s16.s8 %temp1, %a; -// cvt s16.s8 %temp1, %b; -// setp.gt.s16 p, %temp1, %temp2; -// selp.s16 %dst, -1, 0, p; -// } -// when OpcStr=setp.gt.s d=%dst a=%a b=%b type=s16 cvt=cvt.s16.s8 -class Set_Stri8<string OpcStr, string d, string a, string b, string type, - string cvt> { - string t1 = "{{\n\t.reg .pred p;\n\t"; - string t2 = !strconcat(t1, ".reg ."); - string t3 = !strconcat(t2, type); - string t4 = !strconcat(t3, " %temp1;\n\t"); - string t5 = !strconcat(t4, ".reg ."); - string t6 = !strconcat(t5, type); - string t7 = !strconcat(t6, " %temp2;\n\t"); - string t8 = !strconcat(t7, cvt); - string t9 = !strconcat(t8, " \t%temp1, "); - string t10 = !strconcat(t9, a); - string t11 = !strconcat(t10, ";\n\t"); - string t12 = !strconcat(t11, cvt); - string t13 = !strconcat(t12, " \t%temp2, "); - string t14 = !strconcat(t13, b); - string t15 = !strconcat(t14, ";\n\t"); - string t16 = !strconcat(t15, OpcStr); - string t17 = !strconcat(t16, "16"); - string t18 = !strconcat(t17, " \tp, %temp1, %temp2;\n\t"); - string t19 = !strconcat(t18, "selp.s16 \t"); - string t20 = !strconcat(t19, d); - string s = !strconcat(t20, ", -1, 0, p;\n\t}}"); +multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode> + : ISET_FORMAT<OpNode, Mode, + SETP_s16rr, SETP_s16ri, SETP_s16ir, + SETP_s32rr, SETP_s32ri, SETP_s32ir, + SETP_s64rr, SETP_s64ri, SETP_s64ir, + SET_s16rr, SET_s16ri, SET_s16ir, + SET_s32rr, SET_s32ri, SET_s32ir, + SET_s64rr, SET_s64ri, SET_s64ir> { + // TableGen doesn't like empty multiclasses + def : PatLeaf<(i32 0)>; } -multiclass ISET_FORMAT<string OpcStr, string OpcStr_u32, PatFrag OpNode, - string TypeStr, string CVTStr> { - def i8rr_toi8: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - Set_Stri8<OpcStr, "$dst", "$a", "$b", TypeStr, CVTStr>.s, - []>; - def i16rr_toi16: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, - Int16Regs:$b), - Set_Str<OpcStr, "16", "16", "$dst", "$a", "$b">.s, - []>; - def i32rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, - Int32Regs:$b), - Set_Str<OpcStr, "32", "32", "$dst", "$a", "$b">.s, - []>; - def i64rr_toi64: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, - Int64Regs:$b), - Set_Str<OpcStr, "64", "64", "$dst", "$a", "$b">.s, - []>; - - def i8rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - Handle_i8rr<OpcStr, TypeStr, CVTStr>.s, - [(set Int1Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>; - def i8ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int8Regs:$a, i8imm:$b), - Handle_i8ri<OpcStr, TypeStr, CVTStr>.s, - [(set Int1Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>; - def i8ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i8imm:$a, Int8Regs:$b), - Handle_i8ir<OpcStr, TypeStr, CVTStr>.s, - [(set Int1Regs:$dst, (OpNode imm:$a, Int8Regs:$b))]>; - def i16rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>; - def i16ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int16Regs:$a, i16imm:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>; - def i16ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i16imm:$a, Int16Regs:$b), - !strconcat(OpcStr, "16 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode imm:$a, Int16Regs:$b))]>; - def i32rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b), - !strconcat(OpcStr, "32 \t$dst, 
$a, $b;"), - [(set Int1Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>; - def i32ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int32Regs:$a, i32imm:$b), - !strconcat(OpcStr, "32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>; - def i32ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i32imm:$a, Int32Regs:$b), - !strconcat(OpcStr, "32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode imm:$a, Int32Regs:$b))]>; - def i64rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b), - !strconcat(OpcStr, "64 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>; - def i64ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int64Regs:$a, i64imm:$b), - !strconcat(OpcStr, "64 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>; - def i64ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i64imm:$a, Int64Regs:$b), - !strconcat(OpcStr, "64 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode imm:$a, Int64Regs:$b))]>; - - def i8rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b), - Handle_i8rr<OpcStr_u32, TypeStr, CVTStr>.s, - [(set Int32Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>; - def i8ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int8Regs:$a, i8imm:$b), - Handle_i8ri<OpcStr_u32, TypeStr, CVTStr>.s, - [(set Int32Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>; - def i8ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i8imm:$a, Int8Regs:$b), - Handle_i8ir<OpcStr_u32, TypeStr, CVTStr>.s, - [(set Int32Regs:$dst, (OpNode imm:$a, Int8Regs:$b))]>; - def i16rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, - Int16Regs:$b), - !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>; - def i16ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b), - !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>; - def i16ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i16imm:$a, Int16Regs:$b), - !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode imm:$a, Int16Regs:$b))]>; - def i32rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, - Int32Regs:$b), - !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>; - def i32ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b), - !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>; - def i32ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, Int32Regs:$b), - !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode imm:$a, Int32Regs:$b))]>; - def i64rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$a, - Int64Regs:$b), - !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>; - def i64ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$a, i64imm:$b), - !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>; - def i64ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i64imm:$a, Int64Regs:$b), - !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode imm:$a, Int64Regs:$b))]>; +multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode> + : ISET_FORMAT<OpNode, Mode, + SETP_u16rr, SETP_u16ri, SETP_u16ir, + SETP_u32rr, SETP_u32ri, SETP_u32ir, + SETP_u64rr, SETP_u64ri, SETP_u64ir, + SET_u16rr, SET_u16ri, SET_u16ir, + SET_u32rr, SET_u32ri, SET_u32ir, + SET_u64rr, SET_u64ri, 
SET_u64ir> { + // TableGen doesn't like empty multiclasses + def : PatLeaf<(i32 0)>; } -multiclass FSET_FORMAT<string OpcStr, string OpcStr_u32, PatFrag OpNode> { - def f32rr_toi32_ftz: NVPTXInst<(outs Int32Regs:$dst), (ins Float32Regs:$a, - Float32Regs:$b), - Set_Str<OpcStr, "ftz.f32", "32", "$dst", "$a", "$b">.s, - []>, Requires<[doF32FTZ]>; - def f32rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Float32Regs:$a, - Float32Regs:$b), - Set_Str<OpcStr, "f32", "32", "$dst", "$a", "$b">.s, - []>; - def f64rr_toi64: NVPTXInst<(outs Int64Regs:$dst), (ins Float64Regs:$a, - Float64Regs:$b), - Set_Str<OpcStr, "f64", "64", "$dst", "$a", "$b">.s, - []>; - def f64rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Float64Regs:$a, - Float64Regs:$b), - Set_Str<OpcStr, "f64", "32", "$dst", "$a", "$b">.s, - []>; - - def f32rr_p_ftz: NVPTXInst<(outs Int1Regs:$dst), (ins Float32Regs:$a - , Float32Regs:$b), - !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]> - , Requires<[doF32FTZ]>; - def f32rr_p: NVPTXInst<(outs Int1Regs:$dst), - (ins Float32Regs:$a, Float32Regs:$b), - !strconcat(OpcStr, "f32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>; - def f32ri_p_ftz: NVPTXInst<(outs Int1Regs:$dst), - (ins Float32Regs:$a, f32imm:$b), - !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>, - Requires<[doF32FTZ]>; - def f32ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Float32Regs:$a, f32imm:$b), - !strconcat(OpcStr, "f32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>; - def f32ir_p_ftz: NVPTXInst<(outs Int1Regs:$dst), - (ins f32imm:$a, Float32Regs:$b), - !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>, - Requires<[doF32FTZ]>; - def f32ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins f32imm:$a, Float32Regs:$b), - !strconcat(OpcStr, "f32 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>; - def f64rr_p: NVPTXInst<(outs Int1Regs:$dst), - (ins Float64Regs:$a, Float64Regs:$b), - !strconcat(OpcStr, "f64 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>; - def f64ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Float64Regs:$a, f64imm:$b), - !strconcat(OpcStr, "f64 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>; - def f64ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins f64imm:$a, Float64Regs:$b), - !strconcat(OpcStr, "f64 \t$dst, $a, $b;"), - [(set Int1Regs:$dst, (OpNode fpimm:$a, Float64Regs:$b))]>; - - def f32rr_u32_ftz: NVPTXInst<(outs Int32Regs:$dst), - (ins Float32Regs:$a, Float32Regs:$b), - !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>; - def f32rr_u32: NVPTXInst<(outs Int32Regs:$dst), - (ins Float32Regs:$a, Float32Regs:$b), - !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>; - def f32ri_u32_ftz: NVPTXInst<(outs Int32Regs:$dst), - (ins Float32Regs:$a, f32imm:$b), - !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>; - def f32ri_u32: NVPTXInst<(outs Int32Regs:$dst), - (ins Float32Regs:$a, f32imm:$b), - !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"), - [(set Int32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>; - def f32ir_u32_ftz: NVPTXInst<(outs Int32Regs:$dst), - (ins f32imm:$a, Float32Regs:$b), - 
!strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
-  def f32ir_u32: NVPTXInst<(outs Int32Regs:$dst),
-                 (ins f32imm:$a, Float32Regs:$b),
-               !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
-  def f64rr_u32: NVPTXInst<(outs Int32Regs:$dst),
-                 (ins Float64Regs:$a, Float64Regs:$b),
-               !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
-  def f64ri_u32: NVPTXInst<(outs Int32Regs:$dst),
-                 (ins Float64Regs:$a, f64imm:$b),
-               !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
-  def f64ir_u32: NVPTXInst<(outs Int32Regs:$dst),
-                 (ins f64imm:$a, Float64Regs:$b),
-               !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode fpimm:$a, Float64Regs:$b))]>;
+defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
+defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
+defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
+defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
+defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
+defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
+defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
+defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
+defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
+defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
+defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
+defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
+
+// i1 compares
+def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
+          (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
+          (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+
+def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
+          (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
+          (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+// i1 compare -> i32
+def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+          (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),
+          (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+
+
+multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
+  // f32 -> pred
+  def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+            (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+        Requires<[doF32FTZ]>;
+  def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+            (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+  def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+            (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+        Requires<[doF32FTZ]>;
+  def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+            (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+  def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+            (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+        Requires<[doF32FTZ]>;
+  def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+            (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+  // f64 -> pred
+  def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
+            (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+  def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
+            (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+  def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
+            (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+
+  // f32 -> i32
+  def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+            (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+        Requires<[doF32FTZ]>;
+  def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+            (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+  def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+            (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+
Requires<[doF32FTZ]>; + def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)), + (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>; + def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)), + (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>, + Requires<[doF32FTZ]>; + def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)), + (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>; + + // f64 -> i32 + def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)), + (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>; + def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)), + (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>; + def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)), + (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>; } -defm ISetSGT -: ISET_FORMAT<"setp.gt.s", "set.gt.u32.s", setgt, "s16", "cvt.s16.s8">; -defm ISetUGT -: ISET_FORMAT<"setp.gt.u", "set.gt.u32.u", setugt, "u16", "cvt.u16.u8">; -defm ISetSLT -: ISET_FORMAT<"setp.lt.s", "set.lt.u32.s", setlt, "s16", "cvt.s16.s8">; -defm ISetULT -: ISET_FORMAT<"setp.lt.u", "set.lt.u32.u", setult, "u16", "cvt.u16.u8">; -defm ISetSGE -: ISET_FORMAT<"setp.ge.s", "set.ge.u32.s", setge, "s16", "cvt.s16.s8">; -defm ISetUGE -: ISET_FORMAT<"setp.ge.u", "set.ge.u32.u", setuge, "u16", "cvt.u16.u8">; -defm ISetSLE -: ISET_FORMAT<"setp.le.s", "set.le.u32.s", setle, "s16", "cvt.s16.s8">; -defm ISetULE -: ISET_FORMAT<"setp.le.u", "set.le.u32.u", setule, "u16", "cvt.u16.u8">; -defm ISetSEQ -: ISET_FORMAT<"setp.eq.s", "set.eq.u32.s", seteq, "s16", "cvt.s16.s8">; -defm ISetUEQ -: ISET_FORMAT<"setp.eq.u", "set.eq.u32.u", setueq, "u16", "cvt.u16.u8">; -defm ISetSNE -: ISET_FORMAT<"setp.ne.s", "set.ne.u32.s", setne, "s16", "cvt.s16.s8">; -defm ISetUNE -: ISET_FORMAT<"setp.ne.u", "set.ne.u32.u", setune, "u16", "cvt.u16.u8">; - -def ISetSNEi1rr_p : NVPTXInst<(outs Int1Regs:$dst), - (ins Int1Regs:$a, Int1Regs:$b), - "xor.pred \t$dst, $a, $b;", - [(set Int1Regs:$dst, (setne Int1Regs:$a, Int1Regs:$b))]>; -def ISetUNEi1rr_p : NVPTXInst<(outs Int1Regs:$dst), - (ins Int1Regs:$a, Int1Regs:$b), - "xor.pred \t$dst, $a, $b;", - [(set Int1Regs:$dst, (setune Int1Regs:$a, Int1Regs:$b))]>; -def ISetSEQi1rr_p : NVPTXInst<(outs Int1Regs:$dst), - (ins Int1Regs:$a, Int1Regs:$b), - !strconcat("{{\n\t", - !strconcat(".reg .pred temp;\n\t", - !strconcat("xor.pred \ttemp, $a, $b;\n\t", - !strconcat("not.pred \t$dst, temp;\n\t}}","")))), - [(set Int1Regs:$dst, (seteq Int1Regs:$a, Int1Regs:$b))]>; -def ISetUEQi1rr_p : NVPTXInst<(outs Int1Regs:$dst), - (ins Int1Regs:$a, Int1Regs:$b), - !strconcat("{{\n\t", - !strconcat(".reg .pred temp;\n\t", - !strconcat("xor.pred \ttemp, $a, $b;\n\t", - !strconcat("not.pred \t$dst, temp;\n\t}}","")))), - [(set Int1Regs:$dst, (setueq Int1Regs:$a, Int1Regs:$b))]>; - -// Compare 2 i1's and produce a u32 -def ISETSNEi1rr_u32 : NVPTXInst<(outs Int32Regs:$dst), - (ins Int1Regs:$a, Int1Regs:$b), - !strconcat("{{\n\t", - !strconcat(".reg .pred temp;\n\t", - !strconcat("xor.pred \ttemp, $a, $b;\n\t", - !strconcat("selp.u32 \t$dst, -1, 0, temp;", "\n\t}}")))), - [(set Int32Regs:$dst, (setne Int1Regs:$a, Int1Regs:$b))]>; -def ISETSEQi1rr_u32 : NVPTXInst<(outs Int32Regs:$dst), - (ins Int1Regs:$a, Int1Regs:$b), - !strconcat("{{\n\t", - !strconcat(".reg .pred temp;\n\t", - !strconcat("xor.pred \ttemp, $a, $b;\n\t", - !strconcat("selp.u32 \t$dst, 0, -1, temp;", "\n\t}}")))), - [(set Int32Regs:$dst, (seteq Int1Regs:$a, Int1Regs:$b))]>; - -defm FSetGT : FSET_FORMAT<"setp.gt.", "set.gt.u32.", setogt>; -defm FSetLT : FSET_FORMAT<"setp.lt.", "set.lt.u32.", setolt>; -defm FSetGE : FSET_FORMAT<"setp.ge.", "set.ge.u32.", 
setoge>; -defm FSetLE : FSET_FORMAT<"setp.le.", "set.le.u32.", setole>; -defm FSetEQ : FSET_FORMAT<"setp.eq.", "set.eq.u32.", setoeq>; -defm FSetNE : FSET_FORMAT<"setp.ne.", "set.ne.u32.", setone>; - -defm FSetUGT : FSET_FORMAT<"setp.gtu.", "set.gtu.u32.", setugt>; -defm FSetULT : FSET_FORMAT<"setp.ltu.", "set.ltu.u32.",setult>; -defm FSetUGE : FSET_FORMAT<"setp.geu.", "set.geu.u32.",setuge>; -defm FSetULE : FSET_FORMAT<"setp.leu.", "set.leu.u32.",setule>; -defm FSetUEQ : FSET_FORMAT<"setp.equ.", "set.equ.u32.",setueq>; -defm FSetUNE : FSET_FORMAT<"setp.neu.", "set.neu.u32.",setune>; - -defm FSetNUM : FSET_FORMAT<"setp.num.", "set.num.u32.",seto>; -defm FSetNAN : FSET_FORMAT<"setp.nan.", "set.nan.u32.",setuo>; - -def SELECTi1rr : Pat<(i1 (select Int1Regs:$p, Int1Regs:$a, Int1Regs:$b)), - (ORb1rr (ANDb1rr Int1Regs:$p, Int1Regs:$a), - (ANDb1rr (NOT1 Int1Regs:$p), Int1Regs:$b))>; -def SELECTi8rr : NVPTXInst<(outs Int8Regs:$dst), - (ins Int8Regs:$a, Int8Regs:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int8Regs:$dst, (select Int1Regs:$p, Int8Regs:$a, Int8Regs:$b))]>; -def SELECTi8ri : NVPTXInst<(outs Int8Regs:$dst), - (ins Int8Regs:$a, i8imm:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int8Regs:$dst, (select Int1Regs:$p, Int8Regs:$a, imm:$b))]>; -def SELECTi8ir : NVPTXInst<(outs Int8Regs:$dst), - (ins i8imm:$a, Int8Regs:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int8Regs:$dst, (select Int1Regs:$p, imm:$a, Int8Regs:$b))]>; -def SELECTi8ii : NVPTXInst<(outs Int8Regs:$dst), - (ins i8imm:$a, i8imm:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int8Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>; - -def SELECTi16rr : NVPTXInst<(outs Int16Regs:$dst), - (ins Int16Regs:$a, Int16Regs:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int16Regs:$dst, (select Int1Regs:$p, Int16Regs:$a, Int16Regs:$b))]>; -def SELECTi16ri : NVPTXInst<(outs Int16Regs:$dst), - (ins Int16Regs:$a, i16imm:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int16Regs:$dst, (select Int1Regs:$p, Int16Regs:$a, imm:$b))]>; -def SELECTi16ir : NVPTXInst<(outs Int16Regs:$dst), - (ins i16imm:$a, Int16Regs:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int16Regs:$dst, (select Int1Regs:$p, imm:$a, Int16Regs:$b))]>; -def SELECTi16ii : NVPTXInst<(outs Int16Regs:$dst), - (ins i16imm:$a, i16imm:$b, Int1Regs:$p), - "selp.b16 \t$dst, $a, $b, $p;", - [(set Int16Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>; - -def SELECTi32rr : NVPTXInst<(outs Int32Regs:$dst), - (ins Int32Regs:$a, Int32Regs:$b, Int1Regs:$p), - "selp.b32 \t$dst, $a, $b, $p;", - [(set Int32Regs:$dst, (select Int1Regs:$p, Int32Regs:$a, Int32Regs:$b))]>; -def SELECTi32ri : NVPTXInst<(outs Int32Regs:$dst), - (ins Int32Regs:$a, i32imm:$b, Int1Regs:$p), - "selp.b32 \t$dst, $a, $b, $p;", - [(set Int32Regs:$dst, (select Int1Regs:$p, Int32Regs:$a, imm:$b))]>; -def SELECTi32ir : NVPTXInst<(outs Int32Regs:$dst), - (ins i32imm:$a, Int32Regs:$b, Int1Regs:$p), - "selp.b32 \t$dst, $a, $b, $p;", - [(set Int32Regs:$dst, (select Int1Regs:$p, imm:$a, Int32Regs:$b))]>; -def SELECTi32ii : NVPTXInst<(outs Int32Regs:$dst), - (ins i32imm:$a, i32imm:$b, Int1Regs:$p), - "selp.b32 \t$dst, $a, $b, $p;", - [(set Int32Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>; - -def SELECTi64rr : NVPTXInst<(outs Int64Regs:$dst), - (ins Int64Regs:$a, Int64Regs:$b, Int1Regs:$p), - "selp.b64 \t$dst, $a, $b, $p;", - [(set Int64Regs:$dst, (select Int1Regs:$p, Int64Regs:$a, Int64Regs:$b))]>; -def SELECTi64ri : NVPTXInst<(outs 
Int64Regs:$dst), - (ins Int64Regs:$a, i64imm:$b, Int1Regs:$p), - "selp.b64 \t$dst, $a, $b, $p;", - [(set Int64Regs:$dst, (select Int1Regs:$p, Int64Regs:$a, imm:$b))]>; -def SELECTi64ir : NVPTXInst<(outs Int64Regs:$dst), - (ins i64imm:$a, Int64Regs:$b, Int1Regs:$p), - "selp.b64 \t$dst, $a, $b, $p;", - [(set Int64Regs:$dst, (select Int1Regs:$p, imm:$a, Int64Regs:$b))]>; -def SELECTi64ii : NVPTXInst<(outs Int64Regs:$dst), - (ins i64imm:$a, i64imm:$b, Int1Regs:$p), - "selp.b64 \t$dst, $a, $b, $p;", - [(set Int64Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>; - -def SELECTf32rr : NVPTXInst<(outs Float32Regs:$dst), - (ins Float32Regs:$a, Float32Regs:$b, Int1Regs:$p), - "selp.f32 \t$dst, $a, $b, $p;", - [(set Float32Regs:$dst, - (select Int1Regs:$p, Float32Regs:$a, Float32Regs:$b))]>; -def SELECTf32ri : NVPTXInst<(outs Float32Regs:$dst), - (ins Float32Regs:$a, f32imm:$b, Int1Regs:$p), - "selp.f32 \t$dst, $a, $b, $p;", - [(set Float32Regs:$dst, (select Int1Regs:$p, Float32Regs:$a, fpimm:$b))]>; -def SELECTf32ir : NVPTXInst<(outs Float32Regs:$dst), - (ins f32imm:$a, Float32Regs:$b, Int1Regs:$p), - "selp.f32 \t$dst, $a, $b, $p;", - [(set Float32Regs:$dst, (select Int1Regs:$p, fpimm:$a, Float32Regs:$b))]>; -def SELECTf32ii : NVPTXInst<(outs Float32Regs:$dst), - (ins f32imm:$a, f32imm:$b, Int1Regs:$p), - "selp.f32 \t$dst, $a, $b, $p;", - [(set Float32Regs:$dst, (select Int1Regs:$p, fpimm:$a, fpimm:$b))]>; - -def SELECTf64rr : NVPTXInst<(outs Float64Regs:$dst), - (ins Float64Regs:$a, Float64Regs:$b, Int1Regs:$p), - "selp.f64 \t$dst, $a, $b, $p;", - [(set Float64Regs:$dst, - (select Int1Regs:$p, Float64Regs:$a, Float64Regs:$b))]>; -def SELECTf64ri : NVPTXInst<(outs Float64Regs:$dst), - (ins Float64Regs:$a, f64imm:$b, Int1Regs:$p), - "selp.f64 \t$dst, $a, $b, $p;", - [(set Float64Regs:$dst, (select Int1Regs:$p, Float64Regs:$a, fpimm:$b))]>; -def SELECTf64ir : NVPTXInst<(outs Float64Regs:$dst), - (ins f64imm:$a, Float64Regs:$b, Int1Regs:$p), - "selp.f64 \t$dst, $a, $b, $p;", - [(set Float64Regs:$dst, (select Int1Regs:$p, fpimm:$a, Float64Regs:$b))]>; -def SELECTf64ii : NVPTXInst<(outs Float64Regs:$dst), - (ins f64imm:$a, f64imm:$b, Int1Regs:$p), - "selp.f64 \t $dst, $a, $b, $p;", - [(set Float64Regs:$dst, (select Int1Regs:$p, fpimm:$a, fpimm:$b))]>; +defm FSetGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>; +defm FSetLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>; +defm FSetGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>; +defm FSetLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>; +defm FSetEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>; +defm FSetNE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>; + +defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>; +defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>; +defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>; +defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>; +defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>; +defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>; + +defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>; +defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>; //def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad, // [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; @@ -1751,17 +1577,22 @@ def SDTDeclareParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, def SDTDeclareScalarParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>; def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>; +def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>; +def 
SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>; def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>; def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>; def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>; +def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>; +def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>; def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>; def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>; def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>; def SDTCallVoidProfile : SDTypeProfile<0, 1, []>; def SDTCallValProfile : SDTypeProfile<1, 0, []>; def SDTMoveParamProfile : SDTypeProfile<1, 1, []>; -def SDTMoveRetvalProfile : SDTypeProfile<0, 1, []>; def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>; +def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>; +def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>; def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>; def DeclareParam : SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile, @@ -1776,18 +1607,24 @@ def DeclareRet : SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def LoadParam : SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile, [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>; +def LoadParamV2 : SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile, + [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>; +def LoadParamV4 : SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile, + [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>; def PrintCall : SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def PrintCallUni : SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def StoreParam : SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; +def StoreParamV2 : SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile, + [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; +def StoreParamV4 : SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile, + [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def StoreParamU32 : SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def StoreParamS32 : SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; -def MoveToParam : SDNode<"NVPTXISD::MoveToParam", SDTStoreParamProfile, - [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def CallArgBegin : SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def CallArg : SDNode<"NVPTXISD::CallArg", SDTCallArgProfile, @@ -1804,12 +1641,12 @@ def CallVal : SDNode<"NVPTXISD::CallVal", SDTCallValProfile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; def MoveParam : SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>; -def MoveRetval : SDNode<"NVPTXISD::MoveRetval", SDTMoveRetvalProfile, - [SDNPHasChain, SDNPSideEffect]>; def StoreRetval : SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile, [SDNPHasChain, SDNPSideEffect]>; -def MoveToRetval : SDNode<"NVPTXISD::MoveToRetval", SDTStoreRetvalProfile, - [SDNPHasChain, SDNPSideEffect]>; +def StoreRetvalV2 : 
SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile, + [SDNPHasChain, SDNPSideEffect]>; +def StoreRetvalV4 : SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile, + [SDNPHasChain, SDNPSideEffect]>; def PseudoUseParam : SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile, [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>; @@ -1820,7 +1657,7 @@ class LoadParamMemInst<NVPTXRegClass regclass, string opstr> : NVPTXInst<(outs regclass:$dst), (ins i32imm:$b), !strconcat(!strconcat("ld.param", opstr), "\t$dst, [retval0+$b];"), - [(set regclass:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>; + []>; class LoadParamRegInst<NVPTXRegClass regclass, string opstr> : NVPTXInst<(outs regclass:$dst), (ins i32imm:$b), @@ -1828,35 +1665,57 @@ class LoadParamRegInst<NVPTXRegClass regclass, string opstr> : "\t$dst, retval$b;"), [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>; +class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> : + NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b), + !strconcat(!strconcat("ld.param.v2", opstr), + "\t{{$dst, $dst2}}, [retval0+$b];"), []>; + +class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> : + NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3, + regclass:$dst4), + (ins i32imm:$b), + !strconcat(!strconcat("ld.param.v4", opstr), + "\t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"), []>; + class StoreParamInst<NVPTXRegClass regclass, string opstr> : NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b), !strconcat(!strconcat("st.param", opstr), "\t[param$a+$b], $val;"), - [(StoreParam (i32 imm:$a), (i32 imm:$b), regclass:$val)]>; + []>; -class MoveToParamInst<NVPTXRegClass regclass, string opstr> : - NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b), - !strconcat(!strconcat("mov", opstr), - "\tparam$a, $val;"), - [(MoveToParam (i32 imm:$a), (i32 imm:$b), regclass:$val)]>; +class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> : + NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, + i32imm:$a, i32imm:$b), + !strconcat(!strconcat("st.param.v2", opstr), + "\t[param$a+$b], {{$val, $val2}};"), + []>; + +class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> : + NVPTXInst<(outs), (ins regclass:$val, regclass:$val1, regclass:$val2, + regclass:$val3, i32imm:$a, i32imm:$b), + !strconcat(!strconcat("st.param.v4", opstr), + "\t[param$a+$b], {{$val, $val2, $val3, $val4}};"), + []>; class StoreRetvalInst<NVPTXRegClass regclass, string opstr> : NVPTXInst<(outs), (ins regclass:$val, i32imm:$a), !strconcat(!strconcat("st.param", opstr), "\t[func_retval0+$a], $val;"), - [(StoreRetval (i32 imm:$a), regclass:$val)]>; + []>; -class MoveToRetvalInst<NVPTXRegClass regclass, string opstr> : - NVPTXInst<(outs), (ins i32imm:$num, regclass:$val), - !strconcat(!strconcat("mov", opstr), - "\tfunc_retval$num, $val;"), - [(MoveToRetval (i32 imm:$num), regclass:$val)]>; +class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> : + NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a), + !strconcat(!strconcat("st.param.v2", opstr), + "\t[func_retval0+$a], {{$val, $val2}};"), + []>; -class MoveRetvalInst<NVPTXRegClass regclass, string opstr> : - NVPTXInst<(outs), (ins regclass:$val), - !strconcat(!strconcat("mov", opstr), - "\tfunc_retval0, $val;"), - [(MoveRetval regclass:$val)]>; +class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> : + NVPTXInst<(outs), + (ins regclass:$val, regclass:$val2, regclass:$val3, + regclass:$val4, i32imm:$a), + !strconcat(!strconcat("st.param.v4", 
opstr), + "\t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"), + []>; def PrintCallRetInst1 : NVPTXInst<(outs), (ins), "call (retval0), ", @@ -1919,126 +1778,81 @@ def PrintCallUniNoRetInst : NVPTXInst<(outs), (ins), "call.uni ", def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">; def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">; def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">; -def LoadParamMemI8 : LoadParamMemInst<Int8Regs, ".b8">; - -//def LoadParamMemI16 : NVPTXInst<(outs Int16Regs:$dst), (ins i32imm:$b), -// !strconcat("ld.param.b32\ttemp_param_reg, [retval0+$b];\n\t", -// "cvt.u16.u32\t$dst, temp_param_reg;"), -// [(set Int16Regs:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>; -//def LoadParamMemI8 : NVPTXInst<(outs Int8Regs:$dst), (ins i32imm:$b), -// !strconcat("ld.param.b32\ttemp_param_reg, [retval0+$b];\n\t", -// "cvt.u16.u32\t$dst, temp_param_reg;"), -// [(set Int8Regs:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>; - +def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">; +def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">; +def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">; +def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">; +def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">; +def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">; +def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">; +def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">; def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">; def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">; - -def LoadParamRegI64 : LoadParamRegInst<Int64Regs, ".b64">; -def LoadParamRegI32 : LoadParamRegInst<Int32Regs, ".b32">; -def LoadParamRegI16 : NVPTXInst<(outs Int16Regs:$dst), (ins i32imm:$b), - "cvt.u16.u32\t$dst, retval$b;", - [(set Int16Regs:$dst, - (LoadParam (i32 0), (i32 imm:$b)))]>; -def LoadParamRegI8 : NVPTXInst<(outs Int8Regs:$dst), (ins i32imm:$b), - "cvt.u16.u32\t$dst, retval$b;", - [(set Int8Regs:$dst, - (LoadParam (i32 0), (i32 imm:$b)))]>; - -def LoadParamRegF32 : LoadParamRegInst<Float32Regs, ".f32">; -def LoadParamRegF64 : LoadParamRegInst<Float64Regs, ".f64">; +def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">; +def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">; +def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">; def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">; def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">; -def StoreParamI16 : NVPTXInst<(outs), - (ins Int16Regs:$val, i32imm:$a, i32imm:$b), - "st.param.b16\t[param$a+$b], $val;", - [(StoreParam (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>; - -def StoreParamI8 : NVPTXInst<(outs), - (ins Int8Regs:$val, i32imm:$a, i32imm:$b), - "st.param.b8\t[param$a+$b], $val;", - [(StoreParam - (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>; - -def StoreParamS32I16 : NVPTXInst<(outs), - (ins Int16Regs:$val, i32imm:$a, i32imm:$b), - !strconcat("cvt.s32.s16\ttemp_param_reg, $val;\n\t", - "st.param.b32\t[param$a+$b], temp_param_reg;"), - [(StoreParamS32 (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>; -def StoreParamU32I16 : NVPTXInst<(outs), - (ins Int16Regs:$val, i32imm:$a, i32imm:$b), - !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t", - "st.param.b32\t[param$a+$b], temp_param_reg;"), - [(StoreParamU32 (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>; - -def StoreParamU32I8 : NVPTXInst<(outs), - (ins Int8Regs:$val, i32imm:$a, i32imm:$b), - !strconcat("cvt.u32.u8\ttemp_param_reg, $val;\n\t", - "st.param.b32\t[param$a+$b], 
temp_param_reg;"), - [(StoreParamU32 (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>; -def StoreParamS32I8 : NVPTXInst<(outs), - (ins Int8Regs:$val, i32imm:$a, i32imm:$b), - !strconcat("cvt.s32.s8\ttemp_param_reg, $val;\n\t", - "st.param.b32\t[param$a+$b], temp_param_reg;"), - [(StoreParamS32 (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>; +def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">; +def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">; +def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">; +def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">; +def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">; +def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">; + +// FIXME: StoreParamV4Inst crashes llvm-tblgen :( +//def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">; +def StoreParamV4I32 : NVPTXInst<(outs), (ins Int32Regs:$val, Int32Regs:$val2, + Int32Regs:$val3, Int32Regs:$val4, + i32imm:$a, i32imm:$b), + "st.param.b32\t[param$a+$b], {{$val, $val2, $val3, $val4}};", + []>; + +def StoreParamV4I16 : NVPTXInst<(outs), (ins Int16Regs:$val, Int16Regs:$val2, + Int16Regs:$val3, Int16Regs:$val4, + i32imm:$a, i32imm:$b), + "st.param.v4.b16\t[param$a+$b], {{$val, $val2, $val3, $val4}};", + []>; + +def StoreParamV4I8 : NVPTXInst<(outs), (ins Int16Regs:$val, Int16Regs:$val2, + Int16Regs:$val3, Int16Regs:$val4, + i32imm:$a, i32imm:$b), + "st.param.v4.b8\t[param$a+$b], {{$val, $val2, $val3, $val4}};", + []>; def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">; def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">; +def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">; +def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">; +// FIXME: StoreParamV4Inst crashes llvm-tblgen :( +//def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">; +def StoreParamV4F32 : NVPTXInst<(outs), + (ins Float32Regs:$val, Float32Regs:$val2, + Float32Regs:$val3, Float32Regs:$val4, + i32imm:$a, i32imm:$b), + "st.param.v4.f32\t[param$a+$b], {{$val, $val2, $val3, $val4}};", + []>; -def MoveToParamI64 : MoveToParamInst<Int64Regs, ".b64">; -def MoveToParamI32 : MoveToParamInst<Int32Regs, ".b32">; -def MoveToParamF64 : MoveToParamInst<Float64Regs, ".f64">; -def MoveToParamF32 : MoveToParamInst<Float32Regs, ".f32">; -def MoveToParamI16 : NVPTXInst<(outs), - (ins Int16Regs:$val, i32imm:$a, i32imm:$b), - !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t", - "mov.b32\tparam$a, temp_param_reg;"), - [(MoveToParam (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>; -def MoveToParamI8 : NVPTXInst<(outs), - (ins Int8Regs:$val, i32imm:$a, i32imm:$b), - !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t", - "mov.b32\tparam$a, temp_param_reg;"), - [(MoveToParam (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>; def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">; def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">; def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">; -def StoreRetvalI8 : StoreRetvalInst<Int8Regs, ".b8">; - -//def StoreRetvalI16 : NVPTXInst<(outs), (ins Int16Regs:$val, i32imm:$a), -// !strconcat("\{\n\t", -// !strconcat(".reg .b32 temp_retval_reg;\n\t", -// !strconcat("cvt.u32.u16\ttemp_retval_reg, $val;\n\t", -// "st.param.b32\t[func_retval0+$a], temp_retval_reg;\n\t\}"))), -// [(StoreRetval (i32 imm:$a), Int16Regs:$val)]>; -//def StoreRetvalI8 : NVPTXInst<(outs), (ins Int8Regs:$val, i32imm:$a), -// !strconcat("\{\n\t", -// !strconcat(".reg .b32 temp_retval_reg;\n\t", -// !strconcat("cvt.u32.u16\ttemp_retval_reg, $val;\n\t", -// "st.param.b32\t[func_retval0+$a], 
temp_retval_reg;\n\t\}"))), -// [(StoreRetval (i32 imm:$a), Int8Regs:$val)]>; +def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">; +def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">; +def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">; +def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">; +def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">; +def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">; +def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">; +def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">; def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">; def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">; - -def MoveRetvalI64 : MoveRetvalInst<Int64Regs, ".b64">; -def MoveRetvalI32 : MoveRetvalInst<Int32Regs, ".b32">; -def MoveRetvalI16 : MoveRetvalInst<Int16Regs, ".b16">; -def MoveRetvalI8 : MoveRetvalInst<Int8Regs, ".b8">; -def MoveRetvalF64 : MoveRetvalInst<Float64Regs, ".f64">; -def MoveRetvalF32 : MoveRetvalInst<Float32Regs, ".f32">; - -def MoveToRetvalI64 : MoveToRetvalInst<Int64Regs, ".b64">; -def MoveToRetvalI32 : MoveToRetvalInst<Int32Regs, ".b32">; -def MoveToRetvalF64 : MoveToRetvalInst<Float64Regs, ".f64">; -def MoveToRetvalF32 : MoveToRetvalInst<Float32Regs, ".f32">; -def MoveToRetvalI16 : NVPTXInst<(outs), (ins i32imm:$num, Int16Regs:$val), - "cvt.u32.u16\tfunc_retval$num, $val;", - [(MoveToRetval (i32 imm:$num), Int16Regs:$val)]>; -def MoveToRetvalI8 : NVPTXInst<(outs), (ins i32imm:$num, Int8Regs:$val), - "cvt.u32.u16\tfunc_retval$num, $val;", - [(MoveToRetval (i32 imm:$num), Int8Regs:$val)]>; +def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">; +def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">; +def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">; def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>; def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>; @@ -2056,7 +1870,6 @@ class LastCallArgInst<NVPTXRegClass regclass> : def CallArgI64 : CallArgInst<Int64Regs>; def CallArgI32 : CallArgInst<Int32Regs>; def CallArgI16 : CallArgInst<Int16Regs>; -def CallArgI8 : CallArgInst<Int8Regs>; def CallArgF64 : CallArgInst<Float64Regs>; def CallArgF32 : CallArgInst<Float32Regs>; @@ -2064,7 +1877,6 @@ def CallArgF32 : CallArgInst<Float32Regs>; def LastCallArgI64 : LastCallArgInst<Int64Regs>; def LastCallArgI32 : LastCallArgInst<Int32Regs>; def LastCallArgI16 : LastCallArgInst<Int16Regs>; -def LastCallArgI8 : LastCallArgInst<Int8Regs>; def LastCallArgF64 : LastCallArgInst<Float64Regs>; def LastCallArgF32 : LastCallArgInst<Float32Regs>; @@ -2124,9 +1936,6 @@ def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">; def MoveParamI16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src), "cvt.u16.u32\t$dst, $src;", [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>; -def MoveParamI8 : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src), - "cvt.u16.u32\t$dst, $src;", - [(set Int8Regs:$dst, (MoveParam Int8Regs:$src))]>; def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">; def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">; @@ -2138,7 +1947,6 @@ class PseudoUseParamInst<NVPTXRegClass regclass> : def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>; def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>; def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>; -def PseudoUseParamI8 : PseudoUseParamInst<Int8Regs>; def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>; def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>; @@ -2180,7 +1988,7 @@ 
multiclass LD<NVPTXRegClass regclass> { } let mayLoad=1, neverHasSideEffects=1 in { -defm LD_i8 : LD<Int8Regs>; +defm LD_i8 : LD<Int16Regs>; defm LD_i16 : LD<Int16Regs>; defm LD_i32 : LD<Int32Regs>; defm LD_i64 : LD<Int64Regs>; @@ -2222,7 +2030,7 @@ multiclass ST<NVPTXRegClass regclass> { } let mayStore=1, neverHasSideEffects=1 in { -defm ST_i8 : ST<Int8Regs>; +defm ST_i8 : ST<Int16Regs>; defm ST_i16 : ST<Int16Regs>; defm ST_i32 : ST<Int32Regs>; defm ST_i64 : ST<Int64Regs>; @@ -2306,7 +2114,7 @@ multiclass LD_VEC<NVPTXRegClass regclass> { []>; } let mayLoad=1, neverHasSideEffects=1 in { -defm LDV_i8 : LD_VEC<Int8Regs>; +defm LDV_i8 : LD_VEC<Int16Regs>; defm LDV_i16 : LD_VEC<Int16Regs>; defm LDV_i32 : LD_VEC<Int32Regs>; defm LDV_i64 : LD_VEC<Int64Regs>; @@ -2389,7 +2197,7 @@ multiclass ST_VEC<NVPTXRegClass regclass> { []>; } let mayStore=1, neverHasSideEffects=1 in { -defm STV_i8 : ST_VEC<Int8Regs>; +defm STV_i8 : ST_VEC<Int16Regs>; defm STV_i16 : ST_VEC<Int16Regs>; defm STV_i32 : ST_VEC<Int32Regs>; defm STV_i64 : ST_VEC<Int64Regs>; @@ -2400,291 +2208,6 @@ defm STV_f64 : ST_VEC<Float64Regs>; //---- Conversion ---- -multiclass CVT_INT_TO_FP <string OpStr, SDNode OpNode> { -// FIXME: need to add f16 support -// def CVTf16i8 : -// NVPTXInst<(outs Float16Regs:$d), (ins Int8Regs:$a), -// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "8 \t$d, $a;"), -// [(set Float16Regs:$d, (OpNode Int8Regs:$a))]>; -// def CVTf16i16 : -// NVPTXInst<(outs Float16Regs:$d), (ins Int16Regs:$a), -// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "16 \t$d, $a;"), -// [(set Float16Regs:$d, (OpNode Int16Regs:$a))]>; -// def CVTf16i32 : -// NVPTXInst<(outs Float16Regs:$d), (ins Int32Regs:$a), -// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "32 \t$d, $a;"), -// [(set Float16Regs:$d, (OpNode Int32Regs:$a))]>; -// def CVTf16i64: -// NVPTXInst<(outs Float16Regs:$d), (ins Int64Regs:$a), -// !strconcat(!strconcat("cvt.rn.f32.", OpStr), "64 \t$d, $a;"), -// [(set Float32Regs:$d, (OpNode Int64Regs:$a))]>; - - def CVTf32i1 : - NVPTXInst<(outs Float32Regs:$d), (ins Int1Regs:$a), - "selp.f32 \t$d, 1.0, 0.0, $a;", - [(set Float32Regs:$d, (OpNode Int1Regs:$a))]>; - def CVTf32i8 : - NVPTXInst<(outs Float32Regs:$d), (ins Int8Regs:$a), - !strconcat(!strconcat("cvt.rn.f32.", OpStr), "8 \t$d, $a;"), - [(set Float32Regs:$d, (OpNode Int8Regs:$a))]>; - def CVTf32i16 : - NVPTXInst<(outs Float32Regs:$d), (ins Int16Regs:$a), - !strconcat(!strconcat("cvt.rn.f32.", OpStr), "16 \t$d, $a;"), - [(set Float32Regs:$d, (OpNode Int16Regs:$a))]>; - def CVTf32i32 : - NVPTXInst<(outs Float32Regs:$d), (ins Int32Regs:$a), - !strconcat(!strconcat("cvt.rn.f32.", OpStr), "32 \t$d, $a;"), - [(set Float32Regs:$d, (OpNode Int32Regs:$a))]>; - def CVTf32i64: - NVPTXInst<(outs Float32Regs:$d), (ins Int64Regs:$a), - !strconcat(!strconcat("cvt.rn.f32.", OpStr), "64 \t$d, $a;"), - [(set Float32Regs:$d, (OpNode Int64Regs:$a))]>; - - def CVTf64i1 : - NVPTXInst<(outs Float64Regs:$d), (ins Int1Regs:$a), - "selp.f64 \t$d, 1.0, 0.0, $a;", - [(set Float64Regs:$d, (OpNode Int1Regs:$a))]>; - def CVTf64i8 : - NVPTXInst<(outs Float64Regs:$d), (ins Int8Regs:$a), - !strconcat(!strconcat("cvt.rn.f64.", OpStr), "8 \t$d, $a;"), - [(set Float64Regs:$d, (OpNode Int8Regs:$a))]>; - def CVTf64i16 : - NVPTXInst<(outs Float64Regs:$d), (ins Int16Regs:$a), - !strconcat(!strconcat("cvt.rn.f64.", OpStr), "16 \t$d, $a;"), - [(set Float64Regs:$d, (OpNode Int16Regs:$a))]>; - def CVTf64i32 : - NVPTXInst<(outs Float64Regs:$d), (ins Int32Regs:$a), - !strconcat(!strconcat("cvt.rn.f64.", OpStr), "32 \t$d, 
$a;"), - [(set Float64Regs:$d, (OpNode Int32Regs:$a))]>; - def CVTf64i64: - NVPTXInst<(outs Float64Regs:$d), (ins Int64Regs:$a), - !strconcat(!strconcat("cvt.rn.f64.", OpStr), "64 \t$d, $a;"), - [(set Float64Regs:$d, (OpNode Int64Regs:$a))]>; -} - -defm Sint_to_fp : CVT_INT_TO_FP <"s", sint_to_fp>; -defm Uint_to_fp : CVT_INT_TO_FP <"u", uint_to_fp>; - -multiclass CVT_FP_TO_INT <string OpStr, SDNode OpNode> { -// FIXME: need to add f16 support -// def CVTi8f16: -// NVPTXInst<(outs Int8Regs:$d), (ins Float16Regs:$a), -// !strconcat(!strconcat("cvt.rzi.", OpStr), "8.f16 $d, $a;"), -// [(set Int8Regs:$d, (OpNode Float16Regs:$a))]>; - def CVTi8f32_ftz: - NVPTXInst<(outs Int8Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "16.f32 \t$d, $a;"), - [(set Int8Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>; - def CVTi8f32: - NVPTXInst<(outs Int8Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f32 \t$d, $a;"), - [(set Int8Regs:$d, (OpNode Float32Regs:$a))]>; - def CVTi8f64: - NVPTXInst<(outs Int8Regs:$d), (ins Float64Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f64 \t$d, $a;"), - [(set Int8Regs:$d, (OpNode Float64Regs:$a))]>; - -// FIXME: need to add f16 support -// def CVTi16f16: -// NVPTXInst<(outs Int16Regs:$d), (ins Float16Regs:$a), -// !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f16 \t$d, $a;"), -// [(set Int16Regs:$d, (OpNode Float16Regs:$a))]>; - def CVTi16f32_ftz: - NVPTXInst<(outs Int16Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "16.f32 \t$d, $a;"), - [(set Int16Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>; - def CVTi16f32: - NVPTXInst<(outs Int16Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f32 \t$d, $a;"), - [(set Int16Regs:$d, (OpNode Float32Regs:$a))]>; - def CVTi16f64: - NVPTXInst<(outs Int16Regs:$d), (ins Float64Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f64 \t$d, $a;"), - [(set Int16Regs:$d, (OpNode Float64Regs:$a))]>; - -// FIXME: need to add f16 support -// def CVTi32f16: def CVTi32f16: -// NVPTXInst<(outs Int32Regs:$d), (ins Float16Regs:$a), -// !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f16 \t$d, $a;"), -// [(set Int32Regs:$d, (OpNode Float16Regs:$a))]>; - def CVTi32f32_ftz: - NVPTXInst<(outs Int32Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "32.f32 \t$d, $a;"), - [(set Int32Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>; - def CVTi32f32: - NVPTXInst<(outs Int32Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f32 \t$d, $a;"), - [(set Int32Regs:$d, (OpNode Float32Regs:$a))]>; - def CVTi32f64: - NVPTXInst<(outs Int32Regs:$d), (ins Float64Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f64 \t$d, $a;"), - [(set Int32Regs:$d, (OpNode Float64Regs:$a))]>; - -// FIXME: need to add f16 support -// def CVTi64f16: -// NVPTXInst<(outs Int64Regs:$d), (ins Float16Regs:$a), -// !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f16 \t$d, $a;"), -// [(set Int64Regs:$d, (OpNode Float16Regs:$a))]>; - def CVTi64f32_ftz: - NVPTXInst<(outs Int64Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "64.f32 \t$d, $a;"), - [(set Int64Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>; - def CVTi64f32: - NVPTXInst<(outs Int64Regs:$d), (ins Float32Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f32 \t$d, $a;"), - [(set Int64Regs:$d, (OpNode Float32Regs:$a))]>; - def CVTi64f64: - NVPTXInst<(outs 
Int64Regs:$d), (ins Float64Regs:$a), - !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f64 \t$d, $a;"), - [(set Int64Regs:$d, (OpNode Float64Regs:$a))]>; -} - -defm Fp_to_sint : CVT_FP_TO_INT <"s", fp_to_sint>; -defm Fp_to_uint : CVT_FP_TO_INT <"u", fp_to_uint>; - -multiclass INT_EXTEND_UNSIGNED_1 <SDNode OpNode> { - def ext1to8: - NVPTXInst<(outs Int8Regs:$d), (ins Int1Regs:$a), - "selp.u16 \t$d, 1, 0, $a;", - [(set Int8Regs:$d, (OpNode Int1Regs:$a))]>; - def ext1to16: - NVPTXInst<(outs Int16Regs:$d), (ins Int1Regs:$a), - "selp.u16 \t$d, 1, 0, $a;", - [(set Int16Regs:$d, (OpNode Int1Regs:$a))]>; - def ext1to32: - NVPTXInst<(outs Int32Regs:$d), (ins Int1Regs:$a), - "selp.u32 \t$d, 1, 0, $a;", - [(set Int32Regs:$d, (OpNode Int1Regs:$a))]>; - def ext1to64: - NVPTXInst<(outs Int64Regs:$d), (ins Int1Regs:$a), - "selp.u64 \t$d, 1, 0, $a;", - [(set Int64Regs:$d, (OpNode Int1Regs:$a))]>; -} - -multiclass INT_EXTEND_SIGNED_1 <SDNode OpNode> { - def ext1to8: - NVPTXInst<(outs Int8Regs:$d), (ins Int1Regs:$a), - "selp.s16 \t$d, -1, 0, $a;", - [(set Int8Regs:$d, (OpNode Int1Regs:$a))]>; - def ext1to16: - NVPTXInst<(outs Int16Regs:$d), (ins Int1Regs:$a), - "selp.s16 \t$d, -1, 0, $a;", - [(set Int16Regs:$d, (OpNode Int1Regs:$a))]>; - def ext1to32: - NVPTXInst<(outs Int32Regs:$d), (ins Int1Regs:$a), - "selp.s32 \t$d, -1, 0, $a;", - [(set Int32Regs:$d, (OpNode Int1Regs:$a))]>; - def ext1to64: - NVPTXInst<(outs Int64Regs:$d), (ins Int1Regs:$a), - "selp.s64 \t$d, -1, 0, $a;", - [(set Int64Regs:$d, (OpNode Int1Regs:$a))]>; -} - -multiclass INT_EXTEND <string OpStr, SDNode OpNode> { - // All Int8Regs are emiited as 16bit registers in ptx. - // And there is no selp.u8 in ptx. - def ext8to16: - NVPTXInst<(outs Int16Regs:$d), (ins Int8Regs:$a), - !strconcat("cvt.", !strconcat(OpStr, !strconcat("16.", - !strconcat(OpStr, "8 \t$d, $a;")))), - [(set Int16Regs:$d, (OpNode Int8Regs:$a))]>; - def ext8to32: - NVPTXInst<(outs Int32Regs:$d), (ins Int8Regs:$a), - !strconcat("cvt.", !strconcat(OpStr, !strconcat("32.", - !strconcat(OpStr, "8 \t$d, $a;")))), - [(set Int32Regs:$d, (OpNode Int8Regs:$a))]>; - def ext8to64: - NVPTXInst<(outs Int64Regs:$d), (ins Int8Regs:$a), - !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.", - !strconcat(OpStr, "8 \t$d, $a;")))), - [(set Int64Regs:$d, (OpNode Int8Regs:$a))]>; - def ext16to32: - NVPTXInst<(outs Int32Regs:$d), (ins Int16Regs:$a), - !strconcat("cvt.", !strconcat(OpStr, !strconcat("32.", - !strconcat(OpStr, "16 \t$d, $a;")))), - [(set Int32Regs:$d, (OpNode Int16Regs:$a))]>; - def ext16to64: - NVPTXInst<(outs Int64Regs:$d), (ins Int16Regs:$a), - !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.", - !strconcat(OpStr, "16 \t$d, $a;")))), - [(set Int64Regs:$d, (OpNode Int16Regs:$a))]>; - def ext32to64: - NVPTXInst<(outs Int64Regs:$d), (ins Int32Regs:$a), - !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.", - !strconcat(OpStr, "32 \t$d, $a;")))), - [(set Int64Regs:$d, (OpNode Int32Regs:$a))]>; -} - -defm Sint_extend_1 : INT_EXTEND_SIGNED_1<sext>; -defm Zint_extend_1 : INT_EXTEND_UNSIGNED_1<zext>; -defm Aint_extend_1 : INT_EXTEND_UNSIGNED_1<anyext>; - -defm Sint_extend : INT_EXTEND <"s", sext>; -defm Zint_extend : INT_EXTEND <"u", zext>; -defm Aint_extend : INT_EXTEND <"u", anyext>; - -class TRUNC_to1_asm<string sz> { - string s = !strconcat("{{\n\t", - !strconcat(".reg ", - !strconcat(sz, - !strconcat(" temp;\n\t", - !strconcat("and", - !strconcat(sz, - !strconcat("\t temp, $a, 1;\n\t", - !strconcat("setp", - !strconcat(sz, ".eq \t $d, temp, 1;\n\t}}"))))))))); -} - -def 
TRUNC_64to32 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a), - "cvt.u32.u64 \t$d, $a;", - [(set Int32Regs:$d, (trunc Int64Regs:$a))]>; -def TRUNC_64to16 : NVPTXInst<(outs Int16Regs:$d), (ins Int64Regs:$a), - "cvt.u16.u64 \t$d, $a;", - [(set Int16Regs:$d, (trunc Int64Regs:$a))]>; -def TRUNC_64to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int64Regs:$a), - "cvt.u8.u64 \t$d, $a;", - [(set Int8Regs:$d, (trunc Int64Regs:$a))]>; -def TRUNC_32to16 : NVPTXInst<(outs Int16Regs:$d), (ins Int32Regs:$a), - "cvt.u16.u32 \t$d, $a;", - [(set Int16Regs:$d, (trunc Int32Regs:$a))]>; -def TRUNC_32to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int32Regs:$a), - "cvt.u8.u32 \t$d, $a;", - [(set Int8Regs:$d, (trunc Int32Regs:$a))]>; -def TRUNC_16to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int16Regs:$a), - "cvt.u8.u16 \t$d, $a;", - [(set Int8Regs:$d, (trunc Int16Regs:$a))]>; -def TRUNC_64to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a), - TRUNC_to1_asm<".b64">.s, - [(set Int1Regs:$d, (trunc Int64Regs:$a))]>; -def TRUNC_32to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a), - TRUNC_to1_asm<".b32">.s, - [(set Int1Regs:$d, (trunc Int32Regs:$a))]>; -def TRUNC_16to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int16Regs:$a), - TRUNC_to1_asm<".b16">.s, - [(set Int1Regs:$d, (trunc Int16Regs:$a))]>; -def TRUNC_8to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int8Regs:$a), - TRUNC_to1_asm<".b16">.s, - [(set Int1Regs:$d, (trunc Int8Regs:$a))]>; - -// Select instructions -def : Pat<(select Int32Regs:$pred, Int8Regs:$a, Int8Regs:$b), - (SELECTi8rr Int8Regs:$a, Int8Regs:$b, (TRUNC_32to1 Int32Regs:$pred))>; -def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b), - (SELECTi16rr Int16Regs:$a, Int16Regs:$b, - (TRUNC_32to1 Int32Regs:$pred))>; -def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b), - (SELECTi32rr Int32Regs:$a, Int32Regs:$b, - (TRUNC_32to1 Int32Regs:$pred))>; -def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b), - (SELECTi64rr Int64Regs:$a, Int64Regs:$b, - (TRUNC_32to1 Int32Regs:$pred))>; -def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b), - (SELECTf32rr Float32Regs:$a, Float32Regs:$b, - (TRUNC_32to1 Int32Regs:$pred))>; -def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b), - (SELECTf64rr Float64Regs:$a, Float64Regs:$b, - (TRUNC_32to1 Int32Regs:$pred))>; - class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn, NVPTXRegClass regclassOut> : NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a), @@ -2696,29 +2219,209 @@ def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>; def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>; def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>; +// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where +// we cannot specify floating-point literals in isel patterns. Therefore, we +// use an integer selp to select either 1 or 0 and then cvt to floating-point. 
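// Illustrative sketch only (an assumption, not text from the patch): with the
// pred->fp patterns below, e.g. (CVT_f32_u32 (SELP_u32ii 1, 0, ...), CvtRN),
// a predicate-to-f32 conversion lowers to a PTX sequence along the lines of
//
//   selp.u32        %r1, 1, 0, %p1;   // materialize integer 1 or 0 from the pred
//   cvt.rn.f32.u32  %f1, %r1;         // then convert that integer to f32
//
// where %p1, %r1 and %f1 are hypothetical register names.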
+ +// sint -> f32 +def : Pat<(f32 (sint_to_fp Int1Regs:$a)), + (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>; +def : Pat<(f32 (sint_to_fp Int16Regs:$a)), + (CVT_f32_s16 Int16Regs:$a, CvtRN)>; +def : Pat<(f32 (sint_to_fp Int32Regs:$a)), + (CVT_f32_s32 Int32Regs:$a, CvtRN)>; +def : Pat<(f32 (sint_to_fp Int64Regs:$a)), + (CVT_f32_s64 Int64Regs:$a, CvtRN)>; + +// uint -> f32 +def : Pat<(f32 (uint_to_fp Int1Regs:$a)), + (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>; +def : Pat<(f32 (uint_to_fp Int16Regs:$a)), + (CVT_f32_u16 Int16Regs:$a, CvtRN)>; +def : Pat<(f32 (uint_to_fp Int32Regs:$a)), + (CVT_f32_u32 Int32Regs:$a, CvtRN)>; +def : Pat<(f32 (uint_to_fp Int64Regs:$a)), + (CVT_f32_u64 Int64Regs:$a, CvtRN)>; + +// sint -> f64 +def : Pat<(f64 (sint_to_fp Int1Regs:$a)), + (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>; +def : Pat<(f64 (sint_to_fp Int16Regs:$a)), + (CVT_f64_s16 Int16Regs:$a, CvtRN)>; +def : Pat<(f64 (sint_to_fp Int32Regs:$a)), + (CVT_f64_s32 Int32Regs:$a, CvtRN)>; +def : Pat<(f64 (sint_to_fp Int64Regs:$a)), + (CVT_f64_s64 Int64Regs:$a, CvtRN)>; + +// uint -> f64 +def : Pat<(f64 (uint_to_fp Int1Regs:$a)), + (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>; +def : Pat<(f64 (uint_to_fp Int16Regs:$a)), + (CVT_f64_u16 Int16Regs:$a, CvtRN)>; +def : Pat<(f64 (uint_to_fp Int32Regs:$a)), + (CVT_f64_u32 Int32Regs:$a, CvtRN)>; +def : Pat<(f64 (uint_to_fp Int64Regs:$a)), + (CVT_f64_u64 Int64Regs:$a, CvtRN)>; + + +// f32 -> sint +def : Pat<(i1 (fp_to_sint Float32Regs:$a)), + (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>; +def : Pat<(i16 (fp_to_sint Float32Regs:$a)), + (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(i16 (fp_to_sint Float32Regs:$a)), + (CVT_s16_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(i32 (fp_to_sint Float32Regs:$a)), + (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(i32 (fp_to_sint Float32Regs:$a)), + (CVT_s32_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(i64 (fp_to_sint Float32Regs:$a)), + (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(i64 (fp_to_sint Float32Regs:$a)), + (CVT_s64_f32 Float32Regs:$a, CvtRZI)>; + +// f32 -> uint +def : Pat<(i1 (fp_to_uint Float32Regs:$a)), + (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>; +def : Pat<(i16 (fp_to_uint Float32Regs:$a)), + (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(i16 (fp_to_uint Float32Regs:$a)), + (CVT_u16_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(i32 (fp_to_uint Float32Regs:$a)), + (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(i32 (fp_to_uint Float32Regs:$a)), + (CVT_u32_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(i64 (fp_to_uint Float32Regs:$a)), + (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(i64 (fp_to_uint Float32Regs:$a)), + (CVT_u64_f32 Float32Regs:$a, CvtRZI)>; + +// f64 -> sint +def : Pat<(i1 (fp_to_sint Float64Regs:$a)), + (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>; +def : Pat<(i16 (fp_to_sint Float64Regs:$a)), + (CVT_s16_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(i32 (fp_to_sint Float64Regs:$a)), + (CVT_s32_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(i64 (fp_to_sint Float64Regs:$a)), + (CVT_s64_f64 Float64Regs:$a, CvtRZI)>; + +// f64 -> uint +def : Pat<(i1 (fp_to_uint Float64Regs:$a)), + (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>; +def : Pat<(i16 (fp_to_uint Float64Regs:$a)), + (CVT_u16_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(i32 (fp_to_uint Float64Regs:$a)), + 
(CVT_u32_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(i64 (fp_to_uint Float64Regs:$a)), + (CVT_u64_f64 Float64Regs:$a, CvtRZI)>; + +// sext i1 +def : Pat<(i16 (sext Int1Regs:$a)), + (SELP_s16ii -1, 0, Int1Regs:$a)>; +def : Pat<(i32 (sext Int1Regs:$a)), + (SELP_s32ii -1, 0, Int1Regs:$a)>; +def : Pat<(i64 (sext Int1Regs:$a)), + (SELP_s64ii -1, 0, Int1Regs:$a)>; + +// zext i1 +def : Pat<(i16 (zext Int1Regs:$a)), + (SELP_u16ii 1, 0, Int1Regs:$a)>; +def : Pat<(i32 (zext Int1Regs:$a)), + (SELP_u32ii 1, 0, Int1Regs:$a)>; +def : Pat<(i64 (zext Int1Regs:$a)), + (SELP_u64ii 1, 0, Int1Regs:$a)>; + +// anyext i1 +def : Pat<(i16 (anyext Int1Regs:$a)), + (SELP_u16ii -1, 0, Int1Regs:$a)>; +def : Pat<(i32 (anyext Int1Regs:$a)), + (SELP_u32ii -1, 0, Int1Regs:$a)>; +def : Pat<(i64 (anyext Int1Regs:$a)), + (SELP_u64ii -1, 0, Int1Regs:$a)>; + +// sext i16 +def : Pat<(i32 (sext Int16Regs:$a)), + (CVT_s32_s16 Int16Regs:$a, CvtNONE)>; +def : Pat<(i64 (sext Int16Regs:$a)), + (CVT_s64_s16 Int16Regs:$a, CvtNONE)>; + +// zext i16 +def : Pat<(i32 (zext Int16Regs:$a)), + (CVT_u32_u16 Int16Regs:$a, CvtNONE)>; +def : Pat<(i64 (zext Int16Regs:$a)), + (CVT_u64_u16 Int16Regs:$a, CvtNONE)>; + +// anyext i16 +def : Pat<(i32 (anyext Int16Regs:$a)), + (CVT_u32_u16 Int16Regs:$a, CvtNONE)>; +def : Pat<(i64 (anyext Int16Regs:$a)), + (CVT_u64_u16 Int16Regs:$a, CvtNONE)>; + +// sext i32 +def : Pat<(i64 (sext Int32Regs:$a)), + (CVT_s64_s32 Int32Regs:$a, CvtNONE)>; + +// zext i32 +def : Pat<(i64 (zext Int32Regs:$a)), + (CVT_u64_u32 Int32Regs:$a, CvtNONE)>; + +// anyext i32 +def : Pat<(i64 (anyext Int32Regs:$a)), + (CVT_u64_u32 Int32Regs:$a, CvtNONE)>; + + +// truncate i64 +def : Pat<(i32 (trunc Int64Regs:$a)), + (CVT_u32_u64 Int64Regs:$a, CvtNONE)>; +def : Pat<(i16 (trunc Int64Regs:$a)), + (CVT_u16_u64 Int64Regs:$a, CvtNONE)>; +def : Pat<(i1 (trunc Int64Regs:$a)), + (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>; + +// truncate i32 +def : Pat<(i16 (trunc Int32Regs:$a)), + (CVT_u16_u32 Int32Regs:$a, CvtNONE)>; +def : Pat<(i1 (trunc Int32Regs:$a)), + (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>; + +// truncate i16 +def : Pat<(i1 (trunc Int16Regs:$a)), + (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>; + +// sext_inreg +def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>; +def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>; +def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>; +def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>; +def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>; +def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>; + + +// Select instructions with 32-bit predicates +def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b), + (SELP_b16rr Int16Regs:$a, Int16Regs:$b, + (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>; +def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b), + (SELP_b32rr Int32Regs:$a, Int32Regs:$b, + (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>; +def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b), + (SELP_b64rr Int64Regs:$a, Int64Regs:$b, + (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>; +def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b), + (SELP_f32rr Float32Regs:$a, Float32Regs:$b, + (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>; +def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b), + (SELP_f64rr Float64Regs:$a, Float64Regs:$b, + (SETP_b32ri (ANDb32ri 
Int32Regs:$pred, 1), 1, CmpEQ))>; + + // pack a set of smaller int registers to a larger int register -def V4I8toI32 : NVPTXInst<(outs Int32Regs:$d), - (ins Int8Regs:$s1, Int8Regs:$s2, - Int8Regs:$s3, Int8Regs:$s4), - !strconcat("{{\n\t.reg .b8\t%t<4>;", - !strconcat("\n\tcvt.u8.u8\t%t0, $s1;", - !strconcat("\n\tcvt.u8.u8\t%t1, $s2;", - !strconcat("\n\tcvt.u8.u8\t%t2, $s3;", - !strconcat("\n\tcvt.u8.u8\t%t3, $s4;", - "\n\tmov.b32\t$d, {%t0, %t1, %t2, %t3};\n\t}}"))))), - []>; def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d), (ins Int16Regs:$s1, Int16Regs:$s2, Int16Regs:$s3, Int16Regs:$s4), "mov.b64\t$d, {{$s1, $s2, $s3, $s4}};", []>; -def V2I8toI16 : NVPTXInst<(outs Int16Regs:$d), - (ins Int8Regs:$s1, Int8Regs:$s2), - !strconcat("{{\n\t.reg .b8\t%t<2>;", - !strconcat("\n\tcvt.u8.u8\t%t0, $s1;", - !strconcat("\n\tcvt.u8.u8\t%t1, $s2;", - "\n\tmov.b16\t$d, {%t0, %t1};\n\t}}"))), - []>; def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d), (ins Int16Regs:$s1, Int16Regs:$s2), "mov.b32\t$d, {{$s1, $s2}};", @@ -2733,28 +2436,11 @@ def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d), []>; // unpack a larger int register to a set of smaller int registers -def I32toV4I8 : NVPTXInst<(outs Int8Regs:$d1, Int8Regs:$d2, - Int8Regs:$d3, Int8Regs:$d4), - (ins Int32Regs:$s), - !strconcat("{{\n\t.reg .b8\t%t<4>;", - !strconcat("\n\tmov.b32\t{%t0, %t1, %t2, %t3}, $s;", - !strconcat("\n\tcvt.u8.u8\t$d1, %t0;", - !strconcat("\n\tcvt.u8.u8\t$d2, %t1;", - !strconcat("\n\tcvt.u8.u8\t$d3, %t2;", - "\n\tcvt.u8.u8\t$d4, %t3;\n\t}}"))))), - []>; def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2, Int16Regs:$d3, Int16Regs:$d4), (ins Int64Regs:$s), "mov.b64\t{{$d1, $d2, $d3, $d4}}, $s;", []>; -def I16toV2I8 : NVPTXInst<(outs Int8Regs:$d1, Int8Regs:$d2), - (ins Int16Regs:$s), - !strconcat("{{\n\t.reg .b8\t%t<2>;", - !strconcat("\n\tmov.b16\t{%t0, %t1}, $s;", - !strconcat("\n\tcvt.u8.u8\t$d1, %t0;", - "\n\tcvt.u8.u8\t$d2, %t1;\n\t}}"))), - []>; def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2), (ins Int32Regs:$s), "mov.b32\t{{$d1, $d2}}, $s;", @@ -2768,21 +2454,75 @@ def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2), "mov.b64\t{{$d1, $d2}}, $s;", []>; -def FPRound_ftz : NVPTXInst<(outs Float32Regs:$d), (ins Float64Regs:$a), - "cvt.rn.ftz.f32.f64 \t$d, $a;", - [(set Float32Regs:$d, (fround Float64Regs:$a))]>, Requires<[doF32FTZ]>; - -def FPRound : NVPTXInst<(outs Float32Regs:$d), (ins Float64Regs:$a), - "cvt.rn.f32.f64 \t$d, $a;", - [(set Float32Regs:$d, (fround Float64Regs:$a))]>; - -def FPExtend_ftz : NVPTXInst<(outs Float64Regs:$d), (ins Float32Regs:$a), - "cvt.ftz.f64.f32 \t$d, $a;", - [(set Float64Regs:$d, (fextend Float32Regs:$a))]>, Requires<[doF32FTZ]>; - -def FPExtend : NVPTXInst<(outs Float64Regs:$d), (ins Float32Regs:$a), - "cvt.f64.f32 \t$d, $a;", - [(set Float64Regs:$d, (fextend Float32Regs:$a))]>; +// Count leading zeros +def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a), + "clz.b32\t$d, $a;", + []>; +def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a), + "clz.b64\t$d, $a;", + []>; + +// 32-bit has a direct PTX instruction +def : Pat<(ctlz Int32Regs:$a), + (CLZr32 Int32Regs:$a)>; +def : Pat<(ctlz_zero_undef Int32Regs:$a), + (CLZr32 Int32Regs:$a)>; + +// For 64-bit, the result in PTX is actually 32-bit so we zero-extend +// to 64-bit to match the LLVM semantics +def : Pat<(ctlz Int64Regs:$a), + (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>; +def : Pat<(ctlz_zero_undef Int64Regs:$a), + (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>; + +// For 16-bit, we 
zero-extend to 32-bit, then trunc the result back +// to 16-bits (ctlz of a 16-bit value is guaranteed to require less +// than 16 bits to store). We also need to subtract 16 because the +// high-order 16 zeros were counted. +def : Pat<(ctlz Int16Regs:$a), + (SUBi16ri (CVT_u16_u32 (CLZr32 + (CVT_u32_u16 Int16Regs:$a, CvtNONE)), + CvtNONE), 16)>; +def : Pat<(ctlz_zero_undef Int16Regs:$a), + (SUBi16ri (CVT_u16_u32 (CLZr32 + (CVT_u32_u16 Int16Regs:$a, CvtNONE)), + CvtNONE), 16)>; + +// Population count +def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a), + "popc.b32\t$d, $a;", + []>; +def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a), + "popc.b64\t$d, $a;", + []>; + +// 32-bit has a direct PTX instruction +def : Pat<(ctpop Int32Regs:$a), + (POPCr32 Int32Regs:$a)>; + +// For 64-bit, the result in PTX is actually 32-bit so we zero-extend +// to 64-bit to match the LLVM semantics +def : Pat<(ctpop Int64Regs:$a), + (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>; + +// For 16-bit, we zero-extend to 32-bit, then trunc the result back +// to 16-bits (ctpop of a 16-bit value is guaranteed to require less +// than 16 bits to store) +def : Pat<(ctpop Int16Regs:$a), + (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), + CvtNONE)>; + +// fround f64 -> f32 +def : Pat<(f32 (fround Float64Regs:$a)), + (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(f32 (fround Float64Regs:$a)), + (CVT_f32_f64 Float64Regs:$a, CvtRN)>; + +// fextend f32 -> f64 +def : Pat<(f64 (fextend Float32Regs:$a)), + (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(f64 (fextend Float32Regs:$a)), + (CVT_f64_f32 Float32Regs:$a, CvtNONE)>; def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone, [SDNPHasChain, SDNPOptInGlue]>; @@ -2810,8 +2550,8 @@ let isTerminator=1 in { [(br bb:$target)]>; } -def : Pat<(brcond Int32Regs:$a, bb:$target), (CBranch - (ISetUNEi32ri_p Int32Regs:$a, 0), bb:$target)>; +def : Pat<(brcond Int32Regs:$a, bb:$target), + (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>; // SelectionDAGBuilder::visitSWitchCase() will invert the condition of a // conditional branch if diff --git a/lib/Target/NVPTX/NVPTXIntrinsics.td b/lib/Target/NVPTX/NVPTXIntrinsics.td index 24037ca..14049b1 100644 --- a/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -82,49 +82,36 @@ def INT_MEMBAR_SYS : MEMBAR<"membar.sys;", int_nvvm_membar_sys>; //----------------------------------- // Map min(1.0, max(0.0, x)) to sat(x) -multiclass SAT<NVPTXRegClass regclass, Operand fimm, Intrinsic IntMinOp, - Intrinsic IntMaxOp, PatLeaf f0, PatLeaf f1, string OpStr> { - - // fmin(1.0, fmax(0.0, x)) => sat(x) - def SAT11 : NVPTXInst<(outs regclass:$dst), - (ins fimm:$srcf0, fimm:$srcf1, regclass:$src), - OpStr, - [(set regclass:$dst, (IntMinOp f1:$srcf0 , - (IntMaxOp f0:$srcf1, regclass:$src)))]>; - - // fmin(1.0, fmax(x, 0.0)) => sat(x) - def SAT12 : NVPTXInst<(outs regclass:$dst), - (ins fimm:$srcf0, fimm:$srcf1, regclass:$src), - OpStr, - [(set regclass:$dst, (IntMinOp f1:$srcf0 , - (IntMaxOp regclass:$src, f0:$srcf1)))]>; - - // fmin(fmax(0.0, x), 1.0) => sat(x) - def SAT13 : NVPTXInst<(outs regclass:$dst), - (ins fimm:$srcf0, fimm:$srcf1, regclass:$src), - OpStr, - [(set regclass:$dst, (IntMinOp - (IntMaxOp f0:$srcf0, regclass:$src), f1:$srcf1))]>; - - // fmin(fmax(x, 0.0), 1.0) => sat(x) - def SAT14 : NVPTXInst<(outs regclass:$dst), - (ins fimm:$srcf0, fimm:$srcf1, regclass:$src), - OpStr, - [(set regclass:$dst, (IntMinOp - 
(IntMaxOp regclass:$src, f0:$srcf0), f1:$srcf1))]>; - -} -// Note that max(0.0, min(x, 1.0)) cannot be mapped to sat(x) because when x -// is NaN +// Note that max(0.0, min(x, 1.0)) cannot be mapped to sat(x) because when x is +// NaN // max(0.0, min(x, 1.0)) is 1.0 while sat(x) is 0. // Same story for fmax, fmin. -defm SAT_fmin_fmax_f : SAT<Float32Regs, f32imm, int_nvvm_fmin_f, - int_nvvm_fmax_f, immFloat0, immFloat1, - "cvt.sat.f32.f32 \t$dst, $src; \n">; -defm SAT_fmin_fmax_d : SAT<Float64Regs, f64imm, int_nvvm_fmin_d, - int_nvvm_fmax_d, immDouble0, immDouble1, - "cvt.sat.f64.f64 \t$dst, $src; \n">; +def : Pat<(int_nvvm_fmin_f immFloat1, + (int_nvvm_fmax_f immFloat0, Float32Regs:$a)), + (CVT_f32_f32 Float32Regs:$a, CvtSAT)>; +def : Pat<(int_nvvm_fmin_f immFloat1, + (int_nvvm_fmax_f Float32Regs:$a, immFloat0)), + (CVT_f32_f32 Float32Regs:$a, CvtSAT)>; +def : Pat<(int_nvvm_fmin_f + (int_nvvm_fmax_f immFloat0, Float32Regs:$a), immFloat1), + (CVT_f32_f32 Float32Regs:$a, CvtSAT)>; +def : Pat<(int_nvvm_fmin_f + (int_nvvm_fmax_f Float32Regs:$a, immFloat0), immFloat1), + (CVT_f32_f32 Float32Regs:$a, CvtSAT)>; + +def : Pat<(int_nvvm_fmin_d immDouble1, + (int_nvvm_fmax_d immDouble0, Float64Regs:$a)), + (CVT_f64_f64 Float64Regs:$a, CvtSAT)>; +def : Pat<(int_nvvm_fmin_d immDouble1, + (int_nvvm_fmax_d Float64Regs:$a, immDouble0)), + (CVT_f64_f64 Float64Regs:$a, CvtSAT)>; +def : Pat<(int_nvvm_fmin_d + (int_nvvm_fmax_d immDouble0, Float64Regs:$a), immDouble1), + (CVT_f64_f64 Float64Regs:$a, CvtSAT)>; +def : Pat<(int_nvvm_fmin_d + (int_nvvm_fmax_d Float64Regs:$a, immDouble0), immDouble1), + (CVT_f64_f64 Float64Regs:$a, CvtSAT)>; // We need a full string for OpcStr here because we need to deal with case like @@ -312,19 +299,19 @@ def INT_NVVM_SAD_UI : F_MATH_3<"sad.u32 \t$dst, $src0, $src1, $src2;", // Floor Ceil // -def INT_NVVM_FLOOR_FTZ_F : F_MATH_1<"cvt.rmi.ftz.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_floor_ftz_f>; -def INT_NVVM_FLOOR_F : F_MATH_1<"cvt.rmi.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_floor_f>; -def INT_NVVM_FLOOR_D : F_MATH_1<"cvt.rmi.f64.f64 \t$dst, $src0;", - Float64Regs, Float64Regs, int_nvvm_floor_d>; +def : Pat<(int_nvvm_floor_ftz_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>; +def : Pat<(int_nvvm_floor_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_floor_d Float64Regs:$a), + (CVT_f64_f64 Float64Regs:$a, CvtRMI)>; -def INT_NVVM_CEIL_FTZ_F : F_MATH_1<"cvt.rpi.ftz.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_ceil_ftz_f>; -def INT_NVVM_CEIL_F : F_MATH_1<"cvt.rpi.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_ceil_f>; -def INT_NVVM_CEIL_D : F_MATH_1<"cvt.rpi.f64.f64 \t$dst, $src0;", - Float64Regs, Float64Regs, int_nvvm_ceil_d>; +def : Pat<(int_nvvm_ceil_ftz_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>; +def : Pat<(int_nvvm_ceil_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRPI)>; +def : Pat<(int_nvvm_ceil_d Float64Regs:$a), + (CVT_f64_f64 Float64Regs:$a, CvtRPI)>; // // Abs @@ -347,37 +334,34 @@ def INT_NVVM_FABS_D : F_MATH_1<"abs.f64 \t$dst, $src0;", Float64Regs, // Round // -def INT_NVVM_ROUND_FTZ_F : F_MATH_1<"cvt.rni.ftz.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_round_ftz_f>; -def INT_NVVM_ROUND_F : F_MATH_1<"cvt.rni.f32.f32 \t$dst, $src0;", Float32Regs, - Float32Regs, int_nvvm_round_f>; - -def INT_NVVM_ROUND_D : F_MATH_1<"cvt.rni.f64.f64 \t$dst, $src0;", Float64Regs, - Float64Regs, int_nvvm_round_d>; +def : 
Pat<(int_nvvm_round_ftz_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>; +def : Pat<(int_nvvm_round_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_round_d Float64Regs:$a), + (CVT_f64_f64 Float64Regs:$a, CvtRNI)>; // // Trunc // -def INT_NVVM_TRUNC_FTZ_F : F_MATH_1<"cvt.rzi.ftz.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_trunc_ftz_f>; -def INT_NVVM_TRUNC_F : F_MATH_1<"cvt.rzi.f32.f32 \t$dst, $src0;", Float32Regs, - Float32Regs, int_nvvm_trunc_f>; - -def INT_NVVM_TRUNC_D : F_MATH_1<"cvt.rzi.f64.f64 \t$dst, $src0;", Float64Regs, - Float64Regs, int_nvvm_trunc_d>; +def : Pat<(int_nvvm_trunc_ftz_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>; +def : Pat<(int_nvvm_trunc_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_trunc_d Float64Regs:$a), + (CVT_f64_f64 Float64Regs:$a, CvtRZI)>; // // Saturate // -def INT_NVVM_SATURATE_FTZ_F : F_MATH_1<"cvt.sat.ftz.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_saturate_ftz_f>; -def INT_NVVM_SATURATE_F : F_MATH_1<"cvt.sat.f32.f32 \t$dst, $src0;", - Float32Regs, Float32Regs, int_nvvm_saturate_f>; - -def INT_NVVM_SATURATE_D : F_MATH_1<"cvt.sat.f64.f64 \t$dst, $src0;", - Float64Regs, Float64Regs, int_nvvm_saturate_d>; +def : Pat<(int_nvvm_saturate_ftz_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtSAT_FTZ)>; +def : Pat<(int_nvvm_saturate_f Float32Regs:$a), + (CVT_f32_f32 Float32Regs:$a, CvtSAT)>; +def : Pat<(int_nvvm_saturate_d Float64Regs:$a), + (CVT_f64_f64 Float64Regs:$a, CvtSAT)>; // // Exp2 Log2 @@ -568,110 +552,110 @@ def INT_NVVM_ADD_RP_D : F_MATH_2<"add.rp.f64 \t$dst, $src0, $src1;", // Convert // -def INT_NVVM_D2F_RN_FTZ : F_MATH_1<"cvt.rn.ftz.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rn_ftz>; -def INT_NVVM_D2F_RN : F_MATH_1<"cvt.rn.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rn>; -def INT_NVVM_D2F_RZ_FTZ : F_MATH_1<"cvt.rz.ftz.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rz_ftz>; -def INT_NVVM_D2F_RZ : F_MATH_1<"cvt.rz.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rz>; -def INT_NVVM_D2F_RM_FTZ : F_MATH_1<"cvt.rm.ftz.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rm_ftz>; -def INT_NVVM_D2F_RM : F_MATH_1<"cvt.rm.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rm>; -def INT_NVVM_D2F_RP_FTZ : F_MATH_1<"cvt.rp.ftz.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rp_ftz>; -def INT_NVVM_D2F_RP : F_MATH_1<"cvt.rp.f32.f64 \t$dst, $src0;", - Float32Regs, Float64Regs, int_nvvm_d2f_rp>; - -def INT_NVVM_D2I_RN : F_MATH_1<"cvt.rni.s32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2i_rn>; -def INT_NVVM_D2I_RZ : F_MATH_1<"cvt.rzi.s32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2i_rz>; -def INT_NVVM_D2I_RM : F_MATH_1<"cvt.rmi.s32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2i_rm>; -def INT_NVVM_D2I_RP : F_MATH_1<"cvt.rpi.s32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2i_rp>; - -def INT_NVVM_D2UI_RN : F_MATH_1<"cvt.rni.u32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2ui_rn>; -def INT_NVVM_D2UI_RZ : F_MATH_1<"cvt.rzi.u32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2ui_rz>; -def INT_NVVM_D2UI_RM : F_MATH_1<"cvt.rmi.u32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2ui_rm>; -def INT_NVVM_D2UI_RP : F_MATH_1<"cvt.rpi.u32.f64 \t$dst, $src0;", - Int32Regs, Float64Regs, int_nvvm_d2ui_rp>; - -def INT_NVVM_I2D_RN : 
F_MATH_1<"cvt.rn.f64.s32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_i2d_rn>; -def INT_NVVM_I2D_RZ : F_MATH_1<"cvt.rz.f64.s32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_i2d_rz>; -def INT_NVVM_I2D_RM : F_MATH_1<"cvt.rm.f64.s32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_i2d_rm>; -def INT_NVVM_I2D_RP : F_MATH_1<"cvt.rp.f64.s32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_i2d_rp>; - -def INT_NVVM_UI2D_RN : F_MATH_1<"cvt.rn.f64.u32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_ui2d_rn>; -def INT_NVVM_UI2D_RZ : F_MATH_1<"cvt.rz.f64.u32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_ui2d_rz>; -def INT_NVVM_UI2D_RM : F_MATH_1<"cvt.rm.f64.u32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_ui2d_rm>; -def INT_NVVM_UI2D_RP : F_MATH_1<"cvt.rp.f64.u32 \t$dst, $src0;", - Float64Regs, Int32Regs, int_nvvm_ui2d_rp>; - -def INT_NVVM_F2I_RN_FTZ : F_MATH_1<"cvt.rni.ftz.s32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2i_rn_ftz>; -def INT_NVVM_F2I_RN : F_MATH_1<"cvt.rni.s32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2i_rn>; -def INT_NVVM_F2I_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.s32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2i_rz_ftz>; -def INT_NVVM_F2I_RZ : F_MATH_1<"cvt.rzi.s32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2i_rz>; -def INT_NVVM_F2I_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.s32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2i_rm_ftz>; -def INT_NVVM_F2I_RM : F_MATH_1<"cvt.rmi.s32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2i_rm>; -def INT_NVVM_F2I_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.s32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2i_rp_ftz>; -def INT_NVVM_F2I_RP : F_MATH_1<"cvt.rpi.s32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2i_rp>; - -def INT_NVVM_F2UI_RN_FTZ : F_MATH_1<"cvt.rni.ftz.u32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2ui_rn_ftz>; -def INT_NVVM_F2UI_RN : F_MATH_1<"cvt.rni.u32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2ui_rn>; -def INT_NVVM_F2UI_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.u32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2ui_rz_ftz>; -def INT_NVVM_F2UI_RZ : F_MATH_1<"cvt.rzi.u32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2ui_rz>; -def INT_NVVM_F2UI_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.u32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2ui_rm_ftz>; -def INT_NVVM_F2UI_RM : F_MATH_1<"cvt.rmi.u32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2ui_rm>; -def INT_NVVM_F2UI_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.u32.f32 \t$dst, $src0;", - Int32Regs, Float32Regs, int_nvvm_f2ui_rp_ftz>; -def INT_NVVM_F2UI_RP : F_MATH_1<"cvt.rpi.u32.f32 \t$dst, $src0;", Int32Regs, - Float32Regs, int_nvvm_f2ui_rp>; - -def INT_NVVM_I2F_RN : F_MATH_1<"cvt.rn.f32.s32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_i2f_rn>; -def INT_NVVM_I2F_RZ : F_MATH_1<"cvt.rz.f32.s32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_i2f_rz>; -def INT_NVVM_I2F_RM : F_MATH_1<"cvt.rm.f32.s32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_i2f_rm>; -def INT_NVVM_I2F_RP : F_MATH_1<"cvt.rp.f32.s32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_i2f_rp>; - -def INT_NVVM_UI2F_RN : F_MATH_1<"cvt.rn.f32.u32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_ui2f_rn>; -def INT_NVVM_UI2F_RZ : F_MATH_1<"cvt.rz.f32.u32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_ui2f_rz>; -def INT_NVVM_UI2F_RM : F_MATH_1<"cvt.rm.f32.u32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_ui2f_rm>; -def INT_NVVM_UI2F_RP : 
F_MATH_1<"cvt.rp.f32.u32 \t$dst, $src0;", Float32Regs, - Int32Regs, int_nvvm_ui2f_rp>; +def : Pat<(int_nvvm_d2f_rn_ftz Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>; +def : Pat<(int_nvvm_d2f_rn Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_d2f_rz_ftz Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRZ_FTZ)>; +def : Pat<(int_nvvm_d2f_rz Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_d2f_rm_ftz Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRM_FTZ)>; +def : Pat<(int_nvvm_d2f_rm Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_d2f_rp_ftz Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRP_FTZ)>; +def : Pat<(int_nvvm_d2f_rp Float64Regs:$a), + (CVT_f32_f64 Float64Regs:$a, CvtRP)>; + +def : Pat<(int_nvvm_d2i_rn Float64Regs:$a), + (CVT_s32_f64 Float64Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_d2i_rz Float64Regs:$a), + (CVT_s32_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_d2i_rm Float64Regs:$a), + (CVT_s32_f64 Float64Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_d2i_rp Float64Regs:$a), + (CVT_s32_f64 Float64Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_d2ui_rn Float64Regs:$a), + (CVT_u32_f64 Float64Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_d2ui_rz Float64Regs:$a), + (CVT_u32_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_d2ui_rm Float64Regs:$a), + (CVT_u32_f64 Float64Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_d2ui_rp Float64Regs:$a), + (CVT_u32_f64 Float64Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_i2d_rn Int32Regs:$a), + (CVT_f64_s32 Int32Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_i2d_rz Int32Regs:$a), + (CVT_f64_s32 Int32Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_i2d_rm Int32Regs:$a), + (CVT_f64_s32 Int32Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_i2d_rp Int32Regs:$a), + (CVT_f64_s32 Int32Regs:$a, CvtRP)>; + +def : Pat<(int_nvvm_ui2d_rn Int32Regs:$a), + (CVT_f64_u32 Int32Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_ui2d_rz Int32Regs:$a), + (CVT_f64_u32 Int32Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_ui2d_rm Int32Regs:$a), + (CVT_f64_u32 Int32Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_ui2d_rp Int32Regs:$a), + (CVT_f64_u32 Int32Regs:$a, CvtRP)>; + +def : Pat<(int_nvvm_f2i_rn_ftz Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRNI_FTZ)>; +def : Pat<(int_nvvm_f2i_rn Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_f2i_rz_ftz Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>; +def : Pat<(int_nvvm_f2i_rz Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_f2i_rm_ftz Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRMI_FTZ)>; +def : Pat<(int_nvvm_f2i_rm Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_f2i_rp_ftz Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRPI_FTZ)>; +def : Pat<(int_nvvm_f2i_rp Float32Regs:$a), + (CVT_s32_f32 Float32Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_f2ui_rn_ftz Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRNI_FTZ)>; +def : Pat<(int_nvvm_f2ui_rn Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_f2ui_rz_ftz Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>; +def : Pat<(int_nvvm_f2ui_rz Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_f2ui_rm_ftz Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRMI_FTZ)>; +def : Pat<(int_nvvm_f2ui_rm Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_f2ui_rp_ftz Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRPI_FTZ)>; +def : Pat<(int_nvvm_f2ui_rp 
Float32Regs:$a), + (CVT_u32_f32 Float32Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_i2f_rn Int32Regs:$a), + (CVT_f32_s32 Int32Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_i2f_rz Int32Regs:$a), + (CVT_f32_s32 Int32Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_i2f_rm Int32Regs:$a), + (CVT_f32_s32 Int32Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_i2f_rp Int32Regs:$a), + (CVT_f32_s32 Int32Regs:$a, CvtRP)>; + +def : Pat<(int_nvvm_ui2f_rn Int32Regs:$a), + (CVT_f32_u32 Int32Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_ui2f_rz Int32Regs:$a), + (CVT_f32_u32 Int32Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_ui2f_rm Int32Regs:$a), + (CVT_f32_u32 Int32Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_ui2f_rp Int32Regs:$a), + (CVT_f32_u32 Int32Regs:$a, CvtRP)>; def INT_NVVM_LOHI_I2D : F_MATH_2<"mov.b64 \t$dst, {{$src0, $src1}};", Float64Regs, Int32Regs, Int32Regs, int_nvvm_lohi_i2d>; @@ -687,91 +671,106 @@ def INT_NVVM_D2I_HI : F_MATH_1<!strconcat("{{\n\t", "}}"))), Int32Regs, Float64Regs, int_nvvm_d2i_hi>; -def INT_NVVM_F2LL_RN_FTZ : F_MATH_1<"cvt.rni.ftz.s64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ll_rn_ftz>; -def INT_NVVM_F2LL_RN : F_MATH_1<"cvt.rni.s64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ll_rn>; -def INT_NVVM_F2LL_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.s64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ll_rz_ftz>; -def INT_NVVM_F2LL_RZ : F_MATH_1<"cvt.rzi.s64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ll_rz>; -def INT_NVVM_F2LL_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.s64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ll_rm_ftz>; -def INT_NVVM_F2LL_RM : F_MATH_1<"cvt.rmi.s64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ll_rm>; -def INT_NVVM_F2LL_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.s64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ll_rp_ftz>; -def INT_NVVM_F2LL_RP : F_MATH_1<"cvt.rpi.s64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ll_rp>; - -def INT_NVVM_F2ULL_RN_FTZ : F_MATH_1<"cvt.rni.ftz.u64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ull_rn_ftz>; -def INT_NVVM_F2ULL_RN : F_MATH_1<"cvt.rni.u64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ull_rn>; -def INT_NVVM_F2ULL_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.u64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ull_rz_ftz>; -def INT_NVVM_F2ULL_RZ : F_MATH_1<"cvt.rzi.u64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ull_rz>; -def INT_NVVM_F2ULL_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.u64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ull_rm_ftz>; -def INT_NVVM_F2ULL_RM : F_MATH_1<"cvt.rmi.u64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ull_rm>; -def INT_NVVM_F2ULL_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.u64.f32 \t$dst, $src0;", - Int64Regs, Float32Regs, int_nvvm_f2ull_rp_ftz>; -def INT_NVVM_F2ULL_RP : F_MATH_1<"cvt.rpi.u64.f32 \t$dst, $src0;", Int64Regs, - Float32Regs, int_nvvm_f2ull_rp>; - -def INT_NVVM_D2LL_RN : F_MATH_1<"cvt.rni.s64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ll_rn>; -def INT_NVVM_D2LL_RZ : F_MATH_1<"cvt.rzi.s64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ll_rz>; -def INT_NVVM_D2LL_RM : F_MATH_1<"cvt.rmi.s64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ll_rm>; -def INT_NVVM_D2LL_RP : F_MATH_1<"cvt.rpi.s64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ll_rp>; - -def INT_NVVM_D2ULL_RN : F_MATH_1<"cvt.rni.u64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ull_rn>; -def INT_NVVM_D2ULL_RZ : F_MATH_1<"cvt.rzi.u64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ull_rz>; 
-def INT_NVVM_D2ULL_RM : F_MATH_1<"cvt.rmi.u64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ull_rm>; -def INT_NVVM_D2ULL_RP : F_MATH_1<"cvt.rpi.u64.f64 \t$dst, $src0;", Int64Regs, - Float64Regs, int_nvvm_d2ull_rp>; - -def INT_NVVM_LL2F_RN : F_MATH_1<"cvt.rn.f32.s64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ll2f_rn>; -def INT_NVVM_LL2F_RZ : F_MATH_1<"cvt.rz.f32.s64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ll2f_rz>; -def INT_NVVM_LL2F_RM : F_MATH_1<"cvt.rm.f32.s64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ll2f_rm>; -def INT_NVVM_LL2F_RP : F_MATH_1<"cvt.rp.f32.s64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ll2f_rp>; -def INT_NVVM_ULL2F_RN : F_MATH_1<"cvt.rn.f32.u64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ull2f_rn>; -def INT_NVVM_ULL2F_RZ : F_MATH_1<"cvt.rz.f32.u64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ull2f_rz>; -def INT_NVVM_ULL2F_RM : F_MATH_1<"cvt.rm.f32.u64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ull2f_rm>; -def INT_NVVM_ULL2F_RP : F_MATH_1<"cvt.rp.f32.u64 \t$dst, $src0;", Float32Regs, - Int64Regs, int_nvvm_ull2f_rp>; - -def INT_NVVM_LL2D_RN : F_MATH_1<"cvt.rn.f64.s64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ll2d_rn>; -def INT_NVVM_LL2D_RZ : F_MATH_1<"cvt.rz.f64.s64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ll2d_rz>; -def INT_NVVM_LL2D_RM : F_MATH_1<"cvt.rm.f64.s64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ll2d_rm>; -def INT_NVVM_LL2D_RP : F_MATH_1<"cvt.rp.f64.s64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ll2d_rp>; -def INT_NVVM_ULL2D_RN : F_MATH_1<"cvt.rn.f64.u64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ull2d_rn>; -def INT_NVVM_ULL2D_RZ : F_MATH_1<"cvt.rz.f64.u64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ull2d_rz>; -def INT_NVVM_ULL2D_RM : F_MATH_1<"cvt.rm.f64.u64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ull2d_rm>; -def INT_NVVM_ULL2D_RP : F_MATH_1<"cvt.rp.f64.u64 \t$dst, $src0;", Float64Regs, - Int64Regs, int_nvvm_ull2d_rp>; +def : Pat<(int_nvvm_f2ll_rn_ftz Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRNI_FTZ)>; +def : Pat<(int_nvvm_f2ll_rn Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_f2ll_rz_ftz Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>; +def : Pat<(int_nvvm_f2ll_rz Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_f2ll_rm_ftz Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRMI_FTZ)>; +def : Pat<(int_nvvm_f2ll_rm Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_f2ll_rp_ftz Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRPI_FTZ)>; +def : Pat<(int_nvvm_f2ll_rp Float32Regs:$a), + (CVT_s64_f32 Float32Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_f2ull_rn_ftz Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRNI_FTZ)>; +def : Pat<(int_nvvm_f2ull_rn Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_f2ull_rz_ftz Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>; +def : Pat<(int_nvvm_f2ull_rz Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_f2ull_rm_ftz Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRMI_FTZ)>; +def : Pat<(int_nvvm_f2ull_rm Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_f2ull_rp_ftz Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRPI_FTZ)>; +def : Pat<(int_nvvm_f2ull_rp Float32Regs:$a), + (CVT_u64_f32 Float32Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_d2ll_rn Float64Regs:$a), + (CVT_s64_f64 
Float64Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_d2ll_rz Float64Regs:$a), + (CVT_s64_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_d2ll_rm Float64Regs:$a), + (CVT_s64_f64 Float64Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_d2ll_rp Float64Regs:$a), + (CVT_s64_f64 Float64Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_d2ull_rn Float64Regs:$a), + (CVT_u64_f64 Float64Regs:$a, CvtRNI)>; +def : Pat<(int_nvvm_d2ull_rz Float64Regs:$a), + (CVT_u64_f64 Float64Regs:$a, CvtRZI)>; +def : Pat<(int_nvvm_d2ull_rm Float64Regs:$a), + (CVT_u64_f64 Float64Regs:$a, CvtRMI)>; +def : Pat<(int_nvvm_d2ull_rp Float64Regs:$a), + (CVT_u64_f64 Float64Regs:$a, CvtRPI)>; + +def : Pat<(int_nvvm_ll2f_rn Int64Regs:$a), + (CVT_f32_s64 Int64Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_ll2f_rz Int64Regs:$a), + (CVT_f32_s64 Int64Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_ll2f_rm Int64Regs:$a), + (CVT_f32_s64 Int64Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_ll2f_rp Int64Regs:$a), + (CVT_f32_s64 Int64Regs:$a, CvtRP)>; + +def : Pat<(int_nvvm_ull2f_rn Int64Regs:$a), + (CVT_f32_u64 Int64Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_ull2f_rz Int64Regs:$a), + (CVT_f32_u64 Int64Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_ull2f_rm Int64Regs:$a), + (CVT_f32_u64 Int64Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_ull2f_rp Int64Regs:$a), + (CVT_f32_u64 Int64Regs:$a, CvtRP)>; + +def : Pat<(int_nvvm_ll2d_rn Int64Regs:$a), + (CVT_f64_s64 Int64Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_ll2d_rz Int64Regs:$a), + (CVT_f64_s64 Int64Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_ll2d_rm Int64Regs:$a), + (CVT_f64_s64 Int64Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_ll2d_rp Int64Regs:$a), + (CVT_f64_s64 Int64Regs:$a, CvtRP)>; + +def : Pat<(int_nvvm_ull2d_rn Int64Regs:$a), + (CVT_f64_u64 Int64Regs:$a, CvtRN)>; +def : Pat<(int_nvvm_ull2d_rz Int64Regs:$a), + (CVT_f64_u64 Int64Regs:$a, CvtRZ)>; +def : Pat<(int_nvvm_ull2d_rm Int64Regs:$a), + (CVT_f64_u64 Int64Regs:$a, CvtRM)>; +def : Pat<(int_nvvm_ull2d_rp Int64Regs:$a), + (CVT_f64_u64 Int64Regs:$a, CvtRP)>; + + +// FIXME: Ideally, we could use these patterns instead of the scope-creating +// patterns, but ptxas does not like these since .s16 is not compatible with +// .f16. The solution is to use .bXX for all integer register types, but we +// are not there yet. 
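[Editor's aside] The f2h/h2f conversions referenced below keep the half-precision payload in a 16-bit integer register, which is why the patterns are typed Int16Regs <-> Float32Regs. A hedged host-side reference for the h2f direction, assuming an IEEE binary16 payload in the low 16 bits (illustrative only, not backend code):

#include <cstdint>
#include <cstring>

float h2f_reference(uint16_t h) {
  uint32_t sign = static_cast<uint32_t>(h & 0x8000u) << 16;
  uint32_t exp  = (h >> 10) & 0x1Fu;
  uint32_t man  = h & 0x3FFu;
  uint32_t bits;
  if (exp == 0x1Fu) {                        // Inf or NaN
    bits = sign | 0x7F800000u | (man << 13);
  } else if (exp != 0) {                     // normal: rebias 15 -> 127
    bits = sign | ((exp + 112u) << 23) | (man << 13);
  } else if (man != 0) {                     // subnormal: renormalize
    unsigned shift = 0;
    while (!(man & 0x400u)) { man <<= 1; ++shift; }
    bits = sign | ((113u - shift) << 23) | ((man & 0x3FFu) << 13);
  } else {                                   // +/- zero
    bits = sign;
  }
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}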
+//def : Pat<(int_nvvm_f2h_rn_ftz Float32Regs:$a), +// (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>; +//def : Pat<(int_nvvm_f2h_rn Float32Regs:$a), +// (CVT_f16_f32 Float32Regs:$a, CvtRN)>; +// +//def : Pat<(int_nvvm_h2f Int16Regs:$a), +// (CVT_f32_f16 Int16Regs:$a, CvtNONE)>; def INT_NVVM_F2H_RN_FTZ : F_MATH_1<!strconcat("{{\n\t", !strconcat(".reg .b16 %temp;\n\t", @@ -793,6 +792,13 @@ def INT_NVVM_H2F : F_MATH_1<!strconcat("{{\n\t", "}}")))), Float32Regs, Int16Regs, int_nvvm_h2f>; +def : Pat<(f32 (f16_to_f32 Int16Regs:$a)), + (CVT_f32_f16 Int16Regs:$a, CvtNONE)>; +def : Pat<(i16 (f32_to_f16 Float32Regs:$a)), + (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>; +def : Pat<(i16 (f32_to_f16 Float32Regs:$a)), + (CVT_f16_f32 Float32Regs:$a, CvtRN)>; + // // Bitcast // @@ -1270,6 +1276,11 @@ def INT_PTX_SREG_WARPSIZE : F_SREG<"mov.u32 \t$dst, WARP_SZ;", Int32Regs, // Support for ldu on sm_20 or later //----------------------------------- +def ldu_i8 : PatFrag<(ops node:$ptr), (int_nvvm_ldu_global_i node:$ptr), [{ + MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N); + return M->getMemoryVT() == MVT::i8; +}]>; + // Scalar // @TODO: Revisit this, Changed imemAny to imem multiclass LDU_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp> { @@ -1291,8 +1302,27 @@ multiclass LDU_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp> { [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDU]>; } -defm INT_PTX_LDU_GLOBAL_i8 : LDU_G<"u8 \t$result, [$src];", Int8Regs, -int_nvvm_ldu_global_i>; +multiclass LDU_G_NOINTRIN<string TyStr, NVPTXRegClass regclass, PatFrag IntOp> { + def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src), + !strconcat("ldu.global.", TyStr), + [(set regclass:$result, (IntOp Int32Regs:$src))]>, Requires<[hasLDU]>; + def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src), + !strconcat("ldu.global.", TyStr), + [(set regclass:$result, (IntOp Int64Regs:$src))]>, Requires<[hasLDU]>; + def avar: NVPTXInst<(outs regclass:$result), (ins imem:$src), + !strconcat("ldu.global.", TyStr), + [(set regclass:$result, (IntOp (Wrapper tglobaladdr:$src)))]>, + Requires<[hasLDU]>; + def ari : NVPTXInst<(outs regclass:$result), (ins MEMri:$src), + !strconcat("ldu.global.", TyStr), + [(set regclass:$result, (IntOp ADDRri:$src))]>, Requires<[hasLDU]>; + def ari64 : NVPTXInst<(outs regclass:$result), (ins MEMri64:$src), + !strconcat("ldu.global.", TyStr), + [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDU]>; +} + +defm INT_PTX_LDU_GLOBAL_i8 : LDU_G_NOINTRIN<"u8 \t$result, [$src];", Int16Regs, + ldu_i8>; defm INT_PTX_LDU_GLOBAL_i16 : LDU_G<"u16 \t$result, [$src];", Int16Regs, int_nvvm_ldu_global_i>; defm INT_PTX_LDU_GLOBAL_i32 : LDU_G<"u32 \t$result, [$src];", Int32Regs, @@ -1312,25 +1342,43 @@ int_nvvm_ldu_global_p>; // Elementized vector ldu multiclass VLDU_G_ELE_V2<string TyStr, NVPTXRegClass regclass> { - def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), - (ins Int32Regs:$src), + def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins Int32Regs:$src), + !strconcat("ldu.global.", TyStr), []>; + def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins Int64Regs:$src), + !strconcat("ldu.global.", TyStr), []>; + def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins MEMri:$src), !strconcat("ldu.global.", TyStr), []>; - def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), - (ins Int64Regs:$src), + def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins MEMri64:$src), + 
!strconcat("ldu.global.", TyStr), []>; + def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins imemAny:$src), !strconcat("ldu.global.", TyStr), []>; } -multiclass VLDU_G_ELE_V4<string TyStr, NVPTXRegClass regclass> { - def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, - regclass:$dst4), (ins Int32Regs:$src), +multiclass VLDU_G_ELE_V4<string TyStr, NVPTXRegClass regclass> { + def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins Int32Regs:$src), + !strconcat("ldu.global.", TyStr), []>; + def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins Int64Regs:$src), + !strconcat("ldu.global.", TyStr), []>; + def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins MEMri:$src), !strconcat("ldu.global.", TyStr), []>; - def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, - regclass:$dst4), (ins Int64Regs:$src), + def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins MEMri64:$src), + !strconcat("ldu.global.", TyStr), []>; + def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins imemAny:$src), !strconcat("ldu.global.", TyStr), []>; } defm INT_PTX_LDU_G_v2i8_ELE - : VLDU_G_ELE_V2<"v2.u8 \t{{$dst1, $dst2}}, [$src];", Int8Regs>; + : VLDU_G_ELE_V2<"v2.u8 \t{{$dst1, $dst2}}, [$src];", Int16Regs>; defm INT_PTX_LDU_G_v2i16_ELE : VLDU_G_ELE_V2<"v2.u16 \t{{$dst1, $dst2}}, [$src];", Int16Regs>; defm INT_PTX_LDU_G_v2i32_ELE @@ -1342,7 +1390,7 @@ defm INT_PTX_LDU_G_v2i64_ELE defm INT_PTX_LDU_G_v2f64_ELE : VLDU_G_ELE_V2<"v2.f64 \t{{$dst1, $dst2}}, [$src];", Float64Regs>; defm INT_PTX_LDU_G_v4i8_ELE - : VLDU_G_ELE_V4<"v4.u8 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", Int8Regs>; + : VLDU_G_ELE_V4<"v4.u8 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", Int16Regs>; defm INT_PTX_LDU_G_v4i16_ELE : VLDU_G_ELE_V4<"v4.u16 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", Int16Regs>; @@ -1422,20 +1470,38 @@ defm INT_PTX_LDG_GLOBAL_p64 // Elementized vector ldg multiclass VLDG_G_ELE_V2<string TyStr, NVPTXRegClass regclass> { - def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), (ins Int32Regs:$src), !strconcat("ld.global.nc.", TyStr), []>; - def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), (ins Int64Regs:$src), !strconcat("ld.global.nc.", TyStr), []>; + def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins MEMri:$src), + !strconcat("ld.global.nc.", TyStr), []>; + def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins MEMri64:$src), + !strconcat("ld.global.nc.", TyStr), []>; + def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2), + (ins imemAny:$src), + !strconcat("ld.global.nc.", TyStr), []>; } multiclass VLDG_G_ELE_V4<string TyStr, NVPTXRegClass regclass> { - def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, - regclass:$dst3, regclass:$dst4), (ins Int32Regs:$src), + def _areg32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins Int32Regs:$src), + !strconcat("ld.global.nc.", TyStr), []>; + def _areg64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins Int64Regs:$src), + !strconcat("ld.global.nc.", TyStr), []>; + def _ari32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), 
(ins MEMri:$src), !strconcat("ld.global.nc.", TyStr), []>; - def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, - regclass:$dst3, regclass:$dst4), (ins Int64Regs:$src), + def _ari64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins MEMri64:$src), + !strconcat("ld.global.nc.", TyStr), []>; + def _avar: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, + regclass:$dst4), (ins imemAny:$src), !strconcat("ld.global.nc.", TyStr), []>; } @@ -1542,10 +1608,6 @@ def nvvm_ptr_gen_to_param_64 : NVPTXInst<(outs Int64Regs:$result), // nvvm.move intrinsicc -def nvvm_move_i8 : NVPTXInst<(outs Int8Regs:$r), (ins Int8Regs:$s), - "mov.b16 \t$r, $s;", - [(set Int8Regs:$r, - (int_nvvm_move_i8 Int8Regs:$s))]>; def nvvm_move_i16 : NVPTXInst<(outs Int16Regs:$r), (ins Int16Regs:$s), "mov.b16 \t$r, $s;", [(set Int16Regs:$r, diff --git a/lib/Target/NVPTX/NVPTXMCExpr.cpp b/lib/Target/NVPTX/NVPTXMCExpr.cpp new file mode 100644 index 0000000..ca24764 --- /dev/null +++ b/lib/Target/NVPTX/NVPTXMCExpr.cpp @@ -0,0 +1,46 @@ +//===-- NVPTXMCExpr.cpp - NVPTX specific MC expression classes ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "nvptx-mcexpr" +#include "NVPTXMCExpr.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +using namespace llvm; + +const NVPTXFloatMCExpr* +NVPTXFloatMCExpr::Create(VariantKind Kind, APFloat Flt, MCContext &Ctx) { + return new (Ctx) NVPTXFloatMCExpr(Kind, Flt); +} + +void NVPTXFloatMCExpr::PrintImpl(raw_ostream &OS) const { + bool Ignored; + unsigned NumHex; + APFloat APF = getAPFloat(); + + switch (Kind) { + default: llvm_unreachable("Invalid kind!"); + case VK_NVPTX_SINGLE_PREC_FLOAT: + OS << "0f"; + NumHex = 8; + APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &Ignored); + break; + case VK_NVPTX_DOUBLE_PREC_FLOAT: + OS << "0d"; + NumHex = 16; + APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &Ignored); + break; + } + + APInt API = APF.bitcastToAPInt(); + std::string HexStr(utohexstr(API.getZExtValue())); + if (HexStr.length() < NumHex) + OS << std::string(NumHex - HexStr.length(), '0'); + OS << utohexstr(API.getZExtValue()); +} diff --git a/lib/Target/NVPTX/NVPTXMCExpr.h b/lib/Target/NVPTX/NVPTXMCExpr.h new file mode 100644 index 0000000..0efb231 --- /dev/null +++ b/lib/Target/NVPTX/NVPTXMCExpr.h @@ -0,0 +1,83 @@ +//===-- NVPTXMCExpr.h - NVPTX specific MC expression classes ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +// Modeled after ARMMCExpr + +#ifndef NVPTXMCEXPR_H +#define NVPTXMCEXPR_H + +#include "llvm/ADT/APFloat.h" +#include "llvm/MC/MCExpr.h" + +namespace llvm { + +class NVPTXFloatMCExpr : public MCTargetExpr { +public: + enum VariantKind { + VK_NVPTX_None, + VK_NVPTX_SINGLE_PREC_FLOAT, // FP constant in single-precision + VK_NVPTX_DOUBLE_PREC_FLOAT // FP constant in double-precision + }; + +private: + const VariantKind Kind; + const APFloat Flt; + + explicit NVPTXFloatMCExpr(VariantKind _Kind, APFloat _Flt) + : Kind(_Kind), Flt(_Flt) {} + +public: + /// @name Construction + /// @{ + + static const NVPTXFloatMCExpr *Create(VariantKind Kind, APFloat Flt, + MCContext &Ctx); + + static const NVPTXFloatMCExpr *CreateConstantFPSingle(APFloat Flt, + MCContext &Ctx) { + return Create(VK_NVPTX_SINGLE_PREC_FLOAT, Flt, Ctx); + } + + static const NVPTXFloatMCExpr *CreateConstantFPDouble(APFloat Flt, + MCContext &Ctx) { + return Create(VK_NVPTX_DOUBLE_PREC_FLOAT, Flt, Ctx); + } + + /// @} + /// @name Accessors + /// @{ + + /// getOpcode - Get the kind of this expression. + VariantKind getKind() const { return Kind; } + + /// getSubExpr - Get the child of this expression. + APFloat getAPFloat() const { return Flt; } + +/// @} + + void PrintImpl(raw_ostream &OS) const; + bool EvaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout) const { + return false; + } + void AddValueSymbols(MCAssembler *) const {}; + const MCSection *FindAssociatedSection() const { + return NULL; + } + + // There are no TLS NVPTXMCExprs at the moment. + void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {} + + static bool classof(const MCExpr *E) { + return E->getKind() == MCExpr::Target; + } +}; +} // end namespace llvm + +#endif diff --git a/lib/Target/NVPTX/NVPTXNumRegisters.h b/lib/Target/NVPTX/NVPTXNumRegisters.h deleted file mode 100644 index a95c16b..0000000 --- a/lib/Target/NVPTX/NVPTXNumRegisters.h +++ /dev/null @@ -1,16 +0,0 @@ - -//===-- NVPTXNumRegisters.h - PTX Register Info ---------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -#ifndef NVPTX_NUM_REGISTERS_H -#define NVPTX_NUM_REGISTERS_H - -namespace llvm { const unsigned NVPTXNumRegisters = 396; } - -#endif diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp index bb039f8..4d3a1d9 100644 --- a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp +++ b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp @@ -38,10 +38,6 @@ std::string getNVPTXRegClassName(TargetRegisterClass const *RC) { return ".s32"; } else if (RC == &NVPTX::Int16RegsRegClass) { return ".s16"; - } - // Int8Regs become 16-bit registers in PTX - else if (RC == &NVPTX::Int8RegsRegClass) { - return ".s16"; } else if (RC == &NVPTX::Int1RegsRegClass) { return ".pred"; } else if (RC == &NVPTX::SpecialRegsRegClass) { @@ -64,8 +60,6 @@ std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) { return "%r"; } else if (RC == &NVPTX::Int16RegsRegClass) { return "%rs"; - } else if (RC == &NVPTX::Int8RegsRegClass) { - return "%rc"; } else if (RC == &NVPTX::Int1RegsRegClass) { return "%p"; } else if (RC == &NVPTX::SpecialRegsRegClass) { @@ -77,8 +71,7 @@ std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) { } } -NVPTXRegisterInfo::NVPTXRegisterInfo(const TargetInstrInfo &tii, - const NVPTXSubtarget &st) +NVPTXRegisterInfo::NVPTXRegisterInfo(const NVPTXSubtarget &st) : NVPTXGenRegisterInfo(0), Is64Bit(st.is64Bit()) {} #define GET_REGINFO_TARGET_DESC diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.h b/lib/Target/NVPTX/NVPTXRegisterInfo.h index d406820..0a20f29 100644 --- a/lib/Target/NVPTX/NVPTXRegisterInfo.h +++ b/lib/Target/NVPTX/NVPTXRegisterInfo.h @@ -35,7 +35,7 @@ private: ManagedStringPool ManagedStrPool; public: - NVPTXRegisterInfo(const TargetInstrInfo &tii, const NVPTXSubtarget &st); + NVPTXRegisterInfo(const NVPTXSubtarget &st); //------------------------------------------------------ // Pure virtual functions from TargetRegisterInfo diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.td b/lib/Target/NVPTX/NVPTXRegisterInfo.td index 8d100d6..7a38a66 100644 --- a/lib/Target/NVPTX/NVPTXRegisterInfo.td +++ b/lib/Target/NVPTX/NVPTXRegisterInfo.td @@ -29,9 +29,10 @@ def VRFrameLocal : NVPTXReg<"%SPL">; // Special Registers used as the stack def VRDepot : NVPTXReg<"%Depot">; -foreach i = 0-395 in { +// We use virtual registers, but define a few physical registers here to keep +// SDAG and the MachineInstr layers happy. 
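[Editor's aside] As the comment above notes, NVPTX codegen runs on virtual registers; the short physical sequences defined below exist only so SDAG and the MachineInstr layers have a register file to enumerate. A hypothetical sketch of how a pass would obtain a register (assumes LLVM's MachineRegisterInfo API; NVPTX::Int32RegsRegClass comes from the TableGen-generated register info):

#include "llvm/CodeGen/MachineRegisterInfo.h"

// Hypothetical helper: virtual registers are unbounded, so shrinking the
// physical sequences from 0-395 to 0-4 does not constrain code generation.
unsigned makeInt32VReg(llvm::MachineRegisterInfo &MRI) {
  return MRI.createVirtualRegister(&llvm::NVPTX::Int32RegsRegClass);
}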
+foreach i = 0-4 in { def P#i : NVPTXReg<"%p"#i>; // Predicate - def RC#i : NVPTXReg<"%rc"#i>; // 8-bit def RS#i : NVPTXReg<"%rs"#i>; // 16-bit def R#i : NVPTXReg<"%r"#i>; // 32-bit def RL#i : NVPTXReg<"%rl"#i>; // 64-bit @@ -48,17 +49,16 @@ foreach i = 0-395 in { //===----------------------------------------------------------------------===// // Register classes //===----------------------------------------------------------------------===// -def Int1Regs : NVPTXRegClass<[i1], 8, (add (sequence "P%u", 0, 395))>; -def Int8Regs : NVPTXRegClass<[i8], 8, (add (sequence "RC%u", 0, 395))>; -def Int16Regs : NVPTXRegClass<[i16], 16, (add (sequence "RS%u", 0, 395))>; -def Int32Regs : NVPTXRegClass<[i32], 32, (add (sequence "R%u", 0, 395))>; -def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%u", 0, 395))>; -def Float32Regs : NVPTXRegClass<[f32], 32, (add (sequence "F%u", 0, 395))>; -def Float64Regs : NVPTXRegClass<[f64], 64, (add (sequence "FL%u", 0, 395))>; -def Int32ArgRegs : NVPTXRegClass<[i32], 32, (add (sequence "ia%u", 0, 395))>; -def Int64ArgRegs : NVPTXRegClass<[i64], 64, (add (sequence "la%u", 0, 395))>; -def Float32ArgRegs : NVPTXRegClass<[f32], 32, (add (sequence "fa%u", 0, 395))>; -def Float64ArgRegs : NVPTXRegClass<[f64], 64, (add (sequence "da%u", 0, 395))>; +def Int1Regs : NVPTXRegClass<[i1], 8, (add (sequence "P%u", 0, 4))>; +def Int16Regs : NVPTXRegClass<[i16], 16, (add (sequence "RS%u", 0, 4))>; +def Int32Regs : NVPTXRegClass<[i32], 32, (add (sequence "R%u", 0, 4))>; +def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%u", 0, 4))>; +def Float32Regs : NVPTXRegClass<[f32], 32, (add (sequence "F%u", 0, 4))>; +def Float64Regs : NVPTXRegClass<[f64], 64, (add (sequence "FL%u", 0, 4))>; +def Int32ArgRegs : NVPTXRegClass<[i32], 32, (add (sequence "ia%u", 0, 4))>; +def Int64ArgRegs : NVPTXRegClass<[i64], 64, (add (sequence "la%u", 0, 4))>; +def Float32ArgRegs : NVPTXRegClass<[f32], 32, (add (sequence "fa%u", 0, 4))>; +def Float64ArgRegs : NVPTXRegClass<[f64], 64, (add (sequence "da%u", 0, 4))>; // Read NVPTXRegisterInfo.cpp to see how VRFrame and VRDepot are used. 
def SpecialRegs : NVPTXRegClass<[i32], 32, (add VRFrame, VRDepot)>; diff --git a/lib/Target/NVPTX/NVPTXSubtarget.cpp b/lib/Target/NVPTX/NVPTXSubtarget.cpp index 2dcd73d..c4d0d6e 100644 --- a/lib/Target/NVPTX/NVPTXSubtarget.cpp +++ b/lib/Target/NVPTX/NVPTXSubtarget.cpp @@ -19,23 +19,18 @@ using namespace llvm; -// Select Driver Interface -#include "llvm/Support/CommandLine.h" -namespace { -cl::opt<NVPTX::DrvInterface> DriverInterface( - cl::desc("Choose driver interface:"), - cl::values(clEnumValN(NVPTX::NVCL, "drvnvcl", "Nvidia OpenCL driver"), - clEnumValN(NVPTX::CUDA, "drvcuda", "Nvidia CUDA driver"), - clEnumValN(NVPTX::TEST, "drvtest", "Plain Test"), clEnumValEnd), - cl::init(NVPTX::NVCL)); -} NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU, const std::string &FS, bool is64Bit) : NVPTXGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit), PTXVersion(0), SmVersion(20) { - drvInterface = DriverInterface; + Triple T(TT); + + if (T.getOS() == Triple::NVCL) + drvInterface = NVPTX::NVCL; + else + drvInterface = NVPTX::CUDA; // Provide the default CPU if none std::string defCPU = "sm_20"; diff --git a/lib/Target/NVPTX/NVPTXTargetObjectFile.h b/lib/Target/NVPTX/NVPTXTargetObjectFile.h index 6ab0e08..bfd6ab1 100644 --- a/lib/Target/NVPTX/NVPTXTargetObjectFile.h +++ b/lib/Target/NVPTX/NVPTXTargetObjectFile.h @@ -21,7 +21,29 @@ class Module; class NVPTXTargetObjectFile : public TargetLoweringObjectFile { public: - NVPTXTargetObjectFile() {} + NVPTXTargetObjectFile() { + TextSection = 0; + DataSection = 0; + BSSSection = 0; + ReadOnlySection = 0; + + StaticCtorSection = 0; + StaticDtorSection = 0; + LSDASection = 0; + EHFrameSection = 0; + DwarfAbbrevSection = 0; + DwarfInfoSection = 0; + DwarfLineSection = 0; + DwarfFrameSection = 0; + DwarfPubTypesSection = 0; + DwarfDebugInlineSection = 0; + DwarfStrSection = 0; + DwarfLocSection = 0; + DwarfARangesSection = 0; + DwarfRangesSection = 0; + DwarfMacroInfoSection = 0; + } + ~NVPTXTargetObjectFile() { delete TextSection; delete DataSection; diff --git a/lib/Target/PowerPC/AsmParser/LLVMBuild.txt b/lib/Target/PowerPC/AsmParser/LLVMBuild.txt index bd08c13..02ebf1d 100644 --- a/lib/Target/PowerPC/AsmParser/LLVMBuild.txt +++ b/lib/Target/PowerPC/AsmParser/LLVMBuild.txt @@ -19,5 +19,5 @@ type = Library name = PowerPCAsmParser parent = PowerPC -required_libraries = PowerPCInfo MC MCParser Support +required_libraries = PowerPCDesc PowerPCInfo MC MCParser Support add_to_library_groups = PowerPC diff --git a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp index 9cf16f0..a8f7509 100644 --- a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp +++ b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp @@ -8,6 +8,7 @@ //===----------------------------------------------------------------------===// #include "MCTargetDesc/PPCMCTargetDesc.h" +#include "MCTargetDesc/PPCMCExpr.h" #include "llvm/MC/MCTargetAsmParser.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCExpr.h" @@ -17,6 +18,7 @@ #include "llvm/MC/MCParser/MCAsmLexer.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include "llvm/MC/MCParser/MCParsedAsmOperand.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" @@ -106,6 +108,67 @@ static unsigned CRRegs[8] = { PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7 }; +// Evaluate an expression containing condition register +// or condition register field symbols. Returns positive +// value on success, or -1 on error. 
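[Editor's aside] A hedged usage sketch for EvaluateCRExpr below: PowerPC assembly spells condition-register bits as expressions such as 4*cr3+eq, which the routine folds to a plain bit number (cr3 -> 3, eq -> 2, so 4*3 + 2 == 14). Building that expression by hand with the MC API (the helper name is illustrative; an MCContext is assumed):

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"

static const llvm::MCExpr *buildCR3Eq(llvm::MCContext &Ctx) {
  using namespace llvm;
  // 4 * cr3 + eq -- EvaluateCRExpr folds this to 14.
  const MCExpr *Four = MCConstantExpr::Create(4, Ctx);
  const MCExpr *CR3 =
      MCSymbolRefExpr::Create(Ctx.GetOrCreateSymbol(StringRef("cr3")), Ctx);
  const MCExpr *Eq =
      MCSymbolRefExpr::Create(Ctx.GetOrCreateSymbol(StringRef("eq")), Ctx);
  return MCBinaryExpr::CreateAdd(MCBinaryExpr::CreateMul(Four, CR3, Ctx),
                                 Eq, Ctx);
}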
+static int64_t +EvaluateCRExpr(const MCExpr *E) { + switch (E->getKind()) { + case MCExpr::Target: + return -1; + + case MCExpr::Constant: { + int64_t Res = cast<MCConstantExpr>(E)->getValue(); + return Res < 0 ? -1 : Res; + } + + case MCExpr::SymbolRef: { + const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E); + StringRef Name = SRE->getSymbol().getName(); + + if (Name == "lt") return 0; + if (Name == "gt") return 1; + if (Name == "eq") return 2; + if (Name == "so") return 3; + if (Name == "un") return 3; + + if (Name == "cr0") return 0; + if (Name == "cr1") return 1; + if (Name == "cr2") return 2; + if (Name == "cr3") return 3; + if (Name == "cr4") return 4; + if (Name == "cr5") return 5; + if (Name == "cr6") return 6; + if (Name == "cr7") return 7; + + return -1; + } + + case MCExpr::Unary: + return -1; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(E); + int64_t LHSVal = EvaluateCRExpr(BE->getLHS()); + int64_t RHSVal = EvaluateCRExpr(BE->getRHS()); + int64_t Res; + + if (LHSVal < 0 || RHSVal < 0) + return -1; + + switch (BE->getOpcode()) { + default: return -1; + case MCBinaryExpr::Add: Res = LHSVal + RHSVal; break; + case MCBinaryExpr::Mul: Res = LHSVal * RHSVal; break; + } + + return Res < 0 ? -1 : Res; + } + } + + llvm_unreachable("Invalid expression kind!"); +} + struct PPCOperand; class PPCAsmParser : public MCTargetAsmParser { @@ -126,10 +189,16 @@ class PPCAsmParser : public MCTargetAsmParser { virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); + const MCExpr *ExtractModifierFromExpr(const MCExpr *E, + PPCMCExpr::VariantKind &Variant); + const MCExpr *FixupVariantKind(const MCExpr *E); + bool ParseExpression(const MCExpr *&EVal); + bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands); bool ParseDirectiveWord(unsigned Size, SMLoc L); bool ParseDirectiveTC(unsigned Size, SMLoc L); + bool ParseDirectiveMachine(SMLoc L); bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl<MCParsedAsmOperand*> &Operands, @@ -153,7 +222,8 @@ public: : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { // Check for 64-bit vs. 32-bit pointer mode. Triple TheTriple(STI.getTargetTriple()); - IsPPC64 = TheTriple.getArch() == Triple::ppc64; + IsPPC64 = (TheTriple.getArch() == Triple::ppc64 || + TheTriple.getArch() == Triple::ppc64le); // Initialize the set of available features. 
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); } @@ -163,6 +233,8 @@ public: SmallVectorImpl<MCParsedAsmOperand*> &Operands); virtual bool ParseDirective(AsmToken DirectiveID); + + unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind); }; /// PPCOperand - Instances of this class represent a parsed PowerPC machine @@ -171,7 +243,8 @@ struct PPCOperand : public MCParsedAsmOperand { enum KindTy { Token, Immediate, - Expression + Expression, + TLSRegister } Kind; SMLoc StartLoc, EndLoc; @@ -188,12 +261,18 @@ struct PPCOperand : public MCParsedAsmOperand { struct ExprOp { const MCExpr *Val; + int64_t CRVal; // Cached result of EvaluateCRExpr(Val) + }; + + struct TLSRegOp { + const MCSymbolRefExpr *Sym; }; union { struct TokOp Tok; struct ImmOp Imm; struct ExprOp Expr; + struct TLSRegOp TLSReg; }; PPCOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} @@ -213,6 +292,9 @@ public: case Expression: Expr = o.Expr; break; + case TLSRegister: + TLSReg = o.TLSReg; + break; } } @@ -235,6 +317,16 @@ public: return Expr.Val; } + int64_t getExprCRVal() const { + assert(Kind == Expression && "Invalid access!"); + return Expr.CRVal; + } + + const MCExpr *getTLSReg() const { + assert(Kind == TLSRegister && "Invalid access!"); + return TLSReg.Sym; + } + unsigned getReg() const { assert(isRegNumber() && "Invalid access!"); return (unsigned) Imm.Val; @@ -242,7 +334,12 @@ public: unsigned getCCReg() const { assert(isCCRegNumber() && "Invalid access!"); - return (unsigned) Imm.Val; + return (unsigned) (Kind == Immediate ? Imm.Val : Expr.CRVal); + } + + unsigned getCRBit() const { + assert(isCRBitNumber() && "Invalid access!"); + return (unsigned) (Kind == Immediate ? Imm.Val : Expr.CRVal); } unsigned getCRBitMask() const { @@ -262,9 +359,24 @@ public: bool isS16ImmX4() const { return Kind == Expression || (Kind == Immediate && isInt<16>(getImm()) && (getImm() & 3) == 0); } + bool isS17Imm() const { return Kind == Expression || + (Kind == Immediate && isInt<17>(getImm())); } + bool isTLSReg() const { return Kind == TLSRegister; } + bool isDirectBr() const { return Kind == Expression || + (Kind == Immediate && isInt<26>(getImm()) && + (getImm() & 3) == 0); } + bool isCondBr() const { return Kind == Expression || + (Kind == Immediate && isInt<16>(getImm()) && + (getImm() & 3) == 0); } bool isRegNumber() const { return Kind == Immediate && isUInt<5>(getImm()); } - bool isCCRegNumber() const { return Kind == Immediate && - isUInt<3>(getImm()); } + bool isCCRegNumber() const { return (Kind == Expression + && isUInt<3>(getExprCRVal())) || + (Kind == Immediate + && isUInt<3>(getImm())); } + bool isCRBitNumber() const { return (Kind == Expression + && isUInt<5>(getExprCRVal())) || + (Kind == Immediate + && isUInt<5>(getImm())); } bool isCRBitMask() const { return Kind == Immediate && isUInt<8>(getImm()) && isPowerOf2_32(getImm()); } bool isMem() const { return false; } @@ -325,7 +437,7 @@ public: void addRegCRBITRCOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - Inst.addOperand(MCOperand::CreateReg(CRBITRegs[getReg()])); + Inst.addOperand(MCOperand::CreateReg(CRBITRegs[getCRBit()])); } void addRegCRRCOperands(MCInst &Inst, unsigned N) const { @@ -346,6 +458,19 @@ public: Inst.addOperand(MCOperand::CreateExpr(getExpr())); } + void addBranchTargetOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + if (Kind == Immediate) + Inst.addOperand(MCOperand::CreateImm(getImm() / 4)); + else + 
Inst.addOperand(MCOperand::CreateExpr(getExpr())); + } + + void addTLSRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::CreateExpr(getTLSReg())); + } + StringRef getToken() const { assert(Kind == Token && "Invalid access!"); return StringRef(Tok.Data, Tok.Length); @@ -363,6 +488,20 @@ public: return Op; } + static PPCOperand *CreateTokenWithStringCopy(StringRef Str, SMLoc S, + bool IsPPC64) { + // Allocate extra memory for the string and copy it. + void *Mem = ::operator new(sizeof(PPCOperand) + Str.size()); + PPCOperand *Op = new (Mem) PPCOperand(Token); + Op->Tok.Data = (const char *)(Op + 1); + Op->Tok.Length = Str.size(); + std::memcpy((char *)(Op + 1), Str.data(), Str.size()); + Op->StartLoc = S; + Op->EndLoc = S; + Op->IsPPC64 = IsPPC64; + return Op; + } + static PPCOperand *CreateImm(int64_t Val, SMLoc S, SMLoc E, bool IsPPC64) { PPCOperand *Op = new PPCOperand(Immediate); Op->Imm.Val = Val; @@ -376,11 +515,34 @@ public: SMLoc S, SMLoc E, bool IsPPC64) { PPCOperand *Op = new PPCOperand(Expression); Op->Expr.Val = Val; + Op->Expr.CRVal = EvaluateCRExpr(Val); + Op->StartLoc = S; + Op->EndLoc = E; + Op->IsPPC64 = IsPPC64; + return Op; + } + + static PPCOperand *CreateTLSReg(const MCSymbolRefExpr *Sym, + SMLoc S, SMLoc E, bool IsPPC64) { + PPCOperand *Op = new PPCOperand(TLSRegister); + Op->TLSReg.Sym = Sym; Op->StartLoc = S; Op->EndLoc = E; Op->IsPPC64 = IsPPC64; return Op; } + + static PPCOperand *CreateFromMCExpr(const MCExpr *Val, + SMLoc S, SMLoc E, bool IsPPC64) { + if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val)) + return CreateImm(CE->getValue(), S, E, IsPPC64); + + if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(Val)) + if (SRE->getKind() == MCSymbolRefExpr::VK_PPC_TLS) + return CreateTLSReg(SRE, S, E, IsPPC64); + + return CreateExpr(Val, S, E, IsPPC64); + } }; } // end anonymous namespace. 
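The ProcessInstruction cases added below expand PowerPC extended mnemonics into their underlying rotate-and-mask instructions, following the equivalences documented in the Power ISA's extended-mnemonic appendix. For example, extrwi Rd,Rs,n,b (extract an n-bit field starting at bit b and right-justify it) becomes rlwinm Rd,Rs,b+n,32-n,31. A small compilable sketch of that single mapping, assuming the usual big-endian bit numbering:

    #include <cassert>
    #include <cstdint>

    struct RLWINMArgs { int64_t SH, MB, ME; }; // shift, mask begin, mask end

    // Mirrors the PPC::EXTRWI case below: rotate left by b+n so the field
    // ends at bit 31, then mask off everything above the low n bits.
    static RLWINMArgs expandEXTRWI(int64_t N, int64_t B) {
      RLWINMArgs A = { B + N, 32 - N, 31 };
      return A;
    }

    int main() {
      // extrwi r3,r4,8,16 is encoded as rlwinm r3,r4,24,24,31.
      RLWINMArgs A = expandEXTRWI(8, 16);
      assert(A.SH == 24 && A.MB == 24 && A.ME == 31);
      return 0;
    }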
@@ -396,6 +558,9 @@ void PPCOperand::print(raw_ostream &OS) const { case Expression: getExpr()->print(OS); break; + case TLSRegister: + getTLSReg()->print(OS); + break; } } @@ -403,11 +568,133 @@ void PPCOperand::print(raw_ostream &OS) const { void PPCAsmParser:: ProcessInstruction(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - switch (Inst.getOpcode()) { - case PPC::SLWI: { + int Opcode = Inst.getOpcode(); + switch (Opcode) { + case PPC::LAx: { + MCInst TmpInst; + TmpInst.setOpcode(PPC::LA); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(2)); + TmpInst.addOperand(Inst.getOperand(1)); + Inst = TmpInst; + break; + } + case PPC::SUBI: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(PPC::RLWINM); + TmpInst.setOpcode(PPC::ADDI); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(-N)); + Inst = TmpInst; + break; + } + case PPC::SUBIS: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(PPC::ADDIS); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(-N)); + Inst = TmpInst; + break; + } + case PPC::SUBIC: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(PPC::ADDIC); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(-N)); + Inst = TmpInst; + break; + } + case PPC::SUBICo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(PPC::ADDICo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(-N)); + Inst = TmpInst; + break; + } + case PPC::EXTLWI: + case PPC::EXTLWIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + int64_t B = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::EXTLWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(B)); + TmpInst.addOperand(MCOperand::CreateImm(0)); + TmpInst.addOperand(MCOperand::CreateImm(N - 1)); + Inst = TmpInst; + break; + } + case PPC::EXTRWI: + case PPC::EXTRWIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + int64_t B = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::EXTRWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(B + N)); + TmpInst.addOperand(MCOperand::CreateImm(32 - N)); + TmpInst.addOperand(MCOperand::CreateImm(31)); + Inst = TmpInst; + break; + } + case PPC::INSLWI: + case PPC::INSLWIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + int64_t B = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::INSLWI? PPC::RLWIMI : PPC::RLWIMIo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(32 - B)); + TmpInst.addOperand(MCOperand::CreateImm(B)); + TmpInst.addOperand(MCOperand::CreateImm((B + N) - 1)); + Inst = TmpInst; + break; + } + case PPC::INSRWI: + case PPC::INSRWIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + int64_t B = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::INSRWI? 
PPC::RLWIMI : PPC::RLWIMIo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(32 - (B + N))); + TmpInst.addOperand(MCOperand::CreateImm(B)); + TmpInst.addOperand(MCOperand::CreateImm((B + N) - 1)); + Inst = TmpInst; + break; + } + case PPC::ROTRWI: + case PPC::ROTRWIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(Opcode == PPC::ROTRWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(32 - N)); + TmpInst.addOperand(MCOperand::CreateImm(0)); + TmpInst.addOperand(MCOperand::CreateImm(31)); + Inst = TmpInst; + break; + } + case PPC::SLWI: + case PPC::SLWIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(Opcode == PPC::SLWI? PPC::RLWINM : PPC::RLWINMo); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::CreateImm(N)); @@ -416,10 +703,11 @@ ProcessInstruction(MCInst &Inst, Inst = TmpInst; break; } - case PPC::SRWI: { + case PPC::SRWI: + case PPC::SRWIo: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(PPC::RLWINM); + TmpInst.setOpcode(Opcode == PPC::SRWI? PPC::RLWINM : PPC::RLWINMo); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::CreateImm(32 - N)); @@ -428,10 +716,90 @@ ProcessInstruction(MCInst &Inst, Inst = TmpInst; break; } - case PPC::SLDI: { + case PPC::CLRRWI: + case PPC::CLRRWIo: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(PPC::RLDICR); + TmpInst.setOpcode(Opcode == PPC::CLRRWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(0)); + TmpInst.addOperand(MCOperand::CreateImm(0)); + TmpInst.addOperand(MCOperand::CreateImm(31 - N)); + Inst = TmpInst; + break; + } + case PPC::CLRLSLWI: + case PPC::CLRLSLWIo: { + MCInst TmpInst; + int64_t B = Inst.getOperand(2).getImm(); + int64_t N = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::CLRLSLWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(N)); + TmpInst.addOperand(MCOperand::CreateImm(B - N)); + TmpInst.addOperand(MCOperand::CreateImm(31 - N)); + Inst = TmpInst; + break; + } + case PPC::EXTLDI: + case PPC::EXTLDIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + int64_t B = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::EXTLDI? PPC::RLDICR : PPC::RLDICRo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(B)); + TmpInst.addOperand(MCOperand::CreateImm(N - 1)); + Inst = TmpInst; + break; + } + case PPC::EXTRDI: + case PPC::EXTRDIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + int64_t B = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::EXTRDI? 
PPC::RLDICL : PPC::RLDICLo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(B + N)); + TmpInst.addOperand(MCOperand::CreateImm(64 - N)); + Inst = TmpInst; + break; + } + case PPC::INSRDI: + case PPC::INSRDIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + int64_t B = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::INSRDI? PPC::RLDIMI : PPC::RLDIMIo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(64 - (B + N))); + TmpInst.addOperand(MCOperand::CreateImm(B)); + Inst = TmpInst; + break; + } + case PPC::ROTRDI: + case PPC::ROTRDIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(Opcode == PPC::ROTRDI? PPC::RLDICL : PPC::RLDICLo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(64 - N)); + TmpInst.addOperand(MCOperand::CreateImm(0)); + Inst = TmpInst; + break; + } + case PPC::SLDI: + case PPC::SLDIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(Opcode == PPC::SLDI? PPC::RLDICR : PPC::RLDICRo); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::CreateImm(N)); @@ -439,10 +807,11 @@ ProcessInstruction(MCInst &Inst, Inst = TmpInst; break; } - case PPC::SRDI: { + case PPC::SRDI: + case PPC::SRDIo: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(PPC::RLDICL); + TmpInst.setOpcode(Opcode == PPC::SRDI? PPC::RLDICL : PPC::RLDICLo); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::CreateImm(64 - N)); @@ -450,6 +819,31 @@ ProcessInstruction(MCInst &Inst, Inst = TmpInst; break; } + case PPC::CLRRDI: + case PPC::CLRRDIo: { + MCInst TmpInst; + int64_t N = Inst.getOperand(2).getImm(); + TmpInst.setOpcode(Opcode == PPC::CLRRDI? PPC::RLDICR : PPC::RLDICRo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(0)); + TmpInst.addOperand(MCOperand::CreateImm(63 - N)); + Inst = TmpInst; + break; + } + case PPC::CLRLSLDI: + case PPC::CLRLSLDIo: { + MCInst TmpInst; + int64_t B = Inst.getOperand(2).getImm(); + int64_t N = Inst.getOperand(3).getImm(); + TmpInst.setOpcode(Opcode == PPC::CLRLSLDI? PPC::RLDIC : PPC::RLDICo); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::CreateImm(N)); + TmpInst.addOperand(MCOperand::CreateImm(B - N)); + Inst = TmpInst; + break; + } } } @@ -502,6 +896,10 @@ MatchRegisterName(const AsmToken &Tok, unsigned &RegNo, int64_t &IntVal) { RegNo = isPPC64()? PPC::CTR8 : PPC::CTR; IntVal = 9; return false; + } else if (Name.equals_lower("vrsave")) { + RegNo = PPC::VRSAVE; + IntVal = 256; + return false; } else if (Name.substr(0, 1).equals_lower("r") && !Name.substr(1).getAsInteger(10, IntVal) && IntVal < 32) { RegNo = isPPC64()? XRegs[IntVal] : RRegs[IntVal]; @@ -540,6 +938,159 @@ ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) { return Error(StartLoc, "invalid register name"); } +/// Extract \code @l/@ha \endcode modifier from expression. Recursively scan +/// the expression and check for VK_PPC_LO/HI/HA +/// symbol variants. 
If all symbols with modifier use the same +/// variant, return the corresponding PPCMCExpr::VariantKind, +/// and a modified expression using the default symbol variant. +/// Otherwise, return NULL. +const MCExpr *PPCAsmParser:: +ExtractModifierFromExpr(const MCExpr *E, + PPCMCExpr::VariantKind &Variant) { + MCContext &Context = getParser().getContext(); + Variant = PPCMCExpr::VK_PPC_None; + + switch (E->getKind()) { + case MCExpr::Target: + case MCExpr::Constant: + return 0; + + case MCExpr::SymbolRef: { + const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E); + + switch (SRE->getKind()) { + case MCSymbolRefExpr::VK_PPC_LO: + Variant = PPCMCExpr::VK_PPC_LO; + break; + case MCSymbolRefExpr::VK_PPC_HI: + Variant = PPCMCExpr::VK_PPC_HI; + break; + case MCSymbolRefExpr::VK_PPC_HA: + Variant = PPCMCExpr::VK_PPC_HA; + break; + case MCSymbolRefExpr::VK_PPC_HIGHER: + Variant = PPCMCExpr::VK_PPC_HIGHER; + break; + case MCSymbolRefExpr::VK_PPC_HIGHERA: + Variant = PPCMCExpr::VK_PPC_HIGHERA; + break; + case MCSymbolRefExpr::VK_PPC_HIGHEST: + Variant = PPCMCExpr::VK_PPC_HIGHEST; + break; + case MCSymbolRefExpr::VK_PPC_HIGHESTA: + Variant = PPCMCExpr::VK_PPC_HIGHESTA; + break; + default: + return 0; + } + + return MCSymbolRefExpr::Create(&SRE->getSymbol(), Context); + } + + case MCExpr::Unary: { + const MCUnaryExpr *UE = cast<MCUnaryExpr>(E); + const MCExpr *Sub = ExtractModifierFromExpr(UE->getSubExpr(), Variant); + if (!Sub) + return 0; + return MCUnaryExpr::Create(UE->getOpcode(), Sub, Context); + } + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(E); + PPCMCExpr::VariantKind LHSVariant, RHSVariant; + const MCExpr *LHS = ExtractModifierFromExpr(BE->getLHS(), LHSVariant); + const MCExpr *RHS = ExtractModifierFromExpr(BE->getRHS(), RHSVariant); + + if (!LHS && !RHS) + return 0; + + if (!LHS) LHS = BE->getLHS(); + if (!RHS) RHS = BE->getRHS(); + + if (LHSVariant == PPCMCExpr::VK_PPC_None) + Variant = RHSVariant; + else if (RHSVariant == PPCMCExpr::VK_PPC_None) + Variant = LHSVariant; + else if (LHSVariant == RHSVariant) + Variant = LHSVariant; + else + return 0; + + return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, Context); + } + } + + llvm_unreachable("Invalid expression kind!"); +} + +/// Find all VK_TLSGD/VK_TLSLD symbol references in expression and replace +/// them by VK_PPC_TLSGD/VK_PPC_TLSLD. This is necessary to avoid having +/// _GLOBAL_OFFSET_TABLE_ created via ELFObjectWriter::RelocNeedsGOT. +/// FIXME: This is a hack. 
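ExtractModifierFromExpr above is what allows a modifier to apply to a whole folded expression (sym@ha+4 becomes an @ha wrapper around sym+4) rather than only to a lone symbol reference. The value semantics of @l and @ha themselves, as implemented by PPCMCExpr::EvaluateAsRelocatableImpl later in this patch, can be sketched as:

    #include <cassert>
    #include <cstdint>

    // Constant folding for @l and @ha, matching PPCMCExpr.cpp: the
    // +0x8000 bias in @ha compensates for the consumer sign-extending @l.
    static uint64_t lo(uint64_t V) { return V & 0xffff; }
    static uint64_t ha(uint64_t V) { return ((V + 0x8000) >> 16) & 0xffff; }

    int main() {
      // "lis r3,x@ha; addi r3,r3,x@l" materializes a 32-bit value x.
      uint64_t X = 0x12348765;
      int64_t Low = (int16_t)lo(X);                 // addi sign-extends @l
      uint64_t Got = ((ha(X) << 16) + (uint64_t)Low) & 0xffffffff;
      assert(Got == X);
      return 0;
    }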
+const MCExpr *PPCAsmParser:: +FixupVariantKind(const MCExpr *E) { + MCContext &Context = getParser().getContext(); + + switch (E->getKind()) { + case MCExpr::Target: + case MCExpr::Constant: + return E; + + case MCExpr::SymbolRef: { + const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E); + MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; + + switch (SRE->getKind()) { + case MCSymbolRefExpr::VK_TLSGD: + Variant = MCSymbolRefExpr::VK_PPC_TLSGD; + break; + case MCSymbolRefExpr::VK_TLSLD: + Variant = MCSymbolRefExpr::VK_PPC_TLSLD; + break; + default: + return E; + } + return MCSymbolRefExpr::Create(&SRE->getSymbol(), Variant, Context); + } + + case MCExpr::Unary: { + const MCUnaryExpr *UE = cast<MCUnaryExpr>(E); + const MCExpr *Sub = FixupVariantKind(UE->getSubExpr()); + if (Sub == UE->getSubExpr()) + return E; + return MCUnaryExpr::Create(UE->getOpcode(), Sub, Context); + } + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(E); + const MCExpr *LHS = FixupVariantKind(BE->getLHS()); + const MCExpr *RHS = FixupVariantKind(BE->getRHS()); + if (LHS == BE->getLHS() && RHS == BE->getRHS()) + return E; + return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, Context); + } + } + + llvm_unreachable("Invalid expression kind!"); +} + +/// Parse an expression. This differs from the default "parseExpression" +/// in that it handles complex \code @l/@ha \endcode modifiers. +bool PPCAsmParser:: +ParseExpression(const MCExpr *&EVal) { + if (getParser().parseExpression(EVal)) + return true; + + EVal = FixupVariantKind(EVal); + + PPCMCExpr::VariantKind Variant; + const MCExpr *E = ExtractModifierFromExpr(EVal, Variant); + if (E) + EVal = PPCMCExpr::Create(Variant, E, false, getParser().getContext()); + + return false; +} + bool PPCAsmParser:: ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { SMLoc S = Parser.getTok().getLoc(); @@ -571,23 +1122,40 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { case AsmToken::Identifier: case AsmToken::Dot: case AsmToken::Dollar: - if (!getParser().parseExpression(EVal)) + if (!ParseExpression(EVal)) break; /* fall through */ default: return Error(S, "unknown operand"); } - if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(EVal)) - Op = PPCOperand::CreateImm(CE->getValue(), S, E, isPPC64()); - else - Op = PPCOperand::CreateExpr(EVal, S, E, isPPC64()); - // Push the parsed operand into the list of operands + Op = PPCOperand::CreateFromMCExpr(EVal, S, E, isPPC64()); Operands.push_back(Op); - // Check for D-form memory operands - if (getLexer().is(AsmToken::LParen)) { + // Check whether this is a TLS call expression + bool TLSCall = false; + if (const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(EVal)) + TLSCall = Ref->getSymbol().getName() == "__tls_get_addr"; + + if (TLSCall && getLexer().is(AsmToken::LParen)) { + const MCExpr *TLSSym; + + Parser.Lex(); // Eat the '('. + S = Parser.getTok().getLoc(); + if (ParseExpression(TLSSym)) + return Error(S, "invalid TLS call expression"); + if (getLexer().isNot(AsmToken::RParen)) + return Error(Parser.getTok().getLoc(), "missing ')'"); + E = Parser.getTok().getLoc(); + Parser.Lex(); // Eat the ')'. + + Op = PPCOperand::CreateFromMCExpr(TLSSym, S, E, isPPC64()); + Operands.push_back(Op); + } + + // Otherwise, check for D-form memory operands + if (!TLSCall && getLexer().is(AsmToken::LParen)) { Parser.Lex(); // Eat the '('. 
S = Parser.getTok().getLoc(); @@ -628,15 +1196,38 @@ bool PPCAsmParser:: ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, SmallVectorImpl<MCParsedAsmOperand*> &Operands) { // The first operand is the token for the instruction name. + // If the next character is a '+' or '-', we need to add it to the + // instruction name, to match what TableGen is doing. + std::string NewOpcode; + if (getLexer().is(AsmToken::Plus)) { + getLexer().Lex(); + NewOpcode = Name; + NewOpcode += '+'; + Name = NewOpcode; + } + if (getLexer().is(AsmToken::Minus)) { + getLexer().Lex(); + NewOpcode = Name; + NewOpcode += '-'; + Name = NewOpcode; + } // If the instruction ends in a '.', we need to create a separate // token for it, to match what TableGen is doing. size_t Dot = Name.find('.'); StringRef Mnemonic = Name.slice(0, Dot); - Operands.push_back(PPCOperand::CreateToken(Mnemonic, NameLoc, isPPC64())); + if (!NewOpcode.empty()) // Underlying memory for Name is volatile. + Operands.push_back( + PPCOperand::CreateTokenWithStringCopy(Mnemonic, NameLoc, isPPC64())); + else + Operands.push_back(PPCOperand::CreateToken(Mnemonic, NameLoc, isPPC64())); if (Dot != StringRef::npos) { SMLoc DotLoc = SMLoc::getFromPointer(NameLoc.getPointer() + Dot); StringRef DotStr = Name.slice(Dot, StringRef::npos); - Operands.push_back(PPCOperand::CreateToken(DotStr, DotLoc, isPPC64())); + if (!NewOpcode.empty()) // Underlying memory for Name is volatile. + Operands.push_back( + PPCOperand::CreateTokenWithStringCopy(DotStr, DotLoc, isPPC64())); + else + Operands.push_back(PPCOperand::CreateToken(DotStr, DotLoc, isPPC64())); } // If there are no more operands then finish @@ -664,9 +1255,13 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, bool PPCAsmParser::ParseDirective(AsmToken DirectiveID) { StringRef IDVal = DirectiveID.getIdentifier(); if (IDVal == ".word") - return ParseDirectiveWord(4, DirectiveID.getLoc()); + return ParseDirectiveWord(2, DirectiveID.getLoc()); + if (IDVal == ".llong") + return ParseDirectiveWord(8, DirectiveID.getLoc()); if (IDVal == ".tc") return ParseDirectiveTC(isPPC64()? 8 : 4, DirectiveID.getLoc()); + if (IDVal == ".machine") + return ParseDirectiveMachine(DirectiveID.getLoc()); return true; } @@ -712,12 +1307,59 @@ bool PPCAsmParser::ParseDirectiveTC(unsigned Size, SMLoc L) { return ParseDirectiveWord(Size, L); } +/// ParseDirectiveMachine +/// ::= .machine [ cpu | "push" | "pop" ] +bool PPCAsmParser::ParseDirectiveMachine(SMLoc L) { + if (getLexer().isNot(AsmToken::Identifier) && + getLexer().isNot(AsmToken::String)) + return Error(L, "unexpected token in directive"); + + StringRef CPU = Parser.getTok().getIdentifier(); + Parser.Lex(); + + // FIXME: Right now, the parser always allows any available + // instruction, so the .machine directive is not useful. + // Implement ".machine any" (by doing nothing) for the benefit + // of existing assembler code. Likewise, we can then implement + // ".machine push" and ".machine pop" as no-op. + if (CPU != "any" && CPU != "push" && CPU != "pop") + return Error(L, "unrecognized machine type"); + + if (getLexer().isNot(AsmToken::EndOfStatement)) + return Error(L, "unexpected token in directive"); + + return false; +} + /// Force static initialization. 
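A subtlety in ParseDirective above: following the GNU assembler's PowerPC conventions, .word emits a 2-byte halfword rather than the 4 bytes most targets use, and .llong is provided for 8-byte values (.byte, .long and .quad are handled by the generic AsmParser). A hypothetical helper summarizing the sizes, using the StringSwitch utility this file already includes:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    // Sketch only; the real parser dispatches to ParseDirectiveWord with
    // these byte counts rather than tabulating them.
    static unsigned ppcDirectiveSize(llvm::StringRef D, bool IsPPC64) {
      return llvm::StringSwitch<unsigned>(D)
          .Case(".word", 2)               // halfword on PowerPC
          .Case(".llong", 8)              // doubleword
          .Case(".tc", IsPPC64 ? 8 : 4)   // TOC entry, pointer-sized
          .Default(0);                    // handled generically
    }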
extern "C" void LLVMInitializePowerPCAsmParser() { RegisterMCAsmParser<PPCAsmParser> A(ThePPC32Target); RegisterMCAsmParser<PPCAsmParser> B(ThePPC64Target); + RegisterMCAsmParser<PPCAsmParser> C(ThePPC64LETarget); } #define GET_REGISTER_MATCHER #define GET_MATCHER_IMPLEMENTATION #include "PPCGenAsmMatcher.inc" + +// Define this matcher function after the auto-generated include so we +// have the match class enum definitions. +unsigned PPCAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp, + unsigned Kind) { + // If the kind is a token for a literal immediate, check if our asm + // operand matches. This is for InstAliases which have a fixed-value + // immediate in the syntax. + int64_t ImmVal; + switch (Kind) { + case MCK_0: ImmVal = 0; break; + case MCK_1: ImmVal = 1; break; + default: return Match_InvalidOperand; + } + + PPCOperand *Op = static_cast<PPCOperand*>(AsmOp); + if (Op->isImm() && Op->getImm() == ImmVal) + return Match_Success; + + return Match_InvalidOperand; +} + diff --git a/lib/Target/PowerPC/CMakeLists.txt b/lib/Target/PowerPC/CMakeLists.txt index e5c5204..9a763f5 100644 --- a/lib/Target/PowerPC/CMakeLists.txt +++ b/lib/Target/PowerPC/CMakeLists.txt @@ -7,6 +7,7 @@ tablegen(LLVM PPCGenMCCodeEmitter.inc -gen-emitter -mc-emitter) tablegen(LLVM PPCGenRegisterInfo.inc -gen-register-info) tablegen(LLVM PPCGenInstrInfo.inc -gen-instr-info) tablegen(LLVM PPCGenDAGISel.inc -gen-dag-isel) +tablegen(LLVM PPCGenFastISel.inc -gen-fast-isel) tablegen(LLVM PPCGenCallingConv.inc -gen-callingconv) tablegen(LLVM PPCGenSubtargetInfo.inc -gen-subtarget) add_public_tablegen_target(PowerPCCommonTableGen) @@ -20,6 +21,7 @@ add_llvm_target(PowerPCCodeGen PPCInstrInfo.cpp PPCISelDAGToDAG.cpp PPCISelLowering.cpp + PPCFastISel.cpp PPCFrameLowering.cpp PPCJITInfo.cpp PPCMCInstLower.cpp @@ -32,7 +34,7 @@ add_llvm_target(PowerPCCodeGen PPCSelectionDAGInfo.cpp ) -add_dependencies(LLVMPowerPCCodeGen intrinsics_gen) +add_dependencies(LLVMPowerPCCodeGen PowerPCCommonTableGen intrinsics_gen) add_subdirectory(AsmParser) add_subdirectory(InstPrinter) diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp index 432167e..08d7665 100644 --- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp +++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp @@ -90,19 +90,87 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo, if (StringRef(Modifier) == "cc") { switch ((PPC::Predicate)Code) { - case PPC::PRED_LT: O << "lt"; return; - case PPC::PRED_LE: O << "le"; return; - case PPC::PRED_EQ: O << "eq"; return; - case PPC::PRED_GE: O << "ge"; return; - case PPC::PRED_GT: O << "gt"; return; - case PPC::PRED_NE: O << "ne"; return; - case PPC::PRED_UN: O << "un"; return; - case PPC::PRED_NU: O << "nu"; return; + case PPC::PRED_LT_MINUS: + case PPC::PRED_LT_PLUS: + case PPC::PRED_LT: + O << "lt"; + return; + case PPC::PRED_LE_MINUS: + case PPC::PRED_LE_PLUS: + case PPC::PRED_LE: + O << "le"; + return; + case PPC::PRED_EQ_MINUS: + case PPC::PRED_EQ_PLUS: + case PPC::PRED_EQ: + O << "eq"; + return; + case PPC::PRED_GE_MINUS: + case PPC::PRED_GE_PLUS: + case PPC::PRED_GE: + O << "ge"; + return; + case PPC::PRED_GT_MINUS: + case PPC::PRED_GT_PLUS: + case PPC::PRED_GT: + O << "gt"; + return; + case PPC::PRED_NE_MINUS: + case PPC::PRED_NE_PLUS: + case PPC::PRED_NE: + O << "ne"; + return; + case PPC::PRED_UN_MINUS: + case PPC::PRED_UN_PLUS: + case PPC::PRED_UN: + O << "un"; + return; + case PPC::PRED_NU_MINUS: + case PPC::PRED_NU_PLUS: + 
case PPC::PRED_NU: + O << "nu"; + return; + } + llvm_unreachable("Invalid predicate code"); + } + + if (StringRef(Modifier) == "pm") { + switch ((PPC::Predicate)Code) { + case PPC::PRED_LT: + case PPC::PRED_LE: + case PPC::PRED_EQ: + case PPC::PRED_GE: + case PPC::PRED_GT: + case PPC::PRED_NE: + case PPC::PRED_UN: + case PPC::PRED_NU: + return; + case PPC::PRED_LT_MINUS: + case PPC::PRED_LE_MINUS: + case PPC::PRED_EQ_MINUS: + case PPC::PRED_GE_MINUS: + case PPC::PRED_GT_MINUS: + case PPC::PRED_NE_MINUS: + case PPC::PRED_UN_MINUS: + case PPC::PRED_NU_MINUS: + O << "-"; + return; + case PPC::PRED_LT_PLUS: + case PPC::PRED_LE_PLUS: + case PPC::PRED_EQ_PLUS: + case PPC::PRED_GE_PLUS: + case PPC::PRED_GT_PLUS: + case PPC::PRED_NE_PLUS: + case PPC::PRED_UN_PLUS: + case PPC::PRED_NU_PLUS: + O << "+"; + return; } + llvm_unreachable("Invalid predicate code"); } assert(StringRef(Modifier) == "reg" && - "Need to specify 'cc' or 'reg' as predicate op modifier!"); + "Need to specify 'cc', 'pm' or 'reg' as predicate op modifier!"); printOperand(MI, OpNo+1, O); } @@ -137,7 +205,10 @@ void PPCInstPrinter::printS16ImmOperand(const MCInst *MI, unsigned OpNo, void PPCInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << (unsigned short)MI->getOperand(OpNo).getImm(); + if (MI->getOperand(OpNo).isImm()) + O << (unsigned short)MI->getOperand(OpNo).getImm(); + else + printOperand(MI, OpNo, O); } void PPCInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo, @@ -148,11 +219,14 @@ void PPCInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo, // Branches can take an immediate operand. This is used by the branch // selection pass to print .+8, an eight byte displacement from the PC. O << ".+"; - printAbsAddrOperand(MI, OpNo, O); + printAbsBranchOperand(MI, OpNo, O); } -void PPCInstPrinter::printAbsAddrOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { +void PPCInstPrinter::printAbsBranchOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + if (!MI->getOperand(OpNo).isImm()) + return printOperand(MI, OpNo, O); + O << (int)MI->getOperand(OpNo).getImm()*4; } @@ -199,6 +273,13 @@ void PPCInstPrinter::printMemRegReg(const MCInst *MI, unsigned OpNo, printOperand(MI, OpNo+1, O); } +void PPCInstPrinter::printTLSCall(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + printBranchOperand(MI, OpNo, O); + O << '('; + printOperand(MI, OpNo+1, O); + O << ')'; +} /// stripRegisterPrefix - This method strips the character prefix from a diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h index f64a329..8a4c03d 100644 --- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h +++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h @@ -21,15 +21,14 @@ namespace llvm { class MCOperand; class PPCInstPrinter : public MCInstPrinter { - // 0 -> AIX, 1 -> Darwin. 
- unsigned SyntaxVariant; + bool IsDarwin; public: PPCInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, - const MCRegisterInfo &MRI, unsigned syntaxVariant) - : MCInstPrinter(MAI, MII, MRI), SyntaxVariant(syntaxVariant) {} + const MCRegisterInfo &MRI, bool isDarwin) + : MCInstPrinter(MAI, MII, MRI), IsDarwin(isDarwin) {} bool isDarwinSyntax() const { - return SyntaxVariant == 1; + return IsDarwin; } virtual void printRegName(raw_ostream &OS, unsigned RegNo) const; @@ -51,7 +50,8 @@ public: void printS16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printAbsAddrOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printAbsBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printTLSCall(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printcrbitm(const MCInst *MI, unsigned OpNo, raw_ostream &O); diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp index 3fa2e09..b2a8701 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp @@ -30,12 +30,13 @@ static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) { case FK_Data_2: case FK_Data_4: case FK_Data_8: - case PPC::fixup_ppc_tlsreg: case PPC::fixup_ppc_nofixup: return Value; case PPC::fixup_ppc_brcond14: + case PPC::fixup_ppc_brcond14abs: return Value & 0xfffc; case PPC::fixup_ppc_br24: + case PPC::fixup_ppc_br24abs: return Value & 0x3fffffc; case PPC::fixup_ppc_half16: return Value & 0xffff; @@ -56,11 +57,12 @@ static unsigned getFixupKindNumBytes(unsigned Kind) { return 2; case FK_Data_4: case PPC::fixup_ppc_brcond14: + case PPC::fixup_ppc_brcond14abs: case PPC::fixup_ppc_br24: + case PPC::fixup_ppc_br24abs: return 4; case FK_Data_8: return 8; - case PPC::fixup_ppc_tlsreg: case PPC::fixup_ppc_nofixup: return 0; } @@ -93,9 +95,10 @@ public: // name offset bits flags { "fixup_ppc_br24", 6, 24, MCFixupKindInfo::FKF_IsPCRel }, { "fixup_ppc_brcond14", 16, 14, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_ppc_br24abs", 6, 24, 0 }, + { "fixup_ppc_brcond14abs", 16, 14, 0 }, { "fixup_ppc_half16", 0, 16, 0 }, { "fixup_ppc_half16ds", 0, 14, 0 }, - { "fixup_ppc_tlsreg", 0, 0, 0 }, { "fixup_ppc_nofixup", 0, 0, 0 } }; @@ -142,16 +145,20 @@ public: } bool writeNopData(uint64_t Count, MCObjectWriter *OW) const { - // FIXME: Zero fill for now. That's not right, but at least will get the - // section size right. 
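// (Context for the rewrite below: the canonical PowerPC nop is
// "ori 0,0,0", which encodes as the 32-bit word 0x60000000. An
// all-zero word is not a valid instruction, so padding has to be
// written as whole nops, and a count that is not a multiple of 4
// is now rejected instead of being zero-filled.)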
- for (uint64_t i = 0; i != Count; ++i) - OW->Write8(0); + // Can't emit NOP with size not multiple of 32-bits + if (Count % 4 != 0) + return false; + + uint64_t NumNops = Count / 4; + for (uint64_t i = 0; i != NumNops; ++i) + OW->Write32(0x60000000); + return true; } unsigned getPointerSize() const { StringRef Name = TheTarget.getName(); - if (Name == "ppc64") return 8; + if (Name == "ppc64" || Name == "ppc64le") return 8; assert(Name == "ppc32" && "Unknown target name!"); return 4; } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp index 7188f93..54de70e 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp @@ -30,6 +30,11 @@ namespace { virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, bool IsPCRel, bool IsRelocWithSymbol, int64_t Addend) const; + virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm, + const MCValue &Target, + const MCFragment &F, + const MCFixup &Fixup, + bool IsPCRel) const; virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target, const MCFixup &Fixup, bool IsPCRel) const; @@ -58,11 +63,30 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, default: llvm_unreachable("Unimplemented"); case PPC::fixup_ppc_br24: + case PPC::fixup_ppc_br24abs: Type = ELF::R_PPC_REL24; break; case PPC::fixup_ppc_brcond14: + case PPC::fixup_ppc_brcond14abs: Type = ELF::R_PPC_REL14; break; + case PPC::fixup_ppc_half16: + switch (Modifier) { + default: llvm_unreachable("Unsupported Modifier"); + case MCSymbolRefExpr::VK_None: + Type = ELF::R_PPC_REL16; + break; + case MCSymbolRefExpr::VK_PPC_LO: + Type = ELF::R_PPC_REL16_LO; + break; + case MCSymbolRefExpr::VK_PPC_HI: + Type = ELF::R_PPC_REL16_HI; + break; + case MCSymbolRefExpr::VK_PPC_HA: + Type = ELF::R_PPC_REL16_HA; + break; + } + break; case FK_Data_4: case FK_PCRel_4: Type = ELF::R_PPC_REL32; @@ -75,60 +99,167 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, } else { switch ((unsigned)Fixup.getKind()) { default: llvm_unreachable("invalid fixup kind!"); - case PPC::fixup_ppc_br24: + case PPC::fixup_ppc_br24abs: Type = ELF::R_PPC_ADDR24; break; - case PPC::fixup_ppc_brcond14: + case PPC::fixup_ppc_brcond14abs: Type = ELF::R_PPC_ADDR14; // XXX: or BRNTAKEN?_ break; case PPC::fixup_ppc_half16: switch (Modifier) { default: llvm_unreachable("Unsupported Modifier"); - case MCSymbolRefExpr::VK_PPC_TPREL16_HA: - Type = ELF::R_PPC_TPREL16_HA; + case MCSymbolRefExpr::VK_None: + Type = ELF::R_PPC_ADDR16; break; - case MCSymbolRefExpr::VK_PPC_DTPREL16_HA: - Type = ELF::R_PPC64_DTPREL16_HA; + case MCSymbolRefExpr::VK_PPC_LO: + Type = ELF::R_PPC_ADDR16_LO; + break; + case MCSymbolRefExpr::VK_PPC_HI: + Type = ELF::R_PPC_ADDR16_HI; break; - case MCSymbolRefExpr::VK_PPC_ADDR16_HA: + case MCSymbolRefExpr::VK_PPC_HA: Type = ELF::R_PPC_ADDR16_HA; - break; - case MCSymbolRefExpr::VK_PPC_TOC16_HA: - Type = ELF::R_PPC64_TOC16_HA; break; - case MCSymbolRefExpr::VK_PPC_GOT_TPREL16_HA: - Type = ELF::R_PPC64_GOT_TPREL16_HA; + case MCSymbolRefExpr::VK_PPC_HIGHER: + Type = ELF::R_PPC64_ADDR16_HIGHER; break; - case MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_HA: - Type = ELF::R_PPC64_GOT_TLSGD16_HA; + case MCSymbolRefExpr::VK_PPC_HIGHERA: + Type = ELF::R_PPC64_ADDR16_HIGHERA; break; - case MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_HA: - Type = ELF::R_PPC64_GOT_TLSLD16_HA; + case MCSymbolRefExpr::VK_PPC_HIGHEST: + Type = 
ELF::R_PPC64_ADDR16_HIGHEST; break; - case MCSymbolRefExpr::VK_PPC_TPREL16_LO: - Type = ELF::R_PPC_TPREL16_LO; + case MCSymbolRefExpr::VK_PPC_HIGHESTA: + Type = ELF::R_PPC64_ADDR16_HIGHESTA; break; - case MCSymbolRefExpr::VK_PPC_DTPREL16_LO: - Type = ELF::R_PPC64_DTPREL16_LO; + case MCSymbolRefExpr::VK_GOT: + Type = ELF::R_PPC_GOT16; break; - case MCSymbolRefExpr::VK_None: - Type = ELF::R_PPC_ADDR16; + case MCSymbolRefExpr::VK_PPC_GOT_LO: + Type = ELF::R_PPC_GOT16_LO; break; - case MCSymbolRefExpr::VK_PPC_ADDR16_LO: - Type = ELF::R_PPC_ADDR16_LO; - break; - case MCSymbolRefExpr::VK_PPC_TOC_ENTRY: + case MCSymbolRefExpr::VK_PPC_GOT_HI: + Type = ELF::R_PPC_GOT16_HI; + break; + case MCSymbolRefExpr::VK_PPC_GOT_HA: + Type = ELF::R_PPC_GOT16_HA; + break; + case MCSymbolRefExpr::VK_PPC_TOC: Type = ELF::R_PPC64_TOC16; break; - case MCSymbolRefExpr::VK_PPC_TOC16_LO: + case MCSymbolRefExpr::VK_PPC_TOC_LO: Type = ELF::R_PPC64_TOC16_LO; break; - case MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_LO: + case MCSymbolRefExpr::VK_PPC_TOC_HI: + Type = ELF::R_PPC64_TOC16_HI; + break; + case MCSymbolRefExpr::VK_PPC_TOC_HA: + Type = ELF::R_PPC64_TOC16_HA; + break; + case MCSymbolRefExpr::VK_PPC_TPREL: + Type = ELF::R_PPC_TPREL16; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_LO: + Type = ELF::R_PPC_TPREL16_LO; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_HI: + Type = ELF::R_PPC_TPREL16_HI; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_HA: + Type = ELF::R_PPC_TPREL16_HA; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_HIGHER: + Type = ELF::R_PPC64_TPREL16_HIGHER; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_HIGHERA: + Type = ELF::R_PPC64_TPREL16_HIGHERA; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_HIGHEST: + Type = ELF::R_PPC64_TPREL16_HIGHEST; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_HIGHESTA: + Type = ELF::R_PPC64_TPREL16_HIGHESTA; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL: + Type = ELF::R_PPC64_DTPREL16; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_LO: + Type = ELF::R_PPC64_DTPREL16_LO; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_HI: + Type = ELF::R_PPC64_DTPREL16_HI; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_HA: + Type = ELF::R_PPC64_DTPREL16_HA; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHER: + Type = ELF::R_PPC64_DTPREL16_HIGHER; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHERA: + Type = ELF::R_PPC64_DTPREL16_HIGHERA; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHEST: + Type = ELF::R_PPC64_DTPREL16_HIGHEST; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_HIGHESTA: + Type = ELF::R_PPC64_DTPREL16_HIGHESTA; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TLSGD: + Type = ELF::R_PPC64_GOT_TLSGD16; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO: Type = ELF::R_PPC64_GOT_TLSGD16_LO; break; - case MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO: + case MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HI: + Type = ELF::R_PPC64_GOT_TLSGD16_HI; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HA: + Type = ELF::R_PPC64_GOT_TLSGD16_HA; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TLSLD: + Type = ELF::R_PPC64_GOT_TLSLD16; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO: Type = ELF::R_PPC64_GOT_TLSLD16_LO; break; + case MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HI: + Type = ELF::R_PPC64_GOT_TLSLD16_HI; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HA: + Type = ELF::R_PPC64_GOT_TLSLD16_HA; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TPREL: + /* We don't have R_PPC64_GOT_TPREL16, but since GOT offsets + are always 4-aligned, we can use R_PPC64_GOT_TPREL16_DS. 
*/ + Type = ELF::R_PPC64_GOT_TPREL16_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO: + /* We don't have R_PPC64_GOT_TPREL16_LO, but since GOT offsets + are always 4-aligned, we can use R_PPC64_GOT_TPREL16_LO_DS. */ + Type = ELF::R_PPC64_GOT_TPREL16_LO_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TPREL_HI: + Type = ELF::R_PPC64_GOT_TPREL16_HI; + break; + case MCSymbolRefExpr::VK_PPC_GOT_DTPREL: + /* We don't have R_PPC64_GOT_DTPREL16, but since GOT offsets + are always 4-aligned, we can use R_PPC64_GOT_DTPREL16_DS. */ + Type = ELF::R_PPC64_GOT_DTPREL16_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_LO: + /* We don't have R_PPC64_GOT_DTPREL16_LO, but since GOT offsets + are always 4-aligned, we can use R_PPC64_GOT_DTPREL16_LO_DS. */ + Type = ELF::R_PPC64_GOT_DTPREL16_LO_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TPREL_HA: + Type = ELF::R_PPC64_GOT_TPREL16_HA; + break; + case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_HI: + Type = ELF::R_PPC64_GOT_DTPREL16_HI; + break; + case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_HA: + Type = ELF::R_PPC64_GOT_DTPREL16_HA; + break; } break; case PPC::fixup_ppc_half16ds: @@ -137,23 +268,47 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, case MCSymbolRefExpr::VK_None: Type = ELF::R_PPC64_ADDR16_DS; break; - case MCSymbolRefExpr::VK_PPC_ADDR16_LO: + case MCSymbolRefExpr::VK_PPC_LO: Type = ELF::R_PPC64_ADDR16_LO_DS; break; - case MCSymbolRefExpr::VK_PPC_TOC_ENTRY: + case MCSymbolRefExpr::VK_GOT: + Type = ELF::R_PPC64_GOT16_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_LO: + Type = ELF::R_PPC64_GOT16_LO_DS; + break; + case MCSymbolRefExpr::VK_PPC_TOC: Type = ELF::R_PPC64_TOC16_DS; break; - case MCSymbolRefExpr::VK_PPC_TOC16_LO: + case MCSymbolRefExpr::VK_PPC_TOC_LO: Type = ELF::R_PPC64_TOC16_LO_DS; break; - case MCSymbolRefExpr::VK_PPC_GOT_TPREL16_LO: + case MCSymbolRefExpr::VK_PPC_TPREL: + Type = ELF::R_PPC64_TPREL16_DS; + break; + case MCSymbolRefExpr::VK_PPC_TPREL_LO: + Type = ELF::R_PPC64_TPREL16_LO_DS; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL: + Type = ELF::R_PPC64_DTPREL16_DS; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL_LO: + Type = ELF::R_PPC64_DTPREL16_LO_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TPREL: + Type = ELF::R_PPC64_GOT_TPREL16_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO: Type = ELF::R_PPC64_GOT_TPREL16_LO_DS; break; + case MCSymbolRefExpr::VK_PPC_GOT_DTPREL: + Type = ELF::R_PPC64_GOT_DTPREL16_DS; + break; + case MCSymbolRefExpr::VK_PPC_GOT_DTPREL_LO: + Type = ELF::R_PPC64_GOT_DTPREL16_LO_DS; + break; } break; - case PPC::fixup_ppc_tlsreg: - Type = ELF::R_PPC64_TLS; - break; case PPC::fixup_ppc_nofixup: switch (Modifier) { default: llvm_unreachable("Unsupported Modifier"); @@ -163,17 +318,29 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, case MCSymbolRefExpr::VK_PPC_TLSLD: Type = ELF::R_PPC64_TLSLD; break; + case MCSymbolRefExpr::VK_PPC_TLS: + Type = ELF::R_PPC64_TLS; + break; } break; case FK_Data_8: switch (Modifier) { default: llvm_unreachable("Unsupported Modifier"); - case MCSymbolRefExpr::VK_PPC_TOC: + case MCSymbolRefExpr::VK_PPC_TOCBASE: Type = ELF::R_PPC64_TOC; break; case MCSymbolRefExpr::VK_None: Type = ELF::R_PPC64_ADDR64; break; + case MCSymbolRefExpr::VK_PPC_DTPMOD: + Type = ELF::R_PPC64_DTPMOD64; + break; + case MCSymbolRefExpr::VK_PPC_TPREL: + Type = ELF::R_PPC64_TPREL64; + break; + case MCSymbolRefExpr::VK_PPC_DTPREL: + Type = ELF::R_PPC64_DTPREL64; + break; } break; case FK_Data_4: @@ -195,6 +362,35 @@ unsigned 
PPCELFObjectWriter::GetRelocType(const MCValue &Target, return getRelocTypeInner(Target, Fixup, IsPCRel); } +const MCSymbol *PPCELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm, + const MCValue &Target, + const MCFragment &F, + const MCFixup &Fixup, + bool IsPCRel) const { + assert(Target.getSymA() && "SymA cannot be 0"); + MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ? + MCSymbolRefExpr::VK_None : Target.getSymA()->getKind(); + + bool EmitThisSym; + switch (Modifier) { + // GOT references always need a relocation, even if the + // target symbol is local. + case MCSymbolRefExpr::VK_GOT: + case MCSymbolRefExpr::VK_PPC_GOT_LO: + case MCSymbolRefExpr::VK_PPC_GOT_HI: + case MCSymbolRefExpr::VK_PPC_GOT_HA: + EmitThisSym = true; + break; + default: + EmitThisSym = false; + break; + } + + if (EmitThisSym) + return &Target.getSymA()->getSymbol().AliasedSymbol(); + return NULL; +} + const MCSymbol *PPCELFObjectWriter::undefinedExplicitRelSym(const MCValue &Target, const MCFixup &Fixup, bool IsPCRel) const { diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h index 3ea59f0..68de8c1 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h @@ -25,6 +25,14 @@ enum Fixups { /// branches. fixup_ppc_brcond14, + /// fixup_ppc_br24abs - 24-bit absolute relocation for direct branches + /// like 'ba' and 'bla'. + fixup_ppc_br24abs, + + /// fixup_ppc_brcond14abs - 14-bit absolute relocation for conditional + /// branches. + fixup_ppc_brcond14abs, + /// fixup_ppc_half16 - A 16-bit fixup corresponding to lo16(_foo) /// or ha16(_foo) for instrs like 'li' or 'addis'. fixup_ppc_half16, @@ -33,11 +41,9 @@ enum Fixups { /// implied 2 zero bits for instrs like 'std'. fixup_ppc_half16ds, - /// fixup_ppc_tlsreg - Insert thread-pointer register number. - fixup_ppc_tlsreg, - /// fixup_ppc_nofixup - Not a true fixup, but ties a symbol to a call - /// to __tls_get_addr for the TLS general and local dynamic models. + /// to __tls_get_addr for the TLS general and local dynamic models, + /// or inserts the thread-pointer register number. fixup_ppc_nofixup, // Marker diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp index bb7ce6f..6822507 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp @@ -66,6 +66,6 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) { ZeroDirective = "\t.space\t"; Data64bitsDirective = is64Bit ? "\t.quad\t" : 0; - AssemblerDialect = 0; // Old-Style mnemonics. + AssemblerDialect = 1; // New-Style mnemonics. 
} diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp index 31c73ae..59ba9c4 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp @@ -48,7 +48,11 @@ public: SmallVectorImpl<MCFixup> &Fixups) const; unsigned getCondBrEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; - unsigned getS16ImmEncoding(const MCInst &MI, unsigned OpNo, + unsigned getAbsDirectBrEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getAbsCondBrEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getImm16Encoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; unsigned getMemRIEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; @@ -56,6 +60,8 @@ public: SmallVectorImpl<MCFixup> &Fixups) const; unsigned getTLSRegEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getTLSCallEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; unsigned get_crbitm_encoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; @@ -76,7 +82,7 @@ public: unsigned Size = 4; // FIXME: Have Desc.getSize() return the correct value! unsigned Opcode = MI.getOpcode(); if (Opcode == PPC::BL8_NOP || Opcode == PPC::BLA8_NOP || - Opcode == PPC::BL8_NOP_TLSGD || Opcode == PPC::BL8_NOP_TLSLD) + Opcode == PPC::BL8_NOP_TLS) Size = 8; // Output the constant in big endian byte order. @@ -109,17 +115,6 @@ getDirectBrEncoding(const MCInst &MI, unsigned OpNo, // Add a fixup for the branch target. Fixups.push_back(MCFixup::Create(0, MO.getExpr(), (MCFixupKind)PPC::fixup_ppc_br24)); - - // For special TLS calls, add another fixup for the symbol. Apparently - // BL8_NOP, BL8_NOP_TLSGD, and BL8_NOP_TLSLD are sufficiently - // similar that TblGen will not generate a separate case for the latter - // two, so this is the only way to get the extra fixup generated. - unsigned Opcode = MI.getOpcode(); - if (Opcode == PPC::BL8_NOP_TLSGD || Opcode == PPC::BL8_NOP_TLSLD) { - const MCOperand &MO2 = MI.getOperand(OpNo+1); - Fixups.push_back(MCFixup::Create(0, MO2.getExpr(), - (MCFixupKind)PPC::fixup_ppc_nofixup)); - } return 0; } @@ -134,12 +129,36 @@ unsigned PPCMCCodeEmitter::getCondBrEncoding(const MCInst &MI, unsigned OpNo, return 0; } -unsigned PPCMCCodeEmitter::getS16ImmEncoding(const MCInst &MI, unsigned OpNo, +unsigned PPCMCCodeEmitter:: +getAbsDirectBrEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups); + + // Add a fixup for the branch target. + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_br24abs)); + return 0; +} + +unsigned PPCMCCodeEmitter:: +getAbsCondBrEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups); + + // Add a fixup for the branch target. 
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_brcond14abs)); + return 0; +} + +unsigned PPCMCCodeEmitter::getImm16Encoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups); - // Add a fixup for the branch target. + // Add a fixup for the immediate field. Fixups.push_back(MCFixup::Create(2, MO.getExpr(), (MCFixupKind)PPC::fixup_ppc_half16)); return 0; @@ -190,19 +209,29 @@ unsigned PPCMCCodeEmitter::getTLSRegEncoding(const MCInst &MI, unsigned OpNo, // hint to the linker that this statement is part of a relocation sequence. // Return the thread-pointer register's encoding. Fixups.push_back(MCFixup::Create(0, MO.getExpr(), - (MCFixupKind)PPC::fixup_ppc_tlsreg)); - return CTX.getRegisterInfo().getEncodingValue(PPC::X13); + (MCFixupKind)PPC::fixup_ppc_nofixup)); + return CTX.getRegisterInfo()->getEncodingValue(PPC::X13); +} + +unsigned PPCMCCodeEmitter::getTLSCallEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + // For special TLS calls, we need two fixups; one for the branch target + // (__tls_get_addr), which we create via getDirectBrEncoding as usual, + // and one for the TLSGD or TLSLD symbol, which is emitted here. + const MCOperand &MO = MI.getOperand(OpNo+1); + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_nofixup)); + return getDirectBrEncoding(MI, OpNo, Fixups); } unsigned PPCMCCodeEmitter:: get_crbitm_encoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); - assert((MI.getOpcode() == PPC::MTCRF || - MI.getOpcode() == PPC::MFOCRF || - MI.getOpcode() == PPC::MTCRF8) && + assert((MI.getOpcode() == PPC::MTOCRF || MI.getOpcode() == PPC::MTOCRF8 || + MI.getOpcode() == PPC::MFOCRF || MI.getOpcode() == PPC::MFOCRF8) && (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7)); - return 0x80 >> CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + return 0x80 >> CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); } @@ -210,11 +239,12 @@ unsigned PPCMCCodeEmitter:: getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups) const { if (MO.isReg()) { - // MTCRF/MFOCRF should go through get_crbitm_encoding for the CR operand. + // MTOCRF/MFOCRF should go through get_crbitm_encoding for the CR operand. // The GPR operand should come through here though. 
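// (In get_crbitm_encoding above, the result is the one-hot FXM-style
// field mask that mtocrf/mfocrf expect: CR2 has register encoding 2,
// so it yields 0x80 >> 2 == 0x20.)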
- assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MFOCRF) || + assert((MI.getOpcode() != PPC::MTOCRF && MI.getOpcode() != PPC::MTOCRF8 && + MI.getOpcode() != PPC::MFOCRF && MI.getOpcode() != PPC::MFOCRF8) || MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7); - return CTX.getRegisterInfo().getEncodingValue(MO.getReg()); + return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); } assert(MO.isImm() && diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp index f0613ff..9529267 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp @@ -11,25 +11,42 @@ #include "PPCMCExpr.h" #include "llvm/MC/MCAssembler.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCAsmInfo.h" using namespace llvm; const PPCMCExpr* PPCMCExpr::Create(VariantKind Kind, const MCExpr *Expr, - MCContext &Ctx) { - return new (Ctx) PPCMCExpr(Kind, Expr); + bool isDarwin, MCContext &Ctx) { + return new (Ctx) PPCMCExpr(Kind, Expr, isDarwin); } void PPCMCExpr::PrintImpl(raw_ostream &OS) const { - switch (Kind) { - default: llvm_unreachable("Invalid kind!"); - case VK_PPC_HA16: OS << "ha16"; break; - case VK_PPC_LO16: OS << "lo16"; break; - } + if (isDarwinSyntax()) { + switch (Kind) { + default: llvm_unreachable("Invalid kind!"); + case VK_PPC_LO: OS << "lo16"; break; + case VK_PPC_HI: OS << "hi16"; break; + case VK_PPC_HA: OS << "ha16"; break; + } + + OS << '('; + getSubExpr()->print(OS); + OS << ')'; + } else { + getSubExpr()->print(OS); - OS << '('; - getSubExpr()->print(OS); - OS << ')'; + switch (Kind) { + default: llvm_unreachable("Invalid kind!"); + case VK_PPC_LO: OS << "@l"; break; + case VK_PPC_HI: OS << "@h"; break; + case VK_PPC_HA: OS << "@ha"; break; + case VK_PPC_HIGHER: OS << "@higher"; break; + case VK_PPC_HIGHERA: OS << "@highera"; break; + case VK_PPC_HIGHEST: OS << "@highest"; break; + case VK_PPC_HIGHESTA: OS << "@highesta"; break; + } + } } bool @@ -45,12 +62,27 @@ PPCMCExpr::EvaluateAsRelocatableImpl(MCValue &Res, switch (Kind) { default: llvm_unreachable("Invalid kind!"); - case VK_PPC_HA16: - Result = ((Result >> 16) + ((Result & 0x8000) ? 
1 : 0)) & 0xffff; - break; - case VK_PPC_LO16: + case VK_PPC_LO: Result = Result & 0xffff; break; + case VK_PPC_HI: + Result = (Result >> 16) & 0xffff; + break; + case VK_PPC_HA: + Result = ((Result + 0x8000) >> 16) & 0xffff; + break; + case VK_PPC_HIGHER: + Result = (Result >> 32) & 0xffff; + break; + case VK_PPC_HIGHERA: + Result = ((Result + 0x8000) >> 32) & 0xffff; + break; + case VK_PPC_HIGHEST: + Result = (Result >> 48) & 0xffff; + break; + case VK_PPC_HIGHESTA: + Result = ((Result + 0x8000) >> 48) & 0xffff; + break; } Res = MCValue::get(Result); } else { @@ -62,11 +94,26 @@ PPCMCExpr::EvaluateAsRelocatableImpl(MCValue &Res, switch (Kind) { default: llvm_unreachable("Invalid kind!"); - case VK_PPC_HA16: - Modifier = MCSymbolRefExpr::VK_PPC_ADDR16_HA; + case VK_PPC_LO: + Modifier = MCSymbolRefExpr::VK_PPC_LO; + break; + case VK_PPC_HI: + Modifier = MCSymbolRefExpr::VK_PPC_HI; + break; + case VK_PPC_HA: + Modifier = MCSymbolRefExpr::VK_PPC_HA; + break; + case VK_PPC_HIGHERA: + Modifier = MCSymbolRefExpr::VK_PPC_HIGHERA; + break; + case VK_PPC_HIGHER: + Modifier = MCSymbolRefExpr::VK_PPC_HIGHER; + break; + case VK_PPC_HIGHEST: + Modifier = MCSymbolRefExpr::VK_PPC_HIGHEST; break; - case VK_PPC_LO16: - Modifier = MCSymbolRefExpr::VK_PPC_ADDR16_LO; + case VK_PPC_HIGHESTA: + Modifier = MCSymbolRefExpr::VK_PPC_HIGHESTA; break; } Sym = MCSymbolRefExpr::Create(&Sym->getSymbol(), Modifier, Context); diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h index a080537..e44c7c1 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h @@ -20,30 +20,44 @@ class PPCMCExpr : public MCTargetExpr { public: enum VariantKind { VK_PPC_None, - VK_PPC_HA16, - VK_PPC_LO16 + VK_PPC_LO, + VK_PPC_HI, + VK_PPC_HA, + VK_PPC_HIGHER, + VK_PPC_HIGHERA, + VK_PPC_HIGHEST, + VK_PPC_HIGHESTA }; private: const VariantKind Kind; const MCExpr *Expr; + bool IsDarwin; - explicit PPCMCExpr(VariantKind _Kind, const MCExpr *_Expr) - : Kind(_Kind), Expr(_Expr) {} + explicit PPCMCExpr(VariantKind _Kind, const MCExpr *_Expr, + bool _IsDarwin) + : Kind(_Kind), Expr(_Expr), IsDarwin(_IsDarwin) {} public: /// @name Construction /// @{ static const PPCMCExpr *Create(VariantKind Kind, const MCExpr *Expr, - MCContext &Ctx); + bool isDarwin, MCContext &Ctx); - static const PPCMCExpr *CreateHa16(const MCExpr *Expr, MCContext &Ctx) { - return Create(VK_PPC_HA16, Expr, Ctx); + static const PPCMCExpr *CreateLo(const MCExpr *Expr, + bool isDarwin, MCContext &Ctx) { + return Create(VK_PPC_LO, Expr, isDarwin, Ctx); } - static const PPCMCExpr *CreateLo16(const MCExpr *Expr, MCContext &Ctx) { - return Create(VK_PPC_LO16, Expr, Ctx); + static const PPCMCExpr *CreateHi(const MCExpr *Expr, + bool isDarwin, MCContext &Ctx) { + return Create(VK_PPC_HI, Expr, isDarwin, Ctx); + } + + static const PPCMCExpr *CreateHa(const MCExpr *Expr, + bool isDarwin, MCContext &Ctx) { + return Create(VK_PPC_HA, Expr, isDarwin, Ctx); } /// @} @@ -56,6 +70,10 @@ public: /// getSubExpr - Get the child of this expression. const MCExpr *getSubExpr() const { return Expr; } + /// isDarwinSyntax - True if expression is to be printed using Darwin syntax. 
+ bool isDarwinSyntax() const { return IsDarwin; } + + /// @} void PrintImpl(raw_ostream &OS) const; diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp index 2da30f9..5f7a39a 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp @@ -42,7 +42,8 @@ static MCInstrInfo *createPPCMCInstrInfo() { static MCRegisterInfo *createPPCMCRegisterInfo(StringRef TT) { Triple TheTriple(TT); - bool isPPC64 = (TheTriple.getArch() == Triple::ppc64); + bool isPPC64 = (TheTriple.getArch() == Triple::ppc64 || + TheTriple.getArch() == Triple::ppc64le); unsigned Flavour = isPPC64 ? 0 : 1; unsigned RA = isPPC64 ? PPC::LR8 : PPC::LR; @@ -60,7 +61,8 @@ static MCSubtargetInfo *createPPCMCSubtargetInfo(StringRef TT, StringRef CPU, static MCAsmInfo *createPPCMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { Triple TheTriple(TT); - bool isPPC64 = TheTriple.getArch() == Triple::ppc64; + bool isPPC64 = (TheTriple.getArch() == Triple::ppc64 || + TheTriple.getArch() == Triple::ppc64le); MCAsmInfo *MAI; if (TheTriple.isOSDarwin()) @@ -91,7 +93,8 @@ static MCCodeGenInfo *createPPCMCCodeGenInfo(StringRef TT, Reloc::Model RM, } if (CM == CodeModel::Default) { Triple T(TT); - if (!T.isOSDarwin() && T.getArch() == Triple::ppc64) + if (!T.isOSDarwin() && + (T.getArch() == Triple::ppc64 || T.getArch() == Triple::ppc64le)) CM = CodeModel::Medium; } X->InitMCCodeGenInfo(RM, CM, OL); @@ -117,45 +120,60 @@ static MCInstPrinter *createPPCMCInstPrinter(const Target &T, const MCInstrInfo &MII, const MCRegisterInfo &MRI, const MCSubtargetInfo &STI) { - return new PPCInstPrinter(MAI, MII, MRI, SyntaxVariant); + bool isDarwin = Triple(STI.getTargetTriple()).isOSDarwin(); + return new PPCInstPrinter(MAI, MII, MRI, isDarwin); } extern "C" void LLVMInitializePowerPCTargetMC() { // Register the MC asm info. RegisterMCAsmInfoFn C(ThePPC32Target, createPPCMCAsmInfo); RegisterMCAsmInfoFn D(ThePPC64Target, createPPCMCAsmInfo); + RegisterMCAsmInfoFn E(ThePPC64LETarget, createPPCMCAsmInfo); // Register the MC codegen info. TargetRegistry::RegisterMCCodeGenInfo(ThePPC32Target, createPPCMCCodeGenInfo); TargetRegistry::RegisterMCCodeGenInfo(ThePPC64Target, createPPCMCCodeGenInfo); + TargetRegistry::RegisterMCCodeGenInfo(ThePPC64LETarget, + createPPCMCCodeGenInfo); // Register the MC instruction info. TargetRegistry::RegisterMCInstrInfo(ThePPC32Target, createPPCMCInstrInfo); TargetRegistry::RegisterMCInstrInfo(ThePPC64Target, createPPCMCInstrInfo); + TargetRegistry::RegisterMCInstrInfo(ThePPC64LETarget, + createPPCMCInstrInfo); // Register the MC register info. TargetRegistry::RegisterMCRegInfo(ThePPC32Target, createPPCMCRegisterInfo); TargetRegistry::RegisterMCRegInfo(ThePPC64Target, createPPCMCRegisterInfo); + TargetRegistry::RegisterMCRegInfo(ThePPC64LETarget, createPPCMCRegisterInfo); // Register the MC subtarget info. TargetRegistry::RegisterMCSubtargetInfo(ThePPC32Target, createPPCMCSubtargetInfo); TargetRegistry::RegisterMCSubtargetInfo(ThePPC64Target, createPPCMCSubtargetInfo); + TargetRegistry::RegisterMCSubtargetInfo(ThePPC64LETarget, + createPPCMCSubtargetInfo); // Register the MC Code Emitter TargetRegistry::RegisterMCCodeEmitter(ThePPC32Target, createPPCMCCodeEmitter); TargetRegistry::RegisterMCCodeEmitter(ThePPC64Target, createPPCMCCodeEmitter); + TargetRegistry::RegisterMCCodeEmitter(ThePPC64LETarget, + createPPCMCCodeEmitter); // Register the asm backend. 
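All of the isPPC64 triple checks above now accept ppc64le as well. A tiny illustration (assuming an LLVM build new enough to parse the ppc64le arch name; the helper name is made up for this sketch):

#include "llvm/ADT/Triple.h"
#include <cassert>

static bool isPPC64Triple(const char *TT) {
  llvm::Triple T(TT);
  return T.getArch() == llvm::Triple::ppc64 ||
         T.getArch() == llvm::Triple::ppc64le;
}

int main() {
  assert(isPPC64Triple("powerpc64-unknown-linux-gnu"));
  assert(isPPC64Triple("powerpc64le-unknown-linux-gnu"));
  assert(!isPPC64Triple("powerpc-unknown-linux-gnu"));
  return 0;
}
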
TargetRegistry::RegisterMCAsmBackend(ThePPC32Target, createPPCAsmBackend); TargetRegistry::RegisterMCAsmBackend(ThePPC64Target, createPPCAsmBackend); + TargetRegistry::RegisterMCAsmBackend(ThePPC64LETarget, createPPCAsmBackend); // Register the object streamer. TargetRegistry::RegisterMCObjectStreamer(ThePPC32Target, createMCStreamer); TargetRegistry::RegisterMCObjectStreamer(ThePPC64Target, createMCStreamer); + TargetRegistry::RegisterMCObjectStreamer(ThePPC64LETarget, createMCStreamer); // Register the MCInstPrinter. TargetRegistry::RegisterMCInstPrinter(ThePPC32Target, createPPCMCInstPrinter); TargetRegistry::RegisterMCInstPrinter(ThePPC64Target, createPPCMCInstPrinter); + TargetRegistry::RegisterMCInstPrinter(ThePPC64LETarget, + createPPCMCInstPrinter); } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h index 38a7420..9f29132 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h @@ -33,6 +33,7 @@ class raw_ostream; extern Target ThePPC32Target; extern Target ThePPC64Target; +extern Target ThePPC64LETarget; MCCodeEmitter *createPPCMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI, diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp index 853e505..63facc5 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp @@ -26,6 +26,22 @@ PPC::Predicate PPC::InvertPredicate(PPC::Predicate Opcode) { case PPC::PRED_LE: return PPC::PRED_GT; case PPC::PRED_NU: return PPC::PRED_UN; case PPC::PRED_UN: return PPC::PRED_NU; + case PPC::PRED_EQ_MINUS: return PPC::PRED_NE_PLUS; + case PPC::PRED_NE_MINUS: return PPC::PRED_EQ_PLUS; + case PPC::PRED_LT_MINUS: return PPC::PRED_GE_PLUS; + case PPC::PRED_GE_MINUS: return PPC::PRED_LT_PLUS; + case PPC::PRED_GT_MINUS: return PPC::PRED_LE_PLUS; + case PPC::PRED_LE_MINUS: return PPC::PRED_GT_PLUS; + case PPC::PRED_NU_MINUS: return PPC::PRED_UN_PLUS; + case PPC::PRED_UN_MINUS: return PPC::PRED_NU_PLUS; + case PPC::PRED_EQ_PLUS: return PPC::PRED_NE_MINUS; + case PPC::PRED_NE_PLUS: return PPC::PRED_EQ_MINUS; + case PPC::PRED_LT_PLUS: return PPC::PRED_GE_MINUS; + case PPC::PRED_GE_PLUS: return PPC::PRED_LT_MINUS; + case PPC::PRED_GT_PLUS: return PPC::PRED_LE_MINUS; + case PPC::PRED_LE_PLUS: return PPC::PRED_GT_MINUS; + case PPC::PRED_NU_PLUS: return PPC::PRED_UN_MINUS; + case PPC::PRED_UN_PLUS: return PPC::PRED_NU_MINUS; } llvm_unreachable("Unknown PPC branch opcode!"); } @@ -40,6 +56,22 @@ PPC::Predicate PPC::getSwappedPredicate(PPC::Predicate Opcode) { case PPC::PRED_LE: return PPC::PRED_GE; case PPC::PRED_NU: return PPC::PRED_NU; case PPC::PRED_UN: return PPC::PRED_UN; + case PPC::PRED_EQ_MINUS: return PPC::PRED_EQ_MINUS; + case PPC::PRED_NE_MINUS: return PPC::PRED_NE_MINUS; + case PPC::PRED_LT_MINUS: return PPC::PRED_GT_MINUS; + case PPC::PRED_GE_MINUS: return PPC::PRED_LE_MINUS; + case PPC::PRED_GT_MINUS: return PPC::PRED_LT_MINUS; + case PPC::PRED_LE_MINUS: return PPC::PRED_GE_MINUS; + case PPC::PRED_NU_MINUS: return PPC::PRED_NU_MINUS; + case PPC::PRED_UN_MINUS: return PPC::PRED_UN_MINUS; + case PPC::PRED_EQ_PLUS: return PPC::PRED_EQ_PLUS; + case PPC::PRED_NE_PLUS: return PPC::PRED_NE_PLUS; + case PPC::PRED_LT_PLUS: return PPC::PRED_GT_PLUS; + case PPC::PRED_GE_PLUS: return PPC::PRED_LE_PLUS; + case PPC::PRED_GT_PLUS: return PPC::PRED_LT_PLUS; + case PPC::PRED_LE_PLUS: return PPC::PRED_GE_PLUS; + 
case PPC::PRED_NU_PLUS: return PPC::PRED_NU_PLUS; + case PPC::PRED_UN_PLUS: return PPC::PRED_UN_PLUS; } llvm_unreachable("Unknown PPC branch opcode!"); } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h index 3ab9005..d498c2f 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h @@ -25,15 +25,30 @@ namespace llvm { namespace PPC { /// Predicate - These are "(BI << 5) | BO" for various predicates. enum Predicate { - PRED_LT = (0 << 5) | 12, - PRED_LE = (1 << 5) | 4, - PRED_EQ = (2 << 5) | 12, - PRED_GE = (0 << 5) | 4, - PRED_GT = (1 << 5) | 12, - PRED_NE = (2 << 5) | 4, - PRED_UN = (3 << 5) | 12, - PRED_NU = (3 << 5) | 4, - PRED_BAD = 0 + PRED_LT = (0 << 5) | 12, + PRED_LE = (1 << 5) | 4, + PRED_EQ = (2 << 5) | 12, + PRED_GE = (0 << 5) | 4, + PRED_GT = (1 << 5) | 12, + PRED_NE = (2 << 5) | 4, + PRED_UN = (3 << 5) | 12, + PRED_NU = (3 << 5) | 4, + PRED_LT_MINUS = (0 << 5) | 14, + PRED_LE_MINUS = (1 << 5) | 6, + PRED_EQ_MINUS = (2 << 5) | 14, + PRED_GE_MINUS = (0 << 5) | 6, + PRED_GT_MINUS = (1 << 5) | 14, + PRED_NE_MINUS = (2 << 5) | 6, + PRED_UN_MINUS = (3 << 5) | 14, + PRED_NU_MINUS = (3 << 5) | 6, + PRED_LT_PLUS = (0 << 5) | 15, + PRED_LE_PLUS = (1 << 5) | 7, + PRED_EQ_PLUS = (2 << 5) | 15, + PRED_GE_PLUS = (0 << 5) | 7, + PRED_GT_PLUS = (1 << 5) | 15, + PRED_NE_PLUS = (2 << 5) | 7, + PRED_UN_PLUS = (3 << 5) | 15, + PRED_NU_PLUS = (3 << 5) | 7 }; /// Invert the specified predicate. != -> ==, < -> >=. diff --git a/lib/Target/PowerPC/Makefile b/lib/Target/PowerPC/Makefile index 6666694..21fdcd9 100644 --- a/lib/Target/PowerPC/Makefile +++ b/lib/Target/PowerPC/Makefile @@ -16,7 +16,7 @@ BUILT_SOURCES = PPCGenRegisterInfo.inc PPCGenAsmMatcher.inc \ PPCGenAsmWriter.inc PPCGenCodeEmitter.inc \ PPCGenInstrInfo.inc PPCGenDAGISel.inc \ PPCGenSubtargetInfo.inc PPCGenCallingConv.inc \ - PPCGenMCCodeEmitter.inc + PPCGenMCCodeEmitter.inc PPCGenFastISel.inc DIRS = AsmParser InstPrinter TargetInfo MCTargetDesc diff --git a/lib/Target/PowerPC/PPC.h b/lib/Target/PowerPC/PPC.h index 2e79610..f0d5af2 100644 --- a/lib/Target/PowerPC/PPC.h +++ b/lib/Target/PowerPC/PPC.h @@ -74,18 +74,21 @@ namespace llvm { /// The next are not flags but distinct values. MO_ACCESS_MASK = 0xf0, - /// MO_LO16, MO_HA16 - lo16(symbol) and ha16(symbol) - MO_LO16 = 1 << 4, - MO_HA16 = 2 << 4, + /// MO_LO, MO_HA - lo16(symbol) and ha16(symbol) + MO_LO = 1 << 4, + MO_HA = 2 << 4, - MO_TPREL16_HA = 3 << 4, - MO_TPREL16_LO = 4 << 4, + MO_TPREL_LO = 4 << 4, + MO_TPREL_HA = 3 << 4, /// These values identify relocations on immediates folded /// into memory operations. 
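For readers decoding the new predicate values: a small sketch (illustrative, not from the patch) that pulls the BI and BO fields back out of the "(BI << 5) | BO" packing documented above. The _MINUS/_PLUS variants leave BI untouched and only change the low BO bits, which carry the static branch-prediction ("at") hint:

#include <cstdio>

enum Predicate {                   // subset of the enum above
  PRED_LT       = (0 << 5) | 12,   // BO 12: branch if CR bit set
  PRED_GE       = (0 << 5) | 4,    // BO 4:  branch if CR bit clear
  PRED_LT_MINUS = (0 << 5) | 14,   // hint: unlikely taken
  PRED_LT_PLUS  = (0 << 5) | 15,   // hint: likely taken
};

static unsigned getBI(Predicate P) { return (unsigned)P >> 5; }   // CR bit index
static unsigned getBO(Predicate P) { return (unsigned)P & 0x1f; } // branch options

int main() {
  const Predicate Ps[] = { PRED_LT, PRED_GE, PRED_LT_MINUS, PRED_LT_PLUS };
  for (Predicate P : Ps)
    std::printf("pred=%2u  BI=%u  BO=%2u\n", (unsigned)P, getBI(P), getBO(P));
  return 0;
}
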
- MO_DTPREL16_LO = 5 << 4, - MO_TLSLD16_LO = 6 << 4, - MO_TOC16_LO = 7 << 4 + MO_DTPREL_LO = 5 << 4, + MO_TLSLD_LO = 6 << 4, + MO_TOC_LO = 7 << 4, + + // Symbol for VK_PPC_TLS fixup attached to an ADD instruction + MO_TLS = 8 << 4 }; } // end namespace PPCII diff --git a/lib/Target/PowerPC/PPC.td b/lib/Target/PowerPC/PPC.td index eb73c67..806822c 100644 --- a/lib/Target/PowerPC/PPC.td +++ b/lib/Target/PowerPC/PPC.td @@ -252,6 +252,11 @@ def : ProcessorModel<"ppc64", G5Model, FeatureMFOCRF, FeatureFSqrt, FeatureFRES, FeatureFRSQRTE, FeatureSTFIWX, Feature64Bit /*, Feature64BitRegs */]>; +def : ProcessorModel<"ppc64le", G5Model, + [Directive64, FeatureAltivec, + FeatureMFOCRF, FeatureFSqrt, FeatureFRES, + FeatureFRSQRTE, FeatureSTFIWX, + Feature64Bit /*, Feature64BitRegs */]>; //===----------------------------------------------------------------------===// // Calling Conventions @@ -272,10 +277,20 @@ def PPCAsmParser : AsmParser { let ShouldEmitMatchRegisterName = 0; } +def PPCAsmParserVariant : AsmParserVariant { + int Variant = 0; + + // We do not use hard coded registers in asm strings. However, some + // InstAlias definitions use immediate literals. Set RegisterPrefix + // so that those are not misinterpreted as registers. + string RegisterPrefix = "%"; +} + def PPC : Target { // Information about the instructions. let InstructionSet = PPCInstrInfo; let AssemblyWriters = [PPCAsmWriter]; let AssemblyParsers = [PPCAsmParser]; + let AssemblyParserVariants = [PPCAsmParserVariant]; } diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index c43b5c9..bbfad87 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -86,18 +86,6 @@ namespace { bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant, const char *ExtraCode, raw_ostream &O); - - MachineLocation getDebugValueLocation(const MachineInstr *MI) const { - MachineLocation Location; - assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!"); - // Frame address. Currently handles register +- offset only. - if (MI->getOperand(0).isReg() && MI->getOperand(2).isImm()) - Location.set(MI->getOperand(0).getReg(), MI->getOperand(2).getImm()); - else { - DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n"); - } - return Location; - } }; /// PPCLinuxAsmPrinter - PowerPC assembly printer, customized for Linux @@ -340,28 +328,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { // Lower multi-instruction pseudo operations. switch (MI->getOpcode()) { default: break; - case TargetOpcode::DBG_VALUE: { - if (!isVerbose() || !OutStreamer.hasRawTextSupport()) return; - - SmallString<32> Str; - raw_svector_ostream O(Str); - unsigned NOps = MI->getNumOperands(); - assert(NOps==4); - O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: "; - // cast away const; DIetc do not take const operands for some reason. - DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata())); - O << V.getName(); - O << " <- "; - // Frame address. Currently handles register +- offset only. 
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm()); - O << '['; printOperand(MI, 0, O); O << '+'; printOperand(MI, 1, O); - O << ']'; - O << "+"; - printOperand(MI, NOps-2, O); - OutStreamer.EmitRawText(O.str()); - return; - } - + case TargetOpcode::DBG_VALUE: + llvm_unreachable("Should be handled target independently"); case PPC::MovePCtoLR: case PPC::MovePCtoLR8: { // Transform %LR = MovePCtoLR @@ -404,7 +372,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { MCSymbol *TOCEntry = lookUpOrCreateTOCEntry(MOSymbol); const MCExpr *Exp = - MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC_ENTRY, + MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC, OutContext); TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); @@ -449,7 +417,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { MOSymbol = lookUpOrCreateTOCEntry(MOSymbol); const MCExpr *Exp = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC16_HA, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_HA, OutContext); TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); @@ -486,7 +454,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } const MCExpr *Exp = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC16_LO, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO, OutContext); TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); @@ -522,7 +490,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { MOSymbol = lookUpOrCreateTOCEntry(MOSymbol); const MCExpr *Exp = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC16_LO, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO, OutContext); TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); @@ -536,7 +504,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *SymGotTprel = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL16_HA, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) @@ -554,7 +522,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *Exp = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL16_LO, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO, OutContext); TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); @@ -568,7 +536,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *SymGotTlsGD = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_HA, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) @@ -584,7 +552,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *SymGotTlsGD = - MCSymbolRefExpr::Create(MOSymbol, 
MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_LO, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) @@ -594,7 +562,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } case PPC::GETtlsADDR: { // Transform: %X3 = GETtlsADDR %X3, <ga:@sym> - // Into: BL8_NOP_TLSGD __tls_get_addr(sym@tlsgd) + // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsgd) assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); StringRef Name = "__tls_get_addr"; @@ -607,7 +575,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MCExpr *SymVar = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSGD, OutContext); - OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLSGD) + OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLS) .addExpr(TlsRef) .addExpr(SymVar)); return; @@ -620,7 +588,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *SymGotTlsLD = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_HA, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) @@ -636,7 +604,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *SymGotTlsLD = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) @@ -646,7 +614,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } case PPC::GETtlsldADDR: { // Transform: %X3 = GETtlsldADDR %X3, <ga:@sym> - // Into: BL8_NOP_TLSLD __tls_get_addr(sym@tlsld) + // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsld) assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); StringRef Name = "__tls_get_addr"; @@ -659,7 +627,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MCExpr *SymVar = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSLD, OutContext); - OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLSLD) + OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLS) .addExpr(TlsRef) .addExpr(SymVar)); return; @@ -672,7 +640,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *SymDtprel = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL16_HA, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) @@ -688,7 +656,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); const MCExpr *SymDtprel = - MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL16_LO, + MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_LO, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) @@ -696,21 +664,62 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { .addExpr(SymDtprel)); return; } - 
case PPC::MFCRpseud: - case PPC::MFCR8pseud: - // Transform: %R3 = MFCRpseud %CR7 - // Into: %R3 = MFCR ;; cr7 - OutStreamer.AddComment(PPCInstPrinter:: - getRegisterName(MI->getOperand(1).getReg())); - OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ? PPC::MFCR8 : PPC::MFCR) - .addReg(MI->getOperand(0).getReg())); - return; + case PPC::MFOCRF: + case PPC::MFOCRF8: + if (!Subtarget.hasMFOCRF()) { + // Transform: %R3 = MFOCRF %CR7 + // Into: %R3 = MFCR ;; cr7 + unsigned NewOpcode = + MI->getOpcode() == PPC::MFOCRF ? PPC::MFCR : PPC::MFCR8; + OutStreamer.AddComment(PPCInstPrinter:: + getRegisterName(MI->getOperand(1).getReg())); + OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode) + .addReg(MI->getOperand(0).getReg())); + return; + } + break; + case PPC::MTOCRF: + case PPC::MTOCRF8: + if (!Subtarget.hasMFOCRF()) { + // Transform: %CR7 = MTOCRF %R3 + // Into: MTCRF mask, %R3 ;; cr7 + unsigned NewOpcode = + MI->getOpcode() == PPC::MTOCRF ? PPC::MTCRF : PPC::MTCRF8; + unsigned Mask = 0x80 >> OutContext.getRegisterInfo() + ->getEncodingValue(MI->getOperand(0).getReg()); + OutStreamer.AddComment(PPCInstPrinter:: + getRegisterName(MI->getOperand(0).getReg())); + OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode) + .addImm(Mask) + .addReg(MI->getOperand(1).getReg())); + return; + } + break; case PPC::SYNC: // In Book E sync is called msync, handle this special case here... if (Subtarget.isBookE()) { OutStreamer.EmitRawText(StringRef("\tmsync")); return; } + break; + case PPC::LD: + case PPC::STD: + case PPC::LWA: { + // Verify alignment is legal, so we don't create relocations + // that can't be supported. + // FIXME: This test is currently disabled for Darwin. The test + // suite shows a handful of test cases that fail this check for + // Darwin. Those need to be investigated before this sanity test + // can be enabled for those subtargets. + if (!Subtarget.isDarwin()) { + unsigned OpNum = (MI->getOpcode() == PPC::STD) ? 2 : 1; + const MachineOperand &MO = MI->getOperand(OpNum); + if (MO.isGlobal() && MO.getGlobal()->getAlignment() < 4) + llvm_unreachable("Global must be word-aligned for LD, STD, LWA!"); + } + // Now process the instruction normally. + break; + } } LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); @@ -738,7 +747,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() { MCSymbol *Symbol2 = OutContext.GetOrCreateSymbol(StringRef(".TOC.")); // Generates a R_PPC64_TOC relocation for TOC base insertion. OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol2, - MCSymbolRefExpr::VK_PPC_TOC, OutContext), + MCSymbolRefExpr::VK_PPC_TOCBASE, OutContext), 8/*size*/); // Emit a null environment pointer. OutStreamer.EmitIntValue(0, 8 /* size */); @@ -782,7 +791,7 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) { // .long _foo OutStreamer.EmitValue(MCSymbolRefExpr::Create(Stubs[i].second.getPointer(), OutContext), - isPPC64 ? 8 : 4/*size*/, 0/*addrspace*/); + isPPC64 ? 8 : 4/*size*/); } Stubs.clear(); @@ -830,7 +839,8 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) { "power6", "power6x", "power7", - "ppc64" + "ppc64", + "ppc64le" }; unsigned Directive = Subtarget.getDarwinDirective(); @@ -844,7 +854,7 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) { // FIXME: This is a total hack, finish mc'izing the PPC backend. 
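The mask computation in the MTOCRF fallback above is compact enough to sanity-check in isolation. A sketch, assuming only that CR0..CR7 have encoding values 0..7 (the same property get_crbitm_encoding elsewhere in this patch relies on):

#include <cassert>

// MTCRF's FXM operand carries one bit per CR field, MSB-first,
// so field CRn selects bit (7 - n).
static unsigned crFieldMask(unsigned CREncoding /* 0..7 for CR0..CR7 */) {
  return 0x80u >> CREncoding;
}

int main() {
  assert(crFieldMask(0) == 0x80);  // CR0
  assert(crFieldMask(7) == 0x01);  // CR7 -> mtcrf 1, rS
  return 0;
}
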
if (OutStreamer.hasRawTextSupport()) { - assert(Directive < sizeof(CPUDirectives) / sizeof(*CPUDirectives) && + assert(Directive < array_lengthof(CPUDirectives) && "CPUDirectives[] might not be up-to-date!"); OutStreamer.EmitRawText("\t.machine " + Twine(CPUDirectives[Directive])); } @@ -884,6 +894,7 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) { void PPCDarwinAsmPrinter:: EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64; + bool isDarwin = Subtarget.isDarwin(); const TargetLoweringObjectFileMachO &TLOFMacho = static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering()); @@ -923,7 +934,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { // mflr r11 OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR).addReg(PPC::R11)); // addis r11, r11, ha16(LazyPtr - AnonSymbol) - const MCExpr *SubHa16 = PPCMCExpr::CreateHa16(Sub, OutContext); + const MCExpr *SubHa16 = PPCMCExpr::CreateHa(Sub, isDarwin, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS) .addReg(PPC::R11) .addReg(PPC::R11) @@ -933,7 +944,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { // ldu r12, lo16(LazyPtr - AnonSymbol)(r11) // lwzu r12, lo16(LazyPtr - AnonSymbol)(r11) - const MCExpr *SubLo16 = PPCMCExpr::CreateLo16(Sub, OutContext); + const MCExpr *SubLo16 = PPCMCExpr::CreateLo(Sub, isDarwin, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU) .addReg(PPC::R12) .addExpr(SubLo16).addExpr(SubLo16) @@ -978,14 +989,16 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { OutStreamer.EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol); // lis r11, ha16(LazyPtr) - const MCExpr *LazyPtrHa16 = PPCMCExpr::CreateHa16(LazyPtrExpr, OutContext); + const MCExpr *LazyPtrHa16 = + PPCMCExpr::CreateHa(LazyPtrExpr, isDarwin, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::LIS) .addReg(PPC::R11) .addExpr(LazyPtrHa16)); // ldu r12, lo16(LazyPtr)(r11) // lwzu r12, lo16(LazyPtr)(r11) - const MCExpr *LazyPtrLo16 = PPCMCExpr::CreateLo16(LazyPtrExpr, OutContext); + const MCExpr *LazyPtrLo16 = + PPCMCExpr::CreateLo(LazyPtrExpr, isDarwin, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU) .addReg(PPC::R12) .addExpr(LazyPtrLo16).addExpr(LazyPtrLo16) diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp index 08247c2..4e30c537 100644 --- a/lib/Target/PowerPC/PPCCTRLoops.cpp +++ b/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -233,6 +233,13 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) { #endif case Intrinsic::longjmp: + + // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp + // because, although it does clobber the counter register, the + // control can't then return to inside the loop unless there is also + // an eh_sjlj_setjmp. 
+ case Intrinsic::eh_sjlj_setjmp: + case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset: @@ -402,7 +409,7 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) { BasicBlock *CountedExitBlock = 0; const SCEV *ExitCount = 0; BranchInst *CountedExitBranch = 0; - for (SmallVector<BasicBlock*, 4>::iterator I = ExitingBlocks.begin(), + for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(), IE = ExitingBlocks.end(); I != IE; ++I) { const SCEV *EC = SE->getExitCount(L, *I); DEBUG(dbgs() << "Exit Count for " << *L << " from block " << @@ -415,6 +422,9 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) { } else if (!SE->isLoopInvariant(EC, L)) continue; + if (SE->getTypeSizeInBits(EC->getType()) > (TT.isArch64Bit() ? 64 : 32)) + continue; + // We now have a loop-invariant count of loop iterations (which is not the // constant zero) for which we know that this loop will not exit via this // exisiting block. diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td index c8a29a3..a584188 100644 --- a/lib/Target/PowerPC/PPCCallingConv.td +++ b/lib/Target/PowerPC/PPCCallingConv.td @@ -105,40 +105,45 @@ def CC_PPC32_SVR4_ByVal : CallingConv<[ CCCustom<"CC_PPC32_SVR4_Custom_Dummy"> ]>; +def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27, + V28, V29, V30, V31)>; + def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, - F27, F28, F29, F30, F31, CR2, CR3, CR4, - V20, V21, V22, V23, V24, V25, V26, V27, - V28, V29, V30, V31)>; + F27, F28, F29, F30, F31, CR2, CR3, CR4 + )>; + +def CSR_Darwin32_Altivec : CalleeSavedRegs<(add CSR_Darwin32, CSR_Altivec)>; -def CSR_SVR432 : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20, VRSAVE, +def CSR_SVR432 : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, - F27, F28, F29, F30, F31, CR2, CR3, CR4, - V20, V21, V22, V23, V24, V25, V26, V27, - V28, V29, V30, V31)>; + F27, F28, F29, F30, F31, CR2, CR3, CR4 + )>; + +def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>; def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, - F27, F28, F29, F30, F31, CR2, CR3, CR4, - V20, V21, V22, V23, V24, V25, V26, V27, - V28, V29, V30, V31)>; + F27, F28, F29, F30, F31, CR2, CR3, CR4 + )>; -def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20, VRSAVE, +def CSR_Darwin64_Altivec : CalleeSavedRegs<(add CSR_Darwin64, CSR_Altivec)>; + +def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, - F27, F28, F29, F30, F31, CR2, CR3, CR4, - V20, V21, V22, V23, V24, V25, V26, V27, - V28, V29, V30, V31)>; + F27, F28, F29, F30, F31, CR2, CR3, CR4 + )>; + -def CSR_NoRegs : CalleeSavedRegs<(add VRSAVE)>; -def CSR_NoRegs_Darwin : CalleeSavedRegs<(add)>; +def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>; -def CSR_NoRegs_Altivec : CalleeSavedRegs<(add (sequence "V%u", 0, 31), VRSAVE)>; +def CSR_NoRegs : CalleeSavedRegs<(add)>; diff --git a/lib/Target/PowerPC/PPCCodeEmitter.cpp b/lib/Target/PowerPC/PPCCodeEmitter.cpp index 
40e4968..418736e 100644 --- a/lib/Target/PowerPC/PPCCodeEmitter.cpp +++ b/lib/Target/PowerPC/PPCCodeEmitter.cpp @@ -63,11 +63,15 @@ namespace { unsigned get_crbitm_encoding(const MachineInstr &MI, unsigned OpNo) const; unsigned getDirectBrEncoding(const MachineInstr &MI, unsigned OpNo) const; unsigned getCondBrEncoding(const MachineInstr &MI, unsigned OpNo) const; + unsigned getAbsDirectBrEncoding(const MachineInstr &MI, + unsigned OpNo) const; + unsigned getAbsCondBrEncoding(const MachineInstr &MI, unsigned OpNo) const; - unsigned getS16ImmEncoding(const MachineInstr &MI, unsigned OpNo) const; + unsigned getImm16Encoding(const MachineInstr &MI, unsigned OpNo) const; unsigned getMemRIEncoding(const MachineInstr &MI, unsigned OpNo) const; unsigned getMemRIXEncoding(const MachineInstr &MI, unsigned OpNo) const; unsigned getTLSRegEncoding(const MachineInstr &MI, unsigned OpNo) const; + unsigned getTLSCallEncoding(const MachineInstr &MI, unsigned OpNo) const; const char *getPassName() const { return "PowerPC Machine Code Emitter"; } @@ -138,8 +142,8 @@ void PPCCodeEmitter::emitBasicBlock(MachineBasicBlock &MBB) { unsigned PPCCodeEmitter::get_crbitm_encoding(const MachineInstr &MI, unsigned OpNo) const { const MachineOperand &MO = MI.getOperand(OpNo); - assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MTCRF8 || - MI.getOpcode() == PPC::MFOCRF) && + assert((MI.getOpcode() == PPC::MTOCRF || MI.getOpcode() == PPC::MTOCRF8 || + MI.getOpcode() == PPC::MFOCRF || MI.getOpcode() == PPC::MFOCRF8) && (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7)); return 0x80 >> TM.getRegisterInfo()->getEncodingValue(MO.getReg()); } @@ -193,16 +197,29 @@ unsigned PPCCodeEmitter::getCondBrEncoding(const MachineInstr &MI, return 0; } -unsigned PPCCodeEmitter::getS16ImmEncoding(const MachineInstr &MI, - unsigned OpNo) const { +unsigned PPCCodeEmitter::getAbsDirectBrEncoding(const MachineInstr &MI, + unsigned OpNo) const { + const MachineOperand &MO = MI.getOperand(OpNo); + if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO); + + llvm_unreachable("Absolute branch relocations unsupported on the old JIT."); +} + +unsigned PPCCodeEmitter::getAbsCondBrEncoding(const MachineInstr &MI, + unsigned OpNo) const { + llvm_unreachable("Absolute branch relocations unsupported on the old JIT."); +} + +unsigned PPCCodeEmitter::getImm16Encoding(const MachineInstr &MI, + unsigned OpNo) const { const MachineOperand &MO = MI.getOperand(OpNo); if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO); unsigned RelocID; switch (MO.getTargetFlags() & PPCII::MO_ACCESS_MASK) { default: llvm_unreachable("Unsupported target operand flags!"); - case PPCII::MO_HA16: RelocID = PPC::reloc_absolute_high; break; - case PPCII::MO_LO16: RelocID = PPC::reloc_absolute_low; break; + case PPCII::MO_LO: RelocID = PPC::reloc_absolute_low; break; + case PPCII::MO_HA: RelocID = PPC::reloc_absolute_high; break; } MCE.addRelocation(GetRelocation(MO, RelocID)); @@ -247,15 +264,20 @@ unsigned PPCCodeEmitter::getTLSRegEncoding(const MachineInstr &MI, return 0; } +unsigned PPCCodeEmitter::getTLSCallEncoding(const MachineInstr &MI, + unsigned OpNo) const { + llvm_unreachable("TLS not supported on the old JIT."); + return 0; +} unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI, const MachineOperand &MO) const { if (MO.isReg()) { - // MTCRF/MFOCRF should go through get_crbitm_encoding for the CR operand. + // MTOCRF/MFOCRF should go through get_crbitm_encoding for the CR operand. 
// The GPR operand should come through here though. - assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MTCRF8 && - MI.getOpcode() != PPC::MFOCRF) || + assert((MI.getOpcode() != PPC::MTOCRF && MI.getOpcode() != PPC::MTOCRF8 && + MI.getOpcode() != PPC::MFOCRF && MI.getOpcode() != PPC::MFOCRF8) || MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7); return TM.getRegisterInfo()->getEncodingValue(MO.getReg()); } diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp new file mode 100644 index 0000000..8cbf1fb --- /dev/null +++ b/lib/Target/PowerPC/PPCFastISel.cpp @@ -0,0 +1,328 @@ +//===-- PPCFastISel.cpp - PowerPC FastISel implementation -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PowerPC-specific support for the FastISel class. Some +// of the target-specific code is generated by tablegen in the file +// PPCGenFastISel.inc, which is #included here. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "ppcfastisel" +#include "PPC.h" +#include "PPCISelLowering.h" +#include "PPCSubtarget.h" +#include "PPCTargetMachine.h" +#include "MCTargetDesc/PPCPredicates.h" +#include "llvm/ADT/Optional.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/FastISel.h" +#include "llvm/CodeGen/FunctionLoweringInfo.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/CallingConv.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Operator.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Target/TargetLowering.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +namespace { + +typedef struct Address { + enum { + RegBase, + FrameIndexBase + } BaseType; + + union { + unsigned Reg; + int FI; + } Base; + + int Offset; + + // Innocuous defaults for our address. + Address() + : BaseType(RegBase), Offset(0) { + Base.Reg = 0; + } +} Address; + +class PPCFastISel : public FastISel { + + const TargetMachine &TM; + const TargetInstrInfo &TII; + const TargetLowering &TLI; + const PPCSubtarget &PPCSubTarget; + LLVMContext *Context; + + public: + explicit PPCFastISel(FunctionLoweringInfo &FuncInfo, + const TargetLibraryInfo *LibInfo) + : FastISel(FuncInfo, LibInfo), + TM(FuncInfo.MF->getTarget()), + TII(*TM.getInstrInfo()), + TLI(*TM.getTargetLowering()), + PPCSubTarget( + *((static_cast<const PPCTargetMachine *>(&TM))->getSubtargetImpl()) + ), + Context(&FuncInfo.Fn->getContext()) { } + + // Backend specific FastISel code. + private: + virtual bool TargetSelectInstruction(const Instruction *I); + virtual unsigned TargetMaterializeConstant(const Constant *C); + virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI); + virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI); + virtual bool FastLowerArguments(); + + // Utility routines. 
+ private: + unsigned PPCMaterializeFP(const ConstantFP *CFP, MVT VT); + unsigned PPCMaterializeInt(const Constant *C, MVT VT); + unsigned PPCMaterialize32BitInt(int64_t Imm, + const TargetRegisterClass *RC); + unsigned PPCMaterialize64BitInt(int64_t Imm, + const TargetRegisterClass *RC); + + private: + #include "PPCGenFastISel.inc" + +}; + +} // end anonymous namespace + +// Attempt to fast-select an instruction that wasn't handled by +// the table-generated machinery. TBD. +bool PPCFastISel::TargetSelectInstruction(const Instruction *I) { + return I && false; +} + +// Materialize a floating-point constant into a register, and return +// the register number (or zero if we failed to handle it). +unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) { + // No plans to handle long double here. + if (VT != MVT::f32 && VT != MVT::f64) + return 0; + + // All FP constants are loaded from the constant pool. + unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); + assert(Align > 0 && "Unexpectedly missing alignment information!"); + unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); + unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); + CodeModel::Model CModel = TM.getCodeModel(); + + MachineMemOperand *MMO = + FuncInfo.MF->getMachineMemOperand( + MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad, + (VT == MVT::f32) ? 4 : 8, Align); + + // For small code model, generate a LDtocCPT. + if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocCPT), + DestReg) + .addConstantPoolIndex(Idx).addReg(PPC::X2).addMemOperand(MMO); + else { + // Otherwise we generate LF[SD](Idx[lo], ADDIStocHA(X2, Idx)). + unsigned Opc = (VT == MVT::f32) ? PPC::LFS : PPC::LFD; + unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA), + TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) + .addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO) + .addReg(TmpReg) + .addMemOperand(MMO); + } + + return DestReg; +} + +// Materialize a 32-bit integer constant into a register, and return +// the register number (or zero if we failed to handle it). +unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm, + const TargetRegisterClass *RC) { + unsigned Lo = Imm & 0xFFFF; + unsigned Hi = (Imm >> 16) & 0xFFFF; + + unsigned ResultReg = createResultReg(RC); + bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass); + + if (isInt<16>(Imm)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg) + .addImm(Imm); + else if (Lo) { + // Both Lo and Hi have nonzero bits. + unsigned TmpReg = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg) + .addImm(Hi); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg) + .addReg(TmpReg).addImm(Lo); + } else + // Just Hi bits. + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg) + .addImm(Hi); + + return ResultReg; +} + +// Materialize a 64-bit integer constant into a register, and return +// the register number (or zero if we failed to handle it). 
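Before the 64-bit routine that follows, a quick standalone check (illustrative constants, not from the patch) of the LIS/ORI split PPCMaterialize32BitInt performs above:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t Imm = 0xDEADBEEF;            // needs both halves
  uint32_t Lo = Imm & 0xFFFF;           // -> ORI / ORI8
  uint32_t Hi = (Imm >> 16) & 0xFFFF;   // -> LIS / LIS8
  // lis rD, Hi ; ori rD, rD, Lo
  assert(((Hi << 16) | Lo) == Imm);
  return 0;
}
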
+unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm, + const TargetRegisterClass *RC) { + unsigned Remainder = 0; + unsigned Shift = 0; + + // If the value doesn't fit in 32 bits, see if we can shift it + // so that it fits in 32 bits. + if (!isInt<32>(Imm)) { + Shift = countTrailingZeros<uint64_t>(Imm); + int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift; + + if (isInt<32>(ImmSh)) + Imm = ImmSh; + else { + Remainder = Imm; + Shift = 32; + Imm >>= 32; + } + } + + // Handle the high-order 32 bits (if shifted) or the whole 32 bits + // (if not shifted). + unsigned TmpReg1 = PPCMaterialize32BitInt(Imm, RC); + if (!Shift) + return TmpReg1; + + // If upper 32 bits were not zero, we've built them and need to shift + // them into place. + unsigned TmpReg2; + if (Imm) { + TmpReg2 = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLDICR), + TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift); + } else + TmpReg2 = TmpReg1; + + unsigned TmpReg3, Hi, Lo; + if ((Hi = (Remainder >> 16) & 0xFFFF)) { + TmpReg3 = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORIS8), + TmpReg3).addReg(TmpReg2).addImm(Hi); + } else + TmpReg3 = TmpReg2; + + if ((Lo = Remainder & 0xFFFF)) { + unsigned ResultReg = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORI8), + ResultReg).addReg(TmpReg3).addImm(Lo); + return ResultReg; + } + + return TmpReg3; +} + + +// Materialize an integer constant into a register, and return +// the register number (or zero if we failed to handle it). +unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) { + + if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 && + VT != MVT::i8 && VT != MVT::i1) + return 0; + + const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass : + &PPC::GPRCRegClass); + + // If the constant is in range, use a load-immediate. + const ConstantInt *CI = cast<ConstantInt>(C); + if (isInt<16>(CI->getSExtValue())) { + unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI; + unsigned ImmReg = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ImmReg) + .addImm(CI->getSExtValue()); + return ImmReg; + } + + // Construct the constant piecewise. + int64_t Imm = CI->getZExtValue(); + + if (VT == MVT::i64) + return PPCMaterialize64BitInt(Imm, RC); + else if (VT == MVT::i32) + return PPCMaterialize32BitInt(Imm, RC); + + return 0; +} + +// Materialize a constant into a register, and return the register +// number (or zero if we failed to handle it). +unsigned PPCFastISel::TargetMaterializeConstant(const Constant *C) { + EVT CEVT = TLI.getValueType(C->getType(), true); + + // Only handle simple types. + if (!CEVT.isSimple()) return 0; + MVT VT = CEVT.getSimpleVT(); + + if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) + return PPCMaterializeFP(CFP, VT); + else if (isa<ConstantInt>(C)) + return PPCMaterializeInt(C, VT); + // TBD: Global values. + + return 0; +} + +// Materialize the address created by an alloca into a register, and +// return the register number (or zero if we failed to handle it). TBD. +unsigned PPCFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { + return AI && 0; +} + +// Fold loads into extends when possible. TBD. +bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI) { + return MI && OpNo && LI && false; +} + +// Attempt to lower call arguments in a faster way than done by +// the selection DAG code. 
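The shift/remainder bookkeeping in PPCMaterialize64BitInt above is easy to get wrong, so here is a minimal sketch that mirrors its planning logic and asserts the pieces recombine to the original immediate. It is illustrative only: ctz64 stands in for llvm::countTrailingZeros (via a GCC/Clang builtin), and the test constants are made up.

#include <cassert>
#include <cstdint>

static unsigned ctz64(uint64_t V) { return V ? __builtin_ctzll(V) : 64; }
static bool isInt32(int64_t V) { return V == (int32_t)V; }

// Reduce Imm to a 32-bit base (LIS/ORI material), a left shift (RLDICR),
// and a 32-bit remainder OR-ed back in afterwards (ORIS8/ORI8).
static void split64(int64_t Imm, int64_t &Base, unsigned &Shift,
                    uint64_t &Remainder) {
  Base = Imm; Shift = 0; Remainder = 0;
  if (isInt32(Imm)) return;
  Shift = ctz64((uint64_t)Imm);
  int64_t ImmSh = (int64_t)((uint64_t)Imm >> Shift);
  if (isInt32(ImmSh)) { Base = ImmSh; return; }
  Remainder = (uint64_t)Imm & 0xFFFFFFFFu;  // only the low 32 bits are re-ORed
  Shift = 32;
  Base = Imm >> 32;                         // arithmetic shift, as in the code
}

int main() {
  const int64_t Tests[] = { 42, -42, 0x12345678LL << 13, 0x123456789ABCDEF0LL };
  for (int64_t Imm : Tests) {
    int64_t Base; unsigned Shift; uint64_t Rem;
    split64(Imm, Base, Shift, Rem);
    assert(isInt32(Base) && "base must fit LIS/ORI");
    uint64_t R = ((uint64_t)Base << Shift) | Rem;  // RLDICR + ORIS8/ORI8
    assert((int64_t)R == Imm);
  }
  return 0;
}
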
+bool PPCFastISel::FastLowerArguments() { + // Defer to normal argument lowering for now. It's reasonably + // efficient. Consider doing something like ARM to handle the + // case where all args fit in registers, no varargs, no float + // or vector args. + return false; +} + +namespace llvm { + // Create the fast instruction selector for PowerPC64 ELF. + FastISel *PPC::createFastISel(FunctionLoweringInfo &FuncInfo, + const TargetLibraryInfo *LibInfo) { + const TargetMachine &TM = FuncInfo.MF->getTarget(); + + // Only available on 64-bit ELF for now. + const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>(); + if (Subtarget->isPPC64() && Subtarget->isSVR4ABI()) + return new PPCFastISel(FuncInfo, LibInfo); + + return 0; + } +} diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp index dabe613..24d3a0b 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -26,17 +26,6 @@ using namespace llvm; -// FIXME This disables some code that aligns the stack to a boundary bigger than -// the default (16 bytes on Darwin) when there is a stack local of greater -// alignment. This does not currently work, because the delta between old and -// new stack pointers is added to offsets that reference incoming parameters -// after the prolog is generated, and the code that does that doesn't handle a -// variable delta. You don't want to do that anyway; a better approach is to -// reserve another register that retains to the incoming stack pointer, and -// reference parameters relative to that. -#define ALIGN_STACK 0 - - /// VRRegNo - Map from a numbered VR register to its enum value. /// static const uint16_t VRRegNo[] = { @@ -217,9 +206,12 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF, // Get the alignments provided by the target, and the maximum alignment // (if any) of the fixed frame objects. - unsigned MaxAlign = MFI->getMaxAlignment(); unsigned TargetAlign = getStackAlignment(); - unsigned AlignMask = TargetAlign - 1; // + unsigned MaxAlign = MFI->getMaxAlignment(); + unsigned AlignMask = std::max(MaxAlign, TargetAlign) - 1; + + const PPCRegisterInfo *RegInfo = + static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo()); // If we are a leaf function, and use up to 224 bytes of stack space, // don't have a frame pointer, calls, or dynamic alloca then we do not need @@ -235,7 +227,7 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF, FrameSize <= 224 && // Fits in red zone. !MFI->hasVarSizedObjects() && // No dynamic alloca. !MFI->adjustsStack() && // No calls. - (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment. + !RegInfo->hasBasePointer(MF)) { // No special alignment. // No need for frame if (UpdateMF) MFI->setStackSize(0); @@ -305,6 +297,12 @@ void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const { unsigned FPReg = is31 ? PPC::R31 : PPC::R1; unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1; + const PPCRegisterInfo *RegInfo = + static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo()); + bool HasBP = RegInfo->hasBasePointer(MF); + unsigned BPReg = HasBP ? (unsigned) PPC::R30 : FPReg; + unsigned BP8Reg = HasBP ? 
(unsigned) PPC::X30 : FPReg; + for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE; ++BI) for (MachineBasicBlock::iterator MBBI = BI->end(); MBBI != BI->begin(); ) { @@ -321,6 +319,13 @@ void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const { case PPC::FP8: MO.setReg(FP8Reg); break; + case PPC::BP: + MO.setReg(BPReg); + break; + case PPC::BP8: + MO.setReg(BP8Reg); + break; + } } } @@ -332,9 +337,11 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { MachineFrameInfo *MFI = MF.getFrameInfo(); const PPCInstrInfo &TII = *static_cast<const PPCInstrInfo*>(MF.getTarget().getInstrInfo()); + const PPCRegisterInfo *RegInfo = + static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo()); MachineModuleInfo &MMI = MF.getMMI(); - const MCRegisterInfo &MRI = MMI.getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); DebugLoc dl; bool needsFrameMoves = MMI.hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry(); @@ -358,6 +365,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { // Work out frame sizes. unsigned FrameSize = determineFrameLayout(MF); int NegFrameSize = -FrameSize; + if (!isInt<32>(NegFrameSize)) + llvm_unreachable("Unhandled stack size!"); if (MFI->isFrameAddressTaken()) replaceFPWithRealFP(MF); @@ -369,9 +378,10 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { // Check if the link register (LR) must be saved. PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); bool MustSaveLR = FI->mustSaveLR(); - const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs(); + const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs(); // Do we have a frame pointer for this function? bool HasFP = hasFP(MF); + bool HasBP = RegInfo->hasBasePointer(MF); int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI); @@ -387,6 +397,19 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { } } + int BPOffset = 0; + if (HasBP) { + if (Subtarget.isSVR4ABI()) { + MachineFrameInfo *FFI = MF.getFrameInfo(); + int BPIndex = FI->getBasePointerSaveIndex(); + assert(BPIndex && "No Base Pointer Save Slot!"); + BPOffset = FFI->getObjectOffset(BPIndex); + } else { + BPOffset = + PPCFrameLowering::getBasePointerSaveOffset(isPPC64, isDarwinABI); + } + } + if (isPPC64) { if (MustSaveLR) BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR8), PPC::X0); @@ -404,6 +427,12 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { .addImm(FPOffset) .addReg(PPC::X1); + if (HasBP) + BuildMI(MBB, MBBI, dl, TII.get(PPC::STD)) + .addReg(PPC::X30) + .addImm(BPOffset) + .addReg(PPC::X1); + if (MustSaveLR) BuildMI(MBB, MBBI, dl, TII.get(PPC::STD)) .addReg(PPC::X0) @@ -427,6 +456,14 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { .addImm(FPOffset) .addReg(PPC::R1); + if (HasBP) + // FIXME: On PPC32 SVR4, FPOffset is negative and access to negative + // offsets of R1 is not allowed. + BuildMI(MBB, MBBI, dl, TII.get(PPC::STW)) + .addReg(PPC::R30) + .addImm(BPOffset) + .addReg(PPC::R1); + assert(MustSaveCRs.empty() && "Prologue CR saving supported only in 64-bit mode"); @@ -441,26 +478,43 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { if (!FrameSize) return; // Get stack alignments. - unsigned TargetAlign = getStackAlignment(); unsigned MaxAlign = MFI->getMaxAlignment(); // Adjust stack pointer: r1 += NegFrameSize. // If there is a preferred stack alignment, align R1 now if (!isPPC64) { // PPC32. 
- if (ALIGN_STACK && MaxAlign > TargetAlign) { + + if (HasBP) { + // Save a copy of r1 as the base pointer. + BuildMI(MBB, MBBI, dl, TII.get(PPC::OR), PPC::R30) + .addReg(PPC::R1) + .addReg(PPC::R1); + } + + if (HasBP && MaxAlign > 1) { assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) && "Invalid alignment!"); - assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!"); BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), PPC::R0) .addReg(PPC::R1) .addImm(0) .addImm(32 - Log2_32(MaxAlign)) .addImm(31); - BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC) ,PPC::R0) - .addReg(PPC::R0, RegState::Kill) - .addImm(NegFrameSize); + if (isInt<16>(NegFrameSize)) { + BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC), PPC::R0) + .addReg(PPC::R0, RegState::Kill) + .addImm(NegFrameSize); + } else { + BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS), PPC::R12) + .addImm(NegFrameSize >> 16); + BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R12) + .addReg(PPC::R12, RegState::Kill) + .addImm(NegFrameSize & 0xFFFF); + BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFC), PPC::R0) + .addReg(PPC::R0, RegState::Kill) + .addReg(PPC::R12, RegState::Kill); + } BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX), PPC::R1) .addReg(PPC::R1, RegState::Kill) .addReg(PPC::R1) @@ -482,18 +536,35 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { .addReg(PPC::R0); } } else { // PPC64. - if (ALIGN_STACK && MaxAlign > TargetAlign) { + if (HasBP) { + // Save a copy of r1 as the base pointer. + BuildMI(MBB, MBBI, dl, TII.get(PPC::OR8), PPC::X30) + .addReg(PPC::X1) + .addReg(PPC::X1); + } + + if (HasBP && MaxAlign > 1) { assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) && "Invalid alignment!"); - assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!"); BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), PPC::X0) .addReg(PPC::X1) .addImm(0) .addImm(64 - Log2_32(MaxAlign)); - BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC8), PPC::X0) - .addReg(PPC::X0) - .addImm(NegFrameSize); + if (isInt<16>(NegFrameSize)) { + BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC8), PPC::X0) + .addReg(PPC::X0, RegState::Kill) + .addImm(NegFrameSize); + } else { + BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X12) + .addImm(NegFrameSize >> 16); + BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X12) + .addReg(PPC::X12, RegState::Kill) + .addImm(NegFrameSize & 0xFFFF); + BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFC8), PPC::X0) + .addReg(PPC::X0, RegState::Kill) + .addReg(PPC::X12, RegState::Kill); + } BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX), PPC::X1) .addReg(PPC::X1, RegState::Kill) .addReg(PPC::X1) @@ -530,14 +601,21 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { if (HasFP) { unsigned Reg = isPPC64 ? PPC::X31 : PPC::R31; - Reg = MRI.getDwarfRegNum(Reg, true); + Reg = MRI->getDwarfRegNum(Reg, true); MMI.addFrameInst( MCCFIInstruction::createOffset(FrameLabel, Reg, FPOffset)); } + if (HasBP) { + unsigned Reg = isPPC64 ? PPC::X30 : PPC::R30; + Reg = MRI->getDwarfRegNum(Reg, true); + MMI.addFrameInst( + MCCFIInstruction::createOffset(FrameLabel, Reg, BPOffset)); + } + if (MustSaveLR) { unsigned Reg = isPPC64 ? PPC::LR8 : PPC::LR; - Reg = MRI.getDwarfRegNum(Reg, true); + Reg = MRI->getDwarfRegNum(Reg, true); MMI.addFrameInst( MCCFIInstruction::createOffset(FrameLabel, Reg, LROffset)); } @@ -565,7 +643,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { unsigned Reg = HasFP ? (isPPC64 ? PPC::X31 : PPC::R31) : (isPPC64 ? 
PPC::X1 : PPC::R1); - Reg = MRI.getDwarfRegNum(Reg, true); + Reg = MRI->getDwarfRegNum(Reg, true); MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(ReadyLabel, Reg)); } } @@ -597,13 +675,13 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { && Subtarget.isPPC64() && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) { MMI.addFrameInst(MCCFIInstruction::createOffset( - Label, MRI.getDwarfRegNum(PPC::CR2, true), 8)); + Label, MRI->getDwarfRegNum(PPC::CR2, true), 8)); continue; } int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx()); MMI.addFrameInst(MCCFIInstruction::createOffset( - Label, MRI.getDwarfRegNum(Reg, true), Offset)); + Label, MRI->getDwarfRegNum(Reg, true), Offset)); } } } @@ -614,6 +692,8 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, assert(MBBI != MBB.end() && "Returning block has no terminator"); const PPCInstrInfo &TII = *static_cast<const PPCInstrInfo*>(MF.getTarget().getInstrInfo()); + const PPCRegisterInfo *RegInfo = + static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo()); unsigned RetOpcode = MBBI->getOpcode(); DebugLoc dl; @@ -629,8 +709,6 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, // Get alignment info so we know how to restore r1 const MachineFrameInfo *MFI = MF.getFrameInfo(); - unsigned TargetAlign = getStackAlignment(); - unsigned MaxAlign = MFI->getMaxAlignment(); // Get the number of bytes allocated from the FrameInfo. int FrameSize = MFI->getStackSize(); @@ -642,9 +720,10 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, // Check if the link register (LR) has been saved. PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); bool MustSaveLR = FI->mustSaveLR(); - const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs(); + const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs(); // Do we have a frame pointer for this function? 
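Returning to the prologue's dynamic realignment a few hunks up: the RLWINM/SUBFIC/STWUX sequence relies on determineFrameLayout having already rounded FrameSize up to MaxAlign, which the new std::max(MaxAlign, TargetAlign) AlignMask guarantees. A sketch of the identity, with made-up register values:

#include <cassert>
#include <cstdint>

// Mirrors: rlwinm r0,r1,0,32-lg(MaxAlign),31 ; subfic r0,r0,-FrameSize ;
// stwux r1,r1,r0.  FrameSize is assumed to be a multiple of MaxAlign.
static uint32_t prologueSP(uint32_t R1, uint32_t FrameSize, uint32_t MaxAlign) {
  uint32_t R0 = R1 & (MaxAlign - 1);  // misalignment of the incoming SP
  R0 = (0u - FrameSize) - R0;         // NegFrameSize minus the misalignment
  return R1 + R0;                     // store-with-update writes r1 + r0
}

int main() {
  uint32_t SP = prologueSP(/*r1=*/0x7FFF1238u, /*FrameSize=*/0xC0,
                           /*MaxAlign=*/64);
  // Equivalent to aligning the old SP down, then dropping the frame.
  assert(SP == ((0x7FFF1238u & ~63u) - 0xC0) && SP % 64 == 0);
  return 0;
}
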
bool HasFP = hasFP(MF); + bool HasBP = RegInfo->hasBasePointer(MF); int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI); @@ -660,6 +739,19 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, } } + int BPOffset = 0; + if (HasBP) { + if (Subtarget.isSVR4ABI()) { + MachineFrameInfo *FFI = MF.getFrameInfo(); + int BPIndex = FI->getBasePointerSaveIndex(); + assert(BPIndex && "No Base Pointer Save Slot!"); + BPOffset = FFI->getObjectOffset(BPIndex); + } else { + BPOffset = + PPCFrameLowering::getBasePointerSaveOffset(isPPC64, isDarwinABI); + } + } + bool UsesTCRet = RetOpcode == PPC::TCRETURNri || RetOpcode == PPC::TCRETURNdi || RetOpcode == PPC::TCRETURNai || @@ -704,7 +796,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, .addReg(PPC::R31) .addReg(PPC::R0); } else if (isInt<16>(FrameSize) && - (!ALIGN_STACK || TargetAlign >= MaxAlign) && + !HasBP && !MFI->hasVarSizedObjects()) { BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1) .addReg(PPC::R1).addImm(FrameSize); @@ -727,7 +819,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, .addReg(PPC::X1) .addReg(PPC::X31) .addReg(PPC::X0); - } else if (isInt<16>(FrameSize) && TargetAlign >= MaxAlign && + } else if (isInt<16>(FrameSize) && !HasBP && !MFI->hasVarSizedObjects()) { BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1) .addReg(PPC::X1).addImm(FrameSize); @@ -751,9 +843,13 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X31) .addImm(FPOffset).addReg(PPC::X1); + if (HasBP) + BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X30) + .addImm(BPOffset).addReg(PPC::X1); + if (!MustSaveCRs.empty()) for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i) - BuildMI(MBB, MBBI, dl, TII.get(PPC::MTCRF8), MustSaveCRs[i]) + BuildMI(MBB, MBBI, dl, TII.get(PPC::MTOCRF8), MustSaveCRs[i]) .addReg(PPC::X12, getKillRegState(i == e-1)); if (MustSaveLR) @@ -770,6 +866,10 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R31) .addImm(FPOffset).addReg(PPC::R1); + if (HasBP) + BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R30) + .addImm(FPOffset).addReg(PPC::R1); + if (MustSaveLR) BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR)).addReg(PPC::R0); } @@ -848,7 +948,8 @@ static bool MustSaveLR(const MachineFunction &MF, unsigned LR) { void PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, RegScavenger *) const { - const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo(); + const PPCRegisterInfo *RegInfo = + static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo()); // Save and clear the LR state. PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); @@ -873,6 +974,15 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, FI->setFramePointerSaveIndex(FPSI); } + int BPSI = FI->getBasePointerSaveIndex(); + if (!BPSI && RegInfo->hasBasePointer(MF)) { + int BPOffset = getBasePointerSaveOffset(isPPC64, isDarwinABI); + // Allocate the frame index for the base pointer save area. + BPSI = MFI->CreateFixedObject(isPPC64? 8 : 4, BPOffset, true); + // Save the result. + FI->setBasePointerSaveIndex(BPSI); + } + // Reserve stack space to move the linkage area to in case of a tail call. 
int TCSPDelta = 0; if (MF.getTarget().Options.GuaranteedTailCallOpt && @@ -1004,6 +1114,17 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF, FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI)); } + const PPCRegisterInfo *RegInfo = + static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo()); + if (RegInfo->hasBasePointer(MF)) { + HasGPSaveArea = true; + + int FI = PFI->getBasePointerSaveIndex(); + assert(FI && "No Base Pointer Save Slot!"); + + FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI)); + } + // General register save area starts right below the Floating-point // register save area. if (HasGPSaveArea || HasG8SaveArea) { @@ -1116,8 +1237,12 @@ PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF, RC->getAlignment(), false)); + // Might we have over-aligned allocas? + bool HasAlVars = MFI->hasVarSizedObjects() && + MFI->getMaxAlignment() > getStackAlignment(); + // These kinds of spills might need two registers. - if (spillsCR(MF) || spillsVRSAVE(MF)) + if (spillsCR(MF) || spillsVRSAVE(MF) || HasAlVars) RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false)); @@ -1145,6 +1270,12 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB, for (unsigned i = 0, e = CSI.size(); i != e; ++i) { unsigned Reg = CSI[i].getReg(); + // Only Darwin actually uses the VRSAVE register, but it can still appear + // here if, for example, @llvm.eh.unwind.init() is used. If we're not on + // Darwin, ignore it. + if (Reg == PPC::VRSAVE && !Subtarget.isDarwinABI()) + continue; + // CR2 through CR4 are the nonvolatile CR fields. bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4; @@ -1206,7 +1337,7 @@ restoreCRs(bool isPPC64, bool is31, MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ), PPC::R12), CSI[CSIIndex].getFrameIdx())); - RestoreOp = PPC::MTCRF; + RestoreOp = PPC::MTOCRF; MoveReg = PPC::R12; } @@ -1294,6 +1425,12 @@ PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, for (unsigned i = 0, e = CSI.size(); i != e; ++i) { unsigned Reg = CSI[i].getReg(); + // Only Darwin actually uses the VRSAVE register, but it can still appear + // here if, for example, @llvm.eh.unwind.init() is used. If we're not on + // Darwin, ignore it. + if (Reg == PPC::VRSAVE && !Subtarget.isDarwinABI()) + continue; + if (Reg == PPC::CR2) { CR2Spilled = true; // The spill slot is associated only with CR2, which is the diff --git a/lib/Target/PowerPC/PPCFrameLowering.h b/lib/Target/PowerPC/PPCFrameLowering.h index 6f5f936..7aab37e 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.h +++ b/lib/Target/PowerPC/PPCFrameLowering.h @@ -94,6 +94,16 @@ public: return isPPC64 ? -8U : -4U; } + /// getBasePointerSaveOffset - Return the previous frame offset to save the + /// base pointer. + static unsigned getBasePointerSaveOffset(bool isPPC64, bool isDarwinABI) { + if (isDarwinABI) + return isPPC64 ? -16U : -8U; + + // SVR4 ABI: First slot in the general register save area. + return isPPC64 ? -16U : -8U; + } + /// getLinkageSize - Return the size of the PowerPC ABI linkage area. 
/// static unsigned getLinkageSize(bool isPPC64, bool isDarwinABI) { diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index e006945..475bde1 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -330,6 +330,9 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) { } bool PPCDAGToDAGISel::isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME) { + if (!Val) + return false; + if (isShiftedMask_32(Val)) { // look for the first non-zero bit MB = countLeadingZeros(Val); @@ -435,7 +438,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { } unsigned MB, ME; - if (InsertMask && isRunOfOnes(InsertMask, MB, ME)) { + if (isRunOfOnes(InsertMask, MB, ME)) { SDValue Tmp1, Tmp2; if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) && @@ -447,10 +450,10 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { unsigned SHOpc = Op1.getOperand(0).getOpcode(); if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) && isInt32Immediate(Op1.getOperand(0).getOperand(1), Value)) { + // Note that Value must be in range here (less than 32) because + // otherwise there would not be any bits set in InsertMask. Op1 = Op1.getOperand(0).getOperand(0); SH = (SHOpc == ISD::SHL) ? Value : 32 - Value; - } else { - Op1 = Op1.getOperand(0); } } @@ -594,12 +597,8 @@ static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC) { /// getCRIdxForSetCC - Return the index of the condition register field /// associated with the SetCC condition, and whether or not the field is /// treated as inverted. That is, lt = 0; ge = 0 inverted. -/// -/// If this returns with Other != -1, then the returned comparison is an or of -/// two simpler comparisons. In this case, Invert is guaranteed to be false. -static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) { +static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert) { Invert = false; - Other = -1; switch (CC) { default: llvm_unreachable("Unknown condition!"); case ISD::SETOLT: @@ -847,8 +846,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { } bool Inv; - int OtherCondIdx; - unsigned Idx = getCRIdxForSetCC(CC, Inv, OtherCondIdx); + unsigned Idx = getCRIdxForSetCC(CC, Inv); SDValue CCReg = SelectCC(LHS, RHS, CC, dl); SDValue IntCR; @@ -859,35 +857,18 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg, InFlag).getValue(1); - if (PPCSubTarget.hasMFOCRF() && OtherCondIdx == -1) - IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg, - CCReg), 0); - else - IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32, - CR7Reg, CCReg), 0); + IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg, + CCReg), 0); SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31), getI32Imm(31), getI32Imm(31) }; - if (OtherCondIdx == -1 && !Inv) + if (!Inv) return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); // Get the specified bit. SDValue Tmp = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); - if (Inv) { - assert(OtherCondIdx == -1 && "Can't have split plus negation"); - return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1)); - } - - // Otherwise, we have to turn an operation like SETONE -> SETOLT | SETOGT. - // We already got the bit for the first part of the comparison (e.g. SETULE). - - // Get the other bit of the comparison. 
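The new `if (!Val) return false;` early-out in isRunOfOnes() above is what lets SelectBitfieldInsert drop its separate InsertMask check: a zero mask contains no run of ones. A stand-alone sketch of the underlying bit trick; isMask32/isShiftedMask32 are local stand-ins for the llvm::isMask_32/llvm::isShiftedMask_32 helpers the real function calls:

    #include <cassert>
    #include <cstdint>

    static bool isMask32(uint32_t V)        { return V && ((V + 1) & V) == 0; }
    static bool isShiftedMask32(uint32_t V) { return V && isMask32((V - 1) | V); }

    int main() {
      assert(isShiftedMask32(0x00FF0000));  // one contiguous run: valid MB..ME
      assert(!isShiftedMask32(0x00FF00FF)); // two runs: rejected
      // Wrapped masks fail here; isRunOfOnes catches them via its
      // isShiftedMask_32(~Val) branch instead.
      assert(!isShiftedMask32(0xFF00FF00));
      assert(!isShiftedMask32(0));          // Val == 0: the new early-out
    }
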
- Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31); - SDValue OtherCond = - SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); - - return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond); + return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1)); } @@ -992,15 +973,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { getSmallIPtrImm(0)); } - case PPCISD::MFCR: { + case PPCISD::MFOCRF: { SDValue InFlag = N->getOperand(1); - // Use MFOCRF if supported. - if (PPCSubTarget.hasMFOCRF()) - return CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, - N->getOperand(0), InFlag); - else - return CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32, - N->getOperand(0), InFlag); + return CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, + N->getOperand(0), InFlag); } case ISD::SDIV: { @@ -1502,13 +1478,13 @@ void PPCDAGToDAGISel::PostprocessISelDAG() { continue; break; case PPC::ADDIdtprelL: - Flags = PPCII::MO_DTPREL16_LO; + Flags = PPCII::MO_DTPREL_LO; break; case PPC::ADDItlsldL: - Flags = PPCII::MO_TLSLD16_LO; + Flags = PPCII::MO_TLSLD_LO; break; case PPC::ADDItocL: - Flags = PPCII::MO_TOC16_LO; + Flags = PPCII::MO_TOC_LO; break; } @@ -1530,6 +1506,14 @@ void PPCDAGToDAGISel::PostprocessISelDAG() { if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) { SDLoc dl(GA); const GlobalValue *GV = GA->getGlobal(); + // We can't perform this optimization for data whose alignment + // is insufficient for the instruction encoding. + if (GV->getAlignment() < 4 && + (StorageOpcode == PPC::LD || StorageOpcode == PPC::STD || + StorageOpcode == PPC::LWA)) { + DEBUG(dbgs() << "Rejected this candidate for alignment.\n\n"); + continue; + } ImmOpnd = CurDAG->getTargetGlobalAddress(GV, dl, MVT::i64, 0, Flags); } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(ImmOpnd)) { diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index e2433e7..664dd12 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -228,11 +228,6 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) // We cannot sextinreg(i1). Expand to shifts. setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); - // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support // SjLj exception handling but a light-weight setjmp/longjmp replacement to // support continuation, user-level threading, and etc.. As a result, no @@ -285,8 +280,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) } else setOperationAction(ISD::VAARG, MVT::Other, Expand); + if (Subtarget->isSVR4ABI() && !isPPC64) + // VACOPY is custom lowered with the 32-bit SVR4 ABI. + setOperationAction(ISD::VACOPY , MVT::Other, Custom); + else + setOperationAction(ISD::VACOPY , MVT::Other, Expand); + // Use the default implementation. 
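About the alignment rejection added to PostprocessISelDAG() above: LD, STD and LWA are DS-form instructions, whose 16-bit displacement field stores offset >> 2, so only displacements that are multiples of 4 are encodable, and a global with alignment < 4 can yield a @toc@l low part that is not. A small sketch of the encodability test (our helper, not an LLVM API):

    #include <cassert>
    #include <cstdint>

    // DS-form displacement: a signed 14-bit field, shifted left by 2 in
    // hardware, i.e. -32768..32764 in steps of 4.
    static bool fitsDSForm(int64_t Offset) {
      return (Offset & 3) == 0 && Offset >= -32768 && Offset <= 32764;
    }

    int main() {
      assert(fitsDSForm(8));      // multiple of 4, in range
      assert(!fitsDSForm(6));     // low bits set: needs the X-form (e.g. LDX)
      assert(!fitsDSForm(32768)); // aligned but out of range
    }
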
- setOperationAction(ISD::VACOPY , MVT::Other, Expand); setOperationAction(ISD::VAEND , MVT::Other, Expand); setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); @@ -397,6 +397,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::UDIV, VT, Expand); setOperationAction(ISD::UREM, VT, Expand); setOperationAction(ISD::FDIV, VT, Expand); + setOperationAction(ISD::FREM, VT, Expand); setOperationAction(ISD::FNEG, VT, Expand); setOperationAction(ISD::FSQRT, VT, Expand); setOperationAction(ISD::FLOG, VT, Expand); @@ -491,6 +492,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand); setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand); setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand); + + setCondCodeAction(ISD::SETO, MVT::v4f32, Expand); + setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand); } if (Subtarget->has64BitSupport()) { @@ -626,7 +630,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; - case PPCISD::MFCR: return "PPCISD::MFCR"; + case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; case PPCISD::VCMP: return "PPCISD::VCMP"; case PPCISD::VCMPo: return "PPCISD::VCMPo"; case PPCISD::LBRX: return "PPCISD::LBRX"; @@ -1031,6 +1035,46 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, return false; } +// If we happen to be doing an i64 load or store into a stack slot that has +// less than a 4-byte alignment, then the frame-index elimination may need to +// use an indexed load or store instruction (because the offset may not be a +// multiple of 4). The extra register needed to hold the offset comes from the +// register scavenger, and it is possible that the scavenger will need to use +// an emergency spill slot. As a result, we need to make sure that a spill slot +// is allocated when doing an i64 load/store into a less-than-4-byte-aligned +// stack slot. +static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { + // FIXME: This does not handle the LWA case. + if (VT != MVT::i64) + return; + + // NOTE: We'll exclude negative FIs here, which come from argument + // lowering, because there are no known test cases triggering this problem + // using packed structures (or similar). We can remove this exclusion if + // we find such a test case. The reason why this is so test-case driven is + // because this entire 'fixup' is only to prevent crashes (from the + // register scavenger) on not-really-valid inputs. For example, if we have: + // %a = alloca i1 + // %b = bitcast i1* %a to i64* + // store i64* a, i64 b + // then the store should really be marked as 'align 1', but is not. If it + // were marked as 'align 1' then the indexed form would have been + // instruction-selected initially, and the problem this 'fixup' is preventing + // won't happen regardless. 
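Back to the LowerVACOPY hunk above: its "12 Byte" comment follows from the 32-bit SVR4 va_list being a record rather than a bare pointer, so va_copy must copy the whole struct. A host-runnable model of the layout; the field names are ours, and the two 32-bit pointers are modeled as uint32_t so the assertion holds on any host:

    #include <cstdint>

    struct VaList32SVR4 {
      uint8_t  gpr;                // next GPR slot to take an argument from
      uint8_t  fpr;                // next FPR slot to take an argument from
      uint16_t pad;                // 2 bytes of alignment padding
      uint32_t overflow_arg_area;  // 32-bit pointer, modeled as uint32_t
      uint32_t reg_save_area;      // 32-bit pointer, modeled as uint32_t
    };

    // 2*sizeof(char) + 2 bytes padding + 2*sizeof(char*) = 12 bytes: exactly
    // the length LowerVACOPY passes to DAG.getMemcpy.
    static_assert(sizeof(VaList32SVR4) == 12, "matches the memcpy above");

    int main() { return 0; }
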
+ if (FrameIdx < 0) + return; + + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + + unsigned Align = MFI->getObjectAlignment(FrameIdx); + if (Align >= 4) + return; + + PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); + FuncInfo->setHasNonRISpills(); +} + /// Returns true if the address N can be represented by a base register plus /// a signed 16-bit displacement [r+imm], and if it is not better /// represented as reg+reg. If Aligned is true, only accept displacements @@ -1052,6 +1096,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, Disp = DAG.getTargetConstant(imm, N.getValueType()); if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); + fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); } else { Base = N.getOperand(0); } @@ -1116,9 +1161,10 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, } Disp = DAG.getTargetConstant(0, getPointerTy()); - if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) + if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); - else + fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); + } else Base = N; return true; // [r+0] } @@ -1236,8 +1282,8 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV = 0) { - HiOpFlags = PPCII::MO_HA16; - LoOpFlags = PPCII::MO_LO16; + HiOpFlags = PPCII::MO_HA; + LoOpFlags = PPCII::MO_LO; // Don't use the pic base if not in PIC relocation model. Or if we are on a // non-darwin platform. We don't support PIC on other platforms yet. @@ -1350,9 +1396,9 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, if (Model == TLSModel::LocalExec) { SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, - PPCII::MO_TPREL16_HA); + PPCII::MO_TPREL_HA); SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, - PPCII::MO_TPREL16_LO); + PPCII::MO_TPREL_LO); SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, is64bit ? 
MVT::i64 : MVT::i32); SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); @@ -1364,12 +1410,14 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, if (Model == TLSModel::InitialExec) { SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); + SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, + PPCII::MO_TLS); SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); SDValue TPOffsetHi = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA); SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, TPOffsetHi); - return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGA); + return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); } if (Model == TLSModel::GeneralDynamic) { @@ -1607,6 +1655,18 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, false, false, false, 0); } +SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, + const PPCSubtarget &Subtarget) const { + assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); + + // We have to copy the entire va_list struct: + // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte + return DAG.getMemcpy(Op.getOperand(0), Op, + Op.getOperand(1), Op.getOperand(2), + DAG.getConstant(12, MVT::i32), 8, false, true, + MachinePointerInfo(), MachinePointerInfo()); +} + SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const { return Op.getOperand(0); @@ -2914,8 +2974,8 @@ struct TailCallArgumentInfo { static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, - const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, - SmallVector<SDValue, 8> &MemOpChains, + const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, + SmallVectorImpl<SDValue> &MemOpChains, SDLoc dl) { for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { SDValue Arg = TailCallArgs[i].Arg; @@ -2973,7 +3033,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, - SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { + SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { int Offset = ArgOffset + SPDiff; uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); @@ -3038,8 +3098,8 @@ static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, - bool isVector, SmallVector<SDValue, 8> &MemOpChains, - SmallVector<TailCallArgumentInfo, 8> &TailCallArguments, + bool isVector, SmallVectorImpl<SDValue> &MemOpChains, + SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, SDLoc dl) { EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); if (!isTailCall) { @@ -3063,7 +3123,7 @@ static void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, bool isDarwinABI, - SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) { + SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { MachineFunction &MF = DAG.getMachineFunction(); // Emit a sequence of copyto/copyfrom virtual registers for arguments that @@ -3090,8 +3150,8 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, static unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 
SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall, - SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, - SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, + SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, + SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, const PPCSubtarget &PPCSubTarget) { bool isPPC64 = PPCSubTarget.isPPC64(); @@ -3417,10 +3477,10 @@ SDValue PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDLoc &dl = CLI.DL; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -5539,7 +5599,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // Now that we have the comparison, emit a copy from the CR to a GPR. // This is flagged to the above dot comparison. - SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32, + SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, DAG.getRegister(PPC::CR6, MVT::i32), CompNode.getValue(1)); @@ -5672,6 +5732,9 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::VAARG: return LowerVAARG(Op, DAG, PPCSubTarget); + case ISD::VACOPY: + return LowerVACOPY(Op, DAG, PPCSubTarget); + case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); @@ -5767,6 +5830,9 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N, return; } case ISD::FP_TO_SINT: + // LowerFP_TO_INT() can only handle f32 and f64. + if (N->getOperand(0).getValueType() == MVT::ppcf128) + return; Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); return; } @@ -6038,6 +6104,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, // thisMBB: const int64_t LabelOffset = 1 * PVT.getStoreSize(); const int64_t TOCOffset = 3 * PVT.getStoreSize(); + const int64_t BPOffset = 4 * PVT.getStoreSize(); // Prepare IP either in reg. const TargetRegisterClass *PtrRC = getRegClassFor(PVT); @@ -6049,10 +6116,25 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, .addReg(PPC::X2) .addImm(TOCOffset) .addReg(BufReg); - MIB.setMemRefs(MMOBegin, MMOEnd); } + // Naked functions never have a base pointer, and so we use r1. For all + // other functions, this decision must be delayed until during PEI. + unsigned BaseReg; + if (MF->getFunction()->getAttributes().hasAttribute( + AttributeSet::FunctionIndex, Attribute::Naked)) + BaseReg = PPCSubTarget.isPPC64() ? PPC::X1 : PPC::R1; + else + BaseReg = PPCSubTarget.isPPC64() ? PPC::BP8 : PPC::BP; + + MIB = BuildMI(*thisMBB, MI, DL, + TII->get(PPCSubTarget.isPPC64() ? PPC::STD : PPC::STW)) + .addReg(BaseReg) + .addImm(BPOffset) + .addReg(BufReg); + MIB.setMemRefs(MMOBegin, MMOEnd); + // Setup MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); const PPCRegisterInfo *TRI = @@ -6124,12 +6206,14 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, // Since FP is only updated here but NOT referenced, it's treated as GPR. unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; + unsigned BP = (PVT == MVT::i64) ? 
PPC::X30 : PPC::R30; MachineInstrBuilder MIB; const int64_t LabelOffset = 1 * PVT.getStoreSize(); const int64_t SPOffset = 2 * PVT.getStoreSize(); const int64_t TOCOffset = 3 * PVT.getStoreSize(); + const int64_t BPOffset = 4 * PVT.getStoreSize(); unsigned BufReg = MI->getOperand(0).getReg(); @@ -6171,8 +6255,17 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, } MIB.setMemRefs(MMOBegin, MMOEnd); - // FIXME: When we also support base pointers, that register must also be - // restored here. + // Reload BP + if (PVT == MVT::i64) { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) + .addImm(BPOffset) + .addReg(BufReg); + } else { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) + .addImm(BPOffset) + .addReg(BufReg); + } + MIB.setMemRefs(MMOBegin, MMOEnd); // Reload TOC if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) { @@ -7293,16 +7386,16 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, } } - // If the user is a MFCR instruction, we know this is safe. Otherwise we - // give up for right now. - if (FlagUser->getOpcode() == PPCISD::MFCR) + // If the user is a MFOCRF instruction, we know this is safe. + // Otherwise we give up for right now. + if (FlagUser->getOpcode() == PPCISD::MFOCRF) return SDValue(VCMPoNode, 0); } break; } case ISD::BR_CC: { // If this is a branch on an altivec predicate comparison, lower this so - // that we don't have to do a MFCR: instead, branch directly on CR6. This + // that we don't have to do a MFOCRF: instead, branch directly on CR6. This // lowering is done pre-legalize, because the legalizer lowers the predicate // compare down to code that is difficult to reassemble. ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); @@ -7514,7 +7607,7 @@ PPCTargetLowering::getSingleConstraintMatchWeight( std::pair<unsigned, const TargetRegisterClass*> PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { // GCC RS6000 Constraint Letters switch (Constraint[0]) { @@ -7539,7 +7632,24 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, } } - return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); + std::pair<unsigned, const TargetRegisterClass*> R = + TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); + + // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers + // (which we call X[0-9]+). If a 64-bit value has been requested, and a + // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent + // register. + // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use + // the AsmName field from *RegisterInfo.td, then this would not be necessary. + if (R.first && VT == MVT::i64 && PPCSubTarget.isPPC64() && + PPC::GPRCRegClass.contains(R.first)) { + const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); + return std::make_pair(TRI->getMatchingSuperReg(R.first, + PPC::sub_32, &PPC::GPRCRegClass), + &PPC::G8RCRegClass); + } + + return R; } @@ -7768,18 +7878,15 @@ bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, return true; } -/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than -/// a pair of mul and add instructions. fmuladd intrinsics will be expanded to -/// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd -/// is expanded to mul + add. 
-bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const { +bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { + VT = VT.getScalarType(); + if (!VT.isSimple()) return false; switch (VT.getSimpleVT().SimpleTy) { case MVT::f32: case MVT::f64: - case MVT::v4f32: return true; default: break; @@ -7795,3 +7902,9 @@ Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { return Sched::ILP; } +// Create a fast isel object. +FastISel * +PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, + const TargetLibraryInfo *LibInfo) const { + return PPC::createFastISel(FuncInfo, LibInfo); +} diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index e85f96c..aa5e821 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -116,11 +116,10 @@ namespace llvm { /// Return with a flag operand, matched by 'blr' RET_FLAG, - /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF - /// instructions. This copies the bits corresponding to the specified - /// CRREG into the resultant GPR. Bits corresponding to other CR regs - /// are undefined. - MFCR, + /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction. + /// This copies the bits corresponding to the specified CRREG into the + /// resultant GPR. Bits corresponding to other CR regs are undefined. + MFOCRF, // EH_SJLJ_SETJMP - SjLj exception handling setjmp. EH_SJLJ_SETJMP, @@ -420,7 +419,7 @@ namespace llvm { std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; + MVT VT) const; /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate /// function arguments in the caller parameter area. This is the actual @@ -460,11 +459,16 @@ namespace llvm { /// relative to software emulation. virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const; - /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than - /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to - /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd - /// is expanded to mul + add. - virtual bool isFMAFasterThanMulAndAdd(EVT VT) const; + /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster + /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be + /// expanded to FMAs when this method returns true, otherwise fmuladd is + /// expanded to fmul + fadd. + virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const; + + /// createFastISel - This method returns a target-specific FastISel object, + /// or null if the target does not support "fast" instruction selection. 
+ virtual FastISel *createFastISel(FunctionLoweringInfo &FuncInfo, + const TargetLibraryInfo *LibInfo) const; private: SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const; @@ -499,6 +503,8 @@ namespace llvm { const PPCSubtarget &Subtarget) const; SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const; + SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG, + const PPCSubtarget &Subtarget) const; SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const; SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, @@ -623,6 +629,11 @@ namespace llvm { SDValue DAGCombineFastRecipFSQRT(SDValue Op, DAGCombinerInfo &DCI) const; }; + namespace PPC { + FastISel *createFastISel(FunctionLoweringInfo &FuncInfo, + const TargetLibraryInfo *LibInfo); + } + bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td index 0245ba7..f78bb38 100644 --- a/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/lib/Target/PowerPC/PPCInstr64Bit.td @@ -17,20 +17,39 @@ // def s16imm64 : Operand<i64> { let PrintMethod = "printS16ImmOperand"; - let EncoderMethod = "getS16ImmEncoding"; + let EncoderMethod = "getImm16Encoding"; let ParserMatchClass = PPCS16ImmAsmOperand; } def u16imm64 : Operand<i64> { let PrintMethod = "printU16ImmOperand"; + let EncoderMethod = "getImm16Encoding"; let ParserMatchClass = PPCU16ImmAsmOperand; } +def s17imm64 : Operand<i64> { + // This operand type is used for addis/lis to allow the assembler parser + // to accept immediates in the range -65536..65535 for compatibility with + // the GNU assembler. The operand is treated as 16-bit otherwise. + let PrintMethod = "printS16ImmOperand"; + let EncoderMethod = "getImm16Encoding"; + let ParserMatchClass = PPCS17ImmAsmOperand; +} def tocentry : Operand<iPTR> { let MIOperandInfo = (ops i64imm:$imm); } +def PPCTLSRegOperand : AsmOperandClass { + let Name = "TLSReg"; let PredicateMethod = "isTLSReg"; + let RenderMethod = "addTLSRegOperands"; +} def tlsreg : Operand<i64> { let EncoderMethod = "getTLSRegEncoding"; + let ParserMatchClass = PPCTLSRegOperand; } def tlsgd : Operand<i64> {} +def tlscall : Operand<i64> { + let PrintMethod = "printTLSCall"; + let MIOperandInfo = (ops calltarget:$func, tlsgd:$sym); + let EncoderMethod = "getTLSCallEncoding"; +} //===----------------------------------------------------------------------===// // 64-bit transformation functions. @@ -69,7 +88,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in { let isCodeGenOnly = 1 in def BCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond), - "b${cond:cc}ctr ${cond:reg}", BrB, []>, + "b${cond:cc}ctr${cond:pm} ${cond:reg}", BrB, []>, Requires<[In64BitMode]>; } } @@ -102,7 +121,10 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in { def BL8 : IForm<18, 0, 1, (outs), (ins calltarget:$func), "bl $func", BrB, []>; // See Pat patterns below. 
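The s17imm64 operand introduced above (its 32-bit twin s17imm appears later in PPCInstrInfo.td) widens only what the assembler parser accepts, not the encoding: addis/lis still emit a 16-bit field, and -1 and 0xFFFF are the same bit pattern. A quick sketch of that equivalence:

    #include <cassert>
    #include <cstdint>

    static bool fitsS17(int64_t V) { return V >= -65536 && V <= 65535; }
    static uint16_t encode16(int64_t V) { return static_cast<uint16_t>(V & 0xFFFF); }

    int main() {
      assert(fitsS17(0xFFFF) && fitsS17(-1));   // both spellings parse, as in gas
      assert(encode16(0xFFFF) == encode16(-1)); // identical 16-bit field
      assert(!fitsS17(65536));                  // genuinely out of range
    }
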
- def BLA8 : IForm<18, 1, 1, (outs), (ins aaddr:$func), + def BL8_TLS : IForm<18, 0, 1, (outs), (ins tlscall:$func), + "bl $func", BrB, []>; + + def BLA8 : IForm<18, 1, 1, (outs), (ins abscalltarget:$func), "bla $func", BrB, [(PPCcall (i64 imm:$func))]>; } let Uses = [RM], isCodeGenOnly = 1 in { @@ -110,16 +132,12 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in { (outs), (ins calltarget:$func), "bl $func\n\tnop", BrB, []>; - def BL8_NOP_TLSGD : IForm_and_DForm_4_zero<18, 0, 1, 24, - (outs), (ins calltarget:$func, tlsgd:$sym), - "bl $func($sym)\n\tnop", BrB, []>; - - def BL8_NOP_TLSLD : IForm_and_DForm_4_zero<18, 0, 1, 24, - (outs), (ins calltarget:$func, tlsgd:$sym), - "bl $func($sym)\n\tnop", BrB, []>; + def BL8_NOP_TLS : IForm_and_DForm_4_zero<18, 0, 1, 24, + (outs), (ins tlscall:$func), + "bl $func\n\tnop", BrB, []>; def BLA8_NOP : IForm_and_DForm_4_zero<18, 1, 1, 24, - (outs), (ins aaddr:$func), + (outs), (ins abscalltarget:$func), "bla $func\n\tnop", BrB, [(PPCcall_nop (i64 imm:$func))]>; } @@ -130,7 +148,7 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in { let isCodeGenOnly = 1 in def BCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond), - "b${cond:cc}ctrl ${cond:reg}", BrB, []>, + "b${cond:cc}ctrl${cond:pm} ${cond:reg}", BrB, []>, Requires<[In64BitMode]>; } } @@ -198,7 +216,7 @@ def TCRETURNdi8 :Pseudo< (outs), []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in -def TCRETURNai8 :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset), +def TCRETURNai8 :Pseudo<(outs), (ins abscalltarget:$func, i32imm:$offset), "#TC_RETURNa8 $func $offset", [(PPCtc_return (i64 imm:$func), imm:$offset)]>; @@ -224,7 +242,7 @@ def TAILB8 : IForm<18, 0, 0, (outs), (ins calltarget:$dst), let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in -def TAILBA8 : IForm<18, 0, 0, (outs), (ins aaddr:$dst), +def TAILBA8 : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst), "ba $dst", BrB, []>; @@ -244,22 +262,25 @@ def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm), // 64-bit CR instructions let Interpretation64Bit = 1 in { let neverHasSideEffects = 1 in { -def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins g8rc:$rS), +def MTOCRF8: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins g8rc:$ST), + "mtocrf $FXM, $ST", BrMCRX>, + PPC970_DGroup_First, PPC970_Unit_CRU; + +def MTCRF8 : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, g8rc:$rS), "mtcrf $FXM, $rS", BrMCRX>, PPC970_MicroCode, PPC970_Unit_CRU; -let isCodeGenOnly = 1 in -def MFCR8pseud: XFXForm_3<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM), - "#MFCR8pseud", SprMFCR>, - PPC970_MicroCode, PPC970_Unit_CRU; -} // neverHasSideEffects = 1 +def MFOCRF8: XFXForm_5a<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM), + "mfocrf $rT, $FXM", SprMFCR>, + PPC970_DGroup_First, PPC970_Unit_CRU; -let neverHasSideEffects = 1 in def MFCR8 : XFXForm_3<31, 19, (outs g8rc:$rT), (ins), "mfcr $rT", SprMFCR>, PPC970_MicroCode, PPC970_Unit_CRU; +} // neverHasSideEffects = 1 let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in { + let Defs = [CTR8] in def EH_SjLj_SetJmp64 : Pseudo<(outs gprc:$dst), (ins memr:$buf), "#EH_SJLJ_SETJMP64", [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, @@ -291,7 +312,7 @@ def MTCTR8loop : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS), PPC970_DGroup_First, PPC970_Unit_FXU; } -let Pattern = [(set i64:$rT, readcyclecounter)] in +let isCodeGenOnly = 1, Pattern = [(set i64:$rT, readcyclecounter)] in def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs g8rc:$rT), 
(ins), "mfspr $rT, 268", SprMFTB>, PPC970_DGroup_First, PPC970_Unit_FXU; @@ -329,7 +350,7 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { def LI8 : DForm_2_r0<14, (outs g8rc:$rD), (ins s16imm64:$imm), "li $rD, $imm", IntSimple, [(set i64:$rD, imm64SExt16:$imm)]>; -def LIS8 : DForm_2_r0<15, (outs g8rc:$rD), (ins s16imm64:$imm), +def LIS8 : DForm_2_r0<15, (outs g8rc:$rD), (ins s17imm64:$imm), "lis $rD, $imm", IntSimple, [(set i64:$rD, imm16ShiftedSExt:$imm)]>; } @@ -389,9 +410,8 @@ defm ADD8 : XOForm_1r<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), [(set i64:$rT, (add i64:$rA, i64:$rB))]>; // ADD8 has a special form: reg = ADD8(reg, sym@tls) for use by the // initial-exec thread-local storage model. -let isCodeGenOnly = 1 in def ADD8TLS : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, tlsreg:$rB), - "add $rT, $rA, $rB@tls", IntSimple, + "add $rT, $rA, $rB", IntSimple, [(set i64:$rT, (add i64:$rA, tglobaltlsaddr:$rB))]>; defm ADDC8 : XOForm_1rc<31, 10, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), @@ -405,7 +425,7 @@ def ADDIC8 : DForm_2<12, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm), def ADDI8 : DForm_2<14, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s16imm64:$imm), "addi $rD, $rA, $imm", IntSimple, [(set i64:$rD, (add i64:$rA, imm64SExt16:$imm))]>; -def ADDIS8 : DForm_2<15, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s16imm64:$imm), +def ADDIS8 : DForm_2<15, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s17imm64:$imm), "addis $rD, $rA, $imm", IntSimple, [(set i64:$rD, (add i64:$rA, imm16ShiftedSExt:$imm))]>; @@ -522,6 +542,9 @@ defm DIVDU : XOForm_1r<31, 457, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), defm MULLD : XOForm_1r<31, 233, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), "mulld", "$rT, $rA, $rB", IntMulHD, [(set i64:$rT, (mul i64:$rA, i64:$rB))]>, isPPC64; +def MULLI8 : DForm_2<7, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm), + "mulli $rD, $rA, $imm", IntMulLI, + [(set i64:$rD, (mul i64:$rA, imm64SExt16:$imm))]>; } let neverHasSideEffects = 1 in { @@ -538,6 +561,10 @@ defm RLDCL : MDSForm_1r<30, 8, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE), "rldcl", "$rA, $rS, $rB, $MBE", IntRotateD, []>, isPPC64; +defm RLDCR : MDSForm_1r<30, 9, + (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE), + "rldcr", "$rA, $rS, $rB, $MBE", IntRotateD, + []>, isPPC64; defm RLDICL : MDForm_1r<30, 0, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE), "rldicl", "$rA, $rS, $SH, $MBE", IntRotateDI, @@ -546,6 +573,10 @@ defm RLDICR : MDForm_1r<30, 1, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE), "rldicr", "$rA, $rS, $SH, $MBE", IntRotateDI, []>, isPPC64; +defm RLDIC : MDForm_1r<30, 2, + (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE), + "rldic", "$rA, $rS, $SH, $MBE", IntRotateDI, + []>, isPPC64; let Interpretation64Bit = 1 in { defm RLWINM8 : MForm_2r<21, (outs g8rc:$rA), diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td index cc9cf0a..fdea51d 100644 --- a/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/lib/Target/PowerPC/PPCInstrAltivec.td @@ -392,7 +392,7 @@ def VCTUXS : VXForm_1<906, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), // Defines with the UIM field set to 0 for floating-point // to integer (fp_to_sint/fp_to_uint) conversions and integer // to floating-point (sint_to_fp/uint_to_fp) conversions. 
-let VA = 0 in { +let isCodeGenOnly = 1, VA = 0 in { def VCFSX_0 : VXForm_1<842, (outs vrrc:$vD), (ins vrrc:$vB), "vcfsx $vD, $vB, 0", VecFP, [(set v4f32:$vD, @@ -664,15 +664,29 @@ def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>; def VCMPGTUW : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>; def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>; -let isCodeGenOnly = 1 in -def V_SET0 : VXForm_setzero<1220, (outs vrrc:$vD), (ins), +let isCodeGenOnly = 1 in { +def V_SET0B : VXForm_setzero<1220, (outs vrrc:$vD), (ins), + "vxor $vD, $vD, $vD", VecFP, + [(set v16i8:$vD, (v16i8 immAllZerosV))]>; +def V_SET0H : VXForm_setzero<1220, (outs vrrc:$vD), (ins), + "vxor $vD, $vD, $vD", VecFP, + [(set v8i16:$vD, (v8i16 immAllZerosV))]>; +def V_SET0 : VXForm_setzero<1220, (outs vrrc:$vD), (ins), "vxor $vD, $vD, $vD", VecFP, [(set v4i32:$vD, (v4i32 immAllZerosV))]>; + let IMM=-1 in { -def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins), +def V_SETALLONESB : VXForm_3<908, (outs vrrc:$vD), (ins), + "vspltisw $vD, -1", VecFP, + [(set v16i8:$vD, (v16i8 immAllOnesV))]>; +def V_SETALLONESH : VXForm_3<908, (outs vrrc:$vD), (ins), + "vspltisw $vD, -1", VecFP, + [(set v8i16:$vD, (v8i16 immAllOnesV))]>; +def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins), "vspltisw $vD, -1", VecFP, [(set v4i32:$vD, (v4i32 immAllOnesV))]>; } +} } // VALU Operations. //===----------------------------------------------------------------------===// diff --git a/lib/Target/PowerPC/PPCInstrFormats.td b/lib/Target/PowerPC/PPCInstrFormats.td index a244058..42adc02 100644 --- a/lib/Target/PowerPC/PPCInstrFormats.td +++ b/lib/Target/PowerPC/PPCInstrFormats.td @@ -145,6 +145,20 @@ class BForm_2<bits<6> opcode, bits<5> bo, bits<5> bi, bit aa, bit lk, let Inst{31} = lk; } +class BForm_3<bits<6> opcode, bit aa, bit lk, + dag OOL, dag IOL, string asmstr> + : I<opcode, OOL, IOL, asmstr, BrB> { + bits<5> BO; + bits<5> BI; + bits<14> BD; + + let Inst{6-10} = BO; + let Inst{11-15} = BI; + let Inst{16-29} = BD; + let Inst{30} = aa; + let Inst{31} = lk; +} + // 1.7.3 SC-Form class SCForm<bits<6> opcode, bits<1> xo, dag OOL, dag IOL, string asmstr, InstrItinClass itin, @@ -459,14 +473,23 @@ class XForm_24<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, class XForm_24_sync<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list<dag> pattern> : I<opcode, OOL, IOL, asmstr, itin> { + bits<2> L; + let Pattern = pattern; - let Inst{6-10} = 0; + let Inst{6-8} = 0; + let Inst{9-10} = L; let Inst{11-15} = 0; let Inst{16-20} = 0; let Inst{21-30} = xo; let Inst{31} = 0; } +class XForm_24_eieio<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, + string asmstr, InstrItinClass itin, list<dag> pattern> + : XForm_24_sync<opcode, xo, OOL, IOL, asmstr, itin, pattern> { + let L = 0; +} + class XForm_25<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list<dag> pattern> : XForm_base_r3xo<opcode, xo, OOL, IOL, asmstr, itin, pattern> { diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp index a3eeb20..375daee 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -448,7 +448,9 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, // isel is for regular integer GPRs only. 
if (!PPC::GPRCRegClass.hasSubClassEq(RC) && - !PPC::G8RCRegClass.hasSubClassEq(RC)) + !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) && + !PPC::G8RCRegClass.hasSubClassEq(RC) && + !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) return false; // FIXME: These numbers are for the A2, how well they work for other cores is @@ -478,12 +480,15 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB, const TargetRegisterClass *RC = RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); assert(RC && "TrueReg and FalseReg must have overlapping register classes"); - assert((PPC::GPRCRegClass.hasSubClassEq(RC) || - PPC::G8RCRegClass.hasSubClassEq(RC)) && + + bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) || + PPC::G8RC_NOX0RegClass.hasSubClassEq(RC); + assert((Is64Bit || + PPC::GPRCRegClass.hasSubClassEq(RC) || + PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) && "isel is for regular integer GPRs only"); - unsigned OpCode = - PPC::GPRCRegClass.hasSubClassEq(RC) ? PPC::ISEL : PPC::ISEL8; + unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL; unsigned SelectPred = Cond[0].getImm(); unsigned SubIdx; @@ -791,16 +796,6 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, NewMIs.back()->addMemOperand(MF, MMO); } -MachineInstr* -PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const { - MachineInstrBuilder MIB = BuildMI(MF, DL, get(PPC::DBG_VALUE)); - addFrameReference(MIB, FrameIx, 0, false).addImm(Offset).addMetadata(MDPtr); - return &*MIB; -} - bool PPCInstrInfo:: ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { assert(Cond.size() == 2 && "Invalid PPC branch opcode!"); diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h index 34a1a73..bd72a4d 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.h +++ b/lib/Target/PowerPC/PPCInstrInfo.h @@ -148,12 +148,6 @@ public: const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const; - virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, - uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const; - virtual bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const; diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index 1b7ea93..398a11b 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -392,7 +392,7 @@ def vrrc : RegisterOperand<VRRC> { let ParserMatchClass = PPCRegVRRCAsmOperand; } def PPCRegCRBITRCAsmOperand : AsmOperandClass { - let Name = "RegCRBITRC"; let PredicateMethod = "isRegNumber"; + let Name = "RegCRBITRC"; let PredicateMethod = "isCRBitNumber"; } def crbitrc : RegisterOperand<CRBITRC> { let ParserMatchClass = PPCRegCRBITRCAsmOperand; @@ -434,7 +434,7 @@ def PPCS16ImmAsmOperand : AsmOperandClass { } def s16imm : Operand<i32> { let PrintMethod = "printS16ImmOperand"; - let EncoderMethod = "getS16ImmEncoding"; + let EncoderMethod = "getImm16Encoding"; let ParserMatchClass = PPCS16ImmAsmOperand; } def PPCU16ImmAsmOperand : AsmOperandClass { @@ -443,21 +443,58 @@ def PPCU16ImmAsmOperand : AsmOperandClass { } def u16imm : Operand<i32> { let PrintMethod = "printU16ImmOperand"; + let EncoderMethod = "getImm16Encoding"; let ParserMatchClass = PPCU16ImmAsmOperand; } +def PPCS17ImmAsmOperand : AsmOperandClass { + let Name = "S17Imm"; let PredicateMethod = "isS17Imm"; + let RenderMethod = "addImmOperands"; +} +def s17imm : Operand<i32> { + // This operand type is used for addis/lis to allow 
the assembler parser + // to accept immediates in the range -65536..65535 for compatibility with + // the GNU assembler. The operand is treated as 16-bit otherwise. + let PrintMethod = "printS16ImmOperand"; + let EncoderMethod = "getImm16Encoding"; + let ParserMatchClass = PPCS17ImmAsmOperand; +} +def PPCDirectBrAsmOperand : AsmOperandClass { + let Name = "DirectBr"; let PredicateMethod = "isDirectBr"; + let RenderMethod = "addBranchTargetOperands"; +} def directbrtarget : Operand<OtherVT> { let PrintMethod = "printBranchOperand"; let EncoderMethod = "getDirectBrEncoding"; + let ParserMatchClass = PPCDirectBrAsmOperand; +} +def absdirectbrtarget : Operand<OtherVT> { + let PrintMethod = "printAbsBranchOperand"; + let EncoderMethod = "getAbsDirectBrEncoding"; + let ParserMatchClass = PPCDirectBrAsmOperand; +} +def PPCCondBrAsmOperand : AsmOperandClass { + let Name = "CondBr"; let PredicateMethod = "isCondBr"; + let RenderMethod = "addBranchTargetOperands"; } def condbrtarget : Operand<OtherVT> { let PrintMethod = "printBranchOperand"; let EncoderMethod = "getCondBrEncoding"; + let ParserMatchClass = PPCCondBrAsmOperand; +} +def abscondbrtarget : Operand<OtherVT> { + let PrintMethod = "printAbsBranchOperand"; + let EncoderMethod = "getAbsCondBrEncoding"; + let ParserMatchClass = PPCCondBrAsmOperand; } def calltarget : Operand<iPTR> { + let PrintMethod = "printBranchOperand"; let EncoderMethod = "getDirectBrEncoding"; + let ParserMatchClass = PPCDirectBrAsmOperand; } -def aaddr : Operand<iPTR> { - let PrintMethod = "printAbsAddrOperand"; +def abscalltarget : Operand<iPTR> { + let PrintMethod = "printAbsBranchOperand"; + let EncoderMethod = "getAbsDirectBrEncoding"; + let ParserMatchClass = PPCDirectBrAsmOperand; } def PPCCRBitMaskOperand : AsmOperandClass { let Name = "CRBitMask"; let PredicateMethod = "isCRBitMask"; @@ -859,7 +896,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in { let isCodeGenOnly = 1 in def BCCTR : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond), - "b${cond:cc}ctr ${cond:reg}", BrB, []>; + "b${cond:cc}ctr${cond:pm} ${cond:reg}", BrB, []>; } } @@ -872,6 +909,8 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { def B : IForm<18, 0, 0, (outs), (ins directbrtarget:$dst), "b $dst", BrB, [(br bb:$dst)]>; + def BA : IForm<18, 1, 0, (outs), (ins absdirectbrtarget:$dst), + "ba $dst", BrB, []>; } // BCC represents an arbitrary conditional branch on a predicate. @@ -879,18 +918,29 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { // a two-value operand where a dag node expects two operands. 
:( let isCodeGenOnly = 1 in { def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst), - "b${cond:cc} ${cond:reg}, $dst" + "b${cond:cc}${cond:pm} ${cond:reg}, $dst" /*[(PPCcondbranch crrc:$crS, imm:$opc, bb:$dst)]*/>; + def BCCA : BForm<16, 1, 0, (outs), (ins pred:$cond, abscondbrtarget:$dst), + "b${cond:cc}a${cond:pm} ${cond:reg}, $dst">; + let isReturn = 1, Uses = [LR, RM] in def BCLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$cond), - "b${cond:cc}lr ${cond:reg}", BrB, []>; + "b${cond:cc}lr${cond:pm} ${cond:reg}", BrB, []>; + } - let isReturn = 1, Defs = [CTR], Uses = [CTR, LR, RM] in { - def BDZLR : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins), + let isReturn = 1, Defs = [CTR], Uses = [CTR, LR, RM] in { + def BDZLR : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins), "bdzlr", BrB, []>; - def BDNZLR : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins), + def BDNZLR : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins), "bdnzlr", BrB, []>; - } + def BDZLRp : XLForm_2_ext<19, 16, 27, 0, 0, (outs), (ins), + "bdzlr+", BrB, []>; + def BDNZLRp: XLForm_2_ext<19, 16, 25, 0, 0, (outs), (ins), + "bdnzlr+", BrB, []>; + def BDZLRm : XLForm_2_ext<19, 16, 26, 0, 0, (outs), (ins), + "bdzlr-", BrB, []>; + def BDNZLRm: XLForm_2_ext<19, 16, 24, 0, 0, (outs), (ins), + "bdnzlr-", BrB, []>; } let Defs = [CTR], Uses = [CTR] in { @@ -898,6 +948,26 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { "bdz $dst">; def BDNZ : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst), "bdnz $dst">; + def BDZA : BForm_1<16, 18, 1, 0, (outs), (ins abscondbrtarget:$dst), + "bdza $dst">; + def BDNZA : BForm_1<16, 16, 1, 0, (outs), (ins abscondbrtarget:$dst), + "bdnza $dst">; + def BDZp : BForm_1<16, 27, 0, 0, (outs), (ins condbrtarget:$dst), + "bdz+ $dst">; + def BDNZp: BForm_1<16, 25, 0, 0, (outs), (ins condbrtarget:$dst), + "bdnz+ $dst">; + def BDZAp : BForm_1<16, 27, 1, 0, (outs), (ins abscondbrtarget:$dst), + "bdza+ $dst">; + def BDNZAp: BForm_1<16, 25, 1, 0, (outs), (ins abscondbrtarget:$dst), + "bdnza+ $dst">; + def BDZm : BForm_1<16, 26, 0, 0, (outs), (ins condbrtarget:$dst), + "bdz- $dst">; + def BDNZm: BForm_1<16, 24, 0, 0, (outs), (ins condbrtarget:$dst), + "bdnz- $dst">; + def BDZAm : BForm_1<16, 26, 1, 0, (outs), (ins abscondbrtarget:$dst), + "bdza- $dst">; + def BDNZAm: BForm_1<16, 24, 1, 0, (outs), (ins abscondbrtarget:$dst), + "bdnza- $dst">; } } @@ -914,8 +984,15 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in { let Uses = [RM] in { def BL : IForm<18, 0, 1, (outs), (ins calltarget:$func), "bl $func", BrB, []>; // See Pat patterns below. 
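The +/- mnemonics defined above (bdz+/bdz-, bdnz+/bdnz-, and the ${cond:pm} modifier on the conditional forms) are static branch-prediction hints. Reading the BO values off the defs themselves: the plain CTR forms use BO=16 (bdnz) and BO=18 (bdz); "-" ORs in 8 (hint bits at=0b10, predict not taken) and "+" ORs in 9 (at=0b11, predict taken), which is our reading of the Power ISA hint encoding:

    #include <cassert>

    int main() {
      const unsigned BDNZ = 16, BDZ = 18;           // plain BO values above
      assert((BDNZ | 8) == 24 && (BDNZ | 9) == 25); // bdnz- / bdnz+
      assert((BDZ  | 8) == 26 && (BDZ  | 9) == 27); // bdz-  / bdz+
      return 0;
    }
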
- def BLA : IForm<18, 1, 1, (outs), (ins aaddr:$func), + def BLA : IForm<18, 1, 1, (outs), (ins abscalltarget:$func), "bla $func", BrB, [(PPCcall (i32 imm:$func))]>; + + let isCodeGenOnly = 1 in { + def BCCL : BForm<16, 0, 1, (outs), (ins pred:$cond, condbrtarget:$dst), + "b${cond:cc}l${cond:pm} ${cond:reg}, $dst">; + def BCCLA : BForm<16, 1, 1, (outs), (ins pred:$cond, abscondbrtarget:$dst), + "b${cond:cc}la${cond:pm} ${cond:reg}, $dst">; + } } let Uses = [CTR, RM] in { def BCTRL : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), @@ -924,7 +1001,55 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in { let isCodeGenOnly = 1 in def BCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond), - "b${cond:cc}ctrl ${cond:reg}", BrB, []>; + "b${cond:cc}ctrl${cond:pm} ${cond:reg}", BrB, []>; + } + let Uses = [LR, RM] in { + def BLRL : XLForm_2_ext<19, 16, 20, 0, 1, (outs), (ins), + "blrl", BrB, []>; + + let isCodeGenOnly = 1 in + def BCLRL : XLForm_2_br<19, 16, 1, (outs), (ins pred:$cond), + "b${cond:cc}lrl${cond:pm} ${cond:reg}", BrB, []>; + } + let Defs = [CTR], Uses = [CTR, RM] in { + def BDZL : BForm_1<16, 18, 0, 1, (outs), (ins condbrtarget:$dst), + "bdzl $dst">; + def BDNZL : BForm_1<16, 16, 0, 1, (outs), (ins condbrtarget:$dst), + "bdnzl $dst">; + def BDZLA : BForm_1<16, 18, 1, 1, (outs), (ins abscondbrtarget:$dst), + "bdzla $dst">; + def BDNZLA : BForm_1<16, 16, 1, 1, (outs), (ins abscondbrtarget:$dst), + "bdnzla $dst">; + def BDZLp : BForm_1<16, 27, 0, 1, (outs), (ins condbrtarget:$dst), + "bdzl+ $dst">; + def BDNZLp: BForm_1<16, 25, 0, 1, (outs), (ins condbrtarget:$dst), + "bdnzl+ $dst">; + def BDZLAp : BForm_1<16, 27, 1, 1, (outs), (ins abscondbrtarget:$dst), + "bdzla+ $dst">; + def BDNZLAp: BForm_1<16, 25, 1, 1, (outs), (ins abscondbrtarget:$dst), + "bdnzla+ $dst">; + def BDZLm : BForm_1<16, 26, 0, 1, (outs), (ins condbrtarget:$dst), + "bdzl- $dst">; + def BDNZLm: BForm_1<16, 24, 0, 1, (outs), (ins condbrtarget:$dst), + "bdnzl- $dst">; + def BDZLAm : BForm_1<16, 26, 1, 1, (outs), (ins abscondbrtarget:$dst), + "bdzla- $dst">; + def BDNZLAm: BForm_1<16, 24, 1, 1, (outs), (ins abscondbrtarget:$dst), + "bdnzla- $dst">; + } + let Defs = [CTR], Uses = [CTR, LR, RM] in { + def BDZLRL : XLForm_2_ext<19, 16, 18, 0, 1, (outs), (ins), + "bdzlrl", BrB, []>; + def BDNZLRL : XLForm_2_ext<19, 16, 16, 0, 1, (outs), (ins), + "bdnzlrl", BrB, []>; + def BDZLRLp : XLForm_2_ext<19, 16, 27, 0, 1, (outs), (ins), + "bdzlrl+", BrB, []>; + def BDNZLRLp: XLForm_2_ext<19, 16, 25, 0, 1, (outs), (ins), + "bdnzlrl+", BrB, []>; + def BDZLRLm : XLForm_2_ext<19, 16, 26, 0, 1, (outs), (ins), + "bdzlrl-", BrB, []>; + def BDNZLRLm: XLForm_2_ext<19, 16, 24, 0, 1, (outs), (ins), + "bdnzlrl-", BrB, []>; } } @@ -936,7 +1061,7 @@ def TCRETURNdi :Pseudo< (outs), let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in -def TCRETURNai :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset), +def TCRETURNai :Pseudo<(outs), (ins abscalltarget:$func, i32imm:$offset), "#TC_RETURNa $func $offset", [(PPCtc_return (i32 imm:$func), imm:$offset)]>; @@ -953,23 +1078,22 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1, def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>, Requires<[In32BitMode]>; - - let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in def TAILB : IForm<18, 0, 0, (outs), (ins calltarget:$dst), "b $dst", BrB, []>; -} - let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, isBarrier 
= 1, isCall = 1, isReturn = 1, Uses = [RM] in -def TAILBA : IForm<18, 0, 0, (outs), (ins aaddr:$dst), +def TAILBA : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst), "ba $dst", BrB, []>; +} + let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in { + let Defs = [CTR] in def EH_SjLj_SetJmp32 : Pseudo<(outs gprc:$dst), (ins memr:$buf), "#EH_SJLJ_SETJMP32", [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, @@ -1115,6 +1239,15 @@ def STWCX : XForm_1<31, 150, (outs), (ins gprc:$rS, memrr:$dst), let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStLoad, [(trap)]>; +def TWI : DForm_base<3, (outs), (ins u5imm:$to, gprc:$rA, s16imm:$imm), + "twi $to, $rA, $imm", IntTrapW, []>; +def TW : XForm_1<31, 4, (outs), (ins u5imm:$to, gprc:$rA, gprc:$rB), + "tw $to, $rA, $rB", IntTrapW, []>; +def TDI : DForm_base<2, (outs), (ins u5imm:$to, g8rc:$rA, s16imm:$imm), + "tdi $to, $rA, $imm", IntTrapD, []>; +def TD : XForm_1<31, 68, (outs), (ins u5imm:$to, g8rc:$rA, g8rc:$rB), + "td $to, $rA, $rB", IntTrapD, []>; + //===----------------------------------------------------------------------===// // PPC32 Load Instructions. // @@ -1255,6 +1388,10 @@ def LFIWZX : XForm_25<31, 887, (outs f8rc:$frD), (ins memrr:$src), [(set f64:$frD, (PPClfiwzx xoaddr:$src))]>; } +// Load Multiple +def LMW : DForm_1<46, (outs gprc:$rD), (ins memri:$src), + "lmw $rD, $src", LdStLMW, []>; + //===----------------------------------------------------------------------===// // PPC32 Store Instructions. // @@ -1385,9 +1522,13 @@ def : Pat<(pre_store f32:$rS, iPTR:$ptrreg, iPTR:$ptroff), def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iPTR:$ptroff), (STFDUX $rS, $ptrreg, $ptroff)>; -def SYNC : XForm_24_sync<31, 598, (outs), (ins), - "sync", LdStSync, - [(int_ppc_sync)]>; +// Store Multiple +def STMW : DForm_1<47, (outs), (ins gprc:$rS, memri:$dst), + "stmw $rS, $dst", LdStLMW, []>; + +def SYNC : XForm_24_sync<31, 598, (outs), (ins i32imm:$L), + "sync $L", LdStSync, []>; +def : Pat<(int_ppc_sync), (SYNC 0)>; //===----------------------------------------------------------------------===// // PPC32 Arithmetic Instructions. @@ -1408,7 +1549,7 @@ def ADDICo : DForm_2<13, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), "addic. 
$rD, $rA, $imm", IntGeneral, []>, isDOT, RecFormRel; } -def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, s16imm:$imm), +def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, s17imm:$imm), "addis $rD, $rA, $imm", IntSimple, [(set i32:$rD, (add i32:$rA, imm16ShiftedSExt:$imm))]>; let isCodeGenOnly = 1 in @@ -1428,7 +1569,7 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { def LI : DForm_2_r0<14, (outs gprc:$rD), (ins s16imm:$imm), "li $rD, $imm", IntSimple, [(set i32:$rD, imm32SExt16:$imm)]>; - def LIS : DForm_2_r0<15, (outs gprc:$rD), (ins s16imm:$imm), + def LIS : DForm_2_r0<15, (outs gprc:$rD), (ins s17imm:$imm), "lis $rD, $imm", IntSimple, [(set i32:$rD, imm16ShiftedSExt:$imm)]>; } @@ -1653,15 +1794,37 @@ def MCRF : XLForm_3<19, 0, (outs crrc:$BF), (ins crrc:$BFA), "mcrf $BF, $BFA", BrMCR>, PPC970_DGroup_First, PPC970_Unit_CRU; +def CRAND : XLForm_1<19, 257, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), + "crand $CRD, $CRA, $CRB", BrCR, []>; + +def CRNAND : XLForm_1<19, 225, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), + "crnand $CRD, $CRA, $CRB", BrCR, []>; + +def CROR : XLForm_1<19, 449, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), + "cror $CRD, $CRA, $CRB", BrCR, []>; + +def CRXOR : XLForm_1<19, 193, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), + "crxor $CRD, $CRA, $CRB", BrCR, []>; + +def CRNOR : XLForm_1<19, 33, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), + "crnor $CRD, $CRA, $CRB", BrCR, []>; + def CREQV : XLForm_1<19, 289, (outs crbitrc:$CRD), (ins crbitrc:$CRA, crbitrc:$CRB), - "creqv $CRD, $CRA, $CRB", BrCR, - []>; + "creqv $CRD, $CRA, $CRB", BrCR, []>; -def CROR : XLForm_1<19, 449, (outs crbitrc:$CRD), +def CRANDC : XLForm_1<19, 129, (outs crbitrc:$CRD), (ins crbitrc:$CRA, crbitrc:$CRB), - "cror $CRD, $CRA, $CRB", BrCR, - []>; + "crandc $CRD, $CRA, $CRB", BrCR, []>; + +def CRORC : XLForm_1<19, 417, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), + "crorc $CRD, $CRA, $CRB", BrCR, []>; let isCodeGenOnly = 1 in { def CRSET : XLForm_1_ext<19, 289, (outs crbitrc:$dst), (ins), @@ -1685,6 +1848,15 @@ def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins), // XFX-Form instructions. Instructions that deal with SPRs. // + +def MFSPR : XFXForm_1<31, 339, (outs gprc:$RT), (ins i32imm:$SPR), + "mfspr $RT, $SPR", SprMFSPR>; +def MTSPR : XFXForm_1<31, 467, (outs), (ins i32imm:$SPR, gprc:$RT), + "mtspr $SPR, $RT", SprMTSPR>; + +def MFTB : XFXForm_1<31, 371, (outs gprc:$RT), (ins i32imm:$SPR), + "mftb $RT, $SPR", SprMFTB>; + let Uses = [CTR] in { def MFCTR : XFXForm_1_ext<31, 339, 9, (outs gprc:$rT), (ins), "mfctr $rT", SprMFSPR>, @@ -1713,17 +1885,17 @@ def MFLR : XFXForm_1_ext<31, 339, 8, (outs gprc:$rT), (ins), PPC970_DGroup_First, PPC970_Unit_FXU; } -// Move to/from VRSAVE: despite being a SPR, the VRSAVE register is renamed like -// a GPR on the PPC970. As such, copies in and out have the same performance -// characteristics as an OR instruction. -def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins gprc:$rS), - "mtspr 256, $rS", IntGeneral>, - PPC970_DGroup_Single, PPC970_Unit_FXU; -def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), (ins), - "mfspr $rT, 256", IntGeneral>, - PPC970_DGroup_First, PPC970_Unit_FXU; - let isCodeGenOnly = 1 in { + // Move to/from VRSAVE: despite being a SPR, the VRSAVE register is renamed + // like a GPR on the PPC970. As such, copies in and out have the same + // performance characteristics as an OR instruction. 
+ def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins gprc:$rS), + "mtspr 256, $rS", IntGeneral>, + PPC970_DGroup_Single, PPC970_Unit_FXU; + def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), (ins), + "mfspr $rT, 256", IntGeneral>, + PPC970_DGroup_First, PPC970_Unit_FXU; + def MTVRSAVEv : XFXForm_7_ext<31, 467, 256, (outs VRSAVERC:$reg), (ins gprc:$rS), "mtspr 256, $rS", IntGeneral>, @@ -1747,34 +1919,22 @@ def RESTORE_VRSAVE : Pseudo<(outs VRSAVERC:$vrsave), (ins memri:$F), "#RESTORE_VRSAVE", []>; let neverHasSideEffects = 1 in { -def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins gprc:$rS), - "mtcrf $FXM, $rS", BrMCRX>, - PPC970_MicroCode, PPC970_Unit_CRU; +def MTOCRF: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins gprc:$ST), + "mtocrf $FXM, $ST", BrMCRX>, + PPC970_DGroup_First, PPC970_Unit_CRU; -// This is a pseudo for MFCR, which implicitly uses all 8 of its subregisters; -// declaring that here gives the local register allocator problems with this: -// vreg = MCRF CR0 -// MFCR <kill of whatever preg got assigned to vreg> -// while not declaring it breaks DeadMachineInstructionElimination. -// As it turns out, in all cases where we currently use this, -// we're only interested in one subregister of it. Represent this in the -// instruction to keep the register allocator from becoming confused. -// -// FIXME: Make this a real Pseudo instruction when the JIT switches to MC. -let isCodeGenOnly = 1 in -def MFCRpseud: XFXForm_3<31, 19, (outs gprc:$rT), (ins crbitm:$FXM), - "#MFCRpseud", SprMFCR>, +def MTCRF : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, gprc:$rS), + "mtcrf $FXM, $rS", BrMCRX>, PPC970_MicroCode, PPC970_Unit_CRU; def MFOCRF: XFXForm_5a<31, 19, (outs gprc:$rT), (ins crbitm:$FXM), "mfocrf $rT, $FXM", SprMFCR>, PPC970_DGroup_First, PPC970_Unit_CRU; -} // neverHasSideEffects = 1 -let neverHasSideEffects = 1 in def MFCR : XFXForm_3<31, 19, (outs gprc:$rT), (ins), "mfcr $rT", SprMFCR>, PPC970_MicroCode, PPC970_Unit_CRU; +} // neverHasSideEffects = 1 // Pseudo instruction to perform FADD in round-to-zero mode. 
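// Illustration (not part of this patch): "round to zero" truncates the
// infinitely precise sum toward zero instead of rounding to nearest, which
// is why the pseudo below must read the RM register. A hedged C++ sketch of
// the same semantics (assumes <cfenv> rounding-mode support):
//
//   #include <cfenv>
//   double fadd_rtz(double A, double B) {
//     const int Old = std::fegetround();
//     std::fesetround(FE_TOWARDZERO); // what toggling RM models
//     double R = A + B;
//     std::fesetround(Old);           // restore the caller's mode
//     return R;
//   }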
let usesCustomInserter = 1, Uses = [RM] in { @@ -2108,7 +2268,7 @@ def : Pat<(f64 (extloadf32 xaddr:$src)), def : Pat<(f64 (fextend f32:$src)), (COPY_TO_REGCLASS $src, F8RC)>; -def : Pat<(atomic_fence (imm), (imm)), (SYNC)>; +def : Pat<(atomic_fence (imm), (imm)), (SYNC 0)>; // Additional FNMSUB patterns: -a*c + b == -(a*c - b) def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B), @@ -2134,6 +2294,12 @@ def ISYNC : XLForm_2_ext<19, 150, 0, 0, 0, (outs), (ins), def ICBI : XForm_1a<31, 982, (outs), (ins memrr:$src), "icbi $src", LdStICBI, []>; +def EIEIO : XForm_24_eieio<31, 854, (outs), (ins), + "eieio", LdStLoad, []>; + +def WAIT : XForm_24_sync<31, 62, (outs), (ins i32imm:$L), + "wait $L", LdStLoad, []>; + //===----------------------------------------------------------------------===// // PowerPC Assembler Instruction Aliases // @@ -2156,38 +2322,247 @@ class PPCAsmPseudo<string asm, dag iops> def : InstAlias<"sc", (SC 0)>; -def : InstAlias<"mr $rA, $rB", (OR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>; +def : InstAlias<"sync", (SYNC 0)>; +def : InstAlias<"msync", (SYNC 0)>; +def : InstAlias<"lwsync", (SYNC 1)>; +def : InstAlias<"ptesync", (SYNC 2)>; +def : InstAlias<"wait", (WAIT 0)>; +def : InstAlias<"waitrsv", (WAIT 1)>; +def : InstAlias<"waitimpl", (WAIT 2)>; + +def : InstAlias<"crset $bx", (CREQV crbitrc:$bx, crbitrc:$bx, crbitrc:$bx)>; +def : InstAlias<"crclr $bx", (CRXOR crbitrc:$bx, crbitrc:$bx, crbitrc:$bx)>; +def : InstAlias<"crmove $bx, $by", (CROR crbitrc:$bx, crbitrc:$by, crbitrc:$by)>; +def : InstAlias<"crnot $bx, $by", (CRNOR crbitrc:$bx, crbitrc:$by, crbitrc:$by)>; + +def : InstAlias<"mtxer $Rx", (MTSPR 1, gprc:$Rx)>; +def : InstAlias<"mfxer $Rx", (MFSPR gprc:$Rx, 1)>; + +def : InstAlias<"mftb $Rx", (MFTB gprc:$Rx, 268)>; +def : InstAlias<"mftbu $Rx", (MFTB gprc:$Rx, 269)>; + +def : InstAlias<"xnop", (XORI R0, R0, 0)>; + +def : InstAlias<"mr $rA, $rB", (OR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>; +def : InstAlias<"mr. $rA, $rB", (OR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>; + +def : InstAlias<"not $rA, $rB", (NOR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>; +def : InstAlias<"not. $rA, $rB", (NOR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>; + +def : InstAlias<"mtcr $rA", (MTCRF8 255, g8rc:$rA)>; + +def LAx : PPCAsmPseudo<"la $rA, $addr", (ins gprc:$rA, memri:$addr)>; + +def SUBI : PPCAsmPseudo<"subi $rA, $rB, $imm", + (ins gprc:$rA, gprc:$rB, s16imm:$imm)>; +def SUBIS : PPCAsmPseudo<"subis $rA, $rB, $imm", + (ins gprc:$rA, gprc:$rB, s16imm:$imm)>; +def SUBIC : PPCAsmPseudo<"subic $rA, $rB, $imm", + (ins gprc:$rA, gprc:$rB, s16imm:$imm)>; +def SUBICo : PPCAsmPseudo<"subic. $rA, $rB, $imm", + (ins gprc:$rA, gprc:$rB, s16imm:$imm)>; + +def : InstAlias<"sub $rA, $rB, $rC", (SUBF8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>; +def : InstAlias<"sub. $rA, $rB, $rC", (SUBF8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>; +def : InstAlias<"subc $rA, $rB, $rC", (SUBFC8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>; +def : InstAlias<"subc. $rA, $rB, $rC", (SUBFC8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>; + +def EXTLWI : PPCAsmPseudo<"extlwi $rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def EXTLWIo : PPCAsmPseudo<"extlwi. $rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def EXTRWI : PPCAsmPseudo<"extrwi $rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def EXTRWIo : PPCAsmPseudo<"extrwi. $rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def INSLWI : PPCAsmPseudo<"inslwi $rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def INSLWIo : PPCAsmPseudo<"inslwi. 
$rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def INSRWI : PPCAsmPseudo<"insrwi $rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def INSRWIo : PPCAsmPseudo<"insrwi. $rA, $rS, $n, $b", + (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; +def ROTRWI : PPCAsmPseudo<"rotrwi $rA, $rS, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$n)>; +def ROTRWIo : PPCAsmPseudo<"rotrwi. $rA, $rS, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$n)>; def SLWI : PPCAsmPseudo<"slwi $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; +def SLWIo : PPCAsmPseudo<"slwi. $rA, $rS, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$n)>; def SRWI : PPCAsmPseudo<"srwi $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; +def SRWIo : PPCAsmPseudo<"srwi. $rA, $rS, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$n)>; +def CLRRWI : PPCAsmPseudo<"clrrwi $rA, $rS, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$n)>; +def CLRRWIo : PPCAsmPseudo<"clrrwi. $rA, $rS, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$n)>; +def CLRLSLWI : PPCAsmPseudo<"clrlslwi $rA, $rS, $b, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>; +def CLRLSLWIo : PPCAsmPseudo<"clrlslwi. $rA, $rS, $b, $n", + (ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>; + +def : InstAlias<"rotlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>; +def : InstAlias<"rotlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>; +def : InstAlias<"rotlw $rA, $rS, $rB", (RLWNM gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>; +def : InstAlias<"rotlw. $rA, $rS, $rB", (RLWNMo gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>; +def : InstAlias<"clrlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>; +def : InstAlias<"clrlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>; + +def EXTLDI : PPCAsmPseudo<"extldi $rA, $rS, $n, $b", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; +def EXTLDIo : PPCAsmPseudo<"extldi. $rA, $rS, $n, $b", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; +def EXTRDI : PPCAsmPseudo<"extrdi $rA, $rS, $n, $b", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; +def EXTRDIo : PPCAsmPseudo<"extrdi. $rA, $rS, $n, $b", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; +def INSRDI : PPCAsmPseudo<"insrdi $rA, $rS, $n, $b", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; +def INSRDIo : PPCAsmPseudo<"insrdi. $rA, $rS, $n, $b", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; +def ROTRDI : PPCAsmPseudo<"rotrdi $rA, $rS, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; +def ROTRDIo : PPCAsmPseudo<"rotrdi. $rA, $rS, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; def SLDI : PPCAsmPseudo<"sldi $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; +def SLDIo : PPCAsmPseudo<"sldi. $rA, $rS, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; def SRDI : PPCAsmPseudo<"srdi $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; - -multiclass BranchExtendedMnemonic<string name, int bibo> { - def : InstAlias<"b"#name#" $cc, $dst", +def SRDIo : PPCAsmPseudo<"srdi. $rA, $rS, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; +def CLRRDI : PPCAsmPseudo<"clrrdi $rA, $rS, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; +def CLRRDIo : PPCAsmPseudo<"clrrdi. $rA, $rS, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; +def CLRLSLDI : PPCAsmPseudo<"clrlsldi $rA, $rS, $b, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>; +def CLRLSLDIo : PPCAsmPseudo<"clrlsldi. $rA, $rS, $b, $n", + (ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>; + +def : InstAlias<"rotldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>; +def : InstAlias<"rotldi. 
$rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>; +def : InstAlias<"rotld $rA, $rS, $rB", (RLDCL g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>; +def : InstAlias<"rotld. $rA, $rS, $rB", (RLDCLo g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>; +def : InstAlias<"clrldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>; +def : InstAlias<"clrldi. $rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>; + +// These generic branch instruction forms are used for the assembler parser only. +// Defs and Uses are conservative, since we don't know the BO value. +let PPC970_Unit = 7 in { + let Defs = [CTR], Uses = [CTR, RM] in { + def gBC : BForm_3<16, 0, 0, (outs), + (ins u5imm:$bo, crbitrc:$bi, condbrtarget:$dst), + "bc $bo, $bi, $dst">; + def gBCA : BForm_3<16, 1, 0, (outs), + (ins u5imm:$bo, crbitrc:$bi, abscondbrtarget:$dst), + "bca $bo, $bi, $dst">; + } + let Defs = [LR, CTR], Uses = [CTR, RM] in { + def gBCL : BForm_3<16, 0, 1, (outs), + (ins u5imm:$bo, crbitrc:$bi, condbrtarget:$dst), + "bcl $bo, $bi, $dst">; + def gBCLA : BForm_3<16, 1, 1, (outs), + (ins u5imm:$bo, crbitrc:$bi, abscondbrtarget:$dst), + "bcla $bo, $bi, $dst">; + } + let Defs = [CTR], Uses = [CTR, LR, RM] in + def gBCLR : XLForm_2<19, 16, 0, (outs), + (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh), + "bclr $bo, $bi, $bh", BrB, []>; + let Defs = [LR, CTR], Uses = [CTR, LR, RM] in + def gBCLRL : XLForm_2<19, 16, 1, (outs), + (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh), + "bclrl $bo, $bi, $bh", BrB, []>; + let Defs = [CTR], Uses = [CTR, LR, RM] in + def gBCCTR : XLForm_2<19, 528, 0, (outs), + (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh), + "bcctr $bo, $bi, $bh", BrB, []>; + let Defs = [LR, CTR], Uses = [CTR, LR, RM] in + def gBCCTRL : XLForm_2<19, 528, 1, (outs), + (ins u5imm:$bo, crbitrc:$bi, i32imm:$bh), + "bcctrl $bo, $bi, $bh", BrB, []>; +} +def : InstAlias<"bclr $bo, $bi", (gBCLR u5imm:$bo, crbitrc:$bi, 0)>; +def : InstAlias<"bclrl $bo, $bi", (gBCLRL u5imm:$bo, crbitrc:$bi, 0)>; +def : InstAlias<"bcctr $bo, $bi", (gBCCTR u5imm:$bo, crbitrc:$bi, 0)>; +def : InstAlias<"bcctrl $bo, $bi", (gBCCTRL u5imm:$bo, crbitrc:$bi, 0)>; + +multiclass BranchSimpleMnemonic1<string name, string pm, int bo> { + def : InstAlias<"b"#name#pm#" $bi, $dst", (gBC bo, crbitrc:$bi, condbrtarget:$dst)>; + def : InstAlias<"b"#name#"a"#pm#" $bi, $dst", (gBCA bo, crbitrc:$bi, abscondbrtarget:$dst)>; + def : InstAlias<"b"#name#"lr"#pm#" $bi", (gBCLR bo, crbitrc:$bi, 0)>; + def : InstAlias<"b"#name#"l"#pm#" $bi, $dst", (gBCL bo, crbitrc:$bi, condbrtarget:$dst)>; + def : InstAlias<"b"#name#"la"#pm#" $bi, $dst", (gBCLA bo, crbitrc:$bi, abscondbrtarget:$dst)>; + def : InstAlias<"b"#name#"lrl"#pm#" $bi", (gBCLRL bo, crbitrc:$bi, 0)>; +} +multiclass BranchSimpleMnemonic2<string name, string pm, int bo> + : BranchSimpleMnemonic1<name, pm, bo> { + def : InstAlias<"b"#name#"ctr"#pm#" $bi", (gBCCTR bo, crbitrc:$bi, 0)>; + def : InstAlias<"b"#name#"ctrl"#pm#" $bi", (gBCCTRL bo, crbitrc:$bi, 0)>; +} +defm : BranchSimpleMnemonic2<"t", "", 12>; +defm : BranchSimpleMnemonic2<"f", "", 4>; +defm : BranchSimpleMnemonic2<"t", "-", 14>; +defm : BranchSimpleMnemonic2<"f", "-", 6>; +defm : BranchSimpleMnemonic2<"t", "+", 15>; +defm : BranchSimpleMnemonic2<"f", "+", 7>; +defm : BranchSimpleMnemonic1<"dnzt", "", 8>; +defm : BranchSimpleMnemonic1<"dnzf", "", 0>; +defm : BranchSimpleMnemonic1<"dzt", "", 10>; +defm : BranchSimpleMnemonic1<"dzf", "", 2>; + +multiclass BranchExtendedMnemonicPM<string name, string pm, int bibo> { + def : InstAlias<"b"#name#pm#" $cc, $dst", (BCC bibo, crrc:$cc, 
condbrtarget:$dst)>; - def : InstAlias<"b"#name#" $dst", + def : InstAlias<"b"#name#pm#" $dst", (BCC bibo, CR0, condbrtarget:$dst)>; - def : InstAlias<"b"#name#"lr $cc", + def : InstAlias<"b"#name#"a"#pm#" $cc, $dst", + (BCCA bibo, crrc:$cc, abscondbrtarget:$dst)>; + def : InstAlias<"b"#name#"a"#pm#" $dst", + (BCCA bibo, CR0, abscondbrtarget:$dst)>; + + def : InstAlias<"b"#name#"lr"#pm#" $cc", (BCLR bibo, crrc:$cc)>; - def : InstAlias<"b"#name#"lr", + def : InstAlias<"b"#name#"lr"#pm, (BCLR bibo, CR0)>; - def : InstAlias<"b"#name#"ctr $cc", + def : InstAlias<"b"#name#"ctr"#pm#" $cc", (BCCTR bibo, crrc:$cc)>; - def : InstAlias<"b"#name#"ctr", + def : InstAlias<"b"#name#"ctr"#pm, (BCCTR bibo, CR0)>; - def : InstAlias<"b"#name#"ctrl $cc", + def : InstAlias<"b"#name#"l"#pm#" $cc, $dst", + (BCCL bibo, crrc:$cc, condbrtarget:$dst)>; + def : InstAlias<"b"#name#"l"#pm#" $dst", + (BCCL bibo, CR0, condbrtarget:$dst)>; + + def : InstAlias<"b"#name#"la"#pm#" $cc, $dst", + (BCCLA bibo, crrc:$cc, abscondbrtarget:$dst)>; + def : InstAlias<"b"#name#"la"#pm#" $dst", + (BCCLA bibo, CR0, abscondbrtarget:$dst)>; + + def : InstAlias<"b"#name#"lrl"#pm#" $cc", + (BCLRL bibo, crrc:$cc)>; + def : InstAlias<"b"#name#"lrl"#pm, + (BCLRL bibo, CR0)>; + + def : InstAlias<"b"#name#"ctrl"#pm#" $cc", (BCCTRL bibo, crrc:$cc)>; - def : InstAlias<"b"#name#"ctrl", + def : InstAlias<"b"#name#"ctrl"#pm, (BCCTRL bibo, CR0)>; } +multiclass BranchExtendedMnemonic<string name, int bibo> { + defm : BranchExtendedMnemonicPM<name, "", bibo>; + defm : BranchExtendedMnemonicPM<name, "-", !add(bibo, 2)>; + defm : BranchExtendedMnemonicPM<name, "+", !add(bibo, 3)>; +} defm : BranchExtendedMnemonic<"lt", 12>; defm : BranchExtendedMnemonic<"gt", 44>; defm : BranchExtendedMnemonic<"eq", 76>; @@ -2201,3 +2576,43 @@ defm : BranchExtendedMnemonic<"ne", 68>; defm : BranchExtendedMnemonic<"nu", 100>; defm : BranchExtendedMnemonic<"ns", 100>; +def : InstAlias<"cmpwi $rA, $imm", (CMPWI CR0, gprc:$rA, s16imm:$imm)>; +def : InstAlias<"cmpw $rA, $rB", (CMPW CR0, gprc:$rA, gprc:$rB)>; +def : InstAlias<"cmplwi $rA, $imm", (CMPLWI CR0, gprc:$rA, u16imm:$imm)>; +def : InstAlias<"cmplw $rA, $rB", (CMPLW CR0, gprc:$rA, gprc:$rB)>; +def : InstAlias<"cmpdi $rA, $imm", (CMPDI CR0, g8rc:$rA, s16imm:$imm)>; +def : InstAlias<"cmpd $rA, $rB", (CMPD CR0, g8rc:$rA, g8rc:$rB)>; +def : InstAlias<"cmpldi $rA, $imm", (CMPLDI CR0, g8rc:$rA, u16imm:$imm)>; +def : InstAlias<"cmpld $rA, $rB", (CMPLD CR0, g8rc:$rA, g8rc:$rB)>; + +def : InstAlias<"cmpi $bf, 0, $rA, $imm", (CMPWI crrc:$bf, gprc:$rA, s16imm:$imm)>; +def : InstAlias<"cmp $bf, 0, $rA, $rB", (CMPW crrc:$bf, gprc:$rA, gprc:$rB)>; +def : InstAlias<"cmpli $bf, 0, $rA, $imm", (CMPLWI crrc:$bf, gprc:$rA, u16imm:$imm)>; +def : InstAlias<"cmpl $bf, 0, $rA, $rB", (CMPLW crrc:$bf, gprc:$rA, gprc:$rB)>; +def : InstAlias<"cmpi $bf, 1, $rA, $imm", (CMPDI crrc:$bf, g8rc:$rA, s16imm:$imm)>; +def : InstAlias<"cmp $bf, 1, $rA, $rB", (CMPD crrc:$bf, g8rc:$rA, g8rc:$rB)>; +def : InstAlias<"cmpli $bf, 1, $rA, $imm", (CMPLDI crrc:$bf, g8rc:$rA, u16imm:$imm)>; +def : InstAlias<"cmpl $bf, 1, $rA, $rB", (CMPLD crrc:$bf, g8rc:$rA, g8rc:$rB)>; + +multiclass TrapExtendedMnemonic<string name, int to> { + def : InstAlias<"td"#name#"i $rA, $imm", (TDI to, g8rc:$rA, s16imm:$imm)>; + def : InstAlias<"td"#name#" $rA, $rB", (TD to, g8rc:$rA, g8rc:$rB)>; + def : InstAlias<"tw"#name#"i $rA, $imm", (TWI to, gprc:$rA, s16imm:$imm)>; + def : InstAlias<"tw"#name#" $rA, $rB", (TW to, gprc:$rA, gprc:$rB)>; +} +defm : TrapExtendedMnemonic<"lt", 16>; +defm 
: TrapExtendedMnemonic<"le", 20>; +defm : TrapExtendedMnemonic<"eq", 4>; +defm : TrapExtendedMnemonic<"ge", 12>; +defm : TrapExtendedMnemonic<"gt", 8>; +defm : TrapExtendedMnemonic<"nl", 12>; +defm : TrapExtendedMnemonic<"ne", 24>; +defm : TrapExtendedMnemonic<"ng", 20>; +defm : TrapExtendedMnemonic<"llt", 2>; +defm : TrapExtendedMnemonic<"lle", 6>; +defm : TrapExtendedMnemonic<"lge", 5>; +defm : TrapExtendedMnemonic<"lgt", 1>; +defm : TrapExtendedMnemonic<"lnl", 5>; +defm : TrapExtendedMnemonic<"lng", 6>; +defm : TrapExtendedMnemonic<"u", 31>; + diff --git a/lib/Target/PowerPC/PPCJITInfo.cpp b/lib/Target/PowerPC/PPCJITInfo.cpp index cfcd749..5e3a48d 100644 --- a/lib/Target/PowerPC/PPCJITInfo.cpp +++ b/lib/Target/PowerPC/PPCJITInfo.cpp @@ -71,8 +71,13 @@ static void EmitBranchToAt(uint64_t At, uint64_t To, bool isCall, bool is64Bit){ extern "C" void PPC32CompilationCallback(); extern "C" void PPC64CompilationCallback(); -#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \ - !(defined(__ppc64__) || defined(__FreeBSD__)) +// The first clause of the preprocessor directive looks wrong, but it is +// necessary when compiling this code on non-PowerPC hosts. +#if (!defined(__ppc__) && !defined(__powerpc__)) || defined(__powerpc64__) || defined(__ppc64__) +void PPC32CompilationCallback() { + llvm_unreachable("This is not a 32bit PowerPC, you can't execute this!"); +} +#elif !defined(__ELF__) // CompilationCallback stub - We can't use a C function with inline assembly in // it, because we the prolog/epilog inserted by GCC won't work for us. Instead, // write our own wrapper, which does things our way, so we have complete control @@ -137,8 +142,8 @@ asm( "bctr\n" ); -#elif defined(__PPC__) && !defined(__ppc64__) -// Linux & FreeBSD / PPC 32 support +#else +// ELF PPC 32 support // CompilationCallback stub - We can't use a C function with inline assembly in // it, because we the prolog/epilog inserted by GCC won't work for us. Instead, @@ -197,15 +202,14 @@ asm( "mtlr 0\n" "bctr\n" ); -#else -void PPC32CompilationCallback() { - llvm_unreachable("This is not a power pc, you can't execute this!"); -} #endif -#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \ - defined(__ppc64__) -#ifdef __ELF__ +#if !defined(__powerpc64__) && !defined(__ppc64__) +void PPC64CompilationCallback() { + llvm_unreachable("This is not a 64bit PowerPC, you can't execute this!"); +} +#else +# ifdef __ELF__ asm( ".text\n" ".align 2\n" @@ -219,13 +223,13 @@ asm( ".align 4\n" ".type PPC64CompilationCallback,@function\n" ".L.PPC64CompilationCallback:\n" -#else +# else asm( ".text\n" ".align 2\n" ".globl _PPC64CompilationCallback\n" "_PPC64CompilationCallback:\n" -#endif +# endif // Make space for 8 ints r[3-10] and 13 doubles f[1-13] and the // FIXME: need to save v[0-19] for altivec? // Set up a proper stack frame @@ -258,12 +262,12 @@ asm( "ld 5, 280(1)\n" // stub's frame "ld 4, 16(5)\n" // stub's lr "li 5, 1\n" // 1 == 64 bit -#ifdef __ELF__ +# ifdef __ELF__ "bl LLVMPPCCompilationCallback\n" "nop\n" -#else +# else "bl _LLVMPPCCompilationCallback\n" -#endif +# endif "mtctr 3\n" // Restore all int arg registers "ld 10, 272(1)\n" "ld 9, 264(1)\n" @@ -285,10 +289,6 @@ asm( // XXX: any special TOC handling in the ELF case for JIT? 
"bctr\n" ); -#else -void PPC64CompilationCallback() { - llvm_unreachable("This is not a power pc, you can't execute this!"); -} #endif extern "C" { diff --git a/lib/Target/PowerPC/PPCMCInstLower.cpp b/lib/Target/PowerPC/PPCMCInstLower.cpp index ba7efc1..d69aa4a 100644 --- a/lib/Target/PowerPC/PPCMCInstLower.cpp +++ b/lib/Target/PowerPC/PPCMCInstLower.cpp @@ -111,30 +111,25 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol, unsigned access = MO.getTargetFlags() & PPCII::MO_ACCESS_MASK; - if (!isDarwin) { - switch (access) { - case PPCII::MO_HA16: - RefKind = MCSymbolRefExpr::VK_PPC_ADDR16_HA; - break; - case PPCII::MO_LO16: - RefKind = MCSymbolRefExpr::VK_PPC_ADDR16_LO; - break; - case PPCII::MO_TPREL16_HA: - RefKind = MCSymbolRefExpr::VK_PPC_TPREL16_HA; - break; - case PPCII::MO_TPREL16_LO: - RefKind = MCSymbolRefExpr::VK_PPC_TPREL16_LO; - break; - case PPCII::MO_DTPREL16_LO: - RefKind = MCSymbolRefExpr::VK_PPC_DTPREL16_LO; - break; - case PPCII::MO_TLSLD16_LO: - RefKind = MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO; - break; - case PPCII::MO_TOC16_LO: - RefKind = MCSymbolRefExpr::VK_PPC_TOC16_LO; - break; - } + switch (access) { + case PPCII::MO_TPREL_LO: + RefKind = MCSymbolRefExpr::VK_PPC_TPREL_LO; + break; + case PPCII::MO_TPREL_HA: + RefKind = MCSymbolRefExpr::VK_PPC_TPREL_HA; + break; + case PPCII::MO_DTPREL_LO: + RefKind = MCSymbolRefExpr::VK_PPC_DTPREL_LO; + break; + case PPCII::MO_TLSLD_LO: + RefKind = MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO; + break; + case PPCII::MO_TOC_LO: + RefKind = MCSymbolRefExpr::VK_PPC_TOC_LO; + break; + case PPCII::MO_TLS: + RefKind = MCSymbolRefExpr::VK_PPC_TLS; + break; } const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, RefKind, Ctx); @@ -152,16 +147,14 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol, Expr = MCBinaryExpr::CreateSub(Expr, PB, Ctx); } - // Add Darwin ha16() / lo16() markers if required. - if (isDarwin) { - switch (access) { - case PPCII::MO_HA16: - Expr = PPCMCExpr::CreateHa16(Expr, Ctx); - break; - case PPCII::MO_LO16: - Expr = PPCMCExpr::CreateLo16(Expr, Ctx); - break; - } + // Add ha16() / lo16() markers if required. + switch (access) { + case PPCII::MO_LO: + Expr = PPCMCExpr::CreateLo(Expr, isDarwin, Ctx); + break; + case PPCII::MO_HA: + Expr = PPCMCExpr::CreateHa(Expr, isDarwin, Ctx); + break; } return MCOperand::CreateExpr(Expr); diff --git a/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/lib/Target/PowerPC/PPCMachineFunctionInfo.h index 40d1f3a..33f843d 100644 --- a/lib/Target/PowerPC/PPCMachineFunctionInfo.h +++ b/lib/Target/PowerPC/PPCMachineFunctionInfo.h @@ -32,6 +32,9 @@ class PPCFunctionInfo : public MachineFunctionInfo { /// int ReturnAddrSaveIndex; + /// Frame index where the old base pointer is stored. + int BasePointerSaveIndex; + /// MustSaveLR - Indicates whether LR is defined (or clobbered) in the current /// function. This is only valid after the initial scan of the function by /// PEI. 
@@ -93,6 +96,7 @@ public: explicit PPCFunctionInfo(MachineFunction &MF) : FramePointerSaveIndex(0), ReturnAddrSaveIndex(0), + BasePointerSaveIndex(0), HasSpills(false), HasNonRISpills(false), SpillsCR(false), @@ -113,6 +117,9 @@ public: int getReturnAddrSaveIndex() const { return ReturnAddrSaveIndex; } void setReturnAddrSaveIndex(int idx) { ReturnAddrSaveIndex = idx; } + int getBasePointerSaveIndex() const { return BasePointerSaveIndex; } + void setBasePointerSaveIndex(int Idx) { BasePointerSaveIndex = Idx; } + unsigned getMinReservedArea() const { return MinReservedArea; } void setMinReservedArea(unsigned size) { MinReservedArea = size; } @@ -160,7 +167,7 @@ public: int getCRSpillFrameIndex() const { return CRSpillFrameIndex; } void setCRSpillFrameIndex(int idx) { CRSpillFrameIndex = idx; } - const SmallVector<unsigned, 3> & + const SmallVectorImpl<unsigned> & getMustSaveCRs() const { return MustSaveCRs; } void addMustSaveCR(unsigned Reg) { MustSaveCRs.push_back(Reg); } }; diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index a4e328e..adba613 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -48,6 +48,14 @@ using namespace llvm; +static cl::opt<bool> +EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true), + cl::desc("Enable use of a base pointer for complex stack frames")); + +static cl::opt<bool> +AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false), + cl::desc("Force the use of a base pointer in every function")); + PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST) : PPCGenRegisterInfo(ST.isPPC64() ? PPC::LR8 : PPC::LR, ST.isPPC64() ? 0 : 1, @@ -91,32 +99,41 @@ PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind) const uint16_t* PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { if (Subtarget.isDarwinABI()) - return Subtarget.isPPC64() ? CSR_Darwin64_SaveList : - CSR_Darwin32_SaveList; - - return Subtarget.isPPC64() ? CSR_SVR464_SaveList : CSR_SVR432_SaveList; + return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ? + CSR_Darwin64_Altivec_SaveList : + CSR_Darwin64_SaveList) : + (Subtarget.hasAltivec() ? + CSR_Darwin32_Altivec_SaveList : + CSR_Darwin32_SaveList); + + return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ? + CSR_SVR464_Altivec_SaveList : + CSR_SVR464_SaveList) : + (Subtarget.hasAltivec() ? + CSR_SVR432_Altivec_SaveList : + CSR_SVR432_SaveList); } const uint32_t* PPCRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { if (Subtarget.isDarwinABI()) - return Subtarget.isPPC64() ? CSR_Darwin64_RegMask : - CSR_Darwin32_RegMask; - - return Subtarget.isPPC64() ? CSR_SVR464_RegMask : CSR_SVR432_RegMask; + return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ? + CSR_Darwin64_Altivec_RegMask : + CSR_Darwin64_RegMask) : + (Subtarget.hasAltivec() ? + CSR_Darwin32_Altivec_RegMask : + CSR_Darwin32_RegMask); + + return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ? + CSR_SVR464_Altivec_RegMask : + CSR_SVR464_RegMask) : + (Subtarget.hasAltivec() ? + CSR_SVR432_Altivec_RegMask : + CSR_SVR432_RegMask); } const uint32_t* PPCRegisterInfo::getNoPreservedMask() const { - // The naming here is inverted: The CSR_NoRegs_Altivec has the - // Altivec registers masked so that they're not saved and restored around - // instructions with this preserved mask. 
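// Note (not part of this patch): the deleted special cases below become
// unnecessary because the patch instead reserves every VRRC register in
// getReservedRegs() when Altivec is unavailable, so the single
// CSR_NoRegs_RegMask now covers all configurations.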
- - if (!Subtarget.hasAltivec()) - return CSR_NoRegs_Altivec_RegMask; - - if (Subtarget.isDarwin()) - return CSR_NoRegs_Darwin_RegMask; return CSR_NoRegs_RegMask; } @@ -135,6 +152,11 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { Reserved.set(PPC::FP); Reserved.set(PPC::FP8); + // The BP register is also not really a register, but is the representation + // of the base pointer register used by setjmp. + Reserved.set(PPC::BP); + Reserved.set(PPC::BP8); + // The counter registers must be reserved so that counter-based loops can // be correctly formed (and the mtctr instructions are not DCE'd). Reserved.set(PPC::CTR); @@ -145,6 +167,9 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { Reserved.set(PPC::LR8); Reserved.set(PPC::RM); + if (!Subtarget.isDarwinABI() || !Subtarget.hasAltivec()) + Reserved.set(PPC::VRSAVE); + // The SVR4 ABI reserves r2 and r13 if (Subtarget.isSVR4ABI()) { Reserved.set(PPC::R2); // System-reserved register @@ -161,6 +186,9 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { if (PPCFI->needsFP(MF)) Reserved.set(PPC::X31); + if (hasBasePointer(MF)) + Reserved.set(PPC::X30); + // The 64-bit SVR4 ABI reserves r2 for the TOC pointer. if (Subtarget.isSVR4ABI()) { Reserved.set(PPC::X2); @@ -170,6 +198,15 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { if (PPCFI->needsFP(MF)) Reserved.set(PPC::R31); + if (hasBasePointer(MF)) + Reserved.set(PPC::R30); + + // Reserve Altivec registers when Altivec is unavailable. + if (!Subtarget.hasAltivec()) + for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(), + IE = PPC::VRRCRegClass.end(); I != IE; ++I) + Reserved.set(*I); + return Reserved; } @@ -232,8 +269,8 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const { // Get stack alignments. unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment(); unsigned MaxAlign = MFI->getMaxAlignment(); - if (MaxAlign > TargetAlign) - report_fatal_error("Dynamic alloca with large aligns not supported"); + assert((maxCallFrameSize & (MaxAlign-1)) == 0 && + "Maximum call-frame size not sufficiently aligned"); // Determine the previous frame's address. If FrameSize can't be // represented as 16 bits or we need special alignment, then we load the @@ -258,40 +295,62 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const { .addImm(0) .addReg(PPC::R1); } - + + bool KillNegSizeReg = MI.getOperand(1).isKill(); + unsigned NegSizeReg = MI.getOperand(1).getReg(); + // Grow the stack and update the stack pointer link, then determine the // address of new allocated space. if (LP64) { + if (MaxAlign > TargetAlign) { + unsigned UnalNegSizeReg = NegSizeReg; + NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC); + + // Unfortunately, there is no andi, only andi., and we can't insert that + // here because we might clobber cr0 while it is live. 
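// Illustration (not part of this patch): NegSizeReg holds the *negated*
// allocation size, so ANDing it with ~(MaxAlign-1) -- materialized below as
// LI8 of the mask followed by AND8 -- makes it more negative, i.e. rounds
// the size up to the alignment. For example, with size 40 and MaxAlign 16:
// -40 & ~15 == -48, so 48 bytes are carved out of the stack.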
+ BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg) + .addImm(~(MaxAlign-1)); + + unsigned NegSizeReg1 = NegSizeReg; + NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC); + BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg) + .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg)) + .addReg(NegSizeReg1, RegState::Kill); + KillNegSizeReg = true; + } + BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1) .addReg(Reg, RegState::Kill) .addReg(PPC::X1) - .addReg(MI.getOperand(1).getReg()); - if (!MI.getOperand(1).isKill()) - BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg()) - .addReg(PPC::X1) - .addImm(maxCallFrameSize); - else - // Implicitly kill the register. - BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg()) - .addReg(PPC::X1) - .addImm(maxCallFrameSize) - .addReg(MI.getOperand(1).getReg(), RegState::ImplicitKill); + .addReg(NegSizeReg, getKillRegState(KillNegSizeReg)); + BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg()) + .addReg(PPC::X1) + .addImm(maxCallFrameSize); } else { + if (MaxAlign > TargetAlign) { + unsigned UnalNegSizeReg = NegSizeReg; + NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC); + + // Unfortunately, there is no andi, only andi., and we can't insert that + // here because we might clobber cr0 while it is live. + BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg) + .addImm(~(MaxAlign-1)); + + unsigned NegSizeReg1 = NegSizeReg; + NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC); + BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg) + .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg)) + .addReg(NegSizeReg1, RegState::Kill); + KillNegSizeReg = true; + } + BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1) .addReg(Reg, RegState::Kill) .addReg(PPC::R1) - .addReg(MI.getOperand(1).getReg()); - - if (!MI.getOperand(1).isKill()) - BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg()) - .addReg(PPC::R1) - .addImm(maxCallFrameSize); - else - // Implicitly kill the register. - BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg()) - .addReg(PPC::R1) - .addImm(maxCallFrameSize) - .addReg(MI.getOperand(1).getReg(), RegState::ImplicitKill); + .addReg(NegSizeReg, getKillRegState(KillNegSizeReg)); + BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg()) + .addReg(PPC::R1) + .addImm(maxCallFrameSize); } // Discard the DYNALLOC instruction. @@ -324,8 +383,8 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II, unsigned SrcReg = MI.getOperand(0).getReg(); // We need to store the CR in the low 4-bits of the saved value. First, issue - // an MFCRpsued to save all of the CRBits and, if needed, kill the SrcReg. - BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFCR8pseud : PPC::MFCRpseud), Reg) + // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg. + BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg) .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); // If the saved register wasn't CR0, shift the bits left so that they are in @@ -385,7 +444,7 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II, .addImm(31); } - BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTCRF8 : PPC::MTCRF), DestReg) + BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg) .addReg(Reg, RegState::Kill); // Discard the pseudo instruction. @@ -506,7 +565,6 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); // Get the frame info. 
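// Note (not part of this patch): eliminateFrameIndex rewrites an abstract
// frame-index operand into base-register + byte-offset form; with this
// patch the base register is chosen dynamically below (getBaseRegister for
// fixed, negative-index objects when realigning, getFrameRegister
// otherwise) instead of being hard-coded to R1/R31.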
MachineFrameInfo *MFI = MF.getFrameInfo(); - const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); DebugLoc dl = MI.getDebugLoc(); unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum); @@ -544,12 +602,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, } // Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP). - - bool is64Bit = Subtarget.isPPC64(); - MI.getOperand(FIOperandNum).ChangeToRegister(TFI->hasFP(MF) ? - (is64Bit ? PPC::X31 : PPC::R31) : - (is64Bit ? PPC::X1 : PPC::R1), - false); + MI.getOperand(FIOperandNum).ChangeToRegister( + FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false); // Figure out if the offset in the instruction is shifted right two bits. bool isIXAddr = usesIXAddr(MI); @@ -568,8 +622,10 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // Naked functions have stack size 0, although getStackSize may not reflect that // because we didn't call all the pieces that compute it for naked functions. if (!MF.getFunction()->getAttributes(). - hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked)) - Offset += MFI->getStackSize(); + hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked)) { + if (!(hasBasePointer(MF) && FrameIndex < 0)) + Offset += MFI->getStackSize(); + } // If we can, encode the offset directly into the instruction. If this is a // normal PPC "ri" instruction, any 16-bit value can be safely encoded. If @@ -577,9 +633,9 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // clear can be encoded. This is extremely uncommon, because normally you // only "std" to a stack slot that is at least 4-byte aligned, but it can // happen in invalid code. - if (OpC == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm - (!noImmForm && - isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0))) { + assert(OpC != PPC::DBG_VALUE && + "This should be handle in a target independent way"); + if (!noImmForm && isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0)) { MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset); return; } @@ -587,6 +643,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // The offset doesn't fit into a single register, scavenge one to build the // offset in. + bool is64Bit = Subtarget.isPPC64(); const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC; @@ -640,6 +697,44 @@ unsigned PPCRegisterInfo::getEHHandlerRegister() const { return !Subtarget.isPPC64() ? PPC::R4 : PPC::X4; } +unsigned PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const { + if (!hasBasePointer(MF)) + return getFrameRegister(MF); + + return Subtarget.isPPC64() ? PPC::X30 : PPC::R30; +} + +bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const { + if (!EnableBasePointer) + return false; + if (AlwaysBasePointer) + return true; + + // If we need to realign the stack, then the stack pointer can no longer + // serve as an offset into the caller's stack space. As a result, we need a + // base pointer. 
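// Illustration (not part of this patch): e.g. a function with a
// 64-byte-aligned local forces the prologue to round SP down, so
// SP-relative offsets to the fixed (incoming-argument) objects are no
// longer constant; R30/X30, reserved in getReservedRegs() above, then
// carries the pre-realignment frame address for exactly those accesses.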
+ return needsStackRealignment(MF); +} + +bool PPCRegisterInfo::canRealignStack(const MachineFunction &MF) const { + if (MF.getFunction()->hasFnAttribute("no-realign-stack")) + return false; + + return true; +} + +bool PPCRegisterInfo::needsStackRealignment(const MachineFunction &MF) const { + const MachineFrameInfo *MFI = MF.getFrameInfo(); + const Function *F = MF.getFunction(); + unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment(); + bool requiresRealignment = + ((MFI->getMaxAlignment() > StackAlign) || + F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackAlignment)); + + return requiresRealignment && canRealignStack(MF); +} + /// Returns true if the instruction's frame index /// reference would be better served by a base register other than FP /// or SP. Used by LocalStackFrameAllocation to determine which frame index diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h index 93626a9..d02af9e 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.h +++ b/lib/Target/PowerPC/PPCRegisterInfo.h @@ -92,6 +92,12 @@ public: // Debug information queries. unsigned getFrameRegister(const MachineFunction &MF) const; + // Base pointer (stack realignment) support. + unsigned getBaseRegister(const MachineFunction &MF) const; + bool hasBasePointer(const MachineFunction &MF) const; + bool canRealignStack(const MachineFunction &MF) const; + bool needsStackRealignment(const MachineFunction &MF) const; + // Exception handling queries. unsigned getEHExceptionRegister() const; unsigned getEHHandlerRegister() const; diff --git a/lib/Target/PowerPC/PPCRegisterInfo.td b/lib/Target/PowerPC/PPCRegisterInfo.td index b1b4f06..d566e2c 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.td +++ b/lib/Target/PowerPC/PPCRegisterInfo.td @@ -94,6 +94,10 @@ def ZERO8 : GP8<ZERO, "0">; def FP : GPR<0 /* arbitrary */, "**FRAME POINTER**">; def FP8 : GP8<FP, "**FRAME POINTER**">; +// Representations of the base pointer used by setjmp. +def BP : GPR<0 /* arbitrary */, "**BASE POINTER**">; +def BP8 : GP8<BP, "**BASE POINTER**">; + // Condition register bits def CR0LT : CRBIT< 0, "0">; def CR0GT : CRBIT< 1, "1">; @@ -150,7 +154,7 @@ def CTR : SPR<9, "ctr">, DwarfRegNum<[-2, 66]>; def CTR8 : SPR<9, "ctr">, DwarfRegNum<[66, -2]>; // VRsave register -def VRSAVE: SPR<256, "VRsave">, DwarfRegNum<[109]>; +def VRSAVE: SPR<256, "vrsave">, DwarfRegNum<[109]>; // Carry bit. 
In the architecture this is really bit 0 of the XER register // (which really is SPR register 1); this is the only bit interesting to a @@ -172,11 +176,11 @@ def RM: SPR<512, "**ROUNDING MODE**">; // then nonvolatiles in reverse order since stmw/lmw save from rN to r31 def GPRC : RegisterClass<"PPC", [i32], 32, (add (sequence "R%u", 2, 12), (sequence "R%u", 30, 13), - R31, R0, R1, FP)>; + R31, R0, R1, FP, BP)>; def G8RC : RegisterClass<"PPC", [i64], 64, (add (sequence "X%u", 2, 12), (sequence "X%u", 30, 14), - X31, X13, X0, X1, FP8)>; + X31, X13, X0, X1, FP8, BP8)>; // For some instructions r0 is special (representing the value 0 instead of // the value in the r0 register), and we use these register subclasses to diff --git a/lib/Target/PowerPC/PPCSubtarget.cpp b/lib/Target/PowerPC/PPCSubtarget.cpp index a8f2b3f..12d0326 100644 --- a/lib/Target/PowerPC/PPCSubtarget.cpp +++ b/lib/Target/PowerPC/PPCSubtarget.cpp @@ -14,7 +14,10 @@ #include "PPCSubtarget.h" #include "PPC.h" #include "PPCRegisterInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/IR/Attributes.h" #include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Function.h" #include "llvm/Support/Host.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/TargetMachine.h" @@ -29,32 +32,67 @@ using namespace llvm; PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU, const std::string &FS, bool is64Bit) : PPCGenSubtargetInfo(TT, CPU, FS) - , StackAlignment(16) - , DarwinDirective(PPC::DIR_NONE) - , HasMFOCRF(false) - , Has64BitSupport(false) - , Use64BitRegs(false) , IsPPC64(is64Bit) - , HasAltivec(false) - , HasQPX(false) - , HasFSQRT(false) - , HasFRE(false) - , HasFRES(false) - , HasFRSQRTE(false) - , HasFRSQRTES(false) - , HasRecipPrec(false) - , HasSTFIWX(false) - , HasLFIWAX(false) - , HasFPRND(false) - , HasFPCVT(false) - , HasISEL(false) - , HasPOPCNTD(false) - , HasLDBRX(false) - , IsBookE(false) - , HasLazyResolverStubs(false) - , IsJITCodeModel(false) , TargetTriple(TT) { + initializeEnvironment(); + resetSubtargetFeatures(CPU, FS); +} + +/// SetJITMode - This is called to inform the subtarget info that we are +/// producing code for the JIT. +void PPCSubtarget::SetJITMode() { + // JIT mode doesn't want lazy resolver stubs, it knows exactly where + // everything is. This matters for PPC64, which codegens in PIC mode without + // stubs. + HasLazyResolverStubs = false; + + // Calls to external functions need to use indirect calls + IsJITCodeModel = true; +} + +void PPCSubtarget::resetSubtargetFeatures(const MachineFunction *MF) { + AttributeSet FnAttrs = MF->getFunction()->getAttributes(); + Attribute CPUAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex, + "target-cpu"); + Attribute FSAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex, + "target-features"); + std::string CPU = + !CPUAttr.hasAttribute(Attribute::None) ? CPUAttr.getValueAsString() : ""; + std::string FS = + !FSAttr.hasAttribute(Attribute::None) ? 
FSAttr.getValueAsString() : ""; + if (!FS.empty()) { + initializeEnvironment(); + resetSubtargetFeatures(CPU, FS); + } +} +void PPCSubtarget::initializeEnvironment() { + StackAlignment = 16; + DarwinDirective = PPC::DIR_NONE; + HasMFOCRF = false; + Has64BitSupport = false; + Use64BitRegs = false; + HasAltivec = false; + HasQPX = false; + HasFSQRT = false; + HasFRE = false; + HasFRES = false; + HasFRSQRTE = false; + HasFRSQRTES = false; + HasRecipPrec = false; + HasSTFIWX = false; + HasLFIWAX = false; + HasFPRND = false; + HasFPCVT = false; + HasISEL = false; + HasPOPCNTD = false; + HasLDBRX = false; + IsBookE = false; + HasLazyResolverStubs = false; + IsJITCodeModel = false; +} + +void PPCSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) { // Determine default and user specified characteristics std::string CPUName = CPU; if (CPUName.empty()) @@ -72,7 +110,7 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU, std::string FullFS = FS; // If we are generating code for ppc64, verify that options make sense. - if (is64Bit) { + if (IsPPC64) { Has64BitSupport = true; // Silently force 64-bit register use on ppc64. Use64BitRegs = true; @@ -99,21 +137,11 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU, // is enabled because external functions will assume this alignment. if (hasQPX() || isBGQ()) StackAlignment = 32; -} -/// SetJITMode - This is called to inform the subtarget info that we are -/// producing code for the JIT. -void PPCSubtarget::SetJITMode() { - // JIT mode doesn't want lazy resolver stubs, it knows exactly where - // everything is. This matters for PPC64, which codegens in PIC mode without - // stubs. - HasLazyResolverStubs = false; - - // Calls to external functions need to use indirect calls - IsJITCodeModel = true; + // Determine endianness. + IsLittleEndian = (TargetTriple.getArch() == Triple::ppc64le); } - /// hasLazyResolverStub - Return true if accesses to the specified global have /// to go through a dyld lazy resolution stub. This means that an extra load /// is required to get the address of the global. diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h index 65b4d21..3f3fc0e 100644 --- a/lib/Target/PowerPC/PPCSubtarget.h +++ b/lib/Target/PowerPC/PPCSubtarget.h @@ -89,6 +89,7 @@ protected: bool IsBookE; bool HasLazyResolverStubs; bool IsJITCodeModel; + bool IsLittleEndian; /// TargetTriple - What processor and OS we're targeting. Triple TargetTriple; @@ -128,7 +129,7 @@ public: // documentation are wrong; these are correct (i.e. "what gcc does"). if (isPPC64() && isSVR4ABI()) { if (TargetTriple.getOS() == llvm::Triple::FreeBSD) - return "E-p:64:64-f64:64:64-i64:64:64-f128:64:64-v128:128:128-n32:64"; + return "E-p:64:64-f64:64:64-i64:64:64-v128:128:128-n32:64"; else return "E-p:64:64-f64:64:64-i64:64:64-f128:128:128-v128:128:128-n32:64"; } @@ -137,6 +138,13 @@ public: : "E-p:32:32-f64:64:64-i64:64:64-f128:64:128-n32"; } + /// \brief Reset the features for the PowerPC target. + virtual void resetSubtargetFeatures(const MachineFunction *MF); +private: + void initializeEnvironment(); + void resetSubtargetFeatures(StringRef CPU, StringRef FS); + +public: /// isPPC64 - Return true if we are generating code for 64-bit pointer mode. 
   ///
   bool isPPC64() const { return IsPPC64; }
@@ -159,6 +167,9 @@ public:
   // isJITCodeModel - True if we're generating code for the JIT
   bool isJITCodeModel() const { return IsJITCodeModel; }
 
+  // isLittleEndian - True if generating little-endian code
+  bool isLittleEndian() const { return IsLittleEndian; }
+
   // Specific obvious features.
   bool hasFSQRT() const { return HasFSQRT; }
   bool hasFRE() const { return HasFRE; }
diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp
index da03b4c..9acefe5 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -30,6 +30,7 @@ extern "C" void LLVMInitializePowerPCTarget() {
   // Register the targets
   RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
   RegisterTargetMachine<PPC64TargetMachine> B(ThePPC64Target);
+  RegisterTargetMachine<PPC64TargetMachine> C(ThePPC64LETarget);
 }
 
 PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT,
@@ -162,7 +163,7 @@ void PPCTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
   // Add first the target-independent BasicTTI pass, then our PPC pass. This
   // allows the PPC pass to delegate to the target independent layer when
   // appropriate.
-  PM.add(createBasicTargetTransformInfoPass(getTargetLowering()));
+  PM.add(createBasicTargetTransformInfoPass(this));
   PM.add(createPPCTargetTransformInfoPass(this));
 }
 
diff --git a/lib/Target/PowerPC/PPCTargetObjectFile.cpp b/lib/Target/PowerPC/PPCTargetObjectFile.cpp
index 90e4f15..ec1e606 100644
--- a/lib/Target/PowerPC/PPCTargetObjectFile.cpp
+++ b/lib/Target/PowerPC/PPCTargetObjectFile.cpp
@@ -55,3 +55,13 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
 
   return DefaultSection;
 }
+
+const MCExpr *PPC64LinuxTargetObjectFile::
+getDebugThreadLocalSymbol(const MCSymbol *Sym) const {
+  const MCExpr *Expr =
+    MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_PPC_DTPREL, getContext());
+  return MCBinaryExpr::CreateAdd(Expr,
+                                 MCConstantExpr::Create(0x8000, getContext()),
+                                 getContext());
+}
+
diff --git a/lib/Target/PowerPC/PPCTargetObjectFile.h b/lib/Target/PowerPC/PPCTargetObjectFile.h
index 9203e23..262c522 100644
--- a/lib/Target/PowerPC/PPCTargetObjectFile.h
+++ b/lib/Target/PowerPC/PPCTargetObjectFile.h
@@ -25,6 +25,9 @@ namespace llvm {
     virtual const MCSection *
     SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
                            Mangler *Mang, const TargetMachine &TM) const;
+
+    /// \brief Describe a TLS variable address within debug info.
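/// Note (hedged, not part of this patch): the implementation above emits
/// Sym@dtprel + 0x8000. On PPC64 ELF the DTP-relative offsets used by code
/// generation are biased by -0x8000 (so the low 16-bit @l half stays in
/// signed range); adding 0x8000 back gives the debugger the variable's true
/// offset from the start of the TLS block.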
+    virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
   };
 
 } // end namespace llvm
diff --git a/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp b/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
index fa44331..5727dbc 100644
--- a/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
+++ b/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
@@ -12,7 +12,7 @@
 #include "llvm/Support/TargetRegistry.h"
 using namespace llvm;
 
-Target llvm::ThePPC32Target, llvm::ThePPC64Target;
+Target llvm::ThePPC32Target, llvm::ThePPC64Target, llvm::ThePPC64LETarget;
 
 extern "C" void LLVMInitializePowerPCTargetInfo() {
   RegisterTarget<Triple::ppc, /*HasJIT=*/true>
@@ -20,4 +20,7 @@ extern "C" void LLVMInitializePowerPCTargetInfo() {
 
   RegisterTarget<Triple::ppc64, /*HasJIT=*/true>
   Y(ThePPC64Target, "ppc64", "PowerPC 64");
+
+  RegisterTarget<Triple::ppc64le, /*HasJIT=*/true>
+  Z(ThePPC64LETarget, "ppc64le", "PowerPC 64 LE");
 }
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index f284291..6b374cb 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -31,12 +31,12 @@ FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm);
 FunctionPass *createR600EmitClauseMarkers(TargetMachine &tm);
 FunctionPass *createR600Packetizer(TargetMachine &tm);
 FunctionPass *createR600ControlFlowFinalizer(TargetMachine &tm);
-FunctionPass *createAMDGPUCFGPreparationPass(TargetMachine &tm);
 FunctionPass *createAMDGPUCFGStructurizerPass(TargetMachine &tm);
 
 // SI Passes
 FunctionPass *createSIAnnotateControlFlowPass();
 FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
+FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
 FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
 FunctionPass *createSIInsertWaits(TargetMachine &tm);
 
@@ -46,6 +46,10 @@
 FunctionPass *createAMDGPUConvertToISAPass(TargetMachine &tm);
 FunctionPass *createAMDGPUIndirectAddressingPass(TargetMachine &tm);
 FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
+/// \brief Creates an AMDGPU-specific Target Transformation Info pass.
+ImmutablePass *
+createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM);
+
 extern Target TheAMDGPUTarget;
 
 } // End namespace llvm
@@ -75,6 +79,12 @@ enum AddressSpaces {
   ADDRESS_NONE = 5, ///< Address space for unknown memory.
   PARAM_D_ADDRESS = 6, ///< Address space for direct addressible parameter memory (CONST0)
   PARAM_I_ADDRESS = 7, ///< Address space for indirect addressible parameter memory (VTX1)
+
+  // Do not re-order the CONSTANT_BUFFER_* enums.
Several places depend on this + // order to be able to dynamically index a constant buffer, for example: + // + // ConstantBufferAS = CONSTANT_BUFFER_0 + CBIdx + CONSTANT_BUFFER_0 = 8, CONSTANT_BUFFER_1 = 9, CONSTANT_BUFFER_2 = 10, diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp index f720c7e..e039b77 100644 --- a/lib/Target/R600/AMDGPUAsmPrinter.cpp +++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp @@ -29,6 +29,7 @@ #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCStreamer.h" #include "llvm/Support/ELF.h" +#include "llvm/Support/MathExtras.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/TargetLoweringObjectFile.h" @@ -130,6 +131,11 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) { S_STACK_SIZE(MFI->StackSize), 4); OutStreamer.EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4); OutStreamer.EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4); + + if (MFI->ShaderType == ShaderType::COMPUTE) { + OutStreamer.EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4); + OutStreamer.EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4); + } } void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) { @@ -227,7 +233,14 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) { OutStreamer.EmitIntValue(RsrcReg, 4); OutStreamer.EmitIntValue(S_00B028_VGPRS(MaxVGPR / 4) | S_00B028_SGPRS(MaxSGPR / 8), 4); + + if (MFI->ShaderType == ShaderType::COMPUTE) { + OutStreamer.EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4); + OutStreamer.EmitIntValue(S_00B84C_LDS_SIZE(RoundUpToAlignment(MFI->LDSSize, 256) >> 8), 4); + } if (MFI->ShaderType == ShaderType::PIXEL) { + OutStreamer.EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4); + OutStreamer.EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(RoundUpToAlignment(MFI->LDSSize, 256) >> 8), 4); OutStreamer.EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4); OutStreamer.EmitIntValue(MFI->PSInputAddr, 4); } diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td index 84e4f3a..fc95d58 100644 --- a/lib/Target/R600/AMDGPUCallingConv.td +++ b/lib/Target/R600/AMDGPUCallingConv.td @@ -36,17 +36,24 @@ def CC_SI : CallingConv<[ ]>; -// Calling convention for SI compute kernels -def CC_SI_Kernel : CallingConv<[ - CCIfType<[i64], CCAssignToStack <8, 4>>, - CCIfType<[i32, f32], CCAssignToStack <4, 4>>, - CCIfType<[i16], CCAssignToStack <2, 4>>, - CCIfType<[i8], CCAssignToStack <1, 4>> +// Calling convention for compute kernels +def CC_AMDGPU_Kernel : CallingConv<[ + CCIfType<[v4i32, v4f32], CCAssignToStack <16, 16>>, + CCIfType<[i64, f64, v2f32, v2i32], CCAssignToStack < 8, 8>>, + CCIfType<[i32, f32], CCAssignToStack < 4, 4>>, + CCIfType<[i16], CCAssignToStack < 2, 4>>, + CCIfType<[i8], CCAssignToStack < 1, 4>> ]>; def CC_AMDGPU : CallingConv<[ - CCIf<"State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"# - "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_SI_Kernel>>, + CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() == " + "AMDGPUSubtarget::SOUTHERN_ISLANDS && " + "State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"# + "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>, + CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() < " + "AMDGPUSubtarget::SOUTHERN_ISLANDS && " + "State.getMachineFunction().getInfo<R600MachineFunctionInfo>()->" + "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>, CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"# ".getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS", 
CCDelegateTo<CC_SI>> ]>; diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp index 93432a2..f222901 100644 --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp +++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp @@ -17,6 +17,7 @@ #include "R600InstrInfo.h" #include "SIISelLowering.h" #include "llvm/ADT/ValueMap.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/PseudoSourceValue.h" #include "llvm/CodeGen/SelectionDAG.h" @@ -49,7 +50,7 @@ public: private: inline SDValue getSmallIPtrImm(unsigned Imm); bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs, - const R600InstrInfo *TII, std::vector<unsigned> Cst); + const R600InstrInfo *TII); bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &); bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &); @@ -57,22 +58,24 @@ private: bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2); bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2); bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2); + SDValue SimplifyI24(SDValue &Op); + bool SelectI24(SDValue Addr, SDValue &Op); + bool SelectU24(SDValue Addr, SDValue &Op); static bool checkType(const Value *ptr, unsigned int addrspace); - static const Value *getBasePointerValue(const Value *V); static bool isGlobalStore(const StoreSDNode *N); static bool isPrivateStore(const StoreSDNode *N); static bool isLocalStore(const StoreSDNode *N); static bool isRegionStore(const StoreSDNode *N); - static bool isCPLoad(const LoadSDNode *N); - static bool isConstantLoad(const LoadSDNode *N, int cbID); - static bool isGlobalLoad(const LoadSDNode *N); - static bool isParamLoad(const LoadSDNode *N); - static bool isPrivateLoad(const LoadSDNode *N); - static bool isLocalLoad(const LoadSDNode *N); - static bool isRegionLoad(const LoadSDNode *N); + bool isCPLoad(const LoadSDNode *N) const; + bool isConstantLoad(const LoadSDNode *N, int cbID) const; + bool isGlobalLoad(const LoadSDNode *N) const; + bool isParamLoad(const LoadSDNode *N) const; + bool isPrivateLoad(const LoadSDNode *N) const; + bool isLocalLoad(const LoadSDNode *N) const; + bool isRegionLoad(const LoadSDNode *N) const; bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr); bool SelectGlobalValueVariableOffset(SDValue Addr, @@ -92,8 +95,7 @@ FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM return new AMDGPUDAGToDAGISel(TM); } -AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM - ) +AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM) : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) { } @@ -159,23 +161,118 @@ bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) { } SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { + const R600InstrInfo *TII = + static_cast<const R600InstrInfo*>(TM.getInstrInfo()); unsigned int Opc = N->getOpcode(); if (N->isMachineOpcode()) { return NULL; // Already selected. 
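// Note (not part of this patch): returning NULL from Select() tells the
// instruction selector to keep the node as-is; machine nodes are already
// selected, and the CONST_ADDRESS case below similarly rewrites the *users*
// of the node in place (UpdateNodeOperands) rather than replacing the node.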
} switch (Opc) { default: break; + case AMDGPUISD::CONST_ADDRESS: { + for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I); + I != SDNode::use_end(); I = Next) { + Next = llvm::next(I); + if (!I->isMachineOpcode()) { + continue; + } + unsigned Opcode = I->getMachineOpcode(); + bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1; + int SrcIdx = I.getOperandNo(); + int SelIdx; + // Unlike MachineInstrs, SDNodes do not have results in their operand + // list, so we need to increment the SrcIdx, since + // R600InstrInfo::getOperandIdx is based on the MachineInstr indices. + if (HasDst) { + SrcIdx++; + } + + SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx); + if (SelIdx < 0) { + continue; + } + + SDValue CstOffset; + if (N->getValueType(0).isVector() || + !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset)) + continue; + + // Gather constants values + int SrcIndices[] = { + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src2), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W) + }; + std::vector<unsigned> Consts; + for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) { + int OtherSrcIdx = SrcIndices[i]; + int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx); + if (OtherSrcIdx < 0 || OtherSelIdx < 0) { + continue; + } + if (HasDst) { + OtherSrcIdx--; + OtherSelIdx--; + } + if (RegisterSDNode *Reg = + dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) { + if (Reg->getReg() == AMDGPU::ALU_CONST) { + ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(I->getOperand(OtherSelIdx)); + Consts.push_back(Cst->getZExtValue()); + } + } + } + + ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset); + Consts.push_back(Cst->getZExtValue()); + if (!TII->fitsConstReadLimitations(Consts)) + continue; + + // Convert back to SDNode indices + if (HasDst) { + SrcIdx--; + SelIdx--; + } + std::vector<SDValue> Ops; + for (int i = 0, e = I->getNumOperands(); i != e; ++i) { + if (i == SrcIdx) { + Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32)); + } else if (i == SelIdx) { + Ops.push_back(CstOffset); + } else { + Ops.push_back(I->getOperand(i)); + } + } + CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size()); + } + break; + } case ISD::BUILD_VECTOR: { const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { break; } + + unsigned RegClassID; + switch(N->getValueType(0).getVectorNumElements()) { + case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break; + case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break; + default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR"); + } // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG // that adds a 128 bits reg copy when going through TwoAddressInstructions // pass. We want to avoid 128 bits copies as much as possible because they // can't be bundled by our scheduler. 
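// Illustration (not part of this patch): a REG_SEQUENCE operand list has
// the shape { RegClassID, val0, subreg0, val1, subreg1, ... }, so
// RegSeqArgs below carries the class ID chosen above (R600_Reg64 with
// sub0/sub1 for 2 elements, R600_Reg128 with sub0..sub3 for 4).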
SDValue RegSeqArgs[9] = { - CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32), + CurDAG->getTargetConstant(RegClassID, MVT::i32), SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32), SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32), SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32), @@ -205,7 +302,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32); SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32); } else if (N->getValueType(0) == MVT::i64) { - RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32); + RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32); SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32); SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32); } else { @@ -225,7 +322,6 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { break; } - const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo()); uint64_t ImmValue = 0; unsigned ImmReg = AMDGPU::ALU_LITERAL_X; @@ -281,12 +377,18 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { continue; } - int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM); - assert(ImmIdx != -1); + int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), + AMDGPU::OpName::literal); + if (ImmIdx == -1) { + continue; + } - // subtract one from ImmIdx, because the DST operand is usually index - // 0 for MachineInstrs, but we have no DST in the Ops vector. - ImmIdx--; + if (TII->getOperandIdx(Use->getMachineOpcode(), + AMDGPU::OpName::dst) != -1) { + // subtract one from ImmIdx, because the DST operand is usually index + // 0 for MachineInstrs, but we have no DST in the Ops vector. + ImmIdx--; + } // Check that we aren't already using an immediate. 
// XXX: It's possible for an instruction to have more than one @@ -332,12 +434,12 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size()); } } while (IsModified); - + } if (Result && Result->isMachineOpcode() && !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR) - && TII->isALUInstr(Result->getMachineOpcode())) { - // Fold FNEG/FABS/CONST_ADDRESS + && TII->hasInstrModifiers(Result->getMachineOpcode())) { + // Fold FNEG/FABS // TODO: Isel can generate multiple MachineInst, we need to recursively // parse Result bool IsModified = false; @@ -358,7 +460,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { if (PotentialClamp->isMachineOpcode() && PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) { unsigned ClampIdx = - TII->getOperandIdx(Result->getMachineOpcode(), R600Operands::CLAMP); + TII->getOperandIdx(Result->getMachineOpcode(), AMDGPU::OpName::clamp); std::vector<SDValue> Ops; unsigned NumOp = Result->getNumOperands(); for (unsigned i = 0; i < NumOp; ++i) { @@ -377,24 +479,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { } bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, - SDValue &Abs, const R600InstrInfo *TII, - std::vector<unsigned> Consts) { + SDValue &Abs, const R600InstrInfo *TII) { switch (Src.getOpcode()) { - case AMDGPUISD::CONST_ADDRESS: { - SDValue CstOffset; - if (Src.getValueType().isVector() || - !SelectGlobalValueConstantOffset(Src.getOperand(0), CstOffset)) - return false; - - ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset); - Consts.push_back(Cst->getZExtValue()); - if (!TII->fitsConstReadLimitations(Consts)) - return false; - - Src = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32); - Sel = CstOffset; - return true; - } case ISD::FNEG: Src = Src.getOperand(0); Neg = CurDAG->getTargetConstant(1, MVT::i32); @@ -416,39 +502,26 @@ bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode, const R600InstrInfo *TII, std::vector<SDValue> &Ops) { int OperandIdx[] = { - TII->getOperandIdx(Opcode, R600Operands::SRC0), - TII->getOperandIdx(Opcode, R600Operands::SRC1), - TII->getOperandIdx(Opcode, R600Operands::SRC2) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src2) }; int SelIdx[] = { - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL), - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL), - TII->getOperandIdx(Opcode, R600Operands::SRC2_SEL) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel) }; int NegIdx[] = { - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG), - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG), - TII->getOperandIdx(Opcode, R600Operands::SRC2_NEG) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg) }; int AbsIdx[] = { - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS), - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs), -1 }; - // Gather constants values - std::vector<unsigned> Consts; - for (unsigned j = 0; j < 3; j++) { - int SrcIdx = OperandIdx[j]; - if (SrcIdx < 0) - break; - if (RegisterSDNode *Reg = 
dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) { - if (Reg->getReg() == AMDGPU::ALU_CONST) { - ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]); - Consts.push_back(Cst->getZExtValue()); - } - } - } for (unsigned i = 0; i < 3; i++) { if (OperandIdx[i] < 0) @@ -458,7 +531,7 @@ bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode, SDValue &Neg = Ops[NegIdx[i] - 1]; SDValue FakeAbs; SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs; - if (FoldOperand(Src, Sel, Neg, Abs, TII, Consts)) + if (FoldOperand(Src, Sel, Neg, Abs, TII)) return true; } return false; @@ -467,60 +540,46 @@ bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode, bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode, const R600InstrInfo *TII, std::vector<SDValue> &Ops) { int OperandIdx[] = { - TII->getOperandIdx(Opcode, R600Operands::SRC0_X), - TII->getOperandIdx(Opcode, R600Operands::SRC0_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC0_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC0_W), - TII->getOperandIdx(Opcode, R600Operands::SRC1_X), - TII->getOperandIdx(Opcode, R600Operands::SRC1_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC1_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC1_W) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W) }; int SelIdx[] = { - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_X), - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_W), - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_X), - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_W) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W) }; int NegIdx[] = { - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_X), - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_W), - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_X), - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_W) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W) }; int AbsIdx[] = { - TII->getOperandIdx(Opcode, 
R600Operands::SRC0_ABS_X), - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_W), - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_X), - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Y), - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Z), - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_W) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z), + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W) }; - // Gather constants values - std::vector<unsigned> Consts; - for (unsigned j = 0; j < 8; j++) { - int SrcIdx = OperandIdx[j]; - if (SrcIdx < 0) - break; - if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) { - if (Reg->getReg() == AMDGPU::ALU_CONST) { - ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]); - Consts.push_back(Cst->getZExtValue()); - } - } - } - for (unsigned i = 0; i < 8; i++) { if (OperandIdx[i] < 0) return false; @@ -528,7 +587,7 @@ bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode, SDValue &Sel = Ops[SelIdx[i] - 1]; SDValue &Neg = Ops[NegIdx[i] - 1]; SDValue &Abs = Ops[AbsIdx[i] - 1]; - if (FoldOperand(Src, Sel, Neg, Abs, TII, Consts)) + if (FoldOperand(Src, Sel, Neg, Abs, TII)) return true; } return false; @@ -542,46 +601,6 @@ bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) { return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace; } -const Value * AMDGPUDAGToDAGISel::getBasePointerValue(const Value *V) { - if (!V) { - return NULL; - } - const Value *ret = NULL; - ValueMap<const Value *, bool> ValueBitMap; - std::queue<const Value *, std::list<const Value *> > ValueQueue; - ValueQueue.push(V); - while (!ValueQueue.empty()) { - V = ValueQueue.front(); - if (ValueBitMap.find(V) == ValueBitMap.end()) { - ValueBitMap[V] = true; - if (dyn_cast<Argument>(V) && dyn_cast<PointerType>(V->getType())) { - ret = V; - break; - } else if (dyn_cast<GlobalVariable>(V)) { - ret = V; - break; - } else if (dyn_cast<Constant>(V)) { - const ConstantExpr *CE = dyn_cast<ConstantExpr>(V); - if (CE) { - ValueQueue.push(CE->getOperand(0)); - } - } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { - ret = AI; - break; - } else if (const Instruction *I = dyn_cast<Instruction>(V)) { - uint32_t numOps = I->getNumOperands(); - for (uint32_t x = 0; x < numOps; ++x) { - ValueQueue.push(I->getOperand(x)); - } - } else { - assert(!"Found a Value that we didn't know how to handle!"); - } - } - ValueQueue.pop(); - } - return ret; -} - bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) { return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS); } @@ -600,41 +619,37 @@ bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) { return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS); } -bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int cbID) { - if (checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)) { - return true; - } - MachineMemOperand *MMO = N->getMemOperand(); - const Value *V = MMO->getValue(); - const Value *BV = getBasePointerValue(V); - if (MMO - && MMO->getValue() - && ((V && 
dyn_cast<GlobalValue>(V)) - || (BV && dyn_cast<GlobalValue>( - getBasePointerValue(MMO->getValue()))))) { - return checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS); - } else { - return false; +bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const { + if (CbId == -1) { + return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS); } + return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId); } -bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) { +bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const { + if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) { + const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); + if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS || + N->getMemoryVT().bitsLT(MVT::i32)) { + return true; + } + } return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS); } -bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) { +bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const { return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS); } -bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) { +bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const { return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS); } -bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) { +bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const { return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS); } -bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) { +bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const { MachineMemOperand *MMO = N->getMemOperand(); if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) { if (MMO) { @@ -648,7 +663,7 @@ bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) { return false; } -bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) { +bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const { if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) { // Check to make sure we are not a constant pool load or a constant load // that is marked as a private load @@ -676,7 +691,9 @@ const char *AMDGPUDAGToDAGISel::getPassName() const { #endif #undef DEBUGTMP -///==== AMDGPU Functions ====/// +//===----------------------------------------------------------------------===// +// Complex Patterns +//===----------------------------------------------------------------------===// bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr) { @@ -743,6 +760,49 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base, return true; } +SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) { + APInt Demanded = APInt(32, 0x00FFFFFF); + APInt KnownZero, KnownOne; + TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true); + const TargetLowering *TLI = getTargetLowering(); + if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) { + CurDAG->ReplaceAllUsesWith(Op, TLO.New); + CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode()); + return SimplifyI24(TLO.New); + } else { + return Op; + } +} + +bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) { + + assert(Op.getValueType() == MVT::i32); + + if (CurDAG->ComputeNumSignBits(Op) == 9) { + I24 = SimplifyI24(Op); + return true; + } + return false; +} + +bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) { + APInt KnownZero; + APInt KnownOne; + CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne); + + assert (Op.getValueType() == MVT::i32); + + // ANY_EXTEND and EXTLOAD 
operations can only be done on types smaller than + // i32. These smaller types are legal to use with the i24 instructions. + if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 || + Op.getOpcode() == ISD::ANY_EXTEND || + ISD::isEXTLoad(Op.getNode())) { + U24 = SimplifyI24(Op); + return true; + } + return false; +} + void AMDGPUDAGToDAGISel::PostprocessISelDAG() { if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) { @@ -750,32 +810,12 @@ void AMDGPUDAGToDAGISel::PostprocessISelDAG() { } // Go over all selected nodes and try to fold them a bit more - const AMDGPUTargetLowering& Lowering = (*(const AMDGPUTargetLowering*)TLI); + const AMDGPUTargetLowering& Lowering = + (*(const AMDGPUTargetLowering*)getTargetLowering()); for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(), E = CurDAG->allnodes_end(); I != E; ++I) { SDNode *Node = I; - switch (Node->getOpcode()) { - // Fix the register class in copy to CopyToReg nodes - ISel will always - // use SReg classes for 64-bit copies, but this is not always what we want. - case ISD::CopyToReg: { - unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg(); - SDValue Val = Node->getOperand(2); - const TargetRegisterClass *RC = RegInfo->getRegClass(Reg); - if (RC != &AMDGPU::SReg_64RegClass) { - continue; - } - - if (!Val.getNode()->isMachineOpcode()) { - continue; - } - - const MCInstrDesc Desc = TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode()); - const TargetRegisterInfo *TRI = TM.getRegisterInfo(); - RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass)); - continue; - } - } MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I); if (!MachineNode) diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp index 02d6fab..efd2756 100644 --- a/lib/Target/R600/AMDGPUISelLowering.cpp +++ b/lib/Target/R600/AMDGPUISelLowering.cpp @@ -18,12 +18,14 @@ #include "AMDGPURegisterInfo.h" #include "AMDGPUSubtarget.h" #include "AMDILIntrinsicInfo.h" +#include "R600MachineFunctionInfo.h" #include "SIMachineFunctionInfo.h" #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/IR/DataLayout.h" using namespace llvm; @@ -56,20 +58,90 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) : setOperationAction(ISD::STORE, MVT::f32, Promote); AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32); + setOperationAction(ISD::STORE, MVT::v2f32, Promote); + AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32); + setOperationAction(ISD::STORE, MVT::v4f32, Promote); AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32); + setOperationAction(ISD::STORE, MVT::f64, Promote); + AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64); + setOperationAction(ISD::LOAD, MVT::f32, Promote); AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32); + setOperationAction(ISD::LOAD, MVT::v2f32, Promote); + AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32); + setOperationAction(ISD::LOAD, MVT::v4f32, Promote); AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32); + setOperationAction(ISD::LOAD, MVT::f64, Promote); + AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64); + + setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Expand); + setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Expand); + + setOperationAction(ISD::FNEG, MVT::v2f32, Expand); + setOperationAction(ISD::FNEG, MVT::v4f32, 
Expand); + setOperationAction(ISD::MUL, MVT::i64, Expand); setOperationAction(ISD::UDIV, MVT::i32, Expand); setOperationAction(ISD::UDIVREM, MVT::i32, Custom); setOperationAction(ISD::UREM, MVT::i32, Expand); + setOperationAction(ISD::VSELECT, MVT::v2f32, Expand); + setOperationAction(ISD::VSELECT, MVT::v4f32, Expand); + + static const int types[] = { + (int)MVT::v2i32, + (int)MVT::v4i32 + }; + const size_t NumTypes = array_lengthof(types); + + for (unsigned int x = 0; x < NumTypes; ++x) { + MVT::SimpleValueType VT = (MVT::SimpleValueType)types[x]; + //Expand the following operations for the current type by default + setOperationAction(ISD::ADD, VT, Expand); + setOperationAction(ISD::AND, VT, Expand); + setOperationAction(ISD::FP_TO_SINT, VT, Expand); + setOperationAction(ISD::FP_TO_UINT, VT, Expand); + setOperationAction(ISD::MUL, VT, Expand); + setOperationAction(ISD::OR, VT, Expand); + setOperationAction(ISD::SHL, VT, Expand); + setOperationAction(ISD::SINT_TO_FP, VT, Expand); + setOperationAction(ISD::SRL, VT, Expand); + setOperationAction(ISD::SRA, VT, Expand); + setOperationAction(ISD::SUB, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + setOperationAction(ISD::UINT_TO_FP, VT, Expand); + setOperationAction(ISD::UREM, VT, Expand); + setOperationAction(ISD::VSELECT, VT, Expand); + setOperationAction(ISD::XOR, VT, Expand); + } +} + +//===----------------------------------------------------------------------===// +// Target Information +//===----------------------------------------------------------------------===// + +MVT AMDGPUTargetLowering::getVectorIdxTy() const { + return MVT::i32; +} + + +//===---------------------------------------------------------------------===// +// Target Properties +//===---------------------------------------------------------------------===// + +bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const { + assert(VT.isFloatingPoint()); + return VT == MVT::f32; +} + +bool AMDGPUTargetLowering::isFNegFree(EVT VT) const { + assert(VT.isFloatingPoint()); + return VT == MVT::f32; } //===---------------------------------------------------------------------===// @@ -116,6 +188,26 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) return Op; } +SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI, + SDValue Op, + SelectionDAG &DAG) const { + + const DataLayout *TD = getTargetMachine().getDataLayout(); + GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op); + // XXX: What does the value of G->getOffset() mean? + assert(G->getOffset() == 0 && + "Do not know what to do with an non-zero offset"); + + unsigned Offset = MFI->LDSSize; + const GlobalValue *GV = G->getGlobal(); + uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType()); + + // XXX: Account for alignment? + MFI->LDSSize += Size; + + return DAG.getConstant(Offset, TD->getPointerSize() == 8 ? 
MVT::i64 : MVT::i32); +} + SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h index 69a0ac9..f614e23 100644 --- a/lib/Target/R600/AMDGPUISelLowering.h +++ b/lib/Target/R600/AMDGPUISelLowering.h @@ -20,6 +20,7 @@ namespace llvm { +class AMDGPUMachineFunction; class MachineRegisterInfo; class AMDGPUTargetLowering : public TargetLowering { @@ -36,6 +37,8 @@ protected: virtual SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC, unsigned Reg, EVT VT) const; + SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op, + SelectionDAG &DAG) const; bool isHWTrueValue(SDValue Op) const; bool isHWFalseValue(SDValue Op) const; @@ -46,6 +49,9 @@ protected: public: AMDGPUTargetLowering(TargetMachine &TM); + virtual bool isFAbsFree(EVT VT) const; + virtual bool isFNegFree(EVT VT) const; + virtual MVT getVectorIdxTy() const; virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, @@ -118,6 +124,8 @@ enum { // End AMDIL ISD Opcodes DWORDADDR, FRACT, + COS_HW, + SIN_HW, FMAX, SMAX, UMAX, diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp index 31b3002..61437e9 100644 --- a/lib/Target/R600/AMDGPUInstrInfo.cpp +++ b/lib/Target/R600/AMDGPUInstrInfo.cpp @@ -21,6 +21,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #define GET_INSTRINFO_CTOR +#define GET_INSTRINFO_NAMED_OPS #define GET_INSTRMAP_INFO #include "AMDGPUGenInstrInfo.inc" diff --git a/lib/Target/R600/AMDGPUInstrInfo.h b/lib/Target/R600/AMDGPUInstrInfo.h index 3909e4e..306f467 100644 --- a/lib/Target/R600/AMDGPUInstrInfo.h +++ b/lib/Target/R600/AMDGPUInstrInfo.h @@ -23,6 +23,7 @@ #define GET_INSTRINFO_HEADER #define GET_INSTRINFO_ENUM +#define GET_INSTRINFO_OPERAND_ENUM #include "AMDGPUGenInstrInfo.inc" #define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT @@ -198,6 +199,10 @@ public: }; +namespace AMDGPU { + int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex); +} // End namespace AMDGPU + } // End llvm namespace #define AMDGPU_FLAG_REGISTER_LOAD (UINT64_C(1) << 63) diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td index 29df374..d6a7759 100644 --- a/lib/Target/R600/AMDGPUInstructions.td +++ b/lib/Target/R600/AMDGPUInstructions.td @@ -86,14 +86,75 @@ def COND_NULL : PatLeaf < // Load/Store Pattern Fragments //===----------------------------------------------------------------------===// -def zextloadi8_global : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{ +def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + LoadSDNode *L = cast<LoadSDNode>(N); + return L->getExtensionType() == ISD::ZEXTLOAD || + L->getExtensionType() == ISD::EXTLOAD; +}]>; + +def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; + +def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{ return isGlobalLoad(dyn_cast<LoadSDNode>(N)); }]>; -def zextloadi8_constant : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{ +def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{ + return isConstantLoad(dyn_cast<LoadSDNode>(N), -1); +}]>; + +def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{ + return 
isConstantLoad(dyn_cast<LoadSDNode>(N), -1); +}]>; + +def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{ return isGlobalLoad(dyn_cast<LoadSDNode>(N)); }]>; +def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; + +def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{ + return isGlobalLoad(dyn_cast<LoadSDNode>(N)); +}]>; + +def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{ + return isGlobalLoad(dyn_cast<LoadSDNode>(N)); +}]>; + +def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{ + return isConstantLoad(dyn_cast<LoadSDNode>(N), -1); +}]>; + +def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{ + return isConstantLoad(dyn_cast<LoadSDNode>(N), -1); +}]>; + +def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + +def az_extloadi32_global : PatFrag<(ops node:$ptr), + (az_extloadi32 node:$ptr), [{ + return isGlobalLoad(dyn_cast<LoadSDNode>(N)); +}]>; + +def az_extloadi32_constant : PatFrag<(ops node:$ptr), + (az_extloadi32 node:$ptr), [{ + return isConstantLoad(dyn_cast<LoadSDNode>(N), -1); +}]>; + +def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{ + return isLocalLoad(dyn_cast<LoadSDNode>(N)); +}]>; + +def local_store : PatFrag<(ops node:$val, node:$ptr), + (store node:$val, node:$ptr), [{ + return isLocalStore(dyn_cast<StoreSDNode>(N)); +}]>; + class Constants { int TWO_PI = 0x40c90fdb; int PI = 0x40490fdb; @@ -112,6 +173,9 @@ def FP_ONE : PatLeaf < [{return N->isExactlyValue(1.0);}] >; +def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>; +def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>; + let isCodeGenOnly = 1, isPseudo = 1 in { let usesCustomInserter = 1 in { @@ -305,6 +369,16 @@ class ROTRPattern <Instruction BIT_ALIGN> : Pat < (BIT_ALIGN $src0, $src0, $src1) >; +// 24-bit arithmetic patterns +def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>; + +/* +class UMUL24Pattern <Instruction UMUL24> : Pat < + (mul U24:$x, U24:$y), + (UMUL24 $x, $y) +>; +*/ + include "R600Instructions.td" include "SIInstrInfo.td" diff --git a/lib/Target/R600/AMDGPUIntrinsics.td b/lib/Target/R600/AMDGPUIntrinsics.td index eecb25b..9f975bf 100644 --- a/lib/Target/R600/AMDGPUIntrinsics.td +++ b/lib/Target/R600/AMDGPUIntrinsics.td @@ -50,6 +50,8 @@ let TargetPrefix = "AMDGPU", isTarget = 1 in { def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + + def int_AMDGPU_barrier_local : Intrinsic<[], [], []>; } let TargetPrefix = "TGSI", isTarget = 1 in { diff --git a/lib/Target/R600/AMDGPUMachineFunction.cpp b/lib/Target/R600/AMDGPUMachineFunction.cpp index 0461025..f2342b0 100644 --- a/lib/Target/R600/AMDGPUMachineFunction.cpp +++ b/lib/Target/R600/AMDGPUMachineFunction.cpp @@ -2,14 +2,14 @@ #include "AMDGPU.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/Function.h" +using namespace llvm; -namespace llvm { - -const char *AMDGPUMachineFunction::ShaderTypeAttribute = "ShaderType"; +static const char *const ShaderTypeAttribute = "ShaderType"; AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) : MachineFunctionInfo() { ShaderType = ShaderType::COMPUTE; + 
LDSSize = 0; AttributeSet Set = MF.getFunction()->getAttributes(); Attribute A = Set.getAttribute(AttributeSet::FunctionIndex, ShaderTypeAttribute); @@ -20,5 +20,3 @@ AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) : llvm_unreachable("Can't parse shader type!"); } } - -} diff --git a/lib/Target/R600/AMDGPUMachineFunction.h b/lib/Target/R600/AMDGPUMachineFunction.h index 21c8c51..789b96a 100644 --- a/lib/Target/R600/AMDGPUMachineFunction.h +++ b/lib/Target/R600/AMDGPUMachineFunction.h @@ -18,11 +18,11 @@ namespace llvm { class AMDGPUMachineFunction : public MachineFunctionInfo { -private: - static const char *ShaderTypeAttribute; public: AMDGPUMachineFunction(const MachineFunction &MF); unsigned ShaderType; + /// Number of bytes in the LDS that are being used. + unsigned LDSSize; }; } diff --git a/lib/Target/R600/AMDGPUStructurizeCFG.cpp b/lib/Target/R600/AMDGPUStructurizeCFG.cpp deleted file mode 100644 index d26783d..0000000 --- a/lib/Target/R600/AMDGPUStructurizeCFG.cpp +++ /dev/null @@ -1,896 +0,0 @@ -//===-- AMDGPUStructurizeCFG.cpp - ------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -/// \file -/// The pass implemented in this file transforms the programs control flow -/// graph into a form that's suitable for code generation on hardware that -/// implements control flow by execution masking. This currently includes all -/// AMD GPUs but may as well be useful for other types of hardware. -// -//===----------------------------------------------------------------------===// - -#include "AMDGPU.h" -#include "llvm/ADT/MapVector.h" -#include "llvm/ADT/SCCIterator.h" -#include "llvm/Analysis/RegionInfo.h" -#include "llvm/Analysis/RegionIterator.h" -#include "llvm/Analysis/RegionPass.h" -#include "llvm/IR/Module.h" -#include "llvm/Support/PatternMatch.h" -#include "llvm/Transforms/Utils/SSAUpdater.h" - -using namespace llvm; -using namespace llvm::PatternMatch; - -namespace { - -// Definition of the complex types used in this pass. - -typedef std::pair<BasicBlock *, Value *> BBValuePair; - -typedef SmallVector<RegionNode*, 8> RNVector; -typedef SmallVector<BasicBlock*, 8> BBVector; -typedef SmallVector<BranchInst*, 8> BranchVector; -typedef SmallVector<BBValuePair, 2> BBValueVector; - -typedef SmallPtrSet<BasicBlock *, 8> BBSet; - -typedef MapVector<PHINode *, BBValueVector> PhiMap; -typedef MapVector<BasicBlock *, BBVector> BB2BBVecMap; - -typedef DenseMap<DomTreeNode *, unsigned> DTN2UnsignedMap; -typedef DenseMap<BasicBlock *, PhiMap> BBPhiMap; -typedef DenseMap<BasicBlock *, Value *> BBPredicates; -typedef DenseMap<BasicBlock *, BBPredicates> PredMap; -typedef DenseMap<BasicBlock *, BasicBlock*> BB2BBMap; - -// The name for newly created blocks. 
- -static const char *FlowBlockName = "Flow"; - -/// @brief Find the nearest common dominator for multiple BasicBlocks -/// -/// Helper class for AMDGPUStructurizeCFG -/// TODO: Maybe move into common code -class NearestCommonDominator { - - DominatorTree *DT; - - DTN2UnsignedMap IndexMap; - - BasicBlock *Result; - unsigned ResultIndex; - bool ExplicitMentioned; - -public: - /// \brief Start a new query - NearestCommonDominator(DominatorTree *DomTree) { - DT = DomTree; - Result = 0; - } - - /// \brief Add BB to the resulting dominator - void addBlock(BasicBlock *BB, bool Remember = true) { - - DomTreeNode *Node = DT->getNode(BB); - - if (Result == 0) { - unsigned Numbering = 0; - for (;Node;Node = Node->getIDom()) - IndexMap[Node] = ++Numbering; - Result = BB; - ResultIndex = 1; - ExplicitMentioned = Remember; - return; - } - - for (;Node;Node = Node->getIDom()) - if (IndexMap.count(Node)) - break; - else - IndexMap[Node] = 0; - - assert(Node && "Dominator tree invalid!"); - - unsigned Numbering = IndexMap[Node]; - if (Numbering > ResultIndex) { - Result = Node->getBlock(); - ResultIndex = Numbering; - ExplicitMentioned = Remember && (Result == BB); - } else if (Numbering == ResultIndex) { - ExplicitMentioned |= Remember; - } - } - - /// \brief Is "Result" one of the BBs added with "Remember" = True? - bool wasResultExplicitMentioned() { - return ExplicitMentioned; - } - - /// \brief Get the query result - BasicBlock *getResult() { - return Result; - } -}; - -/// @brief Transforms the control flow graph on one single entry/exit region -/// at a time. -/// -/// After the transform all "If"/"Then"/"Else" style control flow looks like -/// this: -/// -/// \verbatim -/// 1 -/// || -/// | | -/// 2 | -/// | / -/// |/ -/// 3 -/// || Where: -/// | | 1 = "If" block, calculates the condition -/// 4 | 2 = "Then" subregion, runs if the condition is true -/// | / 3 = "Flow" blocks, newly inserted flow blocks, rejoin the flow -/// |/ 4 = "Else" optional subregion, runs if the condition is false -/// 5 5 = "End" block, also rejoins the control flow -/// \endverbatim -/// -/// Control flow is expressed as a branch where the true exit goes into the -/// "Then"/"Else" region, while the false exit skips the region. -/// The condition for the optional "Else" region is expressed as a PHI node. -/// The incoming values of the PHI node are true for the "If" edge and false -/// for the "Then" edge. -/// -/// Additionally, even complicated loops look like this: -/// -/// \verbatim -/// 1 -/// || -/// | | -/// 2 ^ Where: -/// | / 1 = "Entry" block -/// |/ 2 = "Loop" optional subregion, with all exits at "Flow" block -/// 3 3 = "Flow" block, with back edge to entry block -/// | -/// \endverbatim -/// -/// The back edge of the "Flow" block is always on the false side of the branch -/// while the true side continues the general flow. So the loop condition -/// consists of a network of PHI nodes where the true incoming values express -/// breaks and the false values express continue states.
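// Reviewer sketch, not part of the patch: the documentation above can be read
// as LLVM IR. After structurization, a Flow block's branch condition is a PHI
// over its predecessors' predicates, along the lines of (block names
// invented):
//
//   Flow:                                              ; rejoins control flow
//     %pred = phi i1 [ true, %If ], [ false, %Then ]   ; "Else" runs iff true
//     br i1 %pred, label %Else, label %End
//
// and for loops the back edge sits on the false successor, so true incoming
// values encode breaks and false values encode continues. A later hunk in
// this diff replaces this target pass with the generic
// createStructurizeCFGPass(), which is where the same scheme now lives.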
-class AMDGPUStructurizeCFG : public RegionPass { - - static char ID; - - Type *Boolean; - ConstantInt *BoolTrue; - ConstantInt *BoolFalse; - UndefValue *BoolUndef; - - Function *Func; - Region *ParentRegion; - - DominatorTree *DT; - - RNVector Order; - BBSet Visited; - - BBPhiMap DeletedPhis; - BB2BBVecMap AddedPhis; - - PredMap Predicates; - BranchVector Conditions; - - BB2BBMap Loops; - PredMap LoopPreds; - BranchVector LoopConds; - - RegionNode *PrevNode; - - void orderNodes(); - - void analyzeLoops(RegionNode *N); - - Value *invert(Value *Condition); - - Value *buildCondition(BranchInst *Term, unsigned Idx, bool Invert); - - void gatherPredicates(RegionNode *N); - - void collectInfos(); - - void insertConditions(bool Loops); - - void delPhiValues(BasicBlock *From, BasicBlock *To); - - void addPhiValues(BasicBlock *From, BasicBlock *To); - - void setPhiValues(); - - void killTerminator(BasicBlock *BB); - - void changeExit(RegionNode *Node, BasicBlock *NewExit, - bool IncludeDominator); - - BasicBlock *getNextFlow(BasicBlock *Dominator); - - BasicBlock *needPrefix(bool NeedEmpty); - - BasicBlock *needPostfix(BasicBlock *Flow, bool ExitUseAllowed); - - void setPrevNode(BasicBlock *BB); - - bool dominatesPredicates(BasicBlock *BB, RegionNode *Node); - - bool isPredictableTrue(RegionNode *Node); - - void wireFlow(bool ExitUseAllowed, BasicBlock *LoopEnd); - - void handleLoops(bool ExitUseAllowed, BasicBlock *LoopEnd); - - void createFlow(); - - void rebuildSSA(); - -public: - AMDGPUStructurizeCFG(): - RegionPass(ID) { - - initializeRegionInfoPass(*PassRegistry::getPassRegistry()); - } - - using Pass::doInitialization; - virtual bool doInitialization(Region *R, RGPassManager &RGM); - - virtual bool runOnRegion(Region *R, RGPassManager &RGM); - - virtual const char *getPassName() const { - return "AMDGPU simplify control flow"; - } - - void getAnalysisUsage(AnalysisUsage &AU) const { - - AU.addRequired<DominatorTree>(); - AU.addPreserved<DominatorTree>(); - RegionPass::getAnalysisUsage(AU); - } - -}; - -} // end anonymous namespace - -char AMDGPUStructurizeCFG::ID = 0; - -/// \brief Initialize the types and constants used in the pass -bool AMDGPUStructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) { - LLVMContext &Context = R->getEntry()->getContext(); - - Boolean = Type::getInt1Ty(Context); - BoolTrue = ConstantInt::getTrue(Context); - BoolFalse = ConstantInt::getFalse(Context); - BoolUndef = UndefValue::get(Boolean); - - return false; -} - -/// \brief Build up the general order of nodes -void AMDGPUStructurizeCFG::orderNodes() { - scc_iterator<Region *> I = scc_begin(ParentRegion), - E = scc_end(ParentRegion); - for (Order.clear(); I != E; ++I) { - std::vector<RegionNode *> &Nodes = *I; - Order.append(Nodes.begin(), Nodes.end()); - } -} - -/// \brief Determine the end of the loops -void AMDGPUStructurizeCFG::analyzeLoops(RegionNode *N) { - - if (N->isSubRegion()) { - // Test for exit as back edge - BasicBlock *Exit = N->getNodeAs<Region>()->getExit(); - if (Visited.count(Exit)) - Loops[Exit] = N->getEntry(); - - } else { - // Test for successors as back edge - BasicBlock *BB = N->getNodeAs<BasicBlock>(); - BranchInst *Term = cast<BranchInst>(BB->getTerminator()); - - for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) { - BasicBlock *Succ = Term->getSuccessor(i); - - if (Visited.count(Succ)) - Loops[Succ] = BB; - } - } -} - -/// \brief Invert the given condition -Value *AMDGPUStructurizeCFG::invert(Value *Condition) { - - // First: Check if it's a constant - if
(Condition == BoolTrue) - return BoolFalse; - - if (Condition == BoolFalse) - return BoolTrue; - - if (Condition == BoolUndef) - return BoolUndef; - - // Second: If the condition is already inverted, return the original value - if (match(Condition, m_Not(m_Value(Condition)))) - return Condition; - - // Third: Check all the users for an invert - BasicBlock *Parent = cast<Instruction>(Condition)->getParent(); - for (Value::use_iterator I = Condition->use_begin(), - E = Condition->use_end(); I != E; ++I) { - - Instruction *User = dyn_cast<Instruction>(*I); - if (!User || User->getParent() != Parent) - continue; - - if (match(*I, m_Not(m_Specific(Condition)))) - return *I; - } - - // Last option: Create a new instruction - return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator()); -} - -/// \brief Build the condition for one edge -Value *AMDGPUStructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx, - bool Invert) { - Value *Cond = Invert ? BoolFalse : BoolTrue; - if (Term->isConditional()) { - Cond = Term->getCondition(); - - if (Idx != (unsigned)Invert) - Cond = invert(Cond); - } - return Cond; -} - -/// \brief Analyze the predecessors of each block and build up predicates -void AMDGPUStructurizeCFG::gatherPredicates(RegionNode *N) { - - RegionInfo *RI = ParentRegion->getRegionInfo(); - BasicBlock *BB = N->getEntry(); - BBPredicates &Pred = Predicates[BB]; - BBPredicates &LPred = LoopPreds[BB]; - - for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); - PI != PE; ++PI) { - - // Ignore it if it's a branch from outside into our region entry - if (!ParentRegion->contains(*PI)) - continue; - - Region *R = RI->getRegionFor(*PI); - if (R == ParentRegion) { - - // It's a top level block in our region - BranchInst *Term = cast<BranchInst>((*PI)->getTerminator()); - for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) { - BasicBlock *Succ = Term->getSuccessor(i); - if (Succ != BB) - continue; - - if (Visited.count(*PI)) { - // Normal forward edge - if (Term->isConditional()) { - // Try to treat it like an ELSE block - BasicBlock *Other = Term->getSuccessor(!i); - if (Visited.count(Other) && !Loops.count(Other) && - !Pred.count(Other) && !Pred.count(*PI)) { - - Pred[Other] = BoolFalse; - Pred[*PI] = BoolTrue; - continue; - } - } - Pred[*PI] = buildCondition(Term, i, false); - - } else { - // Back edge - LPred[*PI] = buildCondition(Term, i, true); - } - } - - } else { - - // It's an exit from a sub region - while(R->getParent() != ParentRegion) - R = R->getParent(); - - // Edge from inside a subregion to its entry, ignore it - if (R == N) - continue; - - BasicBlock *Entry = R->getEntry(); - if (Visited.count(Entry)) - Pred[Entry] = BoolTrue; - else - LPred[Entry] = BoolFalse; - } - } -} - -/// \brief Collect various loop and predicate infos -void AMDGPUStructurizeCFG::collectInfos() { - - // Reset predicate - Predicates.clear(); - - // and loop infos - Loops.clear(); - LoopPreds.clear(); - - // Reset the visited nodes - Visited.clear(); - - for (RNVector::reverse_iterator OI = Order.rbegin(), OE = Order.rend(); - OI != OE; ++OI) { - - // Analyze all the conditions leading to a node - gatherPredicates(*OI); - - // Remember that we've seen this node - Visited.insert((*OI)->getEntry()); - - // Find the last back edges - analyzeLoops(*OI); - } -} - -/// \brief Insert the missing branch conditions -void AMDGPUStructurizeCFG::insertConditions(bool Loops) { - BranchVector &Conds = Loops ? LoopConds : Conditions; - Value *Default = Loops ? 
BoolTrue : BoolFalse; - SSAUpdater PhiInserter; - - for (BranchVector::iterator I = Conds.begin(), - E = Conds.end(); I != E; ++I) { - - BranchInst *Term = *I; - assert(Term->isConditional()); - - BasicBlock *Parent = Term->getParent(); - BasicBlock *SuccTrue = Term->getSuccessor(0); - BasicBlock *SuccFalse = Term->getSuccessor(1); - - PhiInserter.Initialize(Boolean, ""); - PhiInserter.AddAvailableValue(&Func->getEntryBlock(), Default); - PhiInserter.AddAvailableValue(Loops ? SuccFalse : Parent, Default); - - BBPredicates &Preds = Loops ? LoopPreds[SuccFalse] : Predicates[SuccTrue]; - - NearestCommonDominator Dominator(DT); - Dominator.addBlock(Parent, false); - - Value *ParentValue = 0; - for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end(); - PI != PE; ++PI) { - - if (PI->first == Parent) { - ParentValue = PI->second; - break; - } - PhiInserter.AddAvailableValue(PI->first, PI->second); - Dominator.addBlock(PI->first); - } - - if (ParentValue) { - Term->setCondition(ParentValue); - } else { - if (!Dominator.wasResultExplicitMentioned()) - PhiInserter.AddAvailableValue(Dominator.getResult(), Default); - - Term->setCondition(PhiInserter.GetValueInMiddleOfBlock(Parent)); - } - } -} - -/// \brief Remove all PHI values coming from "From" into "To" and remember -/// them in DeletedPhis -void AMDGPUStructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) { - PhiMap &Map = DeletedPhis[To]; - for (BasicBlock::iterator I = To->begin(), E = To->end(); - I != E && isa<PHINode>(*I);) { - - PHINode &Phi = cast<PHINode>(*I++); - while (Phi.getBasicBlockIndex(From) != -1) { - Value *Deleted = Phi.removeIncomingValue(From, false); - Map[&Phi].push_back(std::make_pair(From, Deleted)); - } - } -} - -/// \brief Add a dummy PHI value as soon as we knew the new predecessor -void AMDGPUStructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) { - for (BasicBlock::iterator I = To->begin(), E = To->end(); - I != E && isa<PHINode>(*I);) { - - PHINode &Phi = cast<PHINode>(*I++); - Value *Undef = UndefValue::get(Phi.getType()); - Phi.addIncoming(Undef, From); - } - AddedPhis[To].push_back(From); -} - -/// \brief Add the real PHI value as soon as everything is set up -void AMDGPUStructurizeCFG::setPhiValues() { - - SSAUpdater Updater; - for (BB2BBVecMap::iterator AI = AddedPhis.begin(), AE = AddedPhis.end(); - AI != AE; ++AI) { - - BasicBlock *To = AI->first; - BBVector &From = AI->second; - - if (!DeletedPhis.count(To)) - continue; - - PhiMap &Map = DeletedPhis[To]; - for (PhiMap::iterator PI = Map.begin(), PE = Map.end(); - PI != PE; ++PI) { - - PHINode *Phi = PI->first; - Value *Undef = UndefValue::get(Phi->getType()); - Updater.Initialize(Phi->getType(), ""); - Updater.AddAvailableValue(&Func->getEntryBlock(), Undef); - Updater.AddAvailableValue(To, Undef); - - NearestCommonDominator Dominator(DT); - Dominator.addBlock(To, false); - for (BBValueVector::iterator VI = PI->second.begin(), - VE = PI->second.end(); VI != VE; ++VI) { - - Updater.AddAvailableValue(VI->first, VI->second); - Dominator.addBlock(VI->first); - } - - if (!Dominator.wasResultExplicitMentioned()) - Updater.AddAvailableValue(Dominator.getResult(), Undef); - - for (BBVector::iterator FI = From.begin(), FE = From.end(); - FI != FE; ++FI) { - - int Idx = Phi->getBasicBlockIndex(*FI); - assert(Idx != -1); - Phi->setIncomingValue(Idx, Updater.GetValueAtEndOfBlock(*FI)); - } - } - - DeletedPhis.erase(To); - } - assert(DeletedPhis.empty()); -} - -/// \brief Remove phi values from all successors and then remove the terminator. 
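// Reviewer note, not part of the patch: delPhiValues/addPhiValues/setPhiValues
// above form a three-phase protocol for edge rewiring. (1) delPhiValues
// stashes the incoming values of a removed edge in DeletedPhis; (2)
// addPhiValues gives every PHI an undef incoming value for a newly attached
// predecessor and records it in AddedPhis; (3) setPhiValues replays the
// stashed values through an SSAUpdater so each new predecessor receives a
// dominating definition. The core of phase 3 for a single PHI, with Stashed
// and NewPred standing in for DeletedPhis[To][Phi] and an AddedPhis[To] entry:
//
//   SSAUpdater Updater;
//   Updater.Initialize(Phi->getType(), "");
//   Updater.AddAvailableValue(&Func->getEntryBlock(),
//                             UndefValue::get(Phi->getType()));
//   for (BBValueVector::iterator VI = Stashed.begin(), VE = Stashed.end();
//        VI != VE; ++VI)
//     Updater.AddAvailableValue(VI->first, VI->second);
//   int Idx = Phi->getBasicBlockIndex(NewPred);
//   Phi->setIncomingValue(Idx, Updater.GetValueAtEndOfBlock(NewPred));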
-void AMDGPUStructurizeCFG::killTerminator(BasicBlock *BB) { - TerminatorInst *Term = BB->getTerminator(); - if (!Term) - return; - - for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); - SI != SE; ++SI) { - - delPhiValues(BB, *SI); - } - - Term->eraseFromParent(); -} - -/// \brief Let node exit(s) point to NewExit -void AMDGPUStructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit, - bool IncludeDominator) { - - if (Node->isSubRegion()) { - Region *SubRegion = Node->getNodeAs<Region>(); - BasicBlock *OldExit = SubRegion->getExit(); - BasicBlock *Dominator = 0; - - // Find all the edges from the sub region to the exit - for (pred_iterator I = pred_begin(OldExit), E = pred_end(OldExit); - I != E;) { - - BasicBlock *BB = *I++; - if (!SubRegion->contains(BB)) - continue; - - // Modify the edges to point to the new exit - delPhiValues(BB, OldExit); - BB->getTerminator()->replaceUsesOfWith(OldExit, NewExit); - addPhiValues(BB, NewExit); - - // Find the new dominator (if requested) - if (IncludeDominator) { - if (!Dominator) - Dominator = BB; - else - Dominator = DT->findNearestCommonDominator(Dominator, BB); - } - } - - // Change the dominator (if requested) - if (Dominator) - DT->changeImmediateDominator(NewExit, Dominator); - - // Update the region info - SubRegion->replaceExit(NewExit); - - } else { - BasicBlock *BB = Node->getNodeAs<BasicBlock>(); - killTerminator(BB); - BranchInst::Create(NewExit, BB); - addPhiValues(BB, NewExit); - if (IncludeDominator) - DT->changeImmediateDominator(NewExit, BB); - } -} - -/// \brief Create a new flow node and update dominator tree and region info -BasicBlock *AMDGPUStructurizeCFG::getNextFlow(BasicBlock *Dominator) { - LLVMContext &Context = Func->getContext(); - BasicBlock *Insert = Order.empty() ? ParentRegion->getExit() : - Order.back()->getEntry(); - BasicBlock *Flow = BasicBlock::Create(Context, FlowBlockName, - Func, Insert); - DT->addNewBlock(Flow, Dominator); - ParentRegion->getRegionInfo()->setRegionFor(Flow, ParentRegion); - return Flow; -} - -/// \brief Create a new or reuse the previous node as flow node -BasicBlock *AMDGPUStructurizeCFG::needPrefix(bool NeedEmpty) { - - BasicBlock *Entry = PrevNode->getEntry(); - - if (!PrevNode->isSubRegion()) { - killTerminator(Entry); - if (!NeedEmpty || Entry->getFirstInsertionPt() == Entry->end()) - return Entry; - - } - - // create a new flow node - BasicBlock *Flow = getNextFlow(Entry); - - // and wire it up - changeExit(PrevNode, Flow, true); - PrevNode = ParentRegion->getBBNode(Flow); - return Flow; -} - -/// \brief Returns the region exit if possible, otherwise just a new flow node -BasicBlock *AMDGPUStructurizeCFG::needPostfix(BasicBlock *Flow, - bool ExitUseAllowed) { - - if (Order.empty() && ExitUseAllowed) { - BasicBlock *Exit = ParentRegion->getExit(); - DT->changeImmediateDominator(Exit, Flow); - addPhiValues(Flow, Exit); - return Exit; - } - return getNextFlow(Flow); -} - -/// \brief Set the previous node -void AMDGPUStructurizeCFG::setPrevNode(BasicBlock *BB) { - PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB) : 0; -} - -/// \brief Does BB dominate all the predicates of Node ? -bool AMDGPUStructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) { - BBPredicates &Preds = Predicates[Node->getEntry()]; - for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end(); - PI != PE; ++PI) { - - if (!DT->dominates(BB, PI->first)) - return false; - } - return true; -} - -/// \brief Can we predict that this node will always be called? 
-bool AMDGPUStructurizeCFG::isPredictableTrue(RegionNode *Node) { - - BBPredicates &Preds = Predicates[Node->getEntry()]; - bool Dominated = false; - - // Region entry is always true - if (PrevNode == 0) - return true; - - for (BBPredicates::iterator I = Preds.begin(), E = Preds.end(); - I != E; ++I) { - - if (I->second != BoolTrue) - return false; - - if (!Dominated && DT->dominates(I->first, PrevNode->getEntry())) - Dominated = true; - } - - // TODO: The dominator check is too strict - return Dominated; -} - -/// Take one node from the order vector and wire it up -void AMDGPUStructurizeCFG::wireFlow(bool ExitUseAllowed, - BasicBlock *LoopEnd) { - - RegionNode *Node = Order.pop_back_val(); - Visited.insert(Node->getEntry()); - - if (isPredictableTrue(Node)) { - // Just a linear flow - if (PrevNode) { - changeExit(PrevNode, Node->getEntry(), true); - } - PrevNode = Node; - - } else { - // Insert extra prefix node (or reuse last one) - BasicBlock *Flow = needPrefix(false); - - // Insert extra postfix node (or use exit instead) - BasicBlock *Entry = Node->getEntry(); - BasicBlock *Next = needPostfix(Flow, ExitUseAllowed); - - // let it point to entry and next block - Conditions.push_back(BranchInst::Create(Entry, Next, BoolUndef, Flow)); - addPhiValues(Flow, Entry); - DT->changeImmediateDominator(Entry, Flow); - - PrevNode = Node; - while (!Order.empty() && !Visited.count(LoopEnd) && - dominatesPredicates(Entry, Order.back())) { - handleLoops(false, LoopEnd); - } - - changeExit(PrevNode, Next, false); - setPrevNode(Next); - } -} - -void AMDGPUStructurizeCFG::handleLoops(bool ExitUseAllowed, - BasicBlock *LoopEnd) { - RegionNode *Node = Order.back(); - BasicBlock *LoopStart = Node->getEntry(); - - if (!Loops.count(LoopStart)) { - wireFlow(ExitUseAllowed, LoopEnd); - return; - } - - if (!isPredictableTrue(Node)) - LoopStart = needPrefix(true); - - LoopEnd = Loops[Node->getEntry()]; - wireFlow(false, LoopEnd); - while (!Visited.count(LoopEnd)) { - handleLoops(false, LoopEnd); - } - - // Create an extra loop end node - LoopEnd = needPrefix(false); - BasicBlock *Next = needPostfix(LoopEnd, ExitUseAllowed); - LoopConds.push_back(BranchInst::Create(Next, LoopStart, - BoolUndef, LoopEnd)); - addPhiValues(LoopEnd, LoopStart); - setPrevNode(Next); -} - -/// After this function control flow looks like it should be, but -/// branches and PHI nodes only have undefined conditions. -void AMDGPUStructurizeCFG::createFlow() { - - BasicBlock *Exit = ParentRegion->getExit(); - bool EntryDominatesExit = DT->dominates(ParentRegion->getEntry(), Exit); - - DeletedPhis.clear(); - AddedPhis.clear(); - Conditions.clear(); - LoopConds.clear(); - - PrevNode = 0; - Visited.clear(); - - while (!Order.empty()) { - handleLoops(EntryDominatesExit, 0); - } - - if (PrevNode) - changeExit(PrevNode, Exit, EntryDominatesExit); - else - assert(EntryDominatesExit); -} - -/// Handle a rare case where the disintegrated nodes' instructions
/// no longer dominate all their uses. Not sure if this is really necessary. -void AMDGPUStructurizeCFG::rebuildSSA() { - SSAUpdater Updater; - for (Region::block_iterator I = ParentRegion->block_begin(), - E = ParentRegion->block_end(); - I != E; ++I) { - - BasicBlock *BB = *I; - for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); - II != IE; ++II) { - - bool Initialized = false; - for (Use *I = &II->use_begin().getUse(), *Next; I; I = Next) { - - Next = I->getNext(); - - Instruction *User = cast<Instruction>(I->getUser()); - if (User->getParent() == BB) { - continue; - - } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) { - if (UserPN->getIncomingBlock(*I) == BB) - continue; - } - - if (DT->dominates(II, User)) - continue; - - if (!Initialized) { - Value *Undef = UndefValue::get(II->getType()); - Updater.Initialize(II->getType(), ""); - Updater.AddAvailableValue(&Func->getEntryBlock(), Undef); - Updater.AddAvailableValue(BB, II); - Initialized = true; - } - Updater.RewriteUseAfterInsertions(*I); - } - } - } -} - -/// \brief Run the transformation for each region found -bool AMDGPUStructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) { - if (R->isTopLevelRegion()) - return false; - - Func = R->getEntry()->getParent(); - ParentRegion = R; - - DT = &getAnalysis<DominatorTree>(); - - orderNodes(); - collectInfos(); - createFlow(); - insertConditions(false); - insertConditions(true); - setPhiValues(); - rebuildSSA(); - - // Cleanup - Order.clear(); - Visited.clear(); - DeletedPhis.clear(); - AddedPhis.clear(); - Predicates.clear(); - Conditions.clear(); - Loops.clear(); - LoopPreds.clear(); - LoopConds.clear(); - - return true; -} - -/// \brief Create the pass -Pass *llvm::createAMDGPUStructurizeCFGPass() { - return new AMDGPUStructurizeCFG(); -} diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp index 2fba434..5ebc5f2 100644 --- a/lib/Target/R600/AMDGPUTargetMachine.cpp +++ b/lib/Target/R600/AMDGPUTargetMachine.cpp @@ -91,7 +91,6 @@ public: AMDGPUTargetMachine &getAMDGPUTargetMachine() const { return getTM<AMDGPUTargetMachine>(); } - virtual bool addPreISel(); virtual bool addInstSelector(); virtual bool addPreRegAlloc(); @@ -105,11 +104,24 @@ TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) { return new AMDGPUPassConfig(this, PM); } +//===----------------------------------------------------------------------===// +// AMDGPU Analysis Pass Setup +//===----------------------------------------------------------------------===// + +void AMDGPUTargetMachine::addAnalysisPasses(PassManagerBase &PM) { + // Add first the target-independent BasicTTI pass, then our AMDGPU pass. This + // allows the AMDGPU pass to delegate to the target independent layer when + // appropriate.
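// Reviewer note, not part of the patch: in this TTI design,
// TargetTransformInfo is an analysis group. Each TTI ImmutablePass pushes
// itself onto a delegation stack (see pushTTIStack in the new
// AMDGPUTargetTransformInfo.cpp later in this diff), so the pass added last
// is queried first and forwards whatever it does not override; that is why
// BasicTTI must be registered before the AMDGPU pass. A client pass would
// query the group as usual, e.g.:
//
//   const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
//   if (TTI.hasBranchDivergence()) {
//     // Answered by AMDGPUTTI (returns true for R600); most other queries
//     // fall through to BasicTTI's generic cost model.
//   }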
+ PM.add(createBasicTargetTransformInfoPass(this)); + PM.add(createAMDGPUTargetTransformInfoPass(this)); +} + bool AMDGPUPassConfig::addPreISel() { const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); + addPass(createFlattenCFGPass()); if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { - addPass(createAMDGPUStructurizeCFGPass()); + addPass(createStructurizeCFGPass()); addPass(createSIAnnotateControlFlowPass()); } else { addPass(createR600TextureIntrinsicsReplacer()); @@ -134,6 +146,8 @@ bool AMDGPUPassConfig::addPreRegAlloc() { if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { addPass(createR600VectorRegMerger(*TM)); + } else { + addPass(createSIFixSGPRCopiesPass(*TM)); } return false; } @@ -148,7 +162,11 @@ bool AMDGPUPassConfig::addPostRegAlloc() { } bool AMDGPUPassConfig::addPreSched2() { + const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); + if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { + addPass(createR600EmitClauseMarkers(*TM)); + } addPass(&IfConverterID); return false; } @@ -156,9 +174,7 @@ bool AMDGPUPassConfig::addPreSched2() { bool AMDGPUPassConfig::addPreEmitPass() { const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { - addPass(createAMDGPUCFGPreparationPass(*TM)); addPass(createAMDGPUCFGStructurizerPass(*TM)); - addPass(createR600EmitClauseMarkers(*TM)); addPass(createR600ExpandSpecialInstrsPass(*TM)); addPass(&FinalizeMachineBundlesID); addPass(createR600Packetizer(*TM)); diff --git a/lib/Target/R600/AMDGPUTargetMachine.h b/lib/Target/R600/AMDGPUTargetMachine.h index bb26ed9..f942614 100644 --- a/lib/Target/R600/AMDGPUTargetMachine.h +++ b/lib/Target/R600/AMDGPUTargetMachine.h @@ -25,8 +25,6 @@ namespace llvm { -MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT); - class AMDGPUTargetMachine : public LLVMTargetMachine { AMDGPUSubtarget Subtarget; @@ -63,6 +61,9 @@ public: } virtual const DataLayout *getDataLayout() const { return &Layout; } virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); + + /// \brief Register R600 analysis passes with a pass manager. + virtual void addAnalysisPasses(PassManagerBase &PM); }; } // End namespace llvm diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp new file mode 100644 index 0000000..8db319c --- /dev/null +++ b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp @@ -0,0 +1,90 @@ +//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// \file +// This file implements a TargetTransformInfo analysis pass specific to the +// AMDGPU target machine. It uses the target's detailed information to provide +// more precise answers to certain TTI queries, while letting the target +// independent and default TTI implementations handle the rest. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "AMDGPUtti" +#include "AMDGPU.h" +#include "AMDGPUTargetMachine.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Target/TargetLowering.h" +#include "llvm/Target/CostTable.h" +using namespace llvm; + +// Declare the pass initialization routine locally as target-specific passes +// don't have a target-wide initialization entry point, and so we rely on the +// pass constructor initialization. +namespace llvm { +void initializeAMDGPUTTIPass(PassRegistry &); +} + +namespace { + +class AMDGPUTTI : public ImmutablePass, public TargetTransformInfo { + const AMDGPUTargetMachine *TM; + const AMDGPUSubtarget *ST; + const AMDGPUTargetLowering *TLI; + + /// Estimate the overhead of scalarizing an instruction. Insert and Extract + /// are set if the result needs to be inserted and/or extracted from vectors. + unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const; + +public: + AMDGPUTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) { + llvm_unreachable("This pass cannot be directly constructed"); + } + + AMDGPUTTI(const AMDGPUTargetMachine *TM) + : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()), + TLI(TM->getTargetLowering()) { + initializeAMDGPUTTIPass(*PassRegistry::getPassRegistry()); + } + + virtual void initializePass() { pushTTIStack(this); } + + virtual void finalizePass() { popTTIStack(); } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + TargetTransformInfo::getAnalysisUsage(AU); + } + + /// Pass identification. + static char ID; + + /// Provide necessary pointer adjustments for the two base classes. + virtual void *getAdjustedAnalysisPointer(const void *ID) { + if (ID == &TargetTransformInfo::ID) + return (TargetTransformInfo *)this; + return this; + } + + virtual bool hasBranchDivergence() const; + + /// @} +}; + +} // end anonymous namespace + +INITIALIZE_AG_PASS(AMDGPUTTI, TargetTransformInfo, "AMDGPUtti", + "AMDGPU Target Transform Info", true, true, false) +char AMDGPUTTI::ID = 0; + +ImmutablePass * +llvm::createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM) { + return new AMDGPUTTI(TM); +} + +bool AMDGPUTTI::hasBranchDivergence() const { return true; } diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp index 4910e5d..687eadb 100644 --- a/lib/Target/R600/AMDILCFGStructurizer.cpp +++ b/lib/Target/R600/AMDILCFGStructurizer.cpp @@ -8,14 +8,17 @@ /// \file //==-----------------------------------------------------------------------===// -#define DEBUGME 0 #define DEBUG_TYPE "structcfg" #include "AMDGPU.h" #include "AMDGPUInstrInfo.h" +#include "R600InstrInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/DepthFirstIterator.h" #include "llvm/Analysis/DominatorInternals.h" #include "llvm/Analysis/Dominators.h" #include "llvm/CodeGen/MachineDominators.h" @@ -46,12 +49,8 @@ STATISTIC(numSerialPatternMatch, "CFGStructurizer number of serial pattern " "matched"); STATISTIC(numIfPatternMatch, "CFGStructurizer number of if pattern " "matched"); -STATISTIC(numLoopbreakPatternMatch, "CFGStructurizer number of loop-break " - "pattern matched"); STATISTIC(numLoopcontPatternMatch, "CFGStructurizer number of loop-continue " "pattern matched"); -STATISTIC(numLoopPatternMatch, "CFGStructurizer number of loop 
pattern " - "matched"); STATISTIC(numClonedBlock, "CFGStructurizer cloned blocks"); STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions"); @@ -62,37 +61,27 @@ STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions"); //===----------------------------------------------------------------------===// namespace { #define SHOWNEWINSTR(i) \ - if (DEBUGME) errs() << "New instr: " << *i << "\n" + DEBUG(dbgs() << "New instr: " << *i << "\n"); #define SHOWNEWBLK(b, msg) \ -if (DEBUGME) { \ - errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \ - errs() << "\n"; \ -} +DEBUG( \ + dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \ + dbgs() << "\n"; \ +); #define SHOWBLK_DETAIL(b, msg) \ -if (DEBUGME) { \ +DEBUG( \ if (b) { \ - errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \ - b->print(errs()); \ - errs() << "\n"; \ + dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \ + b->print(dbgs()); \ + dbgs() << "\n"; \ } \ -} +); #define INVALIDSCCNUM -1 -#define INVALIDREGNUM 0 - -template<class LoopinfoT> -void PrintLoopinfo(const LoopinfoT &LoopInfo, llvm::raw_ostream &OS) { - for (typename LoopinfoT::iterator iter = LoopInfo.begin(), - iterEnd = LoopInfo.end(); - iter != iterEnd; ++iter) { - (*iter)->print(OS, 0); - } -} template<class NodeT> -void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) { +void ReverseVector(SmallVectorImpl<NodeT *> &Src) { size_t sz = Src.size(); for (size_t i = 0; i < sz/2; ++i) { NodeT *t = Src[i]; @@ -109,40 +98,14 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) { // //===----------------------------------------------------------------------===// + namespace { -template<class PassT> -struct CFGStructTraits { -}; -template <class InstrT> class BlockInformation { public: - bool isRetired; - int sccNum; - //SmallVector<InstrT*, DEFAULT_VEC_SLOTS> succInstr; - //Instructions defining the corresponding successor. - BlockInformation() : isRetired(false), sccNum(INVALIDSCCNUM) {} -}; - -template <class BlockT, class InstrT, class RegiT> -class LandInformation { -public: - BlockT *landBlk; - std::set<RegiT> breakInitRegs; //Registers that need to "reg = 0", before - //WHILELOOP(thisloop) init before entering - //thisloop. - std::set<RegiT> contInitRegs; //Registers that need to "reg = 0", after - //WHILELOOP(thisloop) init after entering - //thisloop. - std::set<RegiT> endbranchInitRegs; //Init before entering this loop, at loop - //land block, branch cond on this reg. - std::set<RegiT> breakOnRegs; //registers that need to "if (reg) break - //endif" after ENDLOOP(thisloop) break - //outerLoopOf(thisLoop). - std::set<RegiT> contOnRegs; //registers that need to "if (reg) continue - //endif" after ENDLOOP(thisloop) continue on - //outerLoopOf(thisLoop). - LandInformation() : landBlk(NULL) {} + bool IsRetired; + int SccNum; + BlockInformation() : IsRetired(false), SccNum(INVALIDSCCNUM) {} }; } // end anonymous namespace @@ -154,1025 +117,1211 @@ public: //===----------------------------------------------------------------------===// namespace { -// bixia TODO: port it to BasicBlock, not just MachineBasicBlock. 
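// Aside: the SHOWNEWINSTR/SHOWNEWBLK/SHOWBLK_DETAIL rewrites above trade
// the file-local `if (DEBUGME) errs()` guard for LLVM's standard DEBUG()
// macro, which compiles away in NDEBUG builds and is gated at run time by
// -debug (or -debug-only=structcfg, matching this file's DEBUG_TYPE). The
// bare idiom, assuming DEBUG_TYPE is defined before including Debug.h:
//
//   #define DEBUG_TYPE "structcfg"
//   #include "llvm/Support/Debug.h"
//
//   DEBUG(dbgs() << "New instr: " << *MI << "\n");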
-template<class PassT> -class CFGStructurizer { +class AMDGPUCFGStructurizer : public MachineFunctionPass { public: - typedef enum { + typedef SmallVector<MachineBasicBlock *, 32> MBBVector; + typedef std::map<MachineBasicBlock *, BlockInformation *> MBBInfoMap; + typedef std::map<MachineLoop *, MachineBasicBlock *> LoopLandInfoMap; + + enum PathToKind { Not_SinglePath = 0, SinglePath_InPath = 1, SinglePath_NotInPath = 2 - } PathToKind; + }; -public: - typedef typename PassT::InstructionType InstrT; - typedef typename PassT::FunctionType FuncT; - typedef typename PassT::DominatortreeType DomTreeT; - typedef typename PassT::PostDominatortreeType PostDomTreeT; - typedef typename PassT::DomTreeNodeType DomTreeNodeT; - typedef typename PassT::LoopinfoType LoopInfoT; - - typedef GraphTraits<FuncT *> FuncGTraits; - //typedef FuncGTraits::nodes_iterator BlockIterator; - typedef typename FuncT::iterator BlockIterator; - - typedef typename FuncGTraits::NodeType BlockT; - typedef GraphTraits<BlockT *> BlockGTraits; - typedef GraphTraits<Inverse<BlockT *> > InvBlockGTraits; - //typedef BlockGTraits::succ_iterator InstructionIterator; - typedef typename BlockT::iterator InstrIterator; - - typedef CFGStructTraits<PassT> CFGTraits; - typedef BlockInformation<InstrT> BlockInfo; - typedef std::map<BlockT *, BlockInfo *> BlockInfoMap; - - typedef int RegiT; - typedef typename PassT::LoopType LoopT; - typedef LandInformation<BlockT, InstrT, RegiT> LoopLandInfo; - typedef std::map<LoopT *, LoopLandInfo *> LoopLandInfoMap; - //landing info for loop break - typedef SmallVector<BlockT *, 32> BlockTSmallerVector; + static char ID; -public: - CFGStructurizer(); - ~CFGStructurizer(); + AMDGPUCFGStructurizer(TargetMachine &tm) : + MachineFunctionPass(ID), TM(tm), + TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())), + TRI(&TII->getRegisterInfo()) { } + + const char *getPassName() const { + return "AMD IL Control Flow Graph structurizer Pass"; + } + + void getAnalysisUsage(AnalysisUsage &AU) const { + AU.addPreserved<MachineFunctionAnalysis>(); + AU.addRequired<MachineFunctionAnalysis>(); + AU.addRequired<MachineDominatorTree>(); + AU.addRequired<MachinePostDominatorTree>(); + AU.addRequired<MachineLoopInfo>(); + } /// Perform the CFG structurization - bool run(FuncT &Func, PassT &Pass, const AMDGPURegisterInfo *tri); + bool run(); /// Perform the CFG preparation - bool prepare(FuncT &Func, PassT &Pass, const AMDGPURegisterInfo *tri); + /// This step will remove every unconditionnal/dead jump instructions and make + /// sure all loops have an exit block + bool prepare(); + + bool runOnMachineFunction(MachineFunction &MF) { + DEBUG(MF.dump();); + OrderedBlks.clear(); + FuncRep = &MF; + MLI = &getAnalysis<MachineLoopInfo>(); + DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI);); + MDT = &getAnalysis<MachineDominatorTree>(); + DEBUG(MDT->print(dbgs(), (const llvm::Module*)0);); + PDT = &getAnalysis<MachinePostDominatorTree>(); + DEBUG(PDT->print(dbgs());); + prepare(); + run(); + DEBUG(MF.dump();); + return true; + } -private: - void reversePredicateSetter(typename BlockT::iterator); - void orderBlocks(); - void printOrderedBlocks(llvm::raw_ostream &OS); - int patternMatch(BlockT *CurBlock); - int patternMatchGroup(BlockT *CurBlock); - - int serialPatternMatch(BlockT *CurBlock); - int ifPatternMatch(BlockT *CurBlock); - int switchPatternMatch(BlockT *CurBlock); - int loopendPatternMatch(BlockT *CurBlock); - int loopPatternMatch(BlockT *CurBlock); - - int loopbreakPatternMatch(LoopT *LoopRep, BlockT 
*LoopHeader); - int loopcontPatternMatch(LoopT *LoopRep, BlockT *LoopHeader); - //int loopWithoutBreak(BlockT *); - - void handleLoopbreak (BlockT *ExitingBlock, LoopT *ExitingLoop, - BlockT *ExitBlock, LoopT *exitLoop, BlockT *landBlock); - void handleLoopcontBlock(BlockT *ContingBlock, LoopT *contingLoop, - BlockT *ContBlock, LoopT *contLoop); - bool isSameloopDetachedContbreak(BlockT *Src1Block, BlockT *Src2Block); - int handleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock, - BlockT *FalseBlock); - int handleJumpintoIfImp(BlockT *HeadBlock, BlockT *TrueBlock, - BlockT *FalseBlock); - int improveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock, - BlockT *FalseBlock, BlockT **LandBlockPtr); - void showImproveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock, - BlockT *FalseBlock, BlockT *LandBlock, - bool Detail = false); - PathToKind singlePathTo(BlockT *SrcBlock, BlockT *DstBlock, - bool AllowSideEntry = true); - BlockT *singlePathEnd(BlockT *srcBlock, BlockT *DstBlock, - bool AllowSideEntry = true); - int cloneOnSideEntryTo(BlockT *PreBlock, BlockT *SrcBlock, BlockT *DstBlock); - void mergeSerialBlock(BlockT *DstBlock, BlockT *srcBlock); - - void mergeIfthenelseBlock(InstrT *BranchInstr, BlockT *CurBlock, - BlockT *TrueBlock, BlockT *FalseBlock, - BlockT *LandBlock); - void mergeLooplandBlock(BlockT *DstBlock, LoopLandInfo *LoopLand); - void mergeLoopbreakBlock(BlockT *ExitingBlock, BlockT *ExitBlock, - BlockT *ExitLandBlock, RegiT SetReg); - void settleLoopcontBlock(BlockT *ContingBlock, BlockT *ContBlock, - RegiT SetReg); - BlockT *relocateLoopcontBlock(LoopT *ParentLoopRep, LoopT *LoopRep, - std::set<BlockT*> &ExitBlockSet, - BlockT *ExitLandBlk); - BlockT *addLoopEndbranchBlock(LoopT *LoopRep, - BlockTSmallerVector &ExitingBlocks, - BlockTSmallerVector &ExitBlocks); - BlockT *normalizeInfiniteLoopExit(LoopT *LoopRep); - void removeUnconditionalBranch(BlockT *SrcBlock); - void removeRedundantConditionalBranch(BlockT *SrcBlock); - void addDummyExitBlock(SmallVector<BlockT *, DEFAULT_VEC_SLOTS> &RetBlocks); - - void removeSuccessor(BlockT *SrcBlock); - BlockT *cloneBlockForPredecessor(BlockT *CurBlock, BlockT *PredBlock); - BlockT *exitingBlock2ExitBlock (LoopT *LoopRep, BlockT *exitingBlock); - - void migrateInstruction(BlockT *SrcBlock, BlockT *DstBlock, - InstrIterator InsertPos); - - void recordSccnum(BlockT *SrcBlock, int SCCNum); - int getSCCNum(BlockT *srcBlk); - - void retireBlock(BlockT *DstBlock, BlockT *SrcBlock); - bool isRetiredBlock(BlockT *SrcBlock); - bool isActiveLoophead(BlockT *CurBlock); - bool needMigrateBlock(BlockT *Block); - - BlockT *recordLoopLandBlock(LoopT *LoopRep, BlockT *LandBlock, - BlockTSmallerVector &exitBlocks, - std::set<BlockT*> &ExitBlockSet); - void setLoopLandBlock(LoopT *LoopRep, BlockT *Block = NULL); - BlockT *getLoopLandBlock(LoopT *LoopRep); - LoopLandInfo *getLoopLandInfo(LoopT *LoopRep); - - void addLoopBreakOnReg(LoopT *LoopRep, RegiT RegNum); - void addLoopContOnReg(LoopT *LoopRep, RegiT RegNum); - void addLoopBreakInitReg(LoopT *LoopRep, RegiT RegNum); - void addLoopContInitReg(LoopT *LoopRep, RegiT RegNum); - void addLoopEndbranchInitReg(LoopT *LoopRep, RegiT RegNum); - - bool hasBackEdge(BlockT *curBlock); - unsigned getLoopDepth (LoopT *LoopRep); - int countActiveBlock( - typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterStart, - typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterEnd); - BlockT *findNearestCommonPostDom(std::set<BlockT *>&); - BlockT *findNearestCommonPostDom(BlockT 
*Block1, BlockT *Block2); +protected: + TargetMachine &TM; + MachineDominatorTree *MDT; + MachinePostDominatorTree *PDT; + MachineLoopInfo *MLI; + const R600InstrInfo *TII; + const AMDGPURegisterInfo *TRI; + + // PRINT FUNCTIONS + /// Print the ordered Blocks. + void printOrderedBlocks() const { + size_t i = 0; + for (MBBVector::const_iterator iterBlk = OrderedBlks.begin(), + iterBlkEnd = OrderedBlks.end(); iterBlk != iterBlkEnd; ++iterBlk, ++i) { + dbgs() << "BB" << (*iterBlk)->getNumber(); + dbgs() << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")"; + if (i != 0 && i % 10 == 0) { + dbgs() << "\n"; + } else { + dbgs() << " "; + } + } + } + static void PrintLoopinfo(const MachineLoopInfo &LoopInfo) { + for (MachineLoop::iterator iter = LoopInfo.begin(), + iterEnd = LoopInfo.end(); iter != iterEnd; ++iter) { + (*iter)->print(dbgs(), 0); + } + } + + // UTILITY FUNCTIONS + int getSCCNum(MachineBasicBlock *MBB) const; + MachineBasicBlock *getLoopLandInfo(MachineLoop *LoopRep) const; + bool hasBackEdge(MachineBasicBlock *MBB) const; + static unsigned getLoopDepth(MachineLoop *LoopRep); + bool isRetiredBlock(MachineBasicBlock *MBB) const; + bool isActiveLoophead(MachineBasicBlock *MBB) const; + PathToKind singlePathTo(MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB, + bool AllowSideEntry = true) const; + int countActiveBlock(MBBVector::const_iterator It, + MBBVector::const_iterator E) const; + bool needMigrateBlock(MachineBasicBlock *MBB) const; + + // Utility Functions + void reversePredicateSetter(MachineBasicBlock::iterator I); + /// Compute the reversed DFS post order of Blocks + void orderBlocks(MachineFunction *MF); + + // Function originaly from CFGStructTraits + void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode, + DebugLoc DL = DebugLoc()); + MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode, + DebugLoc DL = DebugLoc()); + MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode); + void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode, + DebugLoc DL); + void insertCondBranchBefore(MachineBasicBlock *MBB, + MachineBasicBlock::iterator I, int NewOpcode, int RegNum, + DebugLoc DL); + void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum); + static int getBranchNzeroOpcode(int OldOpcode); + static int getBranchZeroOpcode(int OldOpcode); + static int getContinueNzeroOpcode(int OldOpcode); + static int getContinueZeroOpcode(int OldOpcode); + static MachineBasicBlock *getTrueBranch(MachineInstr *MI); + static void setTrueBranch(MachineInstr *MI, MachineBasicBlock *MBB); + static MachineBasicBlock *getFalseBranch(MachineBasicBlock *MBB, + MachineInstr *MI); + static bool isCondBranch(MachineInstr *MI); + static bool isUncondBranch(MachineInstr *MI); + static DebugLoc getLastDebugLocInBB(MachineBasicBlock *MBB); + static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *MBB); + /// The correct naming for this is getPossibleLoopendBlockBranchInstr. + /// + /// BB with backward-edge could have move instructions after the branch + /// instruction. Such move instruction "belong to" the loop backward-edge. 
+ MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *MBB); + static MachineInstr *getReturnInstr(MachineBasicBlock *MBB); + static MachineInstr *getContinueInstr(MachineBasicBlock *MBB); + static bool isReturnBlock(MachineBasicBlock *MBB); + static void cloneSuccessorList(MachineBasicBlock *DstMBB, + MachineBasicBlock *SrcMBB) ; + static MachineBasicBlock *clone(MachineBasicBlock *MBB); + /// MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose + /// because the AMDGPU instruction is not recognized as terminator fix this + /// and retire this routine + void replaceInstrUseOfBlockWith(MachineBasicBlock *SrcMBB, + MachineBasicBlock *OldMBB, MachineBasicBlock *NewBlk); + static void wrapup(MachineBasicBlock *MBB); + + + int patternMatch(MachineBasicBlock *MBB); + int patternMatchGroup(MachineBasicBlock *MBB); + int serialPatternMatch(MachineBasicBlock *MBB); + int ifPatternMatch(MachineBasicBlock *MBB); + int loopendPatternMatch(); + int mergeLoop(MachineLoop *LoopRep); + int loopcontPatternMatch(MachineLoop *LoopRep, MachineBasicBlock *LoopHeader); + + void handleLoopcontBlock(MachineBasicBlock *ContingMBB, + MachineLoop *ContingLoop, MachineBasicBlock *ContMBB, + MachineLoop *ContLoop); + /// return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in + /// the same loop with LoopLandInfo without explicitly keeping track of + /// loopContBlks and loopBreakBlks, this is a method to get the information. + bool isSameloopDetachedContbreak(MachineBasicBlock *Src1MBB, + MachineBasicBlock *Src2MBB); + int handleJumpintoIf(MachineBasicBlock *HeadMBB, + MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB); + int handleJumpintoIfImp(MachineBasicBlock *HeadMBB, + MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB); + int improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB, + MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, + MachineBasicBlock **LandMBBPtr); + void showImproveSimpleJumpintoIf(MachineBasicBlock *HeadMBB, + MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, + MachineBasicBlock *LandMBB, bool Detail = false); + int cloneOnSideEntryTo(MachineBasicBlock *PreMBB, + MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB); + void mergeSerialBlock(MachineBasicBlock *DstMBB, + MachineBasicBlock *SrcMBB); + + void mergeIfthenelseBlock(MachineInstr *BranchMI, + MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB, + MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB); + void mergeLooplandBlock(MachineBasicBlock *DstMBB, + MachineBasicBlock *LandMBB); + void mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB, + MachineBasicBlock *LandMBB); + void settleLoopcontBlock(MachineBasicBlock *ContingMBB, + MachineBasicBlock *ContMBB); + /// normalizeInfiniteLoopExit change + /// B1: + /// uncond_br LoopHeader + /// + /// to + /// B1: + /// cond_br 1 LoopHeader dummyExit + /// and return the newly added dummy exit block + MachineBasicBlock *normalizeInfiniteLoopExit(MachineLoop *LoopRep); + void removeUnconditionalBranch(MachineBasicBlock *MBB); + /// Remove duplicate branches instructions in a block. 
+ /// For instance + /// B0: + /// cond_br X B1 B2 + /// cond_br X B1 B2 + /// is transformed to + /// B0: + /// cond_br X B1 B2 + void removeRedundantConditionalBranch(MachineBasicBlock *MBB); + void addDummyExitBlock(SmallVectorImpl<MachineBasicBlock *> &RetMBB); + void removeSuccessor(MachineBasicBlock *MBB); + MachineBasicBlock *cloneBlockForPredecessor(MachineBasicBlock *MBB, + MachineBasicBlock *PredMBB); + void migrateInstruction(MachineBasicBlock *SrcMBB, + MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I); + void recordSccnum(MachineBasicBlock *MBB, int SCCNum); + void retireBlock(MachineBasicBlock *MBB); + void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = NULL); + + MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&); + /// This is work around solution for findNearestCommonDominator not avaiable + /// to post dom a proper fix should go to Dominators.h. + MachineBasicBlock *findNearestCommonPostDom(MachineBasicBlock *MBB1, + MachineBasicBlock *MBB2); private: - DomTreeT *domTree; - PostDomTreeT *postDomTree; - LoopInfoT *loopInfo; - PassT *passRep; - FuncT *funcRep; - - BlockInfoMap blockInfoMap; - LoopLandInfoMap loopLandInfoMap; - SmallVector<BlockT *, DEFAULT_VEC_SLOTS> orderedBlks; - const AMDGPURegisterInfo *TRI; + MBBInfoMap BlockInfoMap; + LoopLandInfoMap LLInfoMap; + std::map<MachineLoop *, bool> Visited; + MachineFunction *FuncRep; + SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> OrderedBlks; +}; -}; //template class CFGStructurizer +int AMDGPUCFGStructurizer::getSCCNum(MachineBasicBlock *MBB) const { + MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB); + if (It == BlockInfoMap.end()) + return INVALIDSCCNUM; + return (*It).second->SccNum; +} -template<class PassT> CFGStructurizer<PassT>::CFGStructurizer() - : domTree(NULL), postDomTree(NULL), loopInfo(NULL) { +MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep) + const { + LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep); + if (It == LLInfoMap.end()) + return NULL; + return (*It).second; } -template<class PassT> CFGStructurizer<PassT>::~CFGStructurizer() { - for (typename BlockInfoMap::iterator I = blockInfoMap.begin(), - E = blockInfoMap.end(); I != E; ++I) { - delete I->second; +bool AMDGPUCFGStructurizer::hasBackEdge(MachineBasicBlock *MBB) const { + MachineLoop *LoopRep = MLI->getLoopFor(MBB); + if (!LoopRep) + return false; + MachineBasicBlock *LoopHeader = LoopRep->getHeader(); + return MBB->isSuccessor(LoopHeader); +} + +unsigned AMDGPUCFGStructurizer::getLoopDepth(MachineLoop *LoopRep) { + return LoopRep ? 
LoopRep->getLoopDepth() : 0; +} + +bool AMDGPUCFGStructurizer::isRetiredBlock(MachineBasicBlock *MBB) const { + MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB); + if (It == BlockInfoMap.end()) + return false; + return (*It).second->IsRetired; +} + +bool AMDGPUCFGStructurizer::isActiveLoophead(MachineBasicBlock *MBB) const { + MachineLoop *LoopRep = MLI->getLoopFor(MBB); + while (LoopRep && LoopRep->getHeader() == MBB) { + MachineBasicBlock *LoopLand = getLoopLandInfo(LoopRep); + if(!LoopLand) + return true; + if (!isRetiredBlock(LoopLand)) + return true; + LoopRep = LoopRep->getParentLoop(); } + return false; +} +AMDGPUCFGStructurizer::PathToKind AMDGPUCFGStructurizer::singlePathTo( + MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB, + bool AllowSideEntry) const { + assert(DstMBB); + if (SrcMBB == DstMBB) + return SinglePath_InPath; + while (SrcMBB && SrcMBB->succ_size() == 1) { + SrcMBB = *SrcMBB->succ_begin(); + if (SrcMBB == DstMBB) + return SinglePath_InPath; + if (!AllowSideEntry && SrcMBB->pred_size() > 1) + return Not_SinglePath; + } + if (SrcMBB && SrcMBB->succ_size()==0) + return SinglePath_NotInPath; + return Not_SinglePath; } -template<class PassT> -bool CFGStructurizer<PassT>::prepare(FuncT &func, PassT &pass, - const AMDGPURegisterInfo * tri) { - passRep = &pass; - funcRep = &func; - TRI = tri; +int AMDGPUCFGStructurizer::countActiveBlock(MBBVector::const_iterator It, + MBBVector::const_iterator E) const { + int Count = 0; + while (It != E) { + if (!isRetiredBlock(*It)) + ++Count; + ++It; + } + return Count; +} - bool changed = false; +bool AMDGPUCFGStructurizer::needMigrateBlock(MachineBasicBlock *MBB) const { + unsigned BlockSizeThreshold = 30; + unsigned CloneInstrThreshold = 100; + bool MultiplePreds = MBB && (MBB->pred_size() > 1); - //FIXME: if not reducible flow graph, make it so ??? + if(!MultiplePreds) + return false; + unsigned BlkSize = MBB->size(); + return ((BlkSize > BlockSizeThreshold) && + (BlkSize * (MBB->pred_size() - 1) > CloneInstrThreshold)); +} - if (DEBUGME) { - errs() << "AMDGPUCFGStructurizer::prepare\n"; +void AMDGPUCFGStructurizer::reversePredicateSetter( + MachineBasicBlock::iterator I) { + while (I--) { + if (I->getOpcode() == AMDGPU::PRED_X) { + switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) { + case OPCODE_IS_ZERO_INT: + static_cast<MachineInstr *>(I)->getOperand(2) + .setImm(OPCODE_IS_NOT_ZERO_INT); + return; + case OPCODE_IS_NOT_ZERO_INT: + static_cast<MachineInstr *>(I)->getOperand(2) + .setImm(OPCODE_IS_ZERO_INT); + return; + case OPCODE_IS_ZERO: + static_cast<MachineInstr *>(I)->getOperand(2) + .setImm(OPCODE_IS_NOT_ZERO); + return; + case OPCODE_IS_NOT_ZERO: + static_cast<MachineInstr *>(I)->getOperand(2) + .setImm(OPCODE_IS_ZERO); + return; + default: + llvm_unreachable("PRED_X Opcode invalid!"); + } + } } +} + +void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB, + int NewOpcode, DebugLoc DL) { + MachineInstr *MI = MBB->getParent() + ->CreateMachineInstr(TII->get(NewOpcode), DL); + MBB->push_back(MI); + //assume the instruction doesn't take any reg operand ... 
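  // (CreateMachineInstr only allocates the bare MachineInstr; nothing
  // attaches operands here, hence the assumption above. Callers needing a
  // register operand use the insertCondBranch* helpers below, which append
  // one through MachineInstrBuilder, e.g.
  //   MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false); )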
+ SHOWNEWINSTR(MI); +} + +MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB, + int NewOpcode, DebugLoc DL) { + MachineInstr *MI = + MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL); + if (MBB->begin() != MBB->end()) + MBB->insert(MBB->begin(), MI); + else + MBB->push_back(MI); + SHOWNEWINSTR(MI); + return MI; +} + +MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore( + MachineBasicBlock::iterator I, int NewOpcode) { + MachineInstr *OldMI = &(*I); + MachineBasicBlock *MBB = OldMI->getParent(); + MachineInstr *NewMBB = + MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DebugLoc()); + MBB->insert(I, NewMBB); + //assume the instruction doesn't take any reg operand ... + SHOWNEWINSTR(NewMBB); + return NewMBB; +} - loopInfo = CFGTraits::getLoopInfo(pass); - if (DEBUGME) { - errs() << "LoopInfo:\n"; - PrintLoopinfo(*loopInfo, errs()); +void AMDGPUCFGStructurizer::insertCondBranchBefore( + MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) { + MachineInstr *OldMI = &(*I); + MachineBasicBlock *MBB = OldMI->getParent(); + MachineFunction *MF = MBB->getParent(); + MachineInstr *NewMI = MF->CreateMachineInstr(TII->get(NewOpcode), DL); + MBB->insert(I, NewMI); + MachineInstrBuilder MIB(*MF, NewMI); + MIB.addReg(OldMI->getOperand(1).getReg(), false); + SHOWNEWINSTR(NewMI); + //erase later oldInstr->eraseFromParent(); +} + +void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk, + MachineBasicBlock::iterator I, int NewOpcode, int RegNum, + DebugLoc DL) { + MachineFunction *MF = blk->getParent(); + MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL); + //insert before + blk->insert(I, NewInstr); + MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false); + SHOWNEWINSTR(NewInstr); +} + +void AMDGPUCFGStructurizer::insertCondBranchEnd(MachineBasicBlock *MBB, + int NewOpcode, int RegNum) { + MachineFunction *MF = MBB->getParent(); + MachineInstr *NewInstr = + MF->CreateMachineInstr(TII->get(NewOpcode), DebugLoc()); + MBB->push_back(NewInstr); + MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false); + SHOWNEWINSTR(NewInstr); +} + +int AMDGPUCFGStructurizer::getBranchNzeroOpcode(int OldOpcode) { + switch(OldOpcode) { + case AMDGPU::JUMP_COND: + case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET; + case AMDGPU::BRANCH_COND_i32: + case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32; + default: llvm_unreachable("internal error"); } + return -1; +} - orderBlocks(); - if (DEBUGME) { - errs() << "Ordered blocks:\n"; - printOrderedBlocks(errs()); +int AMDGPUCFGStructurizer::getBranchZeroOpcode(int OldOpcode) { + switch(OldOpcode) { + case AMDGPU::JUMP_COND: + case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET; + case AMDGPU::BRANCH_COND_i32: + case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32; + default: llvm_unreachable("internal error"); } + return -1; +} - SmallVector<BlockT *, DEFAULT_VEC_SLOTS> retBlks; - - for (typename LoopInfoT::iterator iter = loopInfo->begin(), - iterEnd = loopInfo->end(); - iter != iterEnd; ++iter) { - LoopT* loopRep = (*iter); - BlockTSmallerVector exitingBlks; - loopRep->getExitingBlocks(exitingBlks); - - if (exitingBlks.size() == 0) { - BlockT* dummyExitBlk = normalizeInfiniteLoopExit(loopRep); - if (dummyExitBlk != NULL) - retBlks.push_back(dummyExitBlk); - } +int AMDGPUCFGStructurizer::getContinueNzeroOpcode(int OldOpcode) { + switch(OldOpcode) { + case AMDGPU::JUMP_COND: + case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32; + default: 
llvm_unreachable("internal error"); + }; + return -1; +} + +int AMDGPUCFGStructurizer::getContinueZeroOpcode(int OldOpcode) { + switch(OldOpcode) { + case AMDGPU::JUMP_COND: + case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32; + default: llvm_unreachable("internal error"); } + return -1; +} - // Remove unconditional branch instr. - // Add dummy exit block iff there are multiple returns. +MachineBasicBlock *AMDGPUCFGStructurizer::getTrueBranch(MachineInstr *MI) { + return MI->getOperand(0).getMBB(); +} + +void AMDGPUCFGStructurizer::setTrueBranch(MachineInstr *MI, + MachineBasicBlock *MBB) { + MI->getOperand(0).setMBB(MBB); +} + +MachineBasicBlock * +AMDGPUCFGStructurizer::getFalseBranch(MachineBasicBlock *MBB, + MachineInstr *MI) { + assert(MBB->succ_size() == 2); + MachineBasicBlock *TrueBranch = getTrueBranch(MI); + MachineBasicBlock::succ_iterator It = MBB->succ_begin(); + MachineBasicBlock::succ_iterator Next = It; + ++Next; + return (*It == TrueBranch) ? *Next : *It; +} + +bool AMDGPUCFGStructurizer::isCondBranch(MachineInstr *MI) { + switch (MI->getOpcode()) { + case AMDGPU::JUMP_COND: + case AMDGPU::BRANCH_COND_i32: + case AMDGPU::BRANCH_COND_f32: return true; + default: + return false; + } + return false; +} + +bool AMDGPUCFGStructurizer::isUncondBranch(MachineInstr *MI) { + switch (MI->getOpcode()) { + case AMDGPU::JUMP: + case AMDGPU::BRANCH: + return true; + default: + return false; + } + return false; +} + +DebugLoc AMDGPUCFGStructurizer::getLastDebugLocInBB(MachineBasicBlock *MBB) { + //get DebugLoc from the first MachineBasicBlock instruction with debug info + DebugLoc DL; + for (MachineBasicBlock::iterator It = MBB->begin(); It != MBB->end(); + ++It) { + MachineInstr *instr = &(*It); + if (instr->getDebugLoc().isUnknown() == false) + DL = instr->getDebugLoc(); + } + return DL; +} + +MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr( + MachineBasicBlock *MBB) { + MachineBasicBlock::reverse_iterator It = MBB->rbegin(); + MachineInstr *MI = &*It; + if (MI && (isCondBranch(MI) || isUncondBranch(MI))) + return MI; + return NULL; +} - for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator - iterBlk = orderedBlks.begin(), iterEndBlk = orderedBlks.end(); - iterBlk != iterEndBlk; - ++iterBlk) { - BlockT *curBlk = *iterBlk; - removeUnconditionalBranch(curBlk); - removeRedundantConditionalBranch(curBlk); - if (CFGTraits::isReturnBlock(curBlk)) { - retBlks.push_back(curBlk); +MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr( + MachineBasicBlock *MBB) { + for (MachineBasicBlock::reverse_iterator It = MBB->rbegin(), E = MBB->rend(); + It != E; ++It) { + // FIXME: Simplify + MachineInstr *MI = &*It; + if (MI) { + if (isCondBranch(MI) || isUncondBranch(MI)) + return MI; + else if (!TII->isMov(MI->getOpcode())) + break; } - assert(curBlk->succ_size() <= 2); - } //for + } + return NULL; +} - if (retBlks.size() >= 2) { - addDummyExitBlock(retBlks); - changed = true; +MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) { + MachineBasicBlock::reverse_iterator It = MBB->rbegin(); + if (It != MBB->rend()) { + MachineInstr *instr = &(*It); + if (instr->getOpcode() == AMDGPU::RETURN) + return instr; } + return NULL; +} - return changed; -} //CFGStructurizer::prepare +MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) { + MachineBasicBlock::reverse_iterator It = MBB->rbegin(); + if (It != MBB->rend()) { + MachineInstr *MI = &(*It); + if (MI->getOpcode() == AMDGPU::CONTINUE) + return MI; + } + return 
NULL; +} -template<class PassT> -bool CFGStructurizer<PassT>::run(FuncT &func, PassT &pass, - const AMDGPURegisterInfo * tri) { - passRep = &pass; - funcRep = &func; - TRI = tri; +bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) { + MachineInstr *MI = getReturnInstr(MBB); + bool IsReturn = (MBB->succ_size() == 0); + if (MI) + assert(IsReturn); + else if (IsReturn) + DEBUG( + dbgs() << "BB" << MBB->getNumber() + <<" is return block without RETURN instr\n";); + return IsReturn; +} - //Assume reducible CFG... - if (DEBUGME) { - errs() << "AMDGPUCFGStructurizer::run\n"; - func.viewCFG(); +void AMDGPUCFGStructurizer::cloneSuccessorList(MachineBasicBlock *DstMBB, + MachineBasicBlock *SrcMBB) { + for (MachineBasicBlock::succ_iterator It = SrcMBB->succ_begin(), + iterEnd = SrcMBB->succ_end(); It != iterEnd; ++It) + DstMBB->addSuccessor(*It); // *iter's predecessor is also taken care of +} + +MachineBasicBlock *AMDGPUCFGStructurizer::clone(MachineBasicBlock *MBB) { + MachineFunction *Func = MBB->getParent(); + MachineBasicBlock *NewMBB = Func->CreateMachineBasicBlock(); + Func->push_back(NewMBB); //insert to function + for (MachineBasicBlock::iterator It = MBB->begin(), E = MBB->end(); + It != E; ++It) { + MachineInstr *MI = Func->CloneMachineInstr(It); + NewMBB->push_back(MI); } + return NewMBB; +} + +void AMDGPUCFGStructurizer::replaceInstrUseOfBlockWith( + MachineBasicBlock *SrcMBB, MachineBasicBlock *OldMBB, + MachineBasicBlock *NewBlk) { + MachineInstr *BranchMI = getLoopendBlockBranchInstr(SrcMBB); + if (BranchMI && isCondBranch(BranchMI) && + getTrueBranch(BranchMI) == OldMBB) + setTrueBranch(BranchMI, NewBlk); +} + +void AMDGPUCFGStructurizer::wrapup(MachineBasicBlock *MBB) { + assert((!MBB->getParent()->getJumpTableInfo() + || MBB->getParent()->getJumpTableInfo()->isEmpty()) + && "found a jump table"); + + //collect continue right before endloop + SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> ContInstr; + MachineBasicBlock::iterator Pre = MBB->begin(); + MachineBasicBlock::iterator E = MBB->end(); + MachineBasicBlock::iterator It = Pre; + while (It != E) { + if (Pre->getOpcode() == AMDGPU::CONTINUE + && It->getOpcode() == AMDGPU::ENDLOOP) + ContInstr.push_back(Pre); + Pre = It; + ++It; + } + + //delete continue right before endloop + for (unsigned i = 0; i < ContInstr.size(); ++i) + ContInstr[i]->eraseFromParent(); + + // TODO to fix up jump table so later phase won't be confused. if + // (jumpTableInfo->isEmpty() == false) { need to clean the jump table, but + // there isn't such an interface yet. alternatively, replace all the other + // blocks in the jump table with the entryBlk //} + +} + + +bool AMDGPUCFGStructurizer::prepare() { + bool Changed = false; - domTree = CFGTraits::getDominatorTree(pass); - if (DEBUGME) { - domTree->print(errs(), (const llvm::Module*)0); + //FIXME: if not reducible flow graph, make it so ??? 
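  // Reducibility remains a hard precondition rather than something this
  // pass establishes: if pattern matching stops making progress, run()
  // below dies with llvm_unreachable("IRREDUCIBL_CF"). The classic
  // irreducible shape, for reference, is a multi-entry cycle:
  //
  //   A -> B;  A -> C;  B -> C;  C -> B;   // B<->C entered from two sides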
+ + DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";); + + orderBlocks(FuncRep); + + SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> RetBlks; + + // Add an ExitBlk to loop that don't have one + for (MachineLoopInfo::iterator It = MLI->begin(), + E = MLI->end(); It != E; ++It) { + MachineLoop *LoopRep = (*It); + MBBVector ExitingMBBs; + LoopRep->getExitingBlocks(ExitingMBBs); + + if (ExitingMBBs.size() == 0) { + MachineBasicBlock* DummyExitBlk = normalizeInfiniteLoopExit(LoopRep); + if (DummyExitBlk) + RetBlks.push_back(DummyExitBlk); + } } - postDomTree = CFGTraits::getPostDominatorTree(pass); - if (DEBUGME) { - postDomTree->print(errs()); + // Remove unconditional branch instr. + // Add dummy exit block iff there are multiple returns. + for (SmallVectorImpl<MachineBasicBlock *>::const_iterator + It = OrderedBlks.begin(), E = OrderedBlks.end(); It != E; ++It) { + MachineBasicBlock *MBB = *It; + removeUnconditionalBranch(MBB); + removeRedundantConditionalBranch(MBB); + if (isReturnBlock(MBB)) { + RetBlks.push_back(MBB); + } + assert(MBB->succ_size() <= 2); } - loopInfo = CFGTraits::getLoopInfo(pass); - if (DEBUGME) { - errs() << "LoopInfo:\n"; - PrintLoopinfo(*loopInfo, errs()); + if (RetBlks.size() >= 2) { + addDummyExitBlock(RetBlks); + Changed = true; } - orderBlocks(); + return Changed; +} + +bool AMDGPUCFGStructurizer::run() { + + //Assume reducible CFG... + DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n";FuncRep->viewCFG();); + #ifdef STRESSTEST //Use the worse block ordering to test the algorithm. ReverseVector(orderedBlks); #endif - if (DEBUGME) { - errs() << "Ordered blocks:\n"; - printOrderedBlocks(errs()); - } - int numIter = 0; - bool finish = false; - BlockT *curBlk; - bool makeProgress = false; - int numRemainedBlk = countActiveBlock(orderedBlks.begin(), - orderedBlks.end()); + DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks();); + int NumIter = 0; + bool Finish = false; + MachineBasicBlock *MBB; + bool MakeProgress = false; + int NumRemainedBlk = countActiveBlock(OrderedBlks.begin(), + OrderedBlks.end()); do { - ++numIter; - if (DEBUGME) { - errs() << "numIter = " << numIter - << ", numRemaintedBlk = " << numRemainedBlk << "\n"; - } - - typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator - iterBlk = orderedBlks.begin(); - typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator - iterBlkEnd = orderedBlks.end(); - - typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator - sccBeginIter = iterBlk; - BlockT *sccBeginBlk = NULL; - int sccNumBlk = 0; // The number of active blocks, init to a + ++NumIter; + DEBUG( + dbgs() << "numIter = " << NumIter + << ", numRemaintedBlk = " << NumRemainedBlk << "\n"; + ); + + SmallVectorImpl<MachineBasicBlock *>::const_iterator It = + OrderedBlks.begin(); + SmallVectorImpl<MachineBasicBlock *>::const_iterator E = + OrderedBlks.end(); + + SmallVectorImpl<MachineBasicBlock *>::const_iterator SccBeginIter = + It; + MachineBasicBlock *SccBeginMBB = NULL; + int SccNumBlk = 0; // The number of active blocks, init to a // maximum possible number. - int sccNumIter; // Number of iteration in this SCC. - - while (iterBlk != iterBlkEnd) { - curBlk = *iterBlk; - - if (sccBeginBlk == NULL) { - sccBeginIter = iterBlk; - sccBeginBlk = curBlk; - sccNumIter = 0; - sccNumBlk = numRemainedBlk; // Init to maximum possible number. - if (DEBUGME) { - errs() << "start processing SCC" << getSCCNum(sccBeginBlk); - errs() << "\n"; - } + int SccNumIter; // Number of iteration in this SCC. 
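    // In brief: blocks are walked in the reverse-DFS/SCC order built by
    // orderBlocks(). While a pass over an SCC lowers its active-block
    // count, the iterator resets to the SCC's first block and the SCC is
    // reprocessed; once it stops shrinking (and is not yet a single
    // block), matching moves on. The outer do/while repeats for as long
    // as the function as a whole keeps losing active blocks.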
+ + while (It != E) { + MBB = *It; + + if (!SccBeginMBB) { + SccBeginIter = It; + SccBeginMBB = MBB; + SccNumIter = 0; + SccNumBlk = NumRemainedBlk; // Init to maximum possible number. + DEBUG( + dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB); + dbgs() << "\n"; + ); } - if (!isRetiredBlock(curBlk)) { - patternMatch(curBlk); - } + if (!isRetiredBlock(MBB)) + patternMatch(MBB); - ++iterBlk; + ++It; - bool contNextScc = true; - if (iterBlk == iterBlkEnd - || getSCCNum(sccBeginBlk) != getSCCNum(*iterBlk)) { + bool ContNextScc = true; + if (It == E + || getSCCNum(SccBeginMBB) != getSCCNum(*It)) { // Just finish one scc. - ++sccNumIter; - int sccRemainedNumBlk = countActiveBlock(sccBeginIter, iterBlk); - if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= sccNumBlk) { - if (DEBUGME) { - errs() << "Can't reduce SCC " << getSCCNum(curBlk) - << ", sccNumIter = " << sccNumIter; - errs() << "doesn't make any progress\n"; - } - contNextScc = true; - } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < sccNumBlk) { - sccNumBlk = sccRemainedNumBlk; - iterBlk = sccBeginIter; - contNextScc = false; - if (DEBUGME) { - errs() << "repeat processing SCC" << getSCCNum(curBlk) - << "sccNumIter = " << sccNumIter << "\n"; - func.viewCFG(); - } + ++SccNumIter; + int sccRemainedNumBlk = countActiveBlock(SccBeginIter, It); + if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= SccNumBlk) { + DEBUG( + dbgs() << "Can't reduce SCC " << getSCCNum(MBB) + << ", sccNumIter = " << SccNumIter; + dbgs() << "doesn't make any progress\n"; + ); + ContNextScc = true; + } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < SccNumBlk) { + SccNumBlk = sccRemainedNumBlk; + It = SccBeginIter; + ContNextScc = false; + DEBUG( + dbgs() << "repeat processing SCC" << getSCCNum(MBB) + << "sccNumIter = " << SccNumIter << "\n"; + FuncRep->viewCFG(); + ); } else { // Finish the current scc. - contNextScc = true; + ContNextScc = true; } } else { // Continue on next component in the current scc. - contNextScc = false; + ContNextScc = false; } - if (contNextScc) { - sccBeginBlk = NULL; - } + if (ContNextScc) + SccBeginMBB = NULL; } //while, "one iteration" over the function. - BlockT *entryBlk = FuncGTraits::nodes_begin(&func); - if (entryBlk->succ_size() == 0) { - finish = true; - if (DEBUGME) { - errs() << "Reduce to one block\n"; - } + MachineBasicBlock *EntryMBB = + GraphTraits<MachineFunction *>::nodes_begin(FuncRep); + if (EntryMBB->succ_size() == 0) { + Finish = true; + DEBUG( + dbgs() << "Reduce to one block\n"; + ); } else { - int newnumRemainedBlk - = countActiveBlock(orderedBlks.begin(), orderedBlks.end()); + int NewnumRemainedBlk + = countActiveBlock(OrderedBlks.begin(), OrderedBlks.end()); // consider cloned blocks ?? - if (newnumRemainedBlk == 1 || newnumRemainedBlk < numRemainedBlk) { - makeProgress = true; - numRemainedBlk = newnumRemainedBlk; + if (NewnumRemainedBlk == 1 || NewnumRemainedBlk < NumRemainedBlk) { + MakeProgress = true; + NumRemainedBlk = NewnumRemainedBlk; } else { - makeProgress = false; - if (DEBUGME) { - errs() << "No progress\n"; - } + MakeProgress = false; + DEBUG( + dbgs() << "No progress\n"; + ); } } - } while (!finish && makeProgress); + } while (!Finish && MakeProgress); // Misc wrap up to maintain the consistency of the Function representation. - CFGTraits::wrapup(FuncGTraits::nodes_begin(&func)); + wrapup(GraphTraits<MachineFunction *>::nodes_begin(FuncRep)); // Detach retired Block, release memory. 
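  // Two-phase deletion: while matching ran, merged blocks were only
  // flagged (BlockInformation::IsRetired, set by retireBlock()), keeping
  // the OrderedBlks iteration stable; the eraseFromParent() below is the
  // first point where they physically leave the function.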
- for (typename BlockInfoMap::iterator iterMap = blockInfoMap.begin(), - iterEndMap = blockInfoMap.end(); iterMap != iterEndMap; ++iterMap) { - if ((*iterMap).second && (*iterMap).second->isRetired) { - assert(((*iterMap).first)->getNumber() != -1); - if (DEBUGME) { - errs() << "Erase BB" << ((*iterMap).first)->getNumber() << "\n"; - } - (*iterMap).first->eraseFromParent(); //Remove from the parent Function. + for (MBBInfoMap::iterator It = BlockInfoMap.begin(), E = BlockInfoMap.end(); + It != E; ++It) { + if ((*It).second && (*It).second->IsRetired) { + assert(((*It).first)->getNumber() != -1); + DEBUG( + dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n"; + ); + (*It).first->eraseFromParent(); //Remove from the parent Function. } - delete (*iterMap).second; - } - blockInfoMap.clear(); - - // clear loopLandInfoMap - for (typename LoopLandInfoMap::iterator iterMap = loopLandInfoMap.begin(), - iterEndMap = loopLandInfoMap.end(); iterMap != iterEndMap; ++iterMap) { - delete (*iterMap).second; + delete (*It).second; } - loopLandInfoMap.clear(); + BlockInfoMap.clear(); + LLInfoMap.clear(); - if (DEBUGME) { - func.viewCFG(); - } + DEBUG( + FuncRep->viewCFG(); + ); - if (!finish) { - assert(!"IRREDUCIBL_CF"); - } + if (!Finish) + llvm_unreachable("IRREDUCIBL_CF"); return true; -} //CFGStructurizer::run - -/// Print the ordered Blocks. -/// -template<class PassT> -void CFGStructurizer<PassT>::printOrderedBlocks(llvm::raw_ostream &os) { - size_t i = 0; - for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator - iterBlk = orderedBlks.begin(), iterBlkEnd = orderedBlks.end(); - iterBlk != iterBlkEnd; - ++iterBlk, ++i) { - os << "BB" << (*iterBlk)->getNumber(); - os << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")"; - if (i != 0 && i % 10 == 0) { - os << "\n"; - } else { - os << " "; - } - } -} //printOrderedBlocks - -/// Compute the reversed DFS post order of Blocks -/// -template<class PassT> void CFGStructurizer<PassT>::orderBlocks() { - int sccNum = 0; - BlockT *bb; - for (scc_iterator<FuncT *> sccIter = scc_begin(funcRep), - sccEnd = scc_end(funcRep); sccIter != sccEnd; ++sccIter, ++sccNum) { - std::vector<BlockT *> &sccNext = *sccIter; - for (typename std::vector<BlockT *>::const_iterator - blockIter = sccNext.begin(), blockEnd = sccNext.end(); +} + + + +void AMDGPUCFGStructurizer::orderBlocks(MachineFunction *MF) { + int SccNum = 0; + MachineBasicBlock *MBB; + for (scc_iterator<MachineFunction *> It = scc_begin(MF), E = scc_end(MF); + It != E; ++It, ++SccNum) { + std::vector<MachineBasicBlock *> &SccNext = *It; + for (std::vector<MachineBasicBlock *>::const_iterator + blockIter = SccNext.begin(), blockEnd = SccNext.end(); blockIter != blockEnd; ++blockIter) { - bb = *blockIter; - orderedBlks.push_back(bb); - recordSccnum(bb, sccNum); + MBB = *blockIter; + OrderedBlks.push_back(MBB); + recordSccnum(MBB, SccNum); } } //walk through all the block in func to check for unreachable - for (BlockIterator blockIter1 = FuncGTraits::nodes_begin(funcRep), - blockEnd1 = FuncGTraits::nodes_end(funcRep); - blockIter1 != blockEnd1; ++blockIter1) { - BlockT *bb = &(*blockIter1); - sccNum = getSCCNum(bb); - if (sccNum == INVALIDSCCNUM) { - errs() << "unreachable block BB" << bb->getNumber() << "\n"; - } + typedef GraphTraits<MachineFunction *> GTM; + MachineFunction::iterator It = GTM::nodes_begin(MF), E = GTM::nodes_end(MF); + for (; It != E; ++It) { + MachineBasicBlock *MBB = &(*It); + SccNum = getSCCNum(MBB); + if (SccNum == INVALIDSCCNUM) + dbgs() << "unreachable block 
BB" << MBB->getNumber() << "\n"; } -} //orderBlocks +} -template<class PassT> int CFGStructurizer<PassT>::patternMatch(BlockT *curBlk) { - int numMatch = 0; - int curMatch; +int AMDGPUCFGStructurizer::patternMatch(MachineBasicBlock *MBB) { + int NumMatch = 0; + int CurMatch; - if (DEBUGME) { - errs() << "Begin patternMatch BB" << curBlk->getNumber() << "\n"; - } + DEBUG( + dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n"; + ); - while ((curMatch = patternMatchGroup(curBlk)) > 0) { - numMatch += curMatch; - } + while ((CurMatch = patternMatchGroup(MBB)) > 0) + NumMatch += CurMatch; + + DEBUG( + dbgs() << "End patternMatch BB" << MBB->getNumber() + << ", numMatch = " << NumMatch << "\n"; + ); + + return NumMatch; +} + +int AMDGPUCFGStructurizer::patternMatchGroup(MachineBasicBlock *MBB) { + int NumMatch = 0; + NumMatch += loopendPatternMatch(); + NumMatch += serialPatternMatch(MBB); + NumMatch += ifPatternMatch(MBB); + return NumMatch; +} - if (DEBUGME) { - errs() << "End patternMatch BB" << curBlk->getNumber() - << ", numMatch = " << numMatch << "\n"; - } - return numMatch; -} //patternMatch - -template<class PassT> -int CFGStructurizer<PassT>::patternMatchGroup(BlockT *curBlk) { - int numMatch = 0; - numMatch += serialPatternMatch(curBlk); - numMatch += ifPatternMatch(curBlk); - numMatch += loopendPatternMatch(curBlk); - numMatch += loopPatternMatch(curBlk); - return numMatch; -}//patternMatchGroup - -template<class PassT> -int CFGStructurizer<PassT>::serialPatternMatch(BlockT *curBlk) { - if (curBlk->succ_size() != 1) { +int AMDGPUCFGStructurizer::serialPatternMatch(MachineBasicBlock *MBB) { + if (MBB->succ_size() != 1) return 0; - } - BlockT *childBlk = *curBlk->succ_begin(); - if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk)) { + MachineBasicBlock *childBlk = *MBB->succ_begin(); + if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk)) return 0; - } - mergeSerialBlock(curBlk, childBlk); + mergeSerialBlock(MBB, childBlk); ++numSerialPatternMatch; return 1; -} //serialPatternMatch +} -template<class PassT> -int CFGStructurizer<PassT>::ifPatternMatch(BlockT *curBlk) { +int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) { //two edges - if (curBlk->succ_size() != 2) { + if (MBB->succ_size() != 2) return 0; - } - - if (hasBackEdge(curBlk)) { + if (hasBackEdge(MBB)) return 0; - } - - InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(curBlk); - if (branchInstr == NULL) { + MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB); + if (!BranchMI) return 0; - } - assert(CFGTraits::isCondBranch(branchInstr)); + assert(isCondBranch(BranchMI)); - BlockT *trueBlk = CFGTraits::getTrueBranch(branchInstr); - BlockT *falseBlk = CFGTraits::getFalseBranch(curBlk, branchInstr); - BlockT *landBlk; - int cloned = 0; + MachineBasicBlock *TrueMBB = getTrueBranch(BranchMI); + serialPatternMatch(TrueMBB); + ifPatternMatch(TrueMBB); + MachineBasicBlock *FalseMBB = getFalseBranch(MBB, BranchMI); + serialPatternMatch(FalseMBB); + ifPatternMatch(FalseMBB); + MachineBasicBlock *LandBlk; + int Cloned = 0; + assert (!TrueMBB->succ_empty() || !FalseMBB->succ_empty()); // TODO: Simplify - if (trueBlk->succ_size() == 1 && falseBlk->succ_size() == 1 - && *trueBlk->succ_begin() == *falseBlk->succ_begin()) { - landBlk = *trueBlk->succ_begin(); - } else if (trueBlk->succ_size() == 0 && falseBlk->succ_size() == 0) { - landBlk = NULL; - } else if (trueBlk->succ_size() == 1 && *trueBlk->succ_begin() == falseBlk) { - landBlk = falseBlk; - falseBlk = NULL; - } else if 
(falseBlk->succ_size() == 1 - && *falseBlk->succ_begin() == trueBlk) { - landBlk = trueBlk; - trueBlk = NULL; - } else if (falseBlk->succ_size() == 1 - && isSameloopDetachedContbreak(trueBlk, falseBlk)) { - landBlk = *falseBlk->succ_begin(); - } else if (trueBlk->succ_size() == 1 - && isSameloopDetachedContbreak(falseBlk, trueBlk)) { - landBlk = *trueBlk->succ_begin(); + if (TrueMBB->succ_size() == 1 && FalseMBB->succ_size() == 1 + && *TrueMBB->succ_begin() == *FalseMBB->succ_begin()) { + // Diamond pattern + LandBlk = *TrueMBB->succ_begin(); + } else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) { + // Triangle pattern, false is empty + LandBlk = FalseMBB; + FalseMBB = NULL; + } else if (FalseMBB->succ_size() == 1 + && *FalseMBB->succ_begin() == TrueMBB) { + // Triangle pattern, true is empty + // We reverse the predicate to make a triangle, empty false pattern; + std::swap(TrueMBB, FalseMBB); + reversePredicateSetter(MBB->end()); + LandBlk = FalseMBB; + FalseMBB = NULL; + } else if (FalseMBB->succ_size() == 1 + && isSameloopDetachedContbreak(TrueMBB, FalseMBB)) { + LandBlk = *FalseMBB->succ_begin(); + } else if (TrueMBB->succ_size() == 1 + && isSameloopDetachedContbreak(FalseMBB, TrueMBB)) { + LandBlk = *TrueMBB->succ_begin(); } else { - return handleJumpintoIf(curBlk, trueBlk, falseBlk); + return handleJumpintoIf(MBB, TrueMBB, FalseMBB); } // improveSimpleJumpinfoIf can handle the case where landBlk == NULL but the // new BB created for landBlk==NULL may introduce new challenge to the // reduction process. - if (landBlk != NULL && - ((trueBlk && trueBlk->pred_size() > 1) - || (falseBlk && falseBlk->pred_size() > 1))) { - cloned += improveSimpleJumpintoIf(curBlk, trueBlk, falseBlk, &landBlk); + if (LandBlk && + ((TrueMBB && TrueMBB->pred_size() > 1) + || (FalseMBB && FalseMBB->pred_size() > 1))) { + Cloned += improveSimpleJumpintoIf(MBB, TrueMBB, FalseMBB, &LandBlk); } - if (trueBlk && trueBlk->pred_size() > 1) { - trueBlk = cloneBlockForPredecessor(trueBlk, curBlk); - ++cloned; + if (TrueMBB && TrueMBB->pred_size() > 1) { + TrueMBB = cloneBlockForPredecessor(TrueMBB, MBB); + ++Cloned; } - if (falseBlk && falseBlk->pred_size() > 1) { - falseBlk = cloneBlockForPredecessor(falseBlk, curBlk); - ++cloned; + if (FalseMBB && FalseMBB->pred_size() > 1) { + FalseMBB = cloneBlockForPredecessor(FalseMBB, MBB); + ++Cloned; } - mergeIfthenelseBlock(branchInstr, curBlk, trueBlk, falseBlk, landBlk); + mergeIfthenelseBlock(BranchMI, MBB, TrueMBB, FalseMBB, LandBlk); ++numIfPatternMatch; - numClonedBlock += cloned; - - return 1 + cloned; -} //ifPatternMatch + numClonedBlock += Cloned; -template<class PassT> -int CFGStructurizer<PassT>::switchPatternMatch(BlockT *curBlk) { - return 0; -} //switchPatternMatch + return 1 + Cloned; +} -template<class PassT> -int CFGStructurizer<PassT>::loopendPatternMatch(BlockT *curBlk) { - LoopT *loopRep = loopInfo->getLoopFor(curBlk); - typename std::vector<LoopT *> nestedLoops; - while (loopRep) { - nestedLoops.push_back(loopRep); - loopRep = loopRep->getParentLoop(); +int AMDGPUCFGStructurizer::loopendPatternMatch() { + std::vector<MachineLoop *> NestedLoops; + for (MachineLoopInfo::iterator It = MLI->begin(), E = MLI->end(); + It != E; ++It) { + df_iterator<MachineLoop *> LpIt = df_begin(*It), + LpE = df_end(*It); + for (; LpIt != LpE; ++LpIt) + NestedLoops.push_back(*LpIt); } - - if (nestedLoops.size() == 0) { + if (NestedLoops.size() == 0) return 0; - } // Process nested loop outside->inside, so "continue" to a outside loop won't // be 
mistaken as "break" of the current loop. - int num = 0; - for (typename std::vector<LoopT *>::reverse_iterator - iter = nestedLoops.rbegin(), iterEnd = nestedLoops.rend(); - iter != iterEnd; ++iter) { - loopRep = *iter; - - if (getLoopLandBlock(loopRep) != NULL) { + int Num = 0; + for (std::vector<MachineLoop *>::reverse_iterator It = NestedLoops.rbegin(), + E = NestedLoops.rend(); It != E; ++It) { + MachineLoop *ExaminedLoop = *It; + if (ExaminedLoop->getNumBlocks() == 0 || Visited[ExaminedLoop]) continue; - } - - BlockT *loopHeader = loopRep->getHeader(); - - int numBreak = loopbreakPatternMatch(loopRep, loopHeader); - - if (numBreak == -1) { + DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump();); + int NumBreak = mergeLoop(ExaminedLoop); + if (NumBreak == -1) break; - } - - int numCont = loopcontPatternMatch(loopRep, loopHeader); - num += numBreak + numCont; - } - - return num; -} //loopendPatternMatch - -template<class PassT> -int CFGStructurizer<PassT>::loopPatternMatch(BlockT *curBlk) { - if (curBlk->succ_size() != 0) { - return 0; + Num += NumBreak; } + return Num; +} - int numLoop = 0; - LoopT *loopRep = loopInfo->getLoopFor(curBlk); - while (loopRep && loopRep->getHeader() == curBlk) { - LoopLandInfo *loopLand = getLoopLandInfo(loopRep); - if (loopLand) { - BlockT *landBlk = loopLand->landBlk; - assert(landBlk); - if (!isRetiredBlock(landBlk)) { - mergeLooplandBlock(curBlk, loopLand); - ++numLoop; - } - } - loopRep = loopRep->getParentLoop(); - } - - numLoopPatternMatch += numLoop; - - return numLoop; -} //loopPatternMatch - -template<class PassT> -int CFGStructurizer<PassT>::loopbreakPatternMatch(LoopT *loopRep, - BlockT *loopHeader) { - BlockTSmallerVector exitingBlks; - loopRep->getExitingBlocks(exitingBlks); - - if (DEBUGME) { - errs() << "Loop has " << exitingBlks.size() << " exiting blocks\n"; - } - - if (exitingBlks.size() == 0) { - setLoopLandBlock(loopRep); - return 0; - } - - // Compute the corresponding exitBlks and exit block set. - BlockTSmallerVector exitBlks; - std::set<BlockT *> exitBlkSet; - for (typename BlockTSmallerVector::const_iterator iter = exitingBlks.begin(), - iterEnd = exitingBlks.end(); iter != iterEnd; ++iter) { - BlockT *exitingBlk = *iter; - BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk); - exitBlks.push_back(exitBlk); - exitBlkSet.insert(exitBlk); //non-duplicate insert - } - - assert(exitBlkSet.size() > 0); - assert(exitBlks.size() == exitingBlks.size()); - - if (DEBUGME) { - errs() << "Loop has " << exitBlkSet.size() << " exit blocks\n"; - } - - // Find exitLandBlk. 
- BlockT *exitLandBlk = NULL; - int numCloned = 0; - int numSerial = 0; - - if (exitBlkSet.size() == 1) { - exitLandBlk = *exitBlkSet.begin(); - } else { - exitLandBlk = findNearestCommonPostDom(exitBlkSet); - - if (exitLandBlk == NULL) { - return -1; - } - - bool allInPath = true; - bool allNotInPath = true; - for (typename std::set<BlockT*>::const_iterator - iter = exitBlkSet.begin(), - iterEnd = exitBlkSet.end(); - iter != iterEnd; ++iter) { - BlockT *exitBlk = *iter; - - PathToKind pathKind = singlePathTo(exitBlk, exitLandBlk, true); - if (DEBUGME) { - errs() << "BB" << exitBlk->getNumber() - << " to BB" << exitLandBlk->getNumber() << " PathToKind=" - << pathKind << "\n"; - } - - allInPath = allInPath && (pathKind == SinglePath_InPath); - allNotInPath = allNotInPath && (pathKind == SinglePath_NotInPath); - - if (!allInPath && !allNotInPath) { - if (DEBUGME) { - errs() << "singlePath check fail\n"; - } - return -1; - } - } // check all exit blocks - - if (allNotInPath) { - - // TODO: Simplify, maybe separate function? - LoopT *parentLoopRep = loopRep->getParentLoop(); - BlockT *parentLoopHeader = NULL; - if (parentLoopRep) - parentLoopHeader = parentLoopRep->getHeader(); - - if (exitLandBlk == parentLoopHeader && - (exitLandBlk = relocateLoopcontBlock(parentLoopRep, - loopRep, - exitBlkSet, - exitLandBlk)) != NULL) { - if (DEBUGME) { - errs() << "relocateLoopcontBlock success\n"; - } - } else if ((exitLandBlk = addLoopEndbranchBlock(loopRep, - exitingBlks, - exitBlks)) != NULL) { - if (DEBUGME) { - errs() << "insertEndbranchBlock success\n"; - } - } else { - if (DEBUGME) { - errs() << "loop exit fail\n"; - } - return -1; - } - } - - // Handle side entry to exit path. - exitBlks.clear(); - exitBlkSet.clear(); - for (typename BlockTSmallerVector::iterator iterExiting = - exitingBlks.begin(), - iterExitingEnd = exitingBlks.end(); - iterExiting != iterExitingEnd; ++iterExiting) { - BlockT *exitingBlk = *iterExiting; - BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk); - BlockT *newExitBlk = exitBlk; - - if (exitBlk != exitLandBlk && exitBlk->pred_size() > 1) { - newExitBlk = cloneBlockForPredecessor(exitBlk, exitingBlk); - ++numCloned; - } - - numCloned += cloneOnSideEntryTo(exitingBlk, newExitBlk, exitLandBlk); - - exitBlks.push_back(newExitBlk); - exitBlkSet.insert(newExitBlk); - } - - for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(), - iterExitEnd = exitBlks.end(); - iterExit != iterExitEnd; ++iterExit) { - BlockT *exitBlk = *iterExit; - numSerial += serialPatternMatch(exitBlk); - } - - for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(), - iterExitEnd = exitBlks.end(); - iterExit != iterExitEnd; ++iterExit) { - BlockT *exitBlk = *iterExit; - if (exitBlk->pred_size() > 1) { - if (exitBlk != exitLandBlk) { - return -1; - } - } else { - if (exitBlk != exitLandBlk && - (exitBlk->succ_size() != 1 || - *exitBlk->succ_begin() != exitLandBlk)) { - return -1; - } - } - } - } // else - - exitLandBlk = recordLoopLandBlock(loopRep, exitLandBlk, exitBlks, exitBlkSet); - - // Fold break into the breaking block. Leverage across level breaks. 
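  // Perspective on this deletion: everything above (post-dominator landing
  // blocks, relocateLoopcontBlock, addLoopEndbranchBlock) served loops with
  // several distinct exit blocks. Its successor, mergeLoop() below, simply
  // asserts the single-exit form:
  //
  //   LoopRep->getExitBlocks(ExitBlks);
  //   ...
  //   assert(ExitBlkSet.size() == 1);
  //
  // presumably counting on the IR-level FlattenCFG/StructurizeCFG passes
  // added earlier in this patch to have normalized the CFG by this point.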
- assert(exitingBlks.size() == exitBlks.size()); - for (typename BlockTSmallerVector::const_iterator iterExit = exitBlks.begin(), - iterExiting = exitingBlks.begin(), iterExitEnd = exitBlks.end(); - iterExit != iterExitEnd; ++iterExit, ++iterExiting) { - BlockT *exitBlk = *iterExit; - BlockT *exitingBlk = *iterExiting; - assert(exitBlk->pred_size() == 1 || exitBlk == exitLandBlk); - LoopT *exitingLoop = loopInfo->getLoopFor(exitingBlk); - handleLoopbreak(exitingBlk, exitingLoop, exitBlk, loopRep, exitLandBlk); - } +int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) { + MachineBasicBlock *LoopHeader = LoopRep->getHeader(); + MBBVector ExitingMBBs; + LoopRep->getExitingBlocks(ExitingMBBs); + assert(!ExitingMBBs.empty() && "Infinite Loop not supported"); + DEBUG(dbgs() << "Loop has " << ExitingMBBs.size() << " exiting blocks\n";); + // We assume a single ExitBlk + MBBVector ExitBlks; + LoopRep->getExitBlocks(ExitBlks); + SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet; + for (unsigned i = 0, e = ExitBlks.size(); i < e; ++i) + ExitBlkSet.insert(ExitBlks[i]); + assert(ExitBlkSet.size() == 1); + MachineBasicBlock *ExitBlk = *ExitBlks.begin(); + assert(ExitBlk && "Loop has several exit blocks"); + MBBVector LatchBlks; + typedef GraphTraits<Inverse<MachineBasicBlock*> > InvMBBTraits; + InvMBBTraits::ChildIteratorType PI = InvMBBTraits::child_begin(LoopHeader), + PE = InvMBBTraits::child_end(LoopHeader); + for (; PI != PE; PI++) { + if (LoopRep->contains(*PI)) + LatchBlks.push_back(*PI); + } + + for (unsigned i = 0, e = ExitingMBBs.size(); i < e; ++i) + mergeLoopbreakBlock(ExitingMBBs[i], ExitBlk); + for (unsigned i = 0, e = LatchBlks.size(); i < e; ++i) + settleLoopcontBlock(LatchBlks[i], LoopHeader); + int Match = 0; + do { + Match = 0; + Match += serialPatternMatch(LoopHeader); + Match += ifPatternMatch(LoopHeader); + } while (Match > 0); + mergeLooplandBlock(LoopHeader, ExitBlk); + MachineLoop *ParentLoop = LoopRep->getParentLoop(); + if (ParentLoop) + MLI->changeLoopFor(LoopHeader, ParentLoop); + else + MLI->removeBlock(LoopHeader); + Visited[LoopRep] = true; + return 1; +} - int numBreak = static_cast<int>(exitingBlks.size()); - numLoopbreakPatternMatch += numBreak; - numClonedBlock += numCloned; - return numBreak + numSerial + numCloned; -} //loopbreakPatternMatch - -template<class PassT> -int CFGStructurizer<PassT>::loopcontPatternMatch(LoopT *loopRep, - BlockT *loopHeader) { - int numCont = 0; - SmallVector<BlockT *, DEFAULT_VEC_SLOTS> contBlk; - for (typename InvBlockGTraits::ChildIteratorType iter = - InvBlockGTraits::child_begin(loopHeader), - iterEnd = InvBlockGTraits::child_end(loopHeader); - iter != iterEnd; ++iter) { - BlockT *curBlk = *iter; - if (loopRep->contains(curBlk)) { - handleLoopcontBlock(curBlk, loopInfo->getLoopFor(curBlk), - loopHeader, loopRep); - contBlk.push_back(curBlk); - ++numCont; +int AMDGPUCFGStructurizer::loopcontPatternMatch(MachineLoop *LoopRep, + MachineBasicBlock *LoopHeader) { + int NumCont = 0; + SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> ContMBB; + typedef GraphTraits<Inverse<MachineBasicBlock *> > GTIM; + GTIM::ChildIteratorType It = GTIM::child_begin(LoopHeader), + E = GTIM::child_end(LoopHeader); + for (; It != E; ++It) { + MachineBasicBlock *MBB = *It; + if (LoopRep->contains(MBB)) { + handleLoopcontBlock(MBB, MLI->getLoopFor(MBB), + LoopHeader, LoopRep); + ContMBB.push_back(MBB); + ++NumCont; } } - for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator - iter = contBlk.begin(), iterEnd = contBlk.end(); - iter != iterEnd;
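mergeLoop above folds the break and continue edges first, then reapplies the serial and if matchers on the header until a full pass makes no progress. The fixpoint idiom in isolation, with the two matchers stubbed as callbacks (names and callbacks are placeholders, not the pass's API):

#include <functional>
#include <vector>

// Apply every matcher repeatedly until a whole pass merges nothing; each
// callback returns how many patterns it folded, standing in for
// serialPatternMatch and ifPatternMatch on the loop header.
int matchUntilFixpoint(const std::vector<std::function<int()> > &Matchers) {
  int Total = 0;
  int Match;
  do {
    Match = 0;
    for (const std::function<int()> &M : Matchers)
      Match += M();
    Total += Match;
  } while (Match > 0);
  return Total;
}

int main() {
  int Budget = 3; // pretend three patterns are foldable in total
  std::vector<std::function<int()> > Matchers;
  Matchers.push_back([&Budget]() { return Budget > 0 ? (--Budget, 1) : 0; });
  return matchUntilFixpoint(Matchers) == 3 ? 0 : 1;
}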
++iter) { - (*iter)->removeSuccessor(loopHeader); + for (SmallVectorImpl<MachineBasicBlock *>::iterator It = ContMBB.begin(), + E = ContMBB.end(); It != E; ++It) { + (*It)->removeSuccessor(LoopHeader); } - numLoopcontPatternMatch += numCont; + numLoopcontPatternMatch += NumCont; - return numCont; -} //loopcontPatternMatch + return NumCont; +} -template<class PassT> -bool CFGStructurizer<PassT>::isSameloopDetachedContbreak(BlockT *src1Blk, - BlockT *src2Blk) { - // return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in the - // same loop with LoopLandInfo without explicitly keeping track of - // loopContBlks and loopBreakBlks, this is a method to get the information. - // - if (src1Blk->succ_size() == 0) { - LoopT *loopRep = loopInfo->getLoopFor(src1Blk); - if (loopRep != NULL && loopRep == loopInfo->getLoopFor(src2Blk)) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - if (theEntry != NULL) { - if (DEBUGME) { - errs() << "isLoopContBreakBlock yes src1 = BB" - << src1Blk->getNumber() - << " src2 = BB" << src2Blk->getNumber() << "\n"; - } +bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak( + MachineBasicBlock *Src1MBB, MachineBasicBlock *Src2MBB) { + if (Src1MBB->succ_size() == 0) { + MachineLoop *LoopRep = MLI->getLoopFor(Src1MBB); + if (LoopRep && LoopRep == MLI->getLoopFor(Src2MBB)) { + MachineBasicBlock *&TheEntry = LLInfoMap[LoopRep]; + if (TheEntry) { + DEBUG( + dbgs() << "isLoopContBreakBlock yes src1 = BB" + << Src1MBB->getNumber() + << " src2 = BB" << Src2MBB->getNumber() << "\n"; + ); return true; } } } return false; -} //isSameloopDetachedContbreak - -template<class PassT> -int CFGStructurizer<PassT>::handleJumpintoIf(BlockT *headBlk, - BlockT *trueBlk, - BlockT *falseBlk) { - int num = handleJumpintoIfImp(headBlk, trueBlk, falseBlk); - if (num == 0) { - if (DEBUGME) { - errs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n"; - } - num = handleJumpintoIfImp(headBlk, falseBlk, trueBlk); - } - return num; } -template<class PassT> -int CFGStructurizer<PassT>::handleJumpintoIfImp(BlockT *headBlk, - BlockT *trueBlk, - BlockT *falseBlk) { - int num = 0; - BlockT *downBlk; - - //trueBlk could be the common post dominator - downBlk = trueBlk; - - if (DEBUGME) { - errs() << "handleJumpintoIfImp head = BB" << headBlk->getNumber() - << " true = BB" << trueBlk->getNumber() - << ", numSucc=" << trueBlk->succ_size() - << " false = BB" << falseBlk->getNumber() << "\n"; +int AMDGPUCFGStructurizer::handleJumpintoIf(MachineBasicBlock *HeadMBB, + MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) { + int Num = handleJumpintoIfImp(HeadMBB, TrueMBB, FalseMBB); + if (Num == 0) { + DEBUG( + dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n"; + ); + Num = handleJumpintoIfImp(HeadMBB, FalseMBB, TrueMBB); } + return Num; +} - while (downBlk) { - if (DEBUGME) { - errs() << "check down = BB" << downBlk->getNumber(); - } - - if (singlePathTo(falseBlk, downBlk) == SinglePath_InPath) { - if (DEBUGME) { - errs() << " working\n"; - } - - num += cloneOnSideEntryTo(headBlk, trueBlk, downBlk); - num += cloneOnSideEntryTo(headBlk, falseBlk, downBlk); +int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB, + MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) { + int Num = 0; + MachineBasicBlock *DownBlk; - numClonedBlock += num; - num += serialPatternMatch(*headBlk->succ_begin()); - num += serialPatternMatch(*(++headBlk->succ_begin())); - num += ifPatternMatch(headBlk); - assert(num > 0); + //trueBlk could be the common post dominator + 
DownBlk = TrueMBB; + + DEBUG( + dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber() + << " true = BB" << TrueMBB->getNumber() + << ", numSucc=" << TrueMBB->succ_size() + << " false = BB" << FalseMBB->getNumber() << "\n"; + ); + + while (DownBlk) { + DEBUG( + dbgs() << "check down = BB" << DownBlk->getNumber(); + ); + + if (singlePathTo(FalseMBB, DownBlk) == SinglePath_InPath) { + DEBUG( + dbgs() << " working\n"; + ); + + Num += cloneOnSideEntryTo(HeadMBB, TrueMBB, DownBlk); + Num += cloneOnSideEntryTo(HeadMBB, FalseMBB, DownBlk); + + numClonedBlock += Num; + Num += serialPatternMatch(*HeadMBB->succ_begin()); + Num += serialPatternMatch(*(++HeadMBB->succ_begin())); + Num += ifPatternMatch(HeadMBB); + assert(Num > 0); break; } - if (DEBUGME) { - errs() << " not working\n"; - } - downBlk = (downBlk->succ_size() == 1) ? (*downBlk->succ_begin()) : NULL; + DEBUG( + dbgs() << " not working\n"; + ); + DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : NULL; } // walk down the postDomTree - return num; -} //handleJumpintoIf - -template<class PassT> -void CFGStructurizer<PassT>::showImproveSimpleJumpintoIf(BlockT *headBlk, - BlockT *trueBlk, - BlockT *falseBlk, - BlockT *landBlk, - bool detail) { - errs() << "head = BB" << headBlk->getNumber() - << " size = " << headBlk->size(); - if (detail) { - errs() << "\n"; - headBlk->print(errs()); - errs() << "\n"; + return Num; +} + +void AMDGPUCFGStructurizer::showImproveSimpleJumpintoIf( + MachineBasicBlock *HeadMBB, MachineBasicBlock *TrueMBB, + MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB, bool Detail) { + dbgs() << "head = BB" << HeadMBB->getNumber() + << " size = " << HeadMBB->size(); + if (Detail) { + dbgs() << "\n"; + HeadMBB->print(dbgs()); + dbgs() << "\n"; } - if (trueBlk) { - errs() << ", true = BB" << trueBlk->getNumber() << " size = " - << trueBlk->size() << " numPred = " << trueBlk->pred_size(); - if (detail) { - errs() << "\n"; - trueBlk->print(errs()); - errs() << "\n"; + if (TrueMBB) { + dbgs() << ", true = BB" << TrueMBB->getNumber() << " size = " + << TrueMBB->size() << " numPred = " << TrueMBB->pred_size(); + if (Detail) { + dbgs() << "\n"; + TrueMBB->print(dbgs()); + dbgs() << "\n"; } } - if (falseBlk) { - errs() << ", false = BB" << falseBlk->getNumber() << " size = " - << falseBlk->size() << " numPred = " << falseBlk->pred_size(); - if (detail) { - errs() << "\n"; - falseBlk->print(errs()); - errs() << "\n"; + if (FalseMBB) { + dbgs() << ", false = BB" << FalseMBB->getNumber() << " size = " + << FalseMBB->size() << " numPred = " << FalseMBB->pred_size(); + if (Detail) { + dbgs() << "\n"; + FalseMBB->print(dbgs()); + dbgs() << "\n"; } } - if (landBlk) { - errs() << ", land = BB" << landBlk->getNumber() << " size = " - << landBlk->size() << " numPred = " << landBlk->pred_size(); - if (detail) { - errs() << "\n"; - landBlk->print(errs()); - errs() << "\n"; + if (LandMBB) { + dbgs() << ", land = BB" << LandMBB->getNumber() << " size = " + << LandMBB->size() << " numPred = " << LandMBB->pred_size(); + if (Detail) { + dbgs() << "\n"; + LandMBB->print(dbgs()); + dbgs() << "\n"; } } - errs() << "\n"; -} //showImproveSimpleJumpintoIf + dbgs() << "\n"; +} -template<class PassT> -int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk, - BlockT *trueBlk, - BlockT *falseBlk, - BlockT **plandBlk) { - bool migrateTrue = false; - bool migrateFalse = false; +int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB, + MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, + 
MachineBasicBlock **LandMBBPtr) { + bool MigrateTrue = false; + bool MigrateFalse = false; - BlockT *landBlk = *plandBlk; + MachineBasicBlock *LandBlk = *LandMBBPtr; - assert((trueBlk == NULL || trueBlk->succ_size() <= 1) - && (falseBlk == NULL || falseBlk->succ_size() <= 1)); + assert((!TrueMBB || TrueMBB->succ_size() <= 1) + && (!FalseMBB || FalseMBB->succ_size() <= 1)); - if (trueBlk == falseBlk) { + if (TrueMBB == FalseMBB) return 0; - } - migrateTrue = needMigrateBlock(trueBlk); - migrateFalse = needMigrateBlock(falseBlk); + MigrateTrue = needMigrateBlock(TrueMBB); + MigrateFalse = needMigrateBlock(FalseMBB); - if (!migrateTrue && !migrateFalse) { + if (!MigrateTrue && !MigrateFalse) return 0; - } // If we need to migrate either trueBlk and falseBlk, migrate the rest that // have more than one predecessors. without doing this, its predecessor // rather than headBlk will have undefined value in initReg. - if (!migrateTrue && trueBlk && trueBlk->pred_size() > 1) { - migrateTrue = true; - } - if (!migrateFalse && falseBlk && falseBlk->pred_size() > 1) { - migrateFalse = true; - } + if (!MigrateTrue && TrueMBB && TrueMBB->pred_size() > 1) + MigrateTrue = true; + if (!MigrateFalse && FalseMBB && FalseMBB->pred_size() > 1) + MigrateFalse = true; - if (DEBUGME) { - errs() << "before improveSimpleJumpintoIf: "; - showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0); - } + DEBUG( + dbgs() << "before improveSimpleJumpintoIf: "; + showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0); + ); // org: headBlk => if () {trueBlk} else {falseBlk} => landBlk // @@ -1186,205 +1335,143 @@ int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk, // add initReg = initVal to headBlk const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32); - unsigned initReg = - funcRep->getRegInfo().createVirtualRegister(I32RC); - if (!migrateTrue || !migrateFalse) { - int initVal = migrateTrue ? 
0 : 1; - CFGTraits::insertAssignInstrBefore(headBlk, passRep, initReg, initVal); - } + unsigned InitReg = + HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC); + if (!MigrateTrue || !MigrateFalse) + llvm_unreachable("Extra register needed to handle CFG"); - int numNewBlk = 0; + int NumNewBlk = 0; - if (landBlk == NULL) { - landBlk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(landBlk); //insert to function + if (!LandBlk) { + LandBlk = HeadMBB->getParent()->CreateMachineBasicBlock(); + HeadMBB->getParent()->push_back(LandBlk); //insert to function - if (trueBlk) { - trueBlk->addSuccessor(landBlk); + if (TrueMBB) { + TrueMBB->addSuccessor(LandBlk); } else { - headBlk->addSuccessor(landBlk); + HeadMBB->addSuccessor(LandBlk); } - if (falseBlk) { - falseBlk->addSuccessor(landBlk); + if (FalseMBB) { + FalseMBB->addSuccessor(LandBlk); } else { - headBlk->addSuccessor(landBlk); + HeadMBB->addSuccessor(LandBlk); } - numNewBlk ++; + NumNewBlk ++; } - bool landBlkHasOtherPred = (landBlk->pred_size() > 2); + bool LandBlkHasOtherPred = (LandBlk->pred_size() > 2); //insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL" - typename BlockT::iterator insertPos = - CFGTraits::getInstrPos - (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDGPU::ENDIF, passRep)); - - if (landBlkHasOtherPred) { - unsigned immReg = - funcRep->getRegInfo().createVirtualRegister(I32RC); - CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 2); - unsigned cmpResReg = - funcRep->getRegInfo().createVirtualRegister(I32RC); - - CFGTraits::insertCompareInstrBefore(landBlk, insertPos, passRep, cmpResReg, - initReg, immReg); - CFGTraits::insertCondBranchBefore(landBlk, insertPos, - AMDGPU::IF_PREDICATE_SET, passRep, - cmpResReg, DebugLoc()); + MachineBasicBlock::iterator I = insertInstrBefore(LandBlk, AMDGPU::ENDIF); + + if (LandBlkHasOtherPred) { + llvm_unreachable("Extra register needed to handle CFG"); + unsigned CmpResReg = + HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC); + llvm_unreachable("Extra compare instruction needed to handle CFG"); + insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET, + CmpResReg, DebugLoc()); } - CFGTraits::insertCondBranchBefore(landBlk, insertPos, AMDGPU::IF_PREDICATE_SET, - passRep, initReg, DebugLoc()); + insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET, InitReg, + DebugLoc()); - if (migrateTrue) { - migrateInstruction(trueBlk, landBlk, insertPos); + if (MigrateTrue) { + migrateInstruction(TrueMBB, LandBlk, I); // need to uncondionally insert the assignment to ensure a path from its // predecessor rather than headBlk has valid value in initReg if // (initVal != 1). 
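The surrounding comments describe the contract this transform relies on: code migrated out of trueBlk/falseBlk into landBlk runs under a guard on initReg, so every path into landBlk must leave initReg with a defined value (1 along the true path, 0 along the false path). A plain-int simulation of that protocol, not the pass's actual virtual-register machinery:

#include <cstdio>

// Plain-int model of the initReg protocol: headBlk establishes a default,
// each migrated path overwrites it, and landBlk replays the migrated code
// under IF (initReg) / ELSE / ENDIF.
void runPath(bool TakeTruePath) {
  int InitReg = 0;     // assignment inserted in headBlk (initVal)
  if (TakeTruePath)
    InitReg = 1;       // assignment inserted at the end of trueBlk
  else
    InitReg = 0;       // assignment inserted at the end of falseBlk
  if (InitReg)         // IF_PREDICATE_SET on initReg in landBlk
    std::printf("code migrated from trueBlk runs\n");
  else                 // ELSE
    std::printf("code migrated from falseBlk runs\n");
}                      // ENDIF

int main() {
  runPath(true);
  runPath(false);
  return 0;
}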
- CFGTraits::insertAssignInstrBefore(trueBlk, passRep, initReg, 1); + llvm_unreachable("Extra register needed to handle CFG"); } - CFGTraits::insertInstrBefore(insertPos, AMDGPU::ELSE, passRep); + insertInstrBefore(I, AMDGPU::ELSE); - if (migrateFalse) { - migrateInstruction(falseBlk, landBlk, insertPos); + if (MigrateFalse) { + migrateInstruction(FalseMBB, LandBlk, I); // need to uncondionally insert the assignment to ensure a path from its // predecessor rather than headBlk has valid value in initReg if // (initVal != 0) - CFGTraits::insertAssignInstrBefore(falseBlk, passRep, initReg, 0); + llvm_unreachable("Extra register needed to handle CFG"); } - if (landBlkHasOtherPred) { + if (LandBlkHasOtherPred) { // add endif - CFGTraits::insertInstrBefore(insertPos, AMDGPU::ENDIF, passRep); + insertInstrBefore(I, AMDGPU::ENDIF); // put initReg = 2 to other predecessors of landBlk - for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(), - predIterEnd = landBlk->pred_end(); predIter != predIterEnd; - ++predIter) { - BlockT *curBlk = *predIter; - if (curBlk != trueBlk && curBlk != falseBlk) { - CFGTraits::insertAssignInstrBefore(curBlk, passRep, initReg, 2); - } - } //for - } - if (DEBUGME) { - errs() << "result from improveSimpleJumpintoIf: "; - showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0); - } - - // update landBlk - *plandBlk = landBlk; - - return numNewBlk; -} //improveSimpleJumpintoIf - -template<class PassT> -void CFGStructurizer<PassT>::handleLoopbreak(BlockT *exitingBlk, - LoopT *exitingLoop, - BlockT *exitBlk, - LoopT *exitLoop, - BlockT *landBlk) { - if (DEBUGME) { - errs() << "Trying to break loop-depth = " << getLoopDepth(exitLoop) - << " from loop-depth = " << getLoopDepth(exitingLoop) << "\n"; - } - const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32); - - RegiT initReg = INVALIDREGNUM; - if (exitingLoop != exitLoop) { - initReg = static_cast<int> - (funcRep->getRegInfo().createVirtualRegister(I32RC)); - assert(initReg != INVALIDREGNUM); - addLoopBreakInitReg(exitLoop, initReg); - while (exitingLoop != exitLoop && exitingLoop) { - addLoopBreakOnReg(exitingLoop, initReg); - exitingLoop = exitingLoop->getParentLoop(); + for (MachineBasicBlock::pred_iterator PI = LandBlk->pred_begin(), + PE = LandBlk->pred_end(); PI != PE; ++PI) { + MachineBasicBlock *MBB = *PI; + if (MBB != TrueMBB && MBB != FalseMBB) + llvm_unreachable("Extra register needed to handle CFG"); } - assert(exitingLoop == exitLoop); } + DEBUG( + dbgs() << "result from improveSimpleJumpintoIf: "; + showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0); + ); - mergeLoopbreakBlock(exitingBlk, exitBlk, landBlk, initReg); + // update landBlk + *LandMBBPtr = LandBlk; -} //handleLoopbreak + return NumNewBlk; +} -template<class PassT> -void CFGStructurizer<PassT>::handleLoopcontBlock(BlockT *contingBlk, - LoopT *contingLoop, - BlockT *contBlk, - LoopT *contLoop) { - if (DEBUGME) { - errs() << "loopcontPattern cont = BB" << contingBlk->getNumber() - << " header = BB" << contBlk->getNumber() << "\n"; +void AMDGPUCFGStructurizer::handleLoopcontBlock(MachineBasicBlock *ContingMBB, + MachineLoop *ContingLoop, MachineBasicBlock *ContMBB, + MachineLoop *ContLoop) { + DEBUG(dbgs() << "loopcontPattern cont = BB" << ContingMBB->getNumber() + << " header = BB" << ContMBB->getNumber() << "\n"; + dbgs() << "Trying to continue loop-depth = " + << getLoopDepth(ContLoop) + << " from loop-depth = " << getLoopDepth(ContingLoop) << "\n";); + settleLoopcontBlock(ContingMBB, 
ContMBB); +} - errs() << "Trying to continue loop-depth = " - << getLoopDepth(contLoop) - << " from loop-depth = " << getLoopDepth(contingLoop) << "\n"; - } +void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB, + MachineBasicBlock *SrcMBB) { + DEBUG( + dbgs() << "serialPattern BB" << DstMBB->getNumber() + << " <= BB" << SrcMBB->getNumber() << "\n"; + ); + DstMBB->splice(DstMBB->end(), SrcMBB, SrcMBB->begin(), SrcMBB->end()); - RegiT initReg = INVALIDREGNUM; - const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32); - if (contingLoop != contLoop) { - initReg = static_cast<int> - (funcRep->getRegInfo().createVirtualRegister(I32RC)); - assert(initReg != INVALIDREGNUM); - addLoopContInitReg(contLoop, initReg); - while (contingLoop && contingLoop->getParentLoop() != contLoop) { - addLoopBreakOnReg(contingLoop, initReg); //not addLoopContOnReg - contingLoop = contingLoop->getParentLoop(); - } - assert(contingLoop && contingLoop->getParentLoop() == contLoop); - addLoopContOnReg(contingLoop, initReg); - } + DstMBB->removeSuccessor(SrcMBB); + cloneSuccessorList(DstMBB, SrcMBB); - settleLoopcontBlock(contingBlk, contBlk, initReg); -} //handleLoopcontBlock + removeSuccessor(SrcMBB); + MLI->removeBlock(SrcMBB); + retireBlock(SrcMBB); +} -template<class PassT> -void CFGStructurizer<PassT>::mergeSerialBlock(BlockT *dstBlk, BlockT *srcBlk) { - if (DEBUGME) { - errs() << "serialPattern BB" << dstBlk->getNumber() - << " <= BB" << srcBlk->getNumber() << "\n"; - } - dstBlk->splice(dstBlk->end(), srcBlk, srcBlk->begin(), srcBlk->end()); - - dstBlk->removeSuccessor(srcBlk); - CFGTraits::cloneSuccessorList(dstBlk, srcBlk); - - removeSuccessor(srcBlk); - retireBlock(dstBlk, srcBlk); -} //mergeSerialBlock - -template<class PassT> -void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr, - BlockT *curBlk, - BlockT *trueBlk, - BlockT *falseBlk, - BlockT *landBlk) { - if (DEBUGME) { - errs() << "ifPattern BB" << curBlk->getNumber(); - errs() << "{ "; - if (trueBlk) { - errs() << "BB" << trueBlk->getNumber(); - } - errs() << " } else "; - errs() << "{ "; - if (falseBlk) { - errs() << "BB" << falseBlk->getNumber(); - } - errs() << " }\n "; - errs() << "landBlock: "; - if (landBlk == NULL) { - errs() << "NULL"; +void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI, + MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB, + MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB) { + assert (TrueMBB); + DEBUG( + dbgs() << "ifPattern BB" << MBB->getNumber(); + dbgs() << "{ "; + if (TrueMBB) { + dbgs() << "BB" << TrueMBB->getNumber(); + } + dbgs() << " } else "; + dbgs() << "{ "; + if (FalseMBB) { + dbgs() << "BB" << FalseMBB->getNumber(); + } + dbgs() << " }\n "; + dbgs() << "landBlock: "; + if (!LandMBB) { + dbgs() << "NULL"; } else { - errs() << "BB" << landBlk->getNumber(); + dbgs() << "BB" << LandMBB->getNumber(); } - errs() << "\n"; - } + dbgs() << "\n"; + ); - int oldOpcode = branchInstr->getOpcode(); - DebugLoc branchDL = branchInstr->getDebugLoc(); + int OldOpcode = BranchMI->getOpcode(); + DebugLoc BranchDL = BranchMI->getDebugLoc(); // transform to // if cond @@ -1394,1645 +1481,374 @@ void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr, // endif // landBlk - typename BlockT::iterator branchInstrPos = - CFGTraits::getInstrPos(curBlk, branchInstr); - CFGTraits::insertCondBranchBefore(branchInstrPos, - CFGTraits::getBranchNzeroOpcode(oldOpcode), - passRep, - branchDL); - - if (trueBlk) { - curBlk->splice(branchInstrPos, 
trueBlk, trueBlk->begin(), trueBlk->end()); - curBlk->removeSuccessor(trueBlk); - if (landBlk && trueBlk->succ_size()!=0) { - trueBlk->removeSuccessor(landBlk); - } - retireBlock(curBlk, trueBlk); - } - CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ELSE, passRep); - - if (falseBlk) { - curBlk->splice(branchInstrPos, falseBlk, falseBlk->begin(), - falseBlk->end()); - curBlk->removeSuccessor(falseBlk); - if (landBlk && falseBlk->succ_size() != 0) { - falseBlk->removeSuccessor(landBlk); - } - retireBlock(curBlk, falseBlk); - } - CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ENDIF, passRep); - - branchInstr->eraseFromParent(); + MachineBasicBlock::iterator I = BranchMI; + insertCondBranchBefore(I, getBranchNzeroOpcode(OldOpcode), + BranchDL); - if (landBlk && trueBlk && falseBlk) { - curBlk->addSuccessor(landBlk); + if (TrueMBB) { + MBB->splice(I, TrueMBB, TrueMBB->begin(), TrueMBB->end()); + MBB->removeSuccessor(TrueMBB); + if (LandMBB && TrueMBB->succ_size()!=0) + TrueMBB->removeSuccessor(LandMBB); + retireBlock(TrueMBB); + MLI->removeBlock(TrueMBB); } -} //mergeIfthenelseBlock - -template<class PassT> -void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk, - LoopLandInfo *loopLand) { - BlockT *landBlk = loopLand->landBlk; - - if (DEBUGME) { - errs() << "loopPattern header = BB" << dstBlk->getNumber() - << " land = BB" << landBlk->getNumber() << "\n"; + if (FalseMBB) { + insertInstrBefore(I, AMDGPU::ELSE); + MBB->splice(I, FalseMBB, FalseMBB->begin(), + FalseMBB->end()); + MBB->removeSuccessor(FalseMBB); + if (LandMBB && FalseMBB->succ_size() != 0) + FalseMBB->removeSuccessor(LandMBB); + retireBlock(FalseMBB); + MLI->removeBlock(FalseMBB); } + insertInstrBefore(I, AMDGPU::ENDIF); - // Loop contInitRegs are init at the beginning of the loop. - for (typename std::set<RegiT>::const_iterator iter = - loopLand->contInitRegs.begin(), - iterEnd = loopLand->contInitRegs.end(); iter != iterEnd; ++iter) { - CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0); - } + BranchMI->eraseFromParent(); - /* we last inserterd the DebugLoc in the - * BREAK_LOGICALZ_i32 or AMDGPU::BREAK_LOGICALNZ statement in the current dstBlk. - * search for the DebugLoc in the that statement. - * if not found, we have to insert the empty/default DebugLoc */ - InstrT *loopBreakInstr = CFGTraits::getLoopBreakInstr(dstBlk); - DebugLoc DLBreak = (loopBreakInstr) ? loopBreakInstr->getDebugLoc() : DebugLoc(); - - CFGTraits::insertInstrBefore(dstBlk, AMDGPU::WHILELOOP, passRep, DLBreak); - // Loop breakInitRegs are init before entering the loop. - for (typename std::set<RegiT>::const_iterator iter = - loopLand->breakInitRegs.begin(), - iterEnd = loopLand->breakInitRegs.end(); iter != iterEnd; ++iter) { - CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0); - } - // Loop endbranchInitRegs are init before entering the loop. - for (typename std::set<RegiT>::const_iterator iter = - loopLand->endbranchInitRegs.begin(), - iterEnd = loopLand->endbranchInitRegs.end(); iter != iterEnd; ++iter) { - CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0); - } - - /* we last inserterd the DebugLoc in the continue statement in the current dstBlk - * search for the DebugLoc in the continue statement. - * if not found, we have to insert the empty/default DebugLoc */ - InstrT *continueInstr = CFGTraits::getContinueInstr(dstBlk); - DebugLoc DLContinue = (continueInstr) ? 
continueInstr->getDebugLoc() : DebugLoc(); - - CFGTraits::insertInstrEnd(dstBlk, AMDGPU::ENDLOOP, passRep, DLContinue); - // Loop breakOnRegs are check after the ENDLOOP: break the loop outside this - // loop. - for (typename std::set<RegiT>::const_iterator iter = - loopLand->breakOnRegs.begin(), - iterEnd = loopLand->breakOnRegs.end(); iter != iterEnd; ++iter) { - CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::PREDICATED_BREAK, passRep, - *iter); - } - - // Loop contOnRegs are check after the ENDLOOP: cont the loop outside this - // loop. - for (std::set<RegiT>::const_iterator iter = loopLand->contOnRegs.begin(), - iterEnd = loopLand->contOnRegs.end(); iter != iterEnd; ++iter) { - CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::CONTINUE_LOGICALNZ_i32, - passRep, *iter); - } + if (LandMBB && TrueMBB && FalseMBB) + MBB->addSuccessor(LandMBB); - dstBlk->splice(dstBlk->end(), landBlk, landBlk->begin(), landBlk->end()); - - for (typename BlockT::succ_iterator iter = landBlk->succ_begin(), - iterEnd = landBlk->succ_end(); iter != iterEnd; ++iter) { - dstBlk->addSuccessor(*iter); // *iter's predecessor is also taken care of. - } - - removeSuccessor(landBlk); - retireBlock(dstBlk, landBlk); -} //mergeLooplandBlock - -template<class PassT> -void CFGStructurizer<PassT>::reversePredicateSetter(typename BlockT::iterator I) { - while (I--) { - if (I->getOpcode() == AMDGPU::PRED_X) { - switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) { - case OPCODE_IS_ZERO_INT: - static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_NOT_ZERO_INT); - return; - case OPCODE_IS_NOT_ZERO_INT: - static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_ZERO_INT); - return; - case OPCODE_IS_ZERO: - static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_NOT_ZERO); - return; - case OPCODE_IS_NOT_ZERO: - static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_ZERO); - return; - default: - assert(0 && "PRED_X Opcode invalid!"); - } - } - } } -template<class PassT> -void CFGStructurizer<PassT>::mergeLoopbreakBlock(BlockT *exitingBlk, - BlockT *exitBlk, - BlockT *exitLandBlk, - RegiT setReg) { - if (DEBUGME) { - errs() << "loopbreakPattern exiting = BB" << exitingBlk->getNumber() - << " exit = BB" << exitBlk->getNumber() - << " land = BB" << exitLandBlk->getNumber() << "\n"; - } - - InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(exitingBlk); - assert(branchInstr && CFGTraits::isCondBranch(branchInstr)); - - DebugLoc DL = branchInstr->getDebugLoc(); - - BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr); +void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk, + MachineBasicBlock *LandMBB) { + DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber() + << " land = BB" << LandMBB->getNumber() << "\n";); - // transform exitingBlk to - // if ( ) { - // exitBlk (if exitBlk != exitLandBlk) - // setReg = 1 - // break - // }endif - // successor = {orgSuccessor(exitingBlk) - exitBlk} - - typename BlockT::iterator branchInstrPos = - CFGTraits::getInstrPos(exitingBlk, branchInstr); - - if (exitBlk == exitLandBlk && setReg == INVALIDREGNUM) { - //break_logical + insertInstrBefore(DstBlk, AMDGPU::WHILELOOP, DebugLoc()); + insertInstrEnd(DstBlk, AMDGPU::ENDLOOP, DebugLoc()); + DstBlk->addSuccessor(LandMBB); + DstBlk->removeSuccessor(DstBlk); +} - if (trueBranch != exitBlk) { - reversePredicateSetter(branchInstrPos); - } - CFGTraits::insertCondBranchBefore(branchInstrPos, AMDGPU::PREDICATED_BREAK, passRep, DL); - } else { - if (trueBranch != exitBlk) { - 
reversePredicateSetter(branchInstr); - } - CFGTraits::insertCondBranchBefore(branchInstrPos, AMDGPU::PREDICATED_BREAK, passRep, DL); - if (exitBlk != exitLandBlk) { - //splice is insert-before ... - exitingBlk->splice(branchInstrPos, exitBlk, exitBlk->begin(), - exitBlk->end()); - } - if (setReg != INVALIDREGNUM) { - CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1); - } - CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::BREAK, passRep); - } //if_logical +void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB, + MachineBasicBlock *LandMBB) { + DEBUG(dbgs() << "loopbreakPattern exiting = BB" << ExitingMBB->getNumber() + << " land = BB" << LandMBB->getNumber() << "\n";); + MachineInstr *BranchMI = getLoopendBlockBranchInstr(ExitingMBB); + assert(BranchMI && isCondBranch(BranchMI)); + DebugLoc DL = BranchMI->getDebugLoc(); + MachineBasicBlock *TrueBranch = getTrueBranch(BranchMI); + MachineBasicBlock::iterator I = BranchMI; + if (TrueBranch != LandMBB) + reversePredicateSetter(I); + insertCondBranchBefore(ExitingMBB, I, AMDGPU::IF_PREDICATE_SET, AMDGPU::PREDICATE_BIT, DL); + insertInstrBefore(I, AMDGPU::BREAK); + insertInstrBefore(I, AMDGPU::ENDIF); //now branchInst can be erase safely - branchInstr->eraseFromParent(); - + BranchMI->eraseFromParent(); //now take care of successors, retire blocks - exitingBlk->removeSuccessor(exitBlk); - if (exitBlk != exitLandBlk) { - //splice is insert-before ... - exitBlk->removeSuccessor(exitLandBlk); - retireBlock(exitingBlk, exitBlk); - } - -} //mergeLoopbreakBlock - -template<class PassT> -void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk, - BlockT *contBlk, - RegiT setReg) { - if (DEBUGME) { - errs() << "settleLoopcontBlock conting = BB" - << contingBlk->getNumber() - << ", cont = BB" << contBlk->getNumber() << "\n"; - } - - InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(contingBlk); - if (branchInstr) { - assert(CFGTraits::isCondBranch(branchInstr)); - typename BlockT::iterator branchInstrPos = - CFGTraits::getInstrPos(contingBlk, branchInstr); - BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr); - int oldOpcode = branchInstr->getOpcode(); - DebugLoc DL = branchInstr->getDebugLoc(); - - // transform contingBlk to - // if () { - // move instr after branchInstr - // continue - // or - // setReg = 1 - // break - // }endif - // successor = {orgSuccessor(contingBlk) - loopHeader} - - bool useContinueLogical = - (setReg == INVALIDREGNUM && (&*contingBlk->rbegin()) == branchInstr); - - if (useContinueLogical == false) { - int branchOpcode = - trueBranch == contBlk ? CFGTraits::getBranchNzeroOpcode(oldOpcode) - : CFGTraits::getBranchZeroOpcode(oldOpcode); - - CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL); - - if (setReg != INVALIDREGNUM) { - CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1); - // insertEnd to ensure phi-moves, if exist, go before the continue-instr. - CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, DL); - } else { - // insertEnd to ensure phi-moves, if exist, go before the continue-instr. 
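The new mergeLoopbreakBlock above replaces a two-way exiting branch with structured ops, inverting the PRED_X condition first whenever the branch's taken side is not the exit path. A toy lowering that just records the emitted sequence; the string opcode names stand in for the real AMDGPU instruction definitions:

#include <iostream>
#include <string>
#include <vector>

// Emit the structured replacement for a conditional branch out of a loop;
// TakenIsExit mirrors the getTrueBranch(BranchMI) == LandMBB test.
std::vector<std::string> lowerLoopBreak(bool TakenIsExit) {
  std::vector<std::string> Out;
  if (!TakenIsExit)
    Out.push_back("invert PRED_X condition"); // reversePredicateSetter
  Out.push_back("IF_PREDICATE_SET");          // enter the break region
  Out.push_back("BREAK");                     // leave the enclosing loop
  Out.push_back("ENDIF");                     // close the region
  return Out; // the original conditional branch is erased afterwards
}

int main() {
  for (const std::string &Op : lowerLoopBreak(false))
    std::cout << Op << "\n";
  return 0;
}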
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, DL); - } + ExitingMBB->removeSuccessor(LandMBB); +} - CFGTraits::insertInstrEnd(contingBlk, AMDGPU::ENDIF, passRep, DL); +void AMDGPUCFGStructurizer::settleLoopcontBlock(MachineBasicBlock *ContingMBB, + MachineBasicBlock *ContMBB) { + DEBUG(dbgs() << "settleLoopcontBlock conting = BB" + << ContingMBB->getNumber() + << ", cont = BB" << ContMBB->getNumber() << "\n";); + + MachineInstr *MI = getLoopendBlockBranchInstr(ContingMBB); + if (MI) { + assert(isCondBranch(MI)); + MachineBasicBlock::iterator I = MI; + MachineBasicBlock *TrueBranch = getTrueBranch(MI); + int OldOpcode = MI->getOpcode(); + DebugLoc DL = MI->getDebugLoc(); + + bool UseContinueLogical = ((&*ContingMBB->rbegin()) == MI); + + if (UseContinueLogical == false) { + int BranchOpcode = + TrueBranch == ContMBB ? getBranchNzeroOpcode(OldOpcode) : + getBranchZeroOpcode(OldOpcode); + insertCondBranchBefore(I, BranchOpcode, DL); + // insertEnd to ensure phi-moves, if exist, go before the continue-instr. + insertInstrEnd(ContingMBB, AMDGPU::CONTINUE, DL); + insertInstrEnd(ContingMBB, AMDGPU::ENDIF, DL); } else { - int branchOpcode = - trueBranch == contBlk ? CFGTraits::getContinueNzeroOpcode(oldOpcode) - : CFGTraits::getContinueZeroOpcode(oldOpcode); - - CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL); + int BranchOpcode = + TrueBranch == ContMBB ? getContinueNzeroOpcode(OldOpcode) : + getContinueZeroOpcode(OldOpcode); + insertCondBranchBefore(I, BranchOpcode, DL); } - branchInstr->eraseFromParent(); + MI->eraseFromParent(); } else { // if we've arrived here then we've already erased the branch instruction - // travel back up the basic block to see the last reference of our debug location - // we've just inserted that reference here so it should be representative - if (setReg != INVALIDREGNUM) { - CFGTraits::insertAssignInstrBefore(contingBlk, passRep, setReg, 1); - // insertEnd to ensure phi-moves, if exist, go before the continue-instr. - CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk)); - } else { - // insertEnd to ensure phi-moves, if exist, go before the continue-instr. - CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk)); - } - } //else - -} //settleLoopcontBlock - -// BBs in exitBlkSet are determined as in break-path for loopRep, -// before we can put code for BBs as inside loop-body for loopRep -// check whether those BBs are determined as cont-BB for parentLoopRep -// earlier. 
-// If so, generate a new BB newBlk -// (1) set newBlk common successor of BBs in exitBlkSet -// (2) change the continue-instr in BBs in exitBlkSet to break-instr -// (3) generate continue-instr in newBlk -// -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::relocateLoopcontBlock(LoopT *parentLoopRep, - LoopT *loopRep, - std::set<BlockT *> &exitBlkSet, - BlockT *exitLandBlk) { - std::set<BlockT *> endBlkSet; - - - - for (typename std::set<BlockT *>::const_iterator iter = exitBlkSet.begin(), - iterEnd = exitBlkSet.end(); - iter != iterEnd; ++iter) { - BlockT *exitBlk = *iter; - BlockT *endBlk = singlePathEnd(exitBlk, exitLandBlk); - - if (endBlk == NULL || CFGTraits::getContinueInstr(endBlk) == NULL) - return NULL; - - endBlkSet.insert(endBlk); - } - - BlockT *newBlk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(newBlk); //insert to function - CFGTraits::insertInstrEnd(newBlk, AMDGPU::CONTINUE, passRep); - SHOWNEWBLK(newBlk, "New continue block: "); - - for (typename std::set<BlockT*>::const_iterator iter = endBlkSet.begin(), - iterEnd = endBlkSet.end(); - iter != iterEnd; ++iter) { - BlockT *endBlk = *iter; - InstrT *contInstr = CFGTraits::getContinueInstr(endBlk); - if (contInstr) { - contInstr->eraseFromParent(); - } - endBlk->addSuccessor(newBlk); - if (DEBUGME) { - errs() << "Add new continue Block to BB" - << endBlk->getNumber() << " successors\n"; - } - } - - return newBlk; -} //relocateLoopcontBlock - - -// LoopEndbranchBlock is a BB created by the CFGStructurizer to use as -// LoopLandBlock. This BB branch on the loop endBranchInit register to the -// pathes corresponding to the loop exiting branches. - -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::addLoopEndbranchBlock(LoopT *loopRep, - BlockTSmallerVector &exitingBlks, - BlockTSmallerVector &exitBlks) { - const AMDGPUInstrInfo *tii = - static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo()); - const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32); - - RegiT endBranchReg = static_cast<int> - (funcRep->getRegInfo().createVirtualRegister(I32RC)); - assert(endBranchReg >= 0); - - // reg = 0 before entering the loop - addLoopEndbranchInitReg(loopRep, endBranchReg); - - uint32_t numBlks = static_cast<uint32_t>(exitingBlks.size()); - assert(numBlks >=2 && numBlks == exitBlks.size()); - - BlockT *preExitingBlk = exitingBlks[0]; - BlockT *preExitBlk = exitBlks[0]; - BlockT *preBranchBlk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(preBranchBlk); //insert to function - SHOWNEWBLK(preBranchBlk, "New loopEndbranch block: "); - - BlockT *newLandBlk = preBranchBlk; - - CFGTraits::replaceInstrUseOfBlockWith(preExitingBlk, preExitBlk, - newLandBlk); - preExitingBlk->removeSuccessor(preExitBlk); - preExitingBlk->addSuccessor(newLandBlk); - - //it is redundant to add reg = 0 to exitingBlks[0] - - // For 1..n th exiting path (the last iteration handles two pathes) create the - // branch to the previous path and the current path. - for (uint32_t i = 1; i < numBlks; ++i) { - BlockT *curExitingBlk = exitingBlks[i]; - BlockT *curExitBlk = exitBlks[i]; - BlockT *curBranchBlk; - - if (i == numBlks - 1) { - curBranchBlk = curExitBlk; - } else { - curBranchBlk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(curBranchBlk); //insert to function - SHOWNEWBLK(curBranchBlk, "New loopEndbranch block: "); - } - - // Add reg = i to exitingBlks[i]. 
- CFGTraits::insertAssignInstrBefore(curExitingBlk, passRep, - endBranchReg, i); - - // Remove the edge (exitingBlks[i] exitBlks[i]) add new edge - // (exitingBlks[i], newLandBlk). - CFGTraits::replaceInstrUseOfBlockWith(curExitingBlk, curExitBlk, - newLandBlk); - curExitingBlk->removeSuccessor(curExitBlk); - curExitingBlk->addSuccessor(newLandBlk); - - // add to preBranchBlk the branch instruction: - // if (endBranchReg == preVal) - // preExitBlk - // else - // curBranchBlk - // - // preValReg = i - 1 - - DebugLoc DL; - RegiT preValReg = static_cast<int> - (funcRep->getRegInfo().createVirtualRegister(I32RC)); - - preBranchBlk->insert(preBranchBlk->begin(), - tii->getMovImmInstr(preBranchBlk->getParent(), preValReg, - i - 1)); - - // condResReg = (endBranchReg == preValReg) - RegiT condResReg = static_cast<int> - (funcRep->getRegInfo().createVirtualRegister(I32RC)); - BuildMI(preBranchBlk, DL, tii->get(tii->getIEQOpcode()), condResReg) - .addReg(endBranchReg).addReg(preValReg); - - BuildMI(preBranchBlk, DL, tii->get(AMDGPU::BRANCH_COND_i32)) - .addMBB(preExitBlk).addReg(condResReg); - - preBranchBlk->addSuccessor(preExitBlk); - preBranchBlk->addSuccessor(curBranchBlk); - - // Update preExitingBlk, preExitBlk, preBranchBlk. - preExitingBlk = curExitingBlk; - preExitBlk = curExitBlk; - preBranchBlk = curBranchBlk; - - } //end for 1 .. n blocks - - return newLandBlk; -} //addLoopEndbranchBlock - -template<class PassT> -typename CFGStructurizer<PassT>::PathToKind -CFGStructurizer<PassT>::singlePathTo(BlockT *srcBlk, BlockT *dstBlk, - bool allowSideEntry) { - assert(dstBlk); - - if (srcBlk == dstBlk) { - return SinglePath_InPath; - } - - while (srcBlk && srcBlk->succ_size() == 1) { - srcBlk = *srcBlk->succ_begin(); - if (srcBlk == dstBlk) { - return SinglePath_InPath; - } - - if (!allowSideEntry && srcBlk->pred_size() > 1) { - return Not_SinglePath; - } - } - - if (srcBlk && srcBlk->succ_size()==0) { - return SinglePath_NotInPath; - } - - return Not_SinglePath; -} //singlePathTo - -// If there is a single path from srcBlk to dstBlk, return the last block before -// dstBlk If there is a single path from srcBlk->end without dstBlk, return the -// last block in the path Otherwise, return NULL -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::singlePathEnd(BlockT *srcBlk, BlockT *dstBlk, - bool allowSideEntry) { - assert(dstBlk); - - if (srcBlk == dstBlk) { - return srcBlk; - } - - if (srcBlk->succ_size() == 0) { - return srcBlk; - } - - while (srcBlk && srcBlk->succ_size() == 1) { - BlockT *preBlk = srcBlk; - - srcBlk = *srcBlk->succ_begin(); - if (srcBlk == NULL) { - return preBlk; - } - - if (!allowSideEntry && srcBlk->pred_size() > 1) { - return NULL; - } - } - - if (srcBlk && srcBlk->succ_size()==0) { - return srcBlk; + // travel back up the basic block to see the last reference of our debug + // location we've just inserted that reference here so it should be + // representative insertEnd to ensure phi-moves, if exist, go before the + // continue-instr. 
+ insertInstrEnd(ContingMBB, AMDGPU::CONTINUE, + getLastDebugLocInBB(ContingMBB)); } +} - return NULL; - -} //singlePathEnd - -template<class PassT> -int CFGStructurizer<PassT>::cloneOnSideEntryTo(BlockT *preBlk, BlockT *srcBlk, - BlockT *dstBlk) { - int cloned = 0; - assert(preBlk->isSuccessor(srcBlk)); - while (srcBlk && srcBlk != dstBlk) { - assert(srcBlk->succ_size() == 1); - if (srcBlk->pred_size() > 1) { - srcBlk = cloneBlockForPredecessor(srcBlk, preBlk); - ++cloned; +int AMDGPUCFGStructurizer::cloneOnSideEntryTo(MachineBasicBlock *PreMBB, + MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB) { + int Cloned = 0; + assert(PreMBB->isSuccessor(SrcMBB)); + while (SrcMBB && SrcMBB != DstMBB) { + assert(SrcMBB->succ_size() == 1); + if (SrcMBB->pred_size() > 1) { + SrcMBB = cloneBlockForPredecessor(SrcMBB, PreMBB); + ++Cloned; } - preBlk = srcBlk; - srcBlk = *srcBlk->succ_begin(); + PreMBB = SrcMBB; + SrcMBB = *SrcMBB->succ_begin(); } - return cloned; -} //cloneOnSideEntryTo + return Cloned; +} -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::cloneBlockForPredecessor(BlockT *curBlk, - BlockT *predBlk) { - assert(predBlk->isSuccessor(curBlk) && +MachineBasicBlock * +AMDGPUCFGStructurizer::cloneBlockForPredecessor(MachineBasicBlock *MBB, + MachineBasicBlock *PredMBB) { + assert(PredMBB->isSuccessor(MBB) && "succBlk is not a prececessor of curBlk"); - BlockT *cloneBlk = CFGTraits::clone(curBlk); //clone instructions - CFGTraits::replaceInstrUseOfBlockWith(predBlk, curBlk, cloneBlk); + MachineBasicBlock *CloneMBB = clone(MBB); //clone instructions + replaceInstrUseOfBlockWith(PredMBB, MBB, CloneMBB); //srcBlk, oldBlk, newBlk - predBlk->removeSuccessor(curBlk); - predBlk->addSuccessor(cloneBlk); + PredMBB->removeSuccessor(MBB); + PredMBB->addSuccessor(CloneMBB); // add all successor to cloneBlk - CFGTraits::cloneSuccessorList(cloneBlk, curBlk); - - numClonedInstr += curBlk->size(); + cloneSuccessorList(CloneMBB, MBB); - if (DEBUGME) { - errs() << "Cloned block: " << "BB" - << curBlk->getNumber() << "size " << curBlk->size() << "\n"; - } + numClonedInstr += MBB->size(); - SHOWNEWBLK(cloneBlk, "result of Cloned block: "); - - return cloneBlk; -} //cloneBlockForPredecessor - -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::exitingBlock2ExitBlock(LoopT *loopRep, - BlockT *exitingBlk) { - BlockT *exitBlk = NULL; - - for (typename BlockT::succ_iterator iterSucc = exitingBlk->succ_begin(), - iterSuccEnd = exitingBlk->succ_end(); - iterSucc != iterSuccEnd; ++iterSucc) { - BlockT *curBlk = *iterSucc; - if (!loopRep->contains(curBlk)) { - assert(exitBlk == NULL); - exitBlk = curBlk; - } - } + DEBUG( + dbgs() << "Cloned block: " << "BB" + << MBB->getNumber() << "size " << MBB->size() << "\n"; + ); - assert(exitBlk != NULL); + SHOWNEWBLK(CloneMBB, "result of Cloned block: "); - return exitBlk; -} //exitingBlock2ExitBlock + return CloneMBB; +} -template<class PassT> -void CFGStructurizer<PassT>::migrateInstruction(BlockT *srcBlk, - BlockT *dstBlk, - InstrIterator insertPos) { - InstrIterator spliceEnd; +void AMDGPUCFGStructurizer::migrateInstruction(MachineBasicBlock *SrcMBB, + MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I) { + MachineBasicBlock::iterator SpliceEnd; //look for the input branchinstr, not the AMDGPU branchinstr - InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk); - if (branchInstr == NULL) { - if (DEBUGME) { - errs() << "migrateInstruction don't see branch instr\n" ; - } - 
spliceEnd = srcBlk->end(); + MachineInstr *BranchMI = getNormalBlockBranchInstr(SrcMBB); + if (!BranchMI) { + DEBUG( + dbgs() << "migrateInstruction doesn't see branch instr\n" ; + ); + SpliceEnd = SrcMBB->end(); } else { - if (DEBUGME) { - errs() << "migrateInstruction see branch instr\n" ; - branchInstr->dump(); - } - spliceEnd = CFGTraits::getInstrPos(srcBlk, branchInstr); - } - if (DEBUGME) { - errs() << "migrateInstruction before splice dstSize = " << dstBlk->size() - << "srcSize = " << srcBlk->size() << "\n"; + DEBUG( + dbgs() << "migrateInstruction sees branch instr\n" ; + BranchMI->dump(); + ); + SpliceEnd = BranchMI; } + DEBUG( + dbgs() << "migrateInstruction before splice dstSize = " << DstMBB->size() + << " srcSize = " << SrcMBB->size() << "\n"; + ); //splice insert before insertPos - dstBlk->splice(insertPos, srcBlk, srcBlk->begin(), spliceEnd); + DstMBB->splice(I, SrcMBB, SrcMBB->begin(), SpliceEnd); - if (DEBUGME) { - errs() << "migrateInstruction after splice dstSize = " << dstBlk->size() - << "srcSize = " << srcBlk->size() << "\n"; - } -} //migrateInstruction + DEBUG( + dbgs() << "migrateInstruction after splice dstSize = " << DstMBB->size() + << " srcSize = " << SrcMBB->size() << "\n"; + ); +} -// normalizeInfiniteLoopExit change -// B1: -// uncond_br LoopHeader -// -// to -// B1: -// cond_br 1 LoopHeader dummyExit -// and return the newly added dummy exit block -// -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::normalizeInfiniteLoopExit(LoopT* LoopRep) { - BlockT *loopHeader; - BlockT *loopLatch; - loopHeader = LoopRep->getHeader(); - loopLatch = LoopRep->getLoopLatch(); - BlockT *dummyExitBlk = NULL; +MachineBasicBlock * +AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) { + MachineBasicBlock *LoopHeader = LoopRep->getHeader(); + MachineBasicBlock *LoopLatch = LoopRep->getLoopLatch(); const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32); - if (loopHeader!=NULL && loopLatch!=NULL) { - InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(loopLatch); - if (branchInstr!=NULL && CFGTraits::isUncondBranch(branchInstr)) { - dummyExitBlk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(dummyExitBlk); //insert to function - SHOWNEWBLK(dummyExitBlk, "DummyExitBlock to normalize infiniteLoop: "); - - if (DEBUGME) errs() << "Old branch instr: " << *branchInstr << "\n"; - - typename BlockT::iterator insertPos = - CFGTraits::getInstrPos(loopLatch, branchInstr); - unsigned immReg = - funcRep->getRegInfo().createVirtualRegister(I32RC); - CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 1); - InstrT *newInstr = - CFGTraits::insertInstrBefore(insertPos, AMDGPU::BRANCH_COND_i32, passRep); - MachineInstrBuilder MIB(*funcRep, newInstr); - MIB.addMBB(loopHeader); - MIB.addReg(immReg, false); - - SHOWNEWINSTR(newInstr); - - branchInstr->eraseFromParent(); - loopLatch->addSuccessor(dummyExitBlk); - } - } - return dummyExitBlk; -} //normalizeInfiniteLoopExit + if (!LoopHeader || !LoopLatch) + return NULL; + MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch); + // Is LoopRep an infinite loop?
+ if (!BranchMI || !isUncondBranch(BranchMI)) + return NULL; -template<class PassT> -void CFGStructurizer<PassT>::removeUnconditionalBranch(BlockT *srcBlk) { - InstrT *branchInstr; + MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock(); + FuncRep->push_back(DummyExitBlk); //insert to function + SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: "); + DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";); + MachineBasicBlock::iterator I = BranchMI; + unsigned ImmReg = FuncRep->getRegInfo().createVirtualRegister(I32RC); + llvm_unreachable("Extra register needed to handle CFG"); + MachineInstr *NewMI = insertInstrBefore(I, AMDGPU::BRANCH_COND_i32); + MachineInstrBuilder MIB(*FuncRep, NewMI); + MIB.addMBB(LoopHeader); + MIB.addReg(ImmReg, false); + SHOWNEWINSTR(NewMI); + BranchMI->eraseFromParent(); + LoopLatch->addSuccessor(DummyExitBlk); + + return DummyExitBlk; +} + +void AMDGPUCFGStructurizer::removeUnconditionalBranch(MachineBasicBlock *MBB) { + MachineInstr *BranchMI; // I saw two unconditional branch in one basic block in example // test_fc_do_while_or.c need to fix the upstream on this to remove the loop. - while ((branchInstr = CFGTraits::getLoopendBlockBranchInstr(srcBlk)) - && CFGTraits::isUncondBranch(branchInstr)) { - if (DEBUGME) { - errs() << "Removing unconditional branch instruction" ; - branchInstr->dump(); - } - branchInstr->eraseFromParent(); - } -} //removeUnconditionalBranch - -template<class PassT> -void CFGStructurizer<PassT>::removeRedundantConditionalBranch(BlockT *srcBlk) { - if (srcBlk->succ_size() == 2) { - BlockT *blk1 = *srcBlk->succ_begin(); - BlockT *blk2 = *(++srcBlk->succ_begin()); - - if (blk1 == blk2) { - InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk); - assert(branchInstr && CFGTraits::isCondBranch(branchInstr)); - if (DEBUGME) { - errs() << "Removing unneeded conditional branch instruction" ; - branchInstr->dump(); - } - branchInstr->eraseFromParent(); - SHOWNEWBLK(blk1, "Removing redundant successor"); - srcBlk->removeSuccessor(blk1); - } - } -} //removeRedundantConditionalBranch - -template<class PassT> -void CFGStructurizer<PassT>::addDummyExitBlock(SmallVector<BlockT*, - DEFAULT_VEC_SLOTS> &retBlks) { - BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(dummyExitBlk); //insert to function - CFGTraits::insertInstrEnd(dummyExitBlk, AMDGPU::RETURN, passRep); - - for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator iter = - retBlks.begin(), - iterEnd = retBlks.end(); iter != iterEnd; ++iter) { - BlockT *curBlk = *iter; - InstrT *curInstr = CFGTraits::getReturnInstr(curBlk); - if (curInstr) { - curInstr->eraseFromParent(); - } - curBlk->addSuccessor(dummyExitBlk); - if (DEBUGME) { - errs() << "Add dummyExitBlock to BB" << curBlk->getNumber() - << " successors\n"; - } - } //for - - SHOWNEWBLK(dummyExitBlk, "DummyExitBlock: "); -} //addDummyExitBlock - -template<class PassT> -void CFGStructurizer<PassT>::removeSuccessor(BlockT *srcBlk) { - while (srcBlk->succ_size()) { - srcBlk->removeSuccessor(*srcBlk->succ_begin()); + while ((BranchMI = getLoopendBlockBranchInstr(MBB)) + && isUncondBranch(BranchMI)) { + DEBUG(dbgs() << "Removing uncond branch instr"; BranchMI->dump();); + BranchMI->eraseFromParent(); } } -template<class PassT> -void CFGStructurizer<PassT>::recordSccnum(BlockT *srcBlk, int sccNum) { - BlockInfo *&srcBlkInfo = blockInfoMap[srcBlk]; +void AMDGPUCFGStructurizer::removeRedundantConditionalBranch( + MachineBasicBlock *MBB) { + if (MBB->succ_size() 
!= 2) + return; + MachineBasicBlock *MBB1 = *MBB->succ_begin(); + MachineBasicBlock *MBB2 = *(++MBB->succ_begin()); + if (MBB1 != MBB2) + return; + + MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB); + assert(BranchMI && isCondBranch(BranchMI)); + DEBUG(dbgs() << "Removing unneeded cond branch instr"; BranchMI->dump();); + BranchMI->eraseFromParent(); + SHOWNEWBLK(MBB1, "Removing redundant successor"); + MBB->removeSuccessor(MBB1); +} - if (srcBlkInfo == NULL) { - srcBlkInfo = new BlockInfo(); +void AMDGPUCFGStructurizer::addDummyExitBlock( + SmallVectorImpl<MachineBasicBlock*> &RetMBB) { + MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock(); + FuncRep->push_back(DummyExitBlk); //insert to function + insertInstrEnd(DummyExitBlk, AMDGPU::RETURN); + + for (SmallVectorImpl<MachineBasicBlock *>::iterator It = RetMBB.begin(), + E = RetMBB.end(); It != E; ++It) { + MachineBasicBlock *MBB = *It; + MachineInstr *MI = getReturnInstr(MBB); + if (MI) + MI->eraseFromParent(); + MBB->addSuccessor(DummyExitBlk); + DEBUG( + dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber() + << " successors\n"; + ); } + SHOWNEWBLK(DummyExitBlk, "DummyExitBlock: "); +} - srcBlkInfo->sccNum = sccNum; +void AMDGPUCFGStructurizer::removeSuccessor(MachineBasicBlock *MBB) { + while (MBB->succ_size()) + MBB->removeSuccessor(*MBB->succ_begin()); } -template<class PassT> -int CFGStructurizer<PassT>::getSCCNum(BlockT *srcBlk) { - BlockInfo *srcBlkInfo = blockInfoMap[srcBlk]; - return srcBlkInfo ? srcBlkInfo->sccNum : INVALIDSCCNUM; +void AMDGPUCFGStructurizer::recordSccnum(MachineBasicBlock *MBB, + int SccNum) { + BlockInformation *&srcBlkInfo = BlockInfoMap[MBB]; + if (!srcBlkInfo) + srcBlkInfo = new BlockInformation(); + srcBlkInfo->SccNum = SccNum; } -template<class PassT> -void CFGStructurizer<PassT>::retireBlock(BlockT *dstBlk, BlockT *srcBlk) { - if (DEBUGME) { - errs() << "Retiring BB" << srcBlk->getNumber() << "\n"; - } +void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) { + DEBUG( + dbgs() << "Retiring BB" << MBB->getNumber() << "\n"; + ); - BlockInfo *&srcBlkInfo = blockInfoMap[srcBlk]; + BlockInformation *&SrcBlkInfo = BlockInfoMap[MBB]; - if (srcBlkInfo == NULL) { - srcBlkInfo = new BlockInfo(); - } + if (!SrcBlkInfo) + SrcBlkInfo = new BlockInformation(); - srcBlkInfo->isRetired = true; - assert(srcBlk->succ_size() == 0 && srcBlk->pred_size() == 0 + SrcBlkInfo->IsRetired = true; + assert(MBB->succ_size() == 0 && MBB->pred_size() == 0 && "can't retire block yet"); } -template<class PassT> -bool CFGStructurizer<PassT>::isRetiredBlock(BlockT *srcBlk) { - BlockInfo *srcBlkInfo = blockInfoMap[srcBlk]; - return (srcBlkInfo && srcBlkInfo->isRetired); -} - -template<class PassT> -bool CFGStructurizer<PassT>::isActiveLoophead(BlockT *curBlk) { - LoopT *loopRep = loopInfo->getLoopFor(curBlk); - while (loopRep && loopRep->getHeader() == curBlk) { - LoopLandInfo *loopLand = getLoopLandInfo(loopRep); - - if(loopLand == NULL) - return true; - - BlockT *landBlk = loopLand->landBlk; - assert(landBlk); - if (!isRetiredBlock(landBlk)) { - return true; - } - - loopRep = loopRep->getParentLoop(); - } - - return false; -} //isActiveLoophead - -template<class PassT> -bool CFGStructurizer<PassT>::needMigrateBlock(BlockT *blk) { - const unsigned blockSizeThreshold = 30; - const unsigned cloneInstrThreshold = 100; - - bool multiplePreds = blk && (blk->pred_size() > 1); - - if(!multiplePreds) - return false; - - unsigned blkSize = blk->size(); - return ((blkSize > blockSizeThreshold) - && (blkSize * 
(blk->pred_size() - 1) > cloneInstrThreshold)); -} //needMigrateBlock - -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::recordLoopLandBlock(LoopT *loopRep, BlockT *landBlk, - BlockTSmallerVector &exitBlks, - std::set<BlockT *> &exitBlkSet) { - SmallVector<BlockT *, DEFAULT_VEC_SLOTS> inpathBlks; //in exit path blocks - - for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(), - predIterEnd = landBlk->pred_end(); - predIter != predIterEnd; ++predIter) { - BlockT *curBlk = *predIter; - if (loopRep->contains(curBlk) || exitBlkSet.count(curBlk)) { - inpathBlks.push_back(curBlk); - } - } //for - - //if landBlk has predecessors that are not in the given loop, - //create a new block - BlockT *newLandBlk = landBlk; - if (inpathBlks.size() != landBlk->pred_size()) { - newLandBlk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(newLandBlk); //insert to function - newLandBlk->addSuccessor(landBlk); - for (typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::iterator iter = - inpathBlks.begin(), - iterEnd = inpathBlks.end(); iter != iterEnd; ++iter) { - BlockT *curBlk = *iter; - CFGTraits::replaceInstrUseOfBlockWith(curBlk, landBlk, newLandBlk); - //srcBlk, oldBlk, newBlk - curBlk->removeSuccessor(landBlk); - curBlk->addSuccessor(newLandBlk); - } - for (size_t i = 0, tot = exitBlks.size(); i < tot; ++i) { - if (exitBlks[i] == landBlk) { - exitBlks[i] = newLandBlk; - } - } - SHOWNEWBLK(newLandBlk, "NewLandingBlock: "); - } - - setLoopLandBlock(loopRep, newLandBlk); - - return newLandBlk; -} // recordLoopbreakLand - -template<class PassT> -void CFGStructurizer<PassT>::setLoopLandBlock(LoopT *loopRep, BlockT *blk) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - if (theEntry == NULL) { - theEntry = new LoopLandInfo(); - } - assert(theEntry->landBlk == NULL); - - if (blk == NULL) { - blk = funcRep->CreateMachineBasicBlock(); - funcRep->push_back(blk); //insert to function - SHOWNEWBLK(blk, "DummyLandingBlock for loop without break: "); - } - - theEntry->landBlk = blk; - - if (DEBUGME) { - errs() << "setLoopLandBlock loop-header = BB" - << loopRep->getHeader()->getNumber() - << " landing-block = BB" << blk->getNumber() << "\n"; - } -} // setLoopLandBlock - -template<class PassT> -void CFGStructurizer<PassT>::addLoopBreakOnReg(LoopT *loopRep, RegiT regNum) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - if (theEntry == NULL) { - theEntry = new LoopLandInfo(); - } - - theEntry->breakOnRegs.insert(regNum); - - if (DEBUGME) { - errs() << "addLoopBreakOnReg loop-header = BB" - << loopRep->getHeader()->getNumber() - << " regNum = " << regNum << "\n"; - } -} // addLoopBreakOnReg - -template<class PassT> -void CFGStructurizer<PassT>::addLoopContOnReg(LoopT *loopRep, RegiT regNum) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - if (theEntry == NULL) { - theEntry = new LoopLandInfo(); - } - theEntry->contOnRegs.insert(regNum); - - if (DEBUGME) { - errs() << "addLoopContOnReg loop-header = BB" - << loopRep->getHeader()->getNumber() - << " regNum = " << regNum << "\n"; - } -} // addLoopContOnReg - -template<class PassT> -void CFGStructurizer<PassT>::addLoopBreakInitReg(LoopT *loopRep, RegiT regNum) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - if (theEntry == NULL) { - theEntry = new LoopLandInfo(); - } - theEntry->breakInitRegs.insert(regNum); - - if (DEBUGME) { - errs() << "addLoopBreakInitReg loop-header = BB" +void AMDGPUCFGStructurizer::setLoopLandBlock(MachineLoop *loopRep, + MachineBasicBlock *MBB) { 
+ MachineBasicBlock *&TheEntry = LLInfoMap[loopRep]; + if (!MBB) { + MBB = FuncRep->CreateMachineBasicBlock(); + FuncRep->push_back(MBB); //insert to function + SHOWNEWBLK(MBB, "DummyLandingBlock for loop without break: "); + } + TheEntry = MBB; + DEBUG( + dbgs() << "setLoopLandBlock loop-header = BB" << loopRep->getHeader()->getNumber() - << " regNum = " << regNum << "\n"; - } -} // addLoopBreakInitReg - -template<class PassT> -void CFGStructurizer<PassT>::addLoopContInitReg(LoopT *loopRep, RegiT regNum) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - if (theEntry == NULL) { - theEntry = new LoopLandInfo(); - } - theEntry->contInitRegs.insert(regNum); - - if (DEBUGME) { - errs() << "addLoopContInitReg loop-header = BB" - << loopRep->getHeader()->getNumber() - << " regNum = " << regNum << "\n"; - } -} // addLoopContInitReg - -template<class PassT> -void CFGStructurizer<PassT>::addLoopEndbranchInitReg(LoopT *loopRep, - RegiT regNum) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - if (theEntry == NULL) { - theEntry = new LoopLandInfo(); - } - theEntry->endbranchInitRegs.insert(regNum); - - if (DEBUGME) { - errs() << "addLoopEndbranchInitReg loop-header = BB" - << loopRep->getHeader()->getNumber() - << " regNum = " << regNum << "\n"; - } -} // addLoopEndbranchInitReg - -template<class PassT> -typename CFGStructurizer<PassT>::LoopLandInfo * -CFGStructurizer<PassT>::getLoopLandInfo(LoopT *loopRep) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - return theEntry; -} // getLoopLandInfo - -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::getLoopLandBlock(LoopT *loopRep) { - LoopLandInfo *&theEntry = loopLandInfoMap[loopRep]; - - return theEntry ? theEntry->landBlk : NULL; -} // getLoopLandBlock - - -template<class PassT> -bool CFGStructurizer<PassT>::hasBackEdge(BlockT *curBlk) { - LoopT *loopRep = loopInfo->getLoopFor(curBlk); - if (loopRep == NULL) - return false; - - BlockT *loopHeader = loopRep->getHeader(); - - return curBlk->isSuccessor(loopHeader); - -} //hasBackEdge - -template<class PassT> -unsigned CFGStructurizer<PassT>::getLoopDepth(LoopT *loopRep) { - return loopRep ? loopRep->getLoopDepth() : 0; -} //getLoopDepth - -template<class PassT> -int CFGStructurizer<PassT>::countActiveBlock -(typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterStart, - typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterEnd) { - int count = 0; - while (iterStart != iterEnd) { - if (!isRetiredBlock(*iterStart)) { - ++count; - } - ++iterStart; - } - - return count; -} //countActiveBlock - -// This is work around solution for findNearestCommonDominator not avaiable to -// post dom a proper fix should go to Dominators.h. + << " landing-block = BB" << MBB->getNumber() << "\n"; + ); +} -template<class PassT> -typename CFGStructurizer<PassT>::BlockT* -CFGStructurizer<PassT>::findNearestCommonPostDom(BlockT *blk1, BlockT *blk2) { +MachineBasicBlock * +AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1, + MachineBasicBlock *MBB2) { - if (postDomTree->dominates(blk1, blk2)) { - return blk1; - } - if (postDomTree->dominates(blk2, blk1)) { - return blk2; - } + if (PDT->dominates(MBB1, MBB2)) + return MBB1; + if (PDT->dominates(MBB2, MBB1)) + return MBB2; - DomTreeNodeT *node1 = postDomTree->getNode(blk1); - DomTreeNodeT *node2 = postDomTree->getNode(blk2); + MachineDomTreeNode *Node1 = PDT->getNode(MBB1); + MachineDomTreeNode *Node2 = PDT->getNode(MBB2); // Handle newly cloned node. 
- if (node1 == NULL && blk1->succ_size() == 1) { - return findNearestCommonPostDom(*blk1->succ_begin(), blk2); - } - if (node2 == NULL && blk2->succ_size() == 1) { - return findNearestCommonPostDom(blk1, *blk2->succ_begin()); - } + if (!Node1 && MBB1->succ_size() == 1) + return findNearestCommonPostDom(*MBB1->succ_begin(), MBB2); + if (!Node2 && MBB2->succ_size() == 1) + return findNearestCommonPostDom(MBB1, *MBB2->succ_begin()); - if (node1 == NULL || node2 == NULL) { + if (!Node1 || !Node2) return NULL; - } - node1 = node1->getIDom(); - while (node1) { - if (postDomTree->dominates(node1, node2)) { - return node1->getBlock(); - } - node1 = node1->getIDom(); + Node1 = Node1->getIDom(); + while (Node1) { + if (PDT->dominates(Node1, Node2)) + return Node1->getBlock(); + Node1 = Node1->getIDom(); } return NULL; } -template<class PassT> -typename CFGStructurizer<PassT>::BlockT * -CFGStructurizer<PassT>::findNearestCommonPostDom -(typename std::set<BlockT *> &blks) { - BlockT *commonDom; - typename std::set<BlockT *>::const_iterator iter = blks.begin(); - typename std::set<BlockT *>::const_iterator iterEnd = blks.end(); - for (commonDom = *iter; iter != iterEnd && commonDom != NULL; ++iter) { - BlockT *curBlk = *iter; - if (curBlk != commonDom) { - commonDom = findNearestCommonPostDom(curBlk, commonDom); - } - } - - if (DEBUGME) { - errs() << "Common post dominator for exit blocks is "; - if (commonDom) { - errs() << "BB" << commonDom->getNumber() << "\n"; - } else { - errs() << "NULL\n"; - } - } - - return commonDom; -} //findNearestCommonPostDom - -} // end anonymous namespace - -//todo: move-end - - -//===----------------------------------------------------------------------===// -// -// CFGStructurizer for AMDGPU -// -//===----------------------------------------------------------------------===// - - -namespace { -class AMDGPUCFGStructurizer : public MachineFunctionPass { -public: - typedef MachineInstr InstructionType; - typedef MachineFunction FunctionType; - typedef MachineBasicBlock BlockType; - typedef MachineLoopInfo LoopinfoType; - typedef MachineDominatorTree DominatortreeType; - typedef MachinePostDominatorTree PostDominatortreeType; - typedef MachineDomTreeNode DomTreeNodeType; - typedef MachineLoop LoopType; - -protected: - TargetMachine &TM; - -public: - AMDGPUCFGStructurizer(char &pid, TargetMachine &tm); - const TargetInstrInfo *getTargetInstrInfo() const; - const AMDGPURegisterInfo *getTargetRegisterInfo() const; -}; - -} // end anonymous namespace -AMDGPUCFGStructurizer::AMDGPUCFGStructurizer(char &pid, TargetMachine &tm) - : MachineFunctionPass(pid), TM(tm) { -} - -const TargetInstrInfo *AMDGPUCFGStructurizer::getTargetInstrInfo() const { - return TM.getInstrInfo(); -} - -const AMDGPURegisterInfo *AMDGPUCFGStructurizer::getTargetRegisterInfo() const { - return static_cast<const AMDGPURegisterInfo *>(TM.getRegisterInfo()); -} - -//===----------------------------------------------------------------------===// -// -// CFGPrepare -// -//===----------------------------------------------------------------------===// - - -namespace { -class AMDGPUCFGPrepare : public AMDGPUCFGStructurizer { -public: - static char ID; - -public: - AMDGPUCFGPrepare(TargetMachine &tm); - - virtual const char *getPassName() const; - virtual void getAnalysisUsage(AnalysisUsage &AU) const; - - bool runOnMachineFunction(MachineFunction &F); -}; - -char AMDGPUCFGPrepare::ID = 0; -} // end anonymous namespace - -AMDGPUCFGPrepare::AMDGPUCFGPrepare(TargetMachine &tm) - : AMDGPUCFGStructurizer(ID, tm ) { -} 
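The rewritten findNearestCommonPostDom above boils down to two direct dominance checks followed by a walk up the immediate-dominator chain of the post-dominator tree, plus a recursion that skips freshly cloned blocks that have no tree node yet. A condensed sketch of the core walk, assuming the same MachineDomTreeNode and MachinePostDominatorTree interfaces the pass uses (an illustration, not a drop-in replacement):

// Walk A's chain of immediate post-dominators until one of them also
// post-dominates B; the first such node is the nearest common post-dominator.
static MachineBasicBlock *nearestCommonPostDom(MachinePostDominatorTree *PDT,
                                               MachineDomTreeNode *A,
                                               MachineDomTreeNode *B) {
  for (MachineDomTreeNode *N = A; N; N = N->getIDom())
    if (PDT->dominates(N, B))
      return N->getBlock();
  return NULL; // the two blocks share no post-dominator
}

The set overload that follows simply folds this pairwise helper over all exit blocks, so a single NULL result makes the whole query return NULL.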
-const char *AMDGPUCFGPrepare::getPassName() const { - return "AMD IL Control Flow Graph Preparation Pass"; -} - -void AMDGPUCFGPrepare::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addPreserved<MachineFunctionAnalysis>(); - AU.addRequired<MachineFunctionAnalysis>(); - AU.addRequired<MachineDominatorTree>(); - AU.addRequired<MachinePostDominatorTree>(); - AU.addRequired<MachineLoopInfo>(); -} - -//===----------------------------------------------------------------------===// -// -// CFGPerform -// -//===----------------------------------------------------------------------===// - - -namespace { -class AMDGPUCFGPerform : public AMDGPUCFGStructurizer { -public: - static char ID; - -public: - AMDGPUCFGPerform(TargetMachine &tm); - virtual const char *getPassName() const; - virtual void getAnalysisUsage(AnalysisUsage &AU) const; - bool runOnMachineFunction(MachineFunction &F); -}; - -char AMDGPUCFGPerform::ID = 0; -} // end anonymous namespace - - AMDGPUCFGPerform::AMDGPUCFGPerform(TargetMachine &tm) -: AMDGPUCFGStructurizer(ID, tm) { -} - -const char *AMDGPUCFGPerform::getPassName() const { - return "AMD IL Control Flow Graph structurizer Pass"; -} - -void AMDGPUCFGPerform::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addPreserved<MachineFunctionAnalysis>(); - AU.addRequired<MachineFunctionAnalysis>(); - AU.addRequired<MachineDominatorTree>(); - AU.addRequired<MachinePostDominatorTree>(); - AU.addRequired<MachineLoopInfo>(); +MachineBasicBlock * +AMDGPUCFGStructurizer::findNearestCommonPostDom( + std::set<MachineBasicBlock *> &MBBs) { + MachineBasicBlock *CommonDom; + std::set<MachineBasicBlock *>::const_iterator It = MBBs.begin(); + std::set<MachineBasicBlock *>::const_iterator E = MBBs.end(); + for (CommonDom = *It; It != E && CommonDom; ++It) { + MachineBasicBlock *MBB = *It; + if (MBB != CommonDom) + CommonDom = findNearestCommonPostDom(MBB, CommonDom); + } + + DEBUG( + dbgs() << "Common post dominator for exit blocks is "; + if (CommonDom) + dbgs() << "BB" << CommonDom->getNumber() << "\n"; + else + dbgs() << "NULL\n"; + ); + + return CommonDom; } -//===----------------------------------------------------------------------===// -// -// CFGStructTraits<AMDGPUCFGStructurizer> -// -//===----------------------------------------------------------------------===// - -namespace { -// this class is tailor to the AMDGPU backend -template<> -struct CFGStructTraits<AMDGPUCFGStructurizer> { - typedef int RegiT; - - static int getBranchNzeroOpcode(int oldOpcode) { - switch(oldOpcode) { - case AMDGPU::JUMP_COND: - case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET; - case AMDGPU::BRANCH_COND_i32: - case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32; - default: - assert(0 && "internal error"); - } - return -1; - } - - static int getBranchZeroOpcode(int oldOpcode) { - switch(oldOpcode) { - case AMDGPU::JUMP_COND: - case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET; - case AMDGPU::BRANCH_COND_i32: - case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32; - default: - assert(0 && "internal error"); - } - return -1; - } - - static int getContinueNzeroOpcode(int oldOpcode) { - switch(oldOpcode) { - case AMDGPU::JUMP_COND: - case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32; - default: - assert(0 && "internal error"); - }; - return -1; - } - - static int getContinueZeroOpcode(int oldOpcode) { - switch(oldOpcode) { - case AMDGPU::JUMP_COND: - case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32; - default: - assert(0 && "internal error"); - } - return -1; - } - - static 
MachineBasicBlock *getTrueBranch(MachineInstr *instr) { - return instr->getOperand(0).getMBB(); - } - - static void setTrueBranch(MachineInstr *instr, MachineBasicBlock *blk) { - instr->getOperand(0).setMBB(blk); - } - - static MachineBasicBlock * - getFalseBranch(MachineBasicBlock *blk, MachineInstr *instr) { - assert(blk->succ_size() == 2); - MachineBasicBlock *trueBranch = getTrueBranch(instr); - MachineBasicBlock::succ_iterator iter = blk->succ_begin(); - MachineBasicBlock::succ_iterator iterNext = iter; - ++iterNext; - - return (*iter == trueBranch) ? *iterNext : *iter; - } - - static bool isCondBranch(MachineInstr *instr) { - switch (instr->getOpcode()) { - case AMDGPU::JUMP_COND: - case AMDGPU::BRANCH_COND_i32: - case AMDGPU::BRANCH_COND_f32: - break; - default: - return false; - } - return true; - } - - static bool isUncondBranch(MachineInstr *instr) { - switch (instr->getOpcode()) { - case AMDGPU::JUMP: - case AMDGPU::BRANCH: - return true; - default: - return false; - } - return true; - } - - static DebugLoc getLastDebugLocInBB(MachineBasicBlock *blk) { - //get DebugLoc from the first MachineBasicBlock instruction with debug info - DebugLoc DL; - for (MachineBasicBlock::iterator iter = blk->begin(); iter != blk->end(); ++iter) { - MachineInstr *instr = &(*iter); - if (instr->getDebugLoc().isUnknown() == false) { - DL = instr->getDebugLoc(); - } - } - return DL; - } - - static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *blk) { - MachineBasicBlock::reverse_iterator iter = blk->rbegin(); - MachineInstr *instr = &*iter; - if (instr && (isCondBranch(instr) || isUncondBranch(instr))) { - return instr; - } - return NULL; - } - - // The correct naming for this is getPossibleLoopendBlockBranchInstr. - // - // BB with backward-edge could have move instructions after the branch - // instruction. Such move instruction "belong to" the loop backward-edge. 
- // - static MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *blk) { - const AMDGPUInstrInfo * TII = static_cast<const AMDGPUInstrInfo *>( - blk->getParent()->getTarget().getInstrInfo()); - - for (MachineBasicBlock::reverse_iterator iter = blk->rbegin(), - iterEnd = blk->rend(); iter != iterEnd; ++iter) { - // FIXME: Simplify - MachineInstr *instr = &*iter; - if (instr) { - if (isCondBranch(instr) || isUncondBranch(instr)) { - return instr; - } else if (!TII->isMov(instr->getOpcode())) { - break; - } - } - } - return NULL; - } - - static MachineInstr *getReturnInstr(MachineBasicBlock *blk) { - MachineBasicBlock::reverse_iterator iter = blk->rbegin(); - if (iter != blk->rend()) { - MachineInstr *instr = &(*iter); - if (instr->getOpcode() == AMDGPU::RETURN) { - return instr; - } - } - return NULL; - } - - static MachineInstr *getContinueInstr(MachineBasicBlock *blk) { - MachineBasicBlock::reverse_iterator iter = blk->rbegin(); - if (iter != blk->rend()) { - MachineInstr *instr = &(*iter); - if (instr->getOpcode() == AMDGPU::CONTINUE) { - return instr; - } - } - return NULL; - } - - static MachineInstr *getLoopBreakInstr(MachineBasicBlock *blk) { - for (MachineBasicBlock::iterator iter = blk->begin(); (iter != blk->end()); ++iter) { - MachineInstr *instr = &(*iter); - if (instr->getOpcode() == AMDGPU::PREDICATED_BREAK) { - return instr; - } - } - return NULL; - } - - static bool isReturnBlock(MachineBasicBlock *blk) { - MachineInstr *instr = getReturnInstr(blk); - bool isReturn = (blk->succ_size() == 0); - if (instr) { - assert(isReturn); - } else if (isReturn) { - if (DEBUGME) { - errs() << "BB" << blk->getNumber() - <<" is return block without RETURN instr\n"; - } - } - - return isReturn; - } - - static MachineBasicBlock::iterator - getInstrPos(MachineBasicBlock *blk, MachineInstr *instr) { - assert(instr->getParent() == blk && "instruction doesn't belong to block"); - MachineBasicBlock::iterator iter = blk->begin(); - MachineBasicBlock::iterator iterEnd = blk->end(); - while (&(*iter) != instr && iter != iterEnd) { - ++iter; - } - - assert(iter != iterEnd); - return iter; - }//getInstrPos - - static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode, - AMDGPUCFGStructurizer *passRep) { - return insertInstrBefore(blk,newOpcode,passRep,DebugLoc()); - } //insertInstrBefore - - static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode, - AMDGPUCFGStructurizer *passRep, DebugLoc DL) { - const TargetInstrInfo *tii = passRep->getTargetInstrInfo(); - MachineInstr *newInstr = - blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DL); - - MachineBasicBlock::iterator res; - if (blk->begin() != blk->end()) { - blk->insert(blk->begin(), newInstr); - } else { - blk->push_back(newInstr); - } - - SHOWNEWINSTR(newInstr); - - return newInstr; - } //insertInstrBefore - - static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode, - AMDGPUCFGStructurizer *passRep) { - insertInstrEnd(blk,newOpcode,passRep,DebugLoc()); - } //insertInstrEnd - - static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode, - AMDGPUCFGStructurizer *passRep, DebugLoc DL) { - const TargetInstrInfo *tii = passRep->getTargetInstrInfo(); - MachineInstr *newInstr = blk->getParent() - ->CreateMachineInstr(tii->get(newOpcode), DL); - - blk->push_back(newInstr); - //assume the instruction doesn't take any reg operand ... 
- - SHOWNEWINSTR(newInstr); - } //insertInstrEnd - - static MachineInstr *insertInstrBefore(MachineBasicBlock::iterator instrPos, - int newOpcode, - AMDGPUCFGStructurizer *passRep) { - MachineInstr *oldInstr = &(*instrPos); - const TargetInstrInfo *tii = passRep->getTargetInstrInfo(); - MachineBasicBlock *blk = oldInstr->getParent(); - MachineInstr *newInstr = - blk->getParent()->CreateMachineInstr(tii->get(newOpcode), - DebugLoc()); - - blk->insert(instrPos, newInstr); - //assume the instruction doesn't take any reg operand ... - - SHOWNEWINSTR(newInstr); - return newInstr; - } //insertInstrBefore - - static void insertCondBranchBefore(MachineBasicBlock::iterator instrPos, - int newOpcode, - AMDGPUCFGStructurizer *passRep, - DebugLoc DL) { - MachineInstr *oldInstr = &(*instrPos); - const TargetInstrInfo *tii = passRep->getTargetInstrInfo(); - MachineBasicBlock *blk = oldInstr->getParent(); - MachineFunction *MF = blk->getParent(); - MachineInstr *newInstr = MF->CreateMachineInstr(tii->get(newOpcode), DL); - - blk->insert(instrPos, newInstr); - MachineInstrBuilder MIB(*MF, newInstr); - MIB.addReg(oldInstr->getOperand(1).getReg(), false); - - SHOWNEWINSTR(newInstr); - //erase later oldInstr->eraseFromParent(); - } //insertCondBranchBefore - - static void insertCondBranchBefore(MachineBasicBlock *blk, - MachineBasicBlock::iterator insertPos, - int newOpcode, - AMDGPUCFGStructurizer *passRep, - RegiT regNum, - DebugLoc DL) { - const TargetInstrInfo *tii = passRep->getTargetInstrInfo(); - MachineFunction *MF = blk->getParent(); - - MachineInstr *newInstr = MF->CreateMachineInstr(tii->get(newOpcode), DL); - - //insert before - blk->insert(insertPos, newInstr); - MachineInstrBuilder(*MF, newInstr).addReg(regNum, false); - - SHOWNEWINSTR(newInstr); - } //insertCondBranchBefore - - static void insertCondBranchEnd(MachineBasicBlock *blk, - int newOpcode, - AMDGPUCFGStructurizer *passRep, - RegiT regNum) { - const TargetInstrInfo *tii = passRep->getTargetInstrInfo(); - MachineFunction *MF = blk->getParent(); - MachineInstr *newInstr = - MF->CreateMachineInstr(tii->get(newOpcode), DebugLoc()); - - blk->push_back(newInstr); - MachineInstrBuilder(*MF, newInstr).addReg(regNum, false); - - SHOWNEWINSTR(newInstr); - } //insertCondBranchEnd - - - static void insertAssignInstrBefore(MachineBasicBlock::iterator instrPos, - AMDGPUCFGStructurizer *passRep, - RegiT regNum, int regVal) { - MachineInstr *oldInstr = &(*instrPos); - const AMDGPUInstrInfo *tii = - static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo()); - MachineBasicBlock *blk = oldInstr->getParent(); - MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum, - regVal); - blk->insert(instrPos, newInstr); - - SHOWNEWINSTR(newInstr); - } //insertAssignInstrBefore - - static void insertAssignInstrBefore(MachineBasicBlock *blk, - AMDGPUCFGStructurizer *passRep, - RegiT regNum, int regVal) { - const AMDGPUInstrInfo *tii = - static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo()); - - MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum, - regVal); - if (blk->begin() != blk->end()) { - blk->insert(blk->begin(), newInstr); - } else { - blk->push_back(newInstr); - } - - SHOWNEWINSTR(newInstr); - - } //insertInstrBefore - - static void insertCompareInstrBefore(MachineBasicBlock *blk, - MachineBasicBlock::iterator instrPos, - AMDGPUCFGStructurizer *passRep, - RegiT dstReg, RegiT src1Reg, - RegiT src2Reg) { - const AMDGPUInstrInfo *tii = - static_cast<const AMDGPUInstrInfo 
*>(passRep->getTargetInstrInfo()); - MachineFunction *MF = blk->getParent(); - MachineInstr *newInstr = - MF->CreateMachineInstr(tii->get(tii->getIEQOpcode()), DebugLoc()); - - MachineInstrBuilder MIB(*MF, newInstr); - MIB.addReg(dstReg, RegState::Define); //set target - MIB.addReg(src1Reg); //set src value - MIB.addReg(src2Reg); //set src value - - blk->insert(instrPos, newInstr); - SHOWNEWINSTR(newInstr); - - } //insertCompareInstrBefore +char AMDGPUCFGStructurizer::ID = 0; - static void cloneSuccessorList(MachineBasicBlock *dstBlk, - MachineBasicBlock *srcBlk) { - for (MachineBasicBlock::succ_iterator iter = srcBlk->succ_begin(), - iterEnd = srcBlk->succ_end(); iter != iterEnd; ++iter) { - dstBlk->addSuccessor(*iter); // *iter's predecessor is also taken care of - } - } //cloneSuccessorList - - static MachineBasicBlock *clone(MachineBasicBlock *srcBlk) { - MachineFunction *func = srcBlk->getParent(); - MachineBasicBlock *newBlk = func->CreateMachineBasicBlock(); - func->push_back(newBlk); //insert to function - for (MachineBasicBlock::iterator iter = srcBlk->begin(), - iterEnd = srcBlk->end(); - iter != iterEnd; ++iter) { - MachineInstr *instr = func->CloneMachineInstr(iter); - newBlk->push_back(instr); - } - return newBlk; - } - - //MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose because - //the AMDGPU instruction is not recognized as terminator fix this and retire - //this routine - static void replaceInstrUseOfBlockWith(MachineBasicBlock *srcBlk, - MachineBasicBlock *oldBlk, - MachineBasicBlock *newBlk) { - MachineInstr *branchInstr = getLoopendBlockBranchInstr(srcBlk); - if (branchInstr && isCondBranch(branchInstr) && - getTrueBranch(branchInstr) == oldBlk) { - setTrueBranch(branchInstr, newBlk); - } - } - - static void wrapup(MachineBasicBlock *entryBlk) { - assert((!entryBlk->getParent()->getJumpTableInfo() - || entryBlk->getParent()->getJumpTableInfo()->isEmpty()) - && "found a jump table"); - - //collect continue right before endloop - SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> contInstr; - MachineBasicBlock::iterator pre = entryBlk->begin(); - MachineBasicBlock::iterator iterEnd = entryBlk->end(); - MachineBasicBlock::iterator iter = pre; - while (iter != iterEnd) { - if (pre->getOpcode() == AMDGPU::CONTINUE - && iter->getOpcode() == AMDGPU::ENDLOOP) { - contInstr.push_back(pre); - } - pre = iter; - ++iter; - } //end while - - //delete continue right before endloop - for (unsigned i = 0; i < contInstr.size(); ++i) { - contInstr[i]->eraseFromParent(); - } - - // TODO to fix up jump table so later phase won't be confused. if - // (jumpTableInfo->isEmpty() == false) { need to clean the jump table, but - // there isn't such an interface yet. 
alternatively, replace all the other - // blocks in the jump table with the entryBlk //} - - } //wrapup - - static MachineDominatorTree *getDominatorTree(AMDGPUCFGStructurizer &pass) { - return &pass.getAnalysis<MachineDominatorTree>(); - } - - static MachinePostDominatorTree* - getPostDominatorTree(AMDGPUCFGStructurizer &pass) { - return &pass.getAnalysis<MachinePostDominatorTree>(); - } - - static MachineLoopInfo *getLoopInfo(AMDGPUCFGStructurizer &pass) { - return &pass.getAnalysis<MachineLoopInfo>(); - } -}; // template class CFGStructTraits } // end anonymous namespace -// createAMDGPUCFGPreparationPass- Returns a pass -FunctionPass *llvm::createAMDGPUCFGPreparationPass(TargetMachine &tm) { - return new AMDGPUCFGPrepare(tm); -} - -bool AMDGPUCFGPrepare::runOnMachineFunction(MachineFunction &func) { - return CFGStructurizer<AMDGPUCFGStructurizer>().prepare(func, *this, - getTargetRegisterInfo()); -} -// createAMDGPUCFGStructurizerPass- Returns a pass FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm) { - return new AMDGPUCFGPerform(tm); -} - -bool AMDGPUCFGPerform::runOnMachineFunction(MachineFunction &func) { - return CFGStructurizer<AMDGPUCFGStructurizer>().run(func, *this, - getTargetRegisterInfo()); + return new AMDGPUCFGStructurizer(tm); } diff --git a/lib/Target/R600/AMDILISelLowering.cpp b/lib/Target/R600/AMDILISelLowering.cpp index d669966..970787e 100644 --- a/lib/Target/R600/AMDILISelLowering.cpp +++ b/lib/Target/R600/AMDILISelLowering.cpp @@ -39,7 +39,7 @@ using namespace llvm; // TargetLowering Class Implementation Begins //===----------------------------------------------------------------------===// void AMDGPUTargetLowering::InitAMDILLowering() { - int types[] = { + static const int types[] = { (int)MVT::i8, (int)MVT::i16, (int)MVT::i32, @@ -58,19 +58,19 @@ void AMDGPUTargetLowering::InitAMDILLowering() { (int)MVT::v2i64 }; - int IntTypes[] = { + static const int IntTypes[] = { (int)MVT::i8, (int)MVT::i16, (int)MVT::i32, (int)MVT::i64 }; - int FloatTypes[] = { + static const int FloatTypes[] = { (int)MVT::f32, (int)MVT::f64 }; - int VectorTypes[] = { + static const int VectorTypes[] = { (int)MVT::v2i8, (int)MVT::v4i8, (int)MVT::v2i16, @@ -82,10 +82,10 @@ void AMDGPUTargetLowering::InitAMDILLowering() { (int)MVT::v2f64, (int)MVT::v2i64 }; - size_t NumTypes = sizeof(types) / sizeof(*types); - size_t NumFloatTypes = sizeof(FloatTypes) / sizeof(*FloatTypes); - size_t NumIntTypes = sizeof(IntTypes) / sizeof(*IntTypes); - size_t NumVectorTypes = sizeof(VectorTypes) / sizeof(*VectorTypes); + const size_t NumTypes = array_lengthof(types); + const size_t NumFloatTypes = array_lengthof(FloatTypes); + const size_t NumIntTypes = array_lengthof(IntTypes); + const size_t NumVectorTypes = array_lengthof(VectorTypes); const AMDGPUSubtarget &STM = getTargetMachine().getSubtarget<AMDGPUSubtarget>(); // These are the current register classes that are diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt index 1b79bf5..fde187b 100644 --- a/lib/Target/R600/CMakeLists.txt +++ b/lib/Target/R600/CMakeLists.txt @@ -14,16 +14,16 @@ add_public_tablegen_target(AMDGPUCommonTableGen) add_llvm_target(R600CodeGen AMDILCFGStructurizer.cpp AMDILIntrinsicInfo.cpp - AMDILISelDAGToDAG.cpp AMDILISelLowering.cpp AMDGPUAsmPrinter.cpp AMDGPUFrameLowering.cpp AMDGPUIndirectAddressing.cpp + AMDGPUISelDAGToDAG.cpp AMDGPUMCInstLower.cpp AMDGPUMachineFunction.cpp AMDGPUSubtarget.cpp - AMDGPUStructurizeCFG.cpp AMDGPUTargetMachine.cpp + AMDGPUTargetTransformInfo.cpp 
AMDGPUISelLowering.cpp AMDGPUConvertToISA.cpp AMDGPUInstrInfo.cpp @@ -40,6 +40,7 @@ add_llvm_target(R600CodeGen R600RegisterInfo.cpp R600TextureIntrinsicsReplacer.cpp SIAnnotateControlFlow.cpp + SIFixSGPRCopies.cpp SIInsertWaits.cpp SIInstrInfo.cpp SIISelLowering.cpp @@ -48,7 +49,7 @@ add_llvm_target(R600CodeGen SIRegisterInfo.cpp ) -add_dependencies(LLVMR600CodeGen intrinsics_gen) +add_dependencies(LLVMR600CodeGen AMDGPUCommonTableGen intrinsics_gen) add_subdirectory(InstPrinter) add_subdirectory(TargetInfo) diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp index 8c814e0..fac3c39 100644 --- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp +++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp @@ -178,13 +178,13 @@ void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo, int BankSwizzle = MI->getOperand(OpNo).getImm(); switch (BankSwizzle) { case 1: - O << "BS:VEC_021"; + O << "BS:VEC_021/SCL_122"; break; case 2: - O << "BS:VEC_120"; + O << "BS:VEC_120/SCL_212"; break; case 3: - O << "BS:VEC_102"; + O << "BS:VEC_102/SCL_221"; break; case 4: O << "BS:VEC_201"; diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp index f1c44df..59136f3 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp @@ -70,11 +70,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() { SupportsDebugInformation = true; } -const char* -AMDGPUMCAsmInfo::getDataASDirective(unsigned int Size, unsigned int AS) const { - return 0; -} - const MCSection* AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const { return 0; diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h index 485167b..22afd63 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h @@ -22,7 +22,6 @@ class StringRef; class AMDGPUMCAsmInfo : public MCAsmInfo { public: explicit AMDGPUMCAsmInfo(StringRef &TT); - const char* getDataASDirective(unsigned int Size, unsigned int AS) const; const MCSection* getNonexecutableStackSection(MCContext &CTX) const; }; } // namespace llvm diff --git a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp index 4d6c25c..f470783 100644 --- a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp +++ b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp @@ -99,7 +99,9 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS, } else if (IS_VTX(Desc)) { uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups); uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset - InstWord2 |= 1 << 19; + if (!(STI.getFeatureBits() & AMDGPU::FeatureCaymanISA)) { + InstWord2 |= 1 << 19; // Mega-Fetch bit + } Emit(InstWord01, OS); Emit(InstWord2, OS); diff --git a/lib/Target/R600/Processors.td b/lib/Target/R600/Processors.td index 81f407e..4631c04 100644 --- a/lib/Target/R600/Processors.td +++ b/lib/Target/R600/Processors.td @@ -48,3 +48,6 @@ def : Proc<"pitcairn", SI_Itin, [FeatureSouthernIslands]>; def : Proc<"verde", SI_Itin, [FeatureSouthernIslands]>; def : Proc<"oland", SI_Itin, [FeatureSouthernIslands]>; def : Proc<"hainan", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"bonaire", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"kabini", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"kaveri", SI_Itin, [FeatureSouthernIslands]>; diff 
--git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/R600/R600ControlFlowFinalizer.cpp index ab29d60..715be37 100644 --- a/lib/Target/R600/R600ControlFlowFinalizer.cpp +++ b/lib/Target/R600/R600ControlFlowFinalizer.cpp @@ -172,7 +172,7 @@ private: } void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const { - unsigned LiteralRegs[] = { + static const unsigned LiteralRegs[] = { AMDGPU::ALU_LITERAL_X, AMDGPU::ALU_LITERAL_Y, AMDGPU::ALU_LITERAL_Z, @@ -256,6 +256,7 @@ private: ClauseContent.push_back(MILit); } } + assert(ClauseContent.size() < 128 && "ALU clause is too big"); ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1); return ClauseFile(ClauseHead, ClauseContent); } @@ -276,6 +277,7 @@ private: void EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause, unsigned &CfCount) { + Clause.first->getOperand(0).setImm(0); CounterPropagateAddr(Clause.first, CfCount); MachineBasicBlock *BB = Clause.first->getParent(); BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE)) @@ -345,6 +347,9 @@ public: MaxStack = 1; } std::vector<ClauseFile> FetchClauses, AluClauses; + std::vector<MachineInstr *> LastAlu(1); + std::vector<MachineInstr *> ToPopAfter; + for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) { if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) { @@ -355,6 +360,10 @@ public: } MachineBasicBlock::iterator MI = I; + if (MI->getOpcode() != AMDGPU::ENDIF) + LastAlu.back() = 0; + if (MI->getOpcode() == AMDGPU::CF_ALU) + LastAlu.back() = MI; I++; switch (MI->getOpcode()) { case AMDGPU::CF_ALU_PUSH_BEFORE: @@ -369,7 +378,10 @@ public: case AMDGPU::R600_ExportBuf: case AMDGPU::R600_ExportSwz: case AMDGPU::RAT_WRITE_CACHELESS_32_eg: + case AMDGPU::RAT_WRITE_CACHELESS_64_eg: case AMDGPU::RAT_WRITE_CACHELESS_128_eg: + case AMDGPU::RAT_STORE_DWORD32_cm: + case AMDGPU::RAT_STORE_DWORD64_cm: DEBUG(dbgs() << CfCount << ":"; MI->dump();); CfCount++; break; @@ -400,6 +412,7 @@ public: break; } case AMDGPU::IF_PREDICATE_SET: { + LastAlu.push_back(0); MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP)) .addImm(0) @@ -417,7 +430,7 @@ public: MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_ELSE)) .addImm(0) - .addImm(1); + .addImm(0); DEBUG(dbgs() << CfCount << ":"; MIb->dump();); IfThenElseStack.push_back(MIb); MI->eraseFromParent(); @@ -426,31 +439,31 @@ public: } case AMDGPU::ENDIF: { CurrentStack--; + if (LastAlu.back()) { + ToPopAfter.push_back(LastAlu.back()); + } else { + MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), + getHWInstrDesc(CF_POP)) + .addImm(CfCount + 1) + .addImm(1); + (void)MIb; + DEBUG(dbgs() << CfCount << ":"; MIb->dump();); + CfCount++; + } + MachineInstr *IfOrElseInst = IfThenElseStack.back(); IfThenElseStack.pop_back(); - CounterPropagateAddr(IfOrElseInst, CfCount + 1); - MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), - getHWInstrDesc(CF_POP)) - .addImm(CfCount + 1) - .addImm(1); - (void)MIb; - DEBUG(dbgs() << CfCount << ":"; MIb->dump();); + CounterPropagateAddr(IfOrElseInst, CfCount); + IfOrElseInst->getOperand(1).setImm(1); + LastAlu.pop_back(); MI->eraseFromParent(); - CfCount++; break; } - case AMDGPU::PREDICATED_BREAK: { - CurrentStack--; - CfCount += 3; - BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP)) - .addImm(CfCount) - .addImm(1); + case AMDGPU::BREAK: { + CfCount ++; MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_LOOP_BREAK)) .addImm(0); - BuildMI(MBB, 
MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP)) - .addImm(CfCount) - .addImm(1); LoopStack.back().second.insert(MIb); MI->eraseFromParent(); break; @@ -481,6 +494,21 @@ public: break; } } + for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) { + MachineInstr *Alu = ToPopAfter[i]; + BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu), + TII->get(AMDGPU::CF_ALU_POP_AFTER)) + .addImm(Alu->getOperand(0).getImm()) + .addImm(Alu->getOperand(1).getImm()) + .addImm(Alu->getOperand(2).getImm()) + .addImm(Alu->getOperand(3).getImm()) + .addImm(Alu->getOperand(4).getImm()) + .addImm(Alu->getOperand(5).getImm()) + .addImm(Alu->getOperand(6).getImm()) + .addImm(Alu->getOperand(7).getImm()) + .addImm(Alu->getOperand(8).getImm()); + Alu->eraseFromParent(); + } MFI->StackSize = getHWStackSize(MaxStack, HasPush); } diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h index aebe581..90fc29c 100644 --- a/lib/Target/R600/R600Defines.h +++ b/lib/Target/R600/R600Defines.h @@ -41,7 +41,10 @@ namespace R600_InstFlag { OP1 = (1 << 10), OP2 = (1 << 11), VTX_INST = (1 << 12), - TEX_INST = (1 << 13) + TEX_INST = (1 << 13), + ALU_INST = (1 << 14), + LDS_1A = (1 << 15), + LDS_1A1D = (1 << 16) }; } @@ -57,46 +60,7 @@ namespace R600_InstFlag { #define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST) #define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST) -namespace R600Operands { - enum Ops { - DST, - UPDATE_EXEC_MASK, - UPDATE_PREDICATE, - WRITE, - OMOD, - DST_REL, - CLAMP, - SRC0, - SRC0_NEG, - SRC0_REL, - SRC0_ABS, - SRC0_SEL, - SRC1, - SRC1_NEG, - SRC1_REL, - SRC1_ABS, - SRC1_SEL, - SRC2, - SRC2_NEG, - SRC2_REL, - SRC2_SEL, - LAST, - PRED_SEL, - IMM, - BANK_SWIZZLE, - COUNT - }; - - const static int ALUOpTable[3][R600Operands::COUNT] = { -// W C S S S S S S S S S S S -// R O D L S R R R R S R R R R S R R R L P -// D U I M R A R C C C C R C C C C R C C C A R I -// S E U T O E M C 0 0 0 0 C 1 1 1 1 C 2 2 2 S E M B -// T M P E D L P 0 N R A S 1 N R A S 2 N R S T D M S - {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,11,12,13}, - {0, 1, 2, 3, 4 ,5 ,6 ,7, 8, 9,10,11,12,13,14,15,16,-1,-1,-1,-1,17,18,19,20}, - {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17,18} - }; +namespace OpName { enum VecOps { UPDATE_EXEC_MASK_X, @@ -200,4 +164,6 @@ namespace R600Operands { #define R_028878_SQ_PGM_RESOURCES_GS 0x028878 #define R_0288D4_SQ_PGM_RESOURCES_LS 0x0288d4 +#define R_0288E8_SQ_LDS_ALLOC 0x0288E8 + #endif // R600DEFINES_H_ diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp index ff5ce5a..fac2b47 100644 --- a/lib/Target/R600/R600EmitClauseMarkers.cpp +++ b/lib/Target/R600/R600EmitClauseMarkers.cpp @@ -32,6 +32,7 @@ class R600EmitClauseMarkersPass : public MachineFunctionPass { private: static char ID; const R600InstrInfo *TII; + int Address; unsigned OccupiedDwords(MachineInstr *MI) const { switch (MI->getOpcode()) { @@ -106,7 +107,7 @@ private: bool SubstituteKCacheBank(MachineInstr *MI, std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const { std::vector<std::pair<unsigned, unsigned> > UsedKCache; - const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Consts = + const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts = TII->getSrcs(MI); assert((TII->isALUInstr(MI->getOpcode()) || MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const"); @@ -159,7 +160,7 @@ private: } MachineBasicBlock::iterator - MakeALUClause(MachineBasicBlock &MBB, 
MachineBasicBlock::iterator I) const { + MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) { MachineBasicBlock::iterator ClauseHead = I; std::vector<std::pair<unsigned, unsigned> > KCacheBanks; bool PushBeforeModifier = false; @@ -177,7 +178,14 @@ private: AluInstCount ++; continue; } - if (I->getOpcode() == AMDGPU::KILLGT) { + // XXX: GROUP_BARRIER instructions cannot be in the same ALU clause as: + // + // * KILL or INTERP instructions + // * Any instruction that sets UPDATE_EXEC_MASK or UPDATE_PRED bits + // * Any instruction that uses waterfalling (i.e. INDEX_MODE = AR.X) + // + // XXX: These checks have not been implemented yet. + if (TII->mustBeLastInClause(I->getOpcode())) { I++; break; } @@ -192,20 +200,25 @@ private: unsigned Opcode = PushBeforeModifier ? AMDGPU::CF_ALU_PUSH_BEFORE : AMDGPU::CF_ALU; BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), TII->get(Opcode)) - .addImm(0) // ADDR + // We don't use the ADDR field until the R600ControlFlowFinalizer pass, where + // it is safe to assume it is 0. However, if we always put 0 here, the ifcvt + // pass may assume that identical ALU clause starters at the beginning of the + // true and false branches can be factorized, which is not the case. + .addImm(Address++) // ADDR .addImm(KCacheBanks.empty()?0:KCacheBanks[0].first) // KB0 .addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].first) // KB1 .addImm(KCacheBanks.empty()?0:2) // KM0 .addImm((KCacheBanks.size() < 2)?0:2) // KM1 .addImm(KCacheBanks.empty()?0:KCacheBanks[0].second) // KLINE0 .addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].second) // KLINE1 - .addImm(AluInstCount); // COUNT + .addImm(AluInstCount) // COUNT + .addImm(1); // Enabled return I; } public: R600EmitClauseMarkersPass(TargetMachine &tm) : MachineFunctionPass(ID), - TII(0) { } + TII(0), Address(0) { } virtual bool runOnMachineFunction(MachineFunction &MF) { TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo()); diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp index 40c058f..67b42d7 100644 --- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp +++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp @@ -82,28 +82,13 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { AMDGPU::ZERO); // src1 TII->addFlag(PredSet, 0, MO_FLAG_MASK); if (Flags & MO_FLAG_PUSH) { - TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1); + TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1); } else { - TII->setImmOperand(PredSet, R600Operands::UPDATE_PREDICATE, 1); + TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1); } MI.eraseFromParent(); continue; } - case AMDGPU::BREAK: { - MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I, - AMDGPU::PRED_SETE_INT, - AMDGPU::PREDICATE_BIT, - AMDGPU::ZERO, - AMDGPU::ZERO); - TII->addFlag(PredSet, 0, MO_FLAG_MASK); - TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1); - - BuildMI(MBB, I, MBB.findDebugLoc(I), - TII->get(AMDGPU::PREDICATED_BREAK)) - .addReg(AMDGPU::PREDICATE_BIT); - MI.eraseFromParent(); - continue; - } case AMDGPU::INTERP_PAIR_XY: { MachineInstr *BMI; @@ -208,10 +193,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { // While not strictly necessary from the hw point of view, we force // all src operands of a dot4 inst to belong to the same slot.
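A brief aside on the MakeALUClause hunk above before the dot4 handling continues: the Address++ operand is purely a uniquifier. R600ControlFlowFinalizer later resets the field (EmitALUClause stores 0 into operand 0 before propagating the real counter), but handing every clause header a distinct placeholder keeps if-conversion from factoring the seemingly identical CF_ALU starters of a true and a false branch into one. In miniature, with a hypothetical struct standing in for the real MachineInstr operands:

#include <cassert>

// Two clause headers that agree in every real field still compare unequal
// while they carry distinct placeholder addresses.
struct ClauseHeader { int Addr; int KCacheBank0; int Count; };

int main() {
  int NextAddr = 0;
  ClauseHeader ThenHdr = { NextAddr++, 0, 8 }; // clause in the true branch
  ClauseHeader ElseHdr = { NextAddr++, 0, 8 }; // clause in the false branch
  assert(ThenHdr.Addr != ElseHdr.Addr);        // not mergeable as identical
  return 0;
}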
unsigned Src0 = BMI->getOperand( - TII->getOperandIdx(Opcode, R600Operands::SRC0)) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0)) .getReg(); unsigned Src1 = BMI->getOperand( - TII->getOperandIdx(Opcode, R600Operands::SRC1)) + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1)) .getReg(); (void) Src0; (void) Src1; @@ -258,14 +243,14 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { // T0_W = CUBE T1_Y, T1_Z for (unsigned Chan = 0; Chan < 4; Chan++) { unsigned DstReg = MI.getOperand( - TII->getOperandIdx(MI, R600Operands::DST)).getReg(); + TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg(); unsigned Src0 = MI.getOperand( - TII->getOperandIdx(MI, R600Operands::SRC0)).getReg(); + TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg(); unsigned Src1 = 0; // Determine the correct source registers if (!IsCube) { - int Src1Idx = TII->getOperandIdx(MI, R600Operands::SRC1); + int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1); if (Src1Idx != -1) { Src1 = MI.getOperand(Src1Idx).getReg(); } diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp index 9cedadb..ce6ac89 100644 --- a/lib/Target/R600/R600ISelLowering.cpp +++ b/lib/Target/R600/R600ISelLowering.cpp @@ -16,6 +16,7 @@ #include "R600Defines.h" #include "R600InstrInfo.h" #include "R600MachineFunctionInfo.h" +#include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -26,41 +27,31 @@ using namespace llvm; R600TargetLowering::R600TargetLowering(TargetMachine &TM) : - AMDGPUTargetLowering(TM) { + AMDGPUTargetLowering(TM), + Gen(TM.getSubtarget<AMDGPUSubtarget>().getGeneration()) { addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass); addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass); addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass); addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass); + addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass); + addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass); + computeRegisterProperties(); setOperationAction(ISD::FADD, MVT::v4f32, Expand); + setOperationAction(ISD::FADD, MVT::v2f32, Expand); setOperationAction(ISD::FMUL, MVT::v4f32, Expand); + setOperationAction(ISD::FMUL, MVT::v2f32, Expand); setOperationAction(ISD::FDIV, MVT::v4f32, Expand); + setOperationAction(ISD::FDIV, MVT::v2f32, Expand); setOperationAction(ISD::FSUB, MVT::v4f32, Expand); + setOperationAction(ISD::FSUB, MVT::v2f32, Expand); + + setOperationAction(ISD::FCOS, MVT::f32, Custom); + setOperationAction(ISD::FSIN, MVT::f32, Custom); - setOperationAction(ISD::ADD, MVT::v4i32, Expand); - setOperationAction(ISD::AND, MVT::v4i32, Expand); - setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Expand); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Expand); - setOperationAction(ISD::MUL, MVT::v2i32, Expand); - setOperationAction(ISD::MUL, MVT::v4i32, Expand); - setOperationAction(ISD::OR, MVT::v4i32, Expand); - setOperationAction(ISD::OR, MVT::v2i32, Expand); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Expand); - setOperationAction(ISD::SHL, MVT::v4i32, Expand); - setOperationAction(ISD::SHL, MVT::v2i32, Expand); - setOperationAction(ISD::SRL, MVT::v4i32, Expand); - setOperationAction(ISD::SRL, MVT::v2i32, Expand); - setOperationAction(ISD::SRA, MVT::v4i32, Expand); - setOperationAction(ISD::SRA, MVT::v2i32, Expand); - setOperationAction(ISD::SUB, MVT::v4i32, Expand); - setOperationAction(ISD::SUB, MVT::v2i32, 
Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Expand); - setOperationAction(ISD::UDIV, MVT::v4i32, Expand); - setOperationAction(ISD::UREM, MVT::v4i32, Expand); setOperationAction(ISD::SETCC, MVT::v4i32, Expand); - setOperationAction(ISD::XOR, MVT::v4i32, Expand); - setOperationAction(ISD::XOR, MVT::v2i32, Expand); + setOperationAction(ISD::SETCC, MVT::v2i32, Expand); setOperationAction(ISD::BR_CC, MVT::i32, Expand); setOperationAction(ISD::BR_CC, MVT::f32, Expand); @@ -81,17 +72,14 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : setOperationAction(ISD::SELECT, MVT::i32, Custom); setOperationAction(ISD::SELECT, MVT::f32, Custom); - setOperationAction(ISD::VSELECT, MVT::v4i32, Expand); - setOperationAction(ISD::VSELECT, MVT::v2i32, Expand); - // Legalize loads and stores to the private address space. setOperationAction(ISD::LOAD, MVT::i32, Custom); setOperationAction(ISD::LOAD, MVT::v2i32, Custom); setOperationAction(ISD::LOAD, MVT::v4i32, Custom); - setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Custom); - setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom); + setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom); + setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom); setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Custom); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom); setOperationAction(ISD::STORE, MVT::i8, Custom); setOperationAction(ISD::STORE, MVT::i32, Custom); setOperationAction(ISD::STORE, MVT::v2i32, Custom); @@ -105,6 +93,9 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : setTargetDAGCombine(ISD::FP_TO_SINT); setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); setTargetDAGCombine(ISD::SELECT_CC); + setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); + + setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); setBooleanContents(ZeroOrNegativeOneBooleanContent); setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); @@ -156,6 +147,19 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( break; } + case AMDGPU::LDS_READ_RET: { + MachineInstrBuilder NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), + TII->get(MI->getOpcode()), + AMDGPU::OQAP); + for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) { + NewMI.addOperand(MI->getOperand(i)); + } + TII->buildDefaultInstruction(*BB, I, AMDGPU::MOV, + MI->getOperand(0).getReg(), + AMDGPU::OQAP); + break; + } + case AMDGPU::MOV_IMM_F32: TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(), MI->getOperand(1).getFPImm()->getValueAPF() @@ -168,12 +172,13 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( case AMDGPU::CONST_COPY: { MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV, MI->getOperand(0).getReg(), AMDGPU::ALU_CONST); - TII->setImmOperand(NewMI, R600Operands::SRC0_SEL, + TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel, MI->getOperand(1).getImm()); break; } case AMDGPU::RAT_WRITE_CACHELESS_32_eg: + case AMDGPU::RAT_WRITE_CACHELESS_64_eg: case AMDGPU::RAT_WRITE_CACHELESS_128_eg: { unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 
1 : 0; @@ -474,21 +479,24 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( //===----------------------------------------------------------------------===// SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); switch (Op.getOpcode()) { default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); + case ISD::FCOS: + case ISD::FSIN: return LowerTrig(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::SELECT: return LowerSELECT(Op, DAG); case ISD::STORE: return LowerSTORE(Op, DAG); case ISD::LOAD: return LowerLOAD(Op, DAG); case ISD::FrameIndex: return LowerFrameIndex(Op, DAG); + case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG); case ISD::INTRINSIC_VOID: { SDValue Chain = Op.getOperand(0); unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); switch (IntrinsicID) { case AMDGPUIntrinsic::AMDGPU_store_output: { - MachineFunction &MF = DAG.getMachineFunction(); - R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex); MFI->LiveOuts.push_back(Reg); @@ -727,6 +735,37 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N, } } +SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { + // On hw >= R700, COS/SIN input must be between -1. and 1. + // Thus we lower them to TRIG ( FRACT ( x / 2Pi + 0.5) - 0.5) + EVT VT = Op.getValueType(); + SDValue Arg = Op.getOperand(0); + SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT, + DAG.getNode(ISD::FADD, SDLoc(Op), VT, + DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg, + DAG.getConstantFP(0.15915494309, MVT::f32)), + DAG.getConstantFP(0.5, MVT::f32))); + unsigned TrigNode; + switch (Op.getOpcode()) { + case ISD::FCOS: + TrigNode = AMDGPUISD::COS_HW; + break; + case ISD::FSIN: + TrigNode = AMDGPUISD::SIN_HW; + break; + default: + llvm_unreachable("Wrong trig opcode"); + } + SDValue TrigVal = DAG.getNode(TrigNode, SDLoc(Op), VT, + DAG.getNode(ISD::FADD, SDLoc(Op), VT, FractPart, + DAG.getConstantFP(-0.5, MVT::f32))); + if (Gen >= AMDGPUSubtarget::R700) + return TrigVal; + // On R600 hw, COS/SIN input must be between -Pi and Pi. + return DAG.getNode(ISD::FMUL, SDLoc(Op), VT, TrigVal, + DAG.getConstantFP(3.14159265359, MVT::f32)); +} +
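In LowerTrig above, 0.15915494309 is 1/(2*pi), so the DAG sequence computes FRACT(x/(2*pi) + 0.5) - 0.5: the angle expressed in turns, reduced into [-0.5, 0.5), which is the operand form SIN_HW/COS_HW expect on R700 and later. A host-side sanity check of the reduction (a standalone sketch, not part of the lowering):

#include <cmath>
#include <cstdio>

// Mirrors the DAG above: FRACT(x / 2pi + 0.5) - 0.5, where
// FRACT(v) = v - floor(v). The result always lies in [-0.5, 0.5).
static float reduceToTurns(float X) {
  float V = X * 0.15915494309f + 0.5f; // x / (2*pi) + 0.5
  return (V - std::floor(V)) - 0.5f;   // FRACT(...) - 0.5
}

int main() {
  const float TwoPi = 6.28318530718f;
  const float Tests[] = { 0.0f, 1.0f, 100.0f, -7.5f };
  for (unsigned i = 0; i < sizeof(Tests) / sizeof(*Tests); ++i) {
    float X = Tests[i];
    std::printf("x=%g turns=%g sin(2pi*turns)=%g sin(x)=%g\n", X,
                reduceToTurns(X), std::sin(TwoPi * reduceToTurns(X)),
                std::sin(X));
  }
  return 0;
}

For x = 100 this prints roughly turns=-0.0845, and sin(2*pi*(-0.0845)) agrees with sin(100) to single precision, since sine has period 1 when the argument is measured in turns.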
SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode( ISD::SETCC, @@ -742,7 +781,7 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT, unsigned DwordOffset) const { unsigned ByteOffset = DwordOffset * 4; PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()), - AMDGPUAS::PARAM_I_ADDRESS); + AMDGPUAS::CONSTANT_BUFFER_0); // We shouldn't be using an offset wider than 16-bits for implicit parameters. assert(isInt<16>(ByteOffset)); @@ -1099,7 +1138,13 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const DAG.getConstant(4 * i + ConstantBlock * 16, MVT::i32)); Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr); } - Result = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Slots, 4); + EVT NewVT = MVT::v4i32; + unsigned NumElements = 4; + if (VT.isVector()) { + NewVT = VT; + NumElements = VT.getVectorNumElements(); + } + Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT, Slots, NumElements); } else { // a non-constant ptr can't be folded, keep it as a v4f32 load Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32, @@ -1121,6 +1166,30 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const return DAG.getMergeValues(MergedValues, 2, DL); } + // For most operations, returning SDValue() will result in the node being + // expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so + // we need to manually expand loads that may be legal in some address spaces + // and illegal in others. SEXT loads from CONSTANT_BUFFER_0 are supported + // for compute shaders, since the data is sign extended when it is uploaded + // to the buffer. However, SEXT loads from other address spaces are not + // supported, so we need to expand them here. + if (LoadNode->getExtensionType() == ISD::SEXTLOAD) { + EVT MemVT = LoadNode->getMemoryVT(); + assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8)); + SDValue ShiftAmount = + DAG.getConstant(VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32); + SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr, + LoadNode->getPointerInfo(), MemVT, + LoadNode->isVolatile(), + LoadNode->isNonTemporal(), + LoadNode->getAlignment()); + SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, NewLoad, ShiftAmount); + SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Shl, ShiftAmount); + + SDValue MergedValues[2] = { Sra, Chain }; + return DAG.getMergeValues(MergedValues, 2, DL); + } +
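The shl/sra pair in the expansion above is the classic way to manufacture a sign extension when only a plain extending load is available: shift the narrow value into the top bits, then shift back arithmetically so the sign bit smears down. A scalar model of the trick (a sketch; it assumes the usual arithmetic behavior when right-shifting a negative int32_t, which holds on the targets this backend cares about):

#include <cassert>
#include <cstdint>

// Model of the SEXTLOAD expansion: Shift = VT bits - MemVT bits,
// result = SRA(SHL(loaded, Shift), Shift).
static int32_t signExtendViaShifts(uint32_t Loaded, unsigned MemBits) {
  unsigned Shift = 32 - MemBits;
  return (int32_t)(Loaded << Shift) >> Shift; // SHL then SRA
}

int main() {
  assert(signExtendViaShifts(0xFFu, 8) == -1);        // i8  0xFF -> -1
  assert(signExtendViaShifts(0x7Fu, 8) == 127);       // positive unchanged
  assert(signExtendViaShifts(0x8000u, 16) == -32768); // i16 sign bit set
  return 0;
}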
if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) { return SDValue(); } @@ -1180,31 +1249,27 @@ SDValue R600TargetLowering::LowerFormalArguments( const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { - unsigned ParamOffsetBytes = 36; - Function::const_arg_iterator FuncArg = - DAG.getMachineFunction().getFunction()->arg_begin(); - for (unsigned i = 0, e = Ins.size(); i < e; ++i, ++FuncArg) { - EVT VT = Ins[i].VT; - Type *ArgType = FuncArg->getType(); - unsigned ArgSizeInBits = ArgType->isPointerTy() ? - 32 : ArgType->getPrimitiveSizeInBits(); - unsigned ArgBytes = ArgSizeInBits >> 3; - EVT ArgVT; - if (ArgSizeInBits < VT.getSizeInBits()) { - assert(!ArgType->isFloatTy() && - "Extending floating point arguments not supported yet"); - ArgVT = MVT::getIntegerVT(ArgSizeInBits); - } else { - ArgVT = VT; - } + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), + getTargetMachine(), ArgLocs, *DAG.getContext()); + + AnalyzeFormalArguments(CCInfo, Ins); + + for (unsigned i = 0, e = Ins.size(); i < e; ++i) { + CCValAssign &VA = ArgLocs[i]; + EVT VT = VA.getLocVT(); + PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()), - AMDGPUAS::PARAM_I_ADDRESS); - SDValue Arg = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getRoot(), - DAG.getConstant(ParamOffsetBytes, MVT::i32), - MachinePointerInfo(UndefValue::get(PtrTy)), - ArgVT, false, false, ArgBytes); + AMDGPUAS::CONSTANT_BUFFER_0); + + // The first 36 bytes of the input buffer contain information about + // thread group and global sizes. + SDValue Arg = DAG.getLoad(VT, DL, Chain, + DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32), + MachinePointerInfo(UndefValue::get(PtrTy)), false, + false, false, 4); // 4 is the preferred alignment for + // the CONSTANT memory space. InVals.push_back(Arg); - ParamOffsetBytes += ArgBytes; } return Chain; } @@ -1263,6 +1328,8 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry, VectorEntry.getOperand(3) }; bool isUnmovable[4] = { false, false, false, false }; + for (unsigned i = 0; i < 4; i++) + RemapSwizzle[i] = i; + for (unsigned i = 0; i < 4; i++) { if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) { @@ -1271,8 +1338,7 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry, if (!isUnmovable[Idx]) { // Swap i and Idx std::swap(NewBldVec[Idx], NewBldVec[i]); - RemapSwizzle[Idx] = i; - RemapSwizzle[i] = Idx; + std::swap(RemapSwizzle[RemapSwizzle[Idx]], RemapSwizzle[RemapSwizzle[i]]); } isUnmovable[Idx] = true; } @@ -1355,6 +1421,61 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, break; } + + // insert_vector_elt (build_vector elt0, …, eltN), NewEltIdx, idx + // => build_vector elt0, …, NewEltIdx, …, eltN + case ISD::INSERT_VECTOR_ELT: { + SDValue InVec = N->getOperand(0); + SDValue InVal = N->getOperand(1); + SDValue EltNo = N->getOperand(2); + SDLoc dl(N); + + // If the inserted element is an UNDEF, just use the input vector. + if (InVal.getOpcode() == ISD::UNDEF) + return InVec; + + EVT VT = InVec.getValueType(); + + // If we can't generate a legal BUILD_VECTOR, exit + if (!isOperationLegal(ISD::BUILD_VECTOR, VT)) + return SDValue(); + + // Check that we know which element is being inserted + if (!isa<ConstantSDNode>(EltNo)) + return SDValue(); + unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); + + // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially + // be converted to a BUILD_VECTOR). Fill in the Ops vector with the + // vector elements. + SmallVector<SDValue, 8> Ops; + if (InVec.getOpcode() == ISD::BUILD_VECTOR) { + Ops.append(InVec.getNode()->op_begin(), + InVec.getNode()->op_end()); + } else if (InVec.getOpcode() == ISD::UNDEF) { + unsigned NElts = VT.getVectorNumElements(); + Ops.append(NElts, DAG.getUNDEF(InVal.getValueType())); + } else { + return SDValue(); + } +
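At this point in the combine, Ops holds the operands of the existing (or synthesized) BUILD_VECTOR; the fold itself is then just a slot replacement followed by re-emitting the node, as the code after this note shows. At the value level, the whole transformation is no more than this sketch (hypothetical helper, scalar ints standing in for SDValues):

#include <cassert>
#include <vector>

// insert_vector_elt (build_vector e0..eN), V, Idx
//   ==> build_vector e0, ..., V, ..., eN
static std::vector<int> foldInsertElt(std::vector<int> Ops, int NewVal,
                                      unsigned Idx) {
  if (Idx < Ops.size())
    Ops[Idx] = NewVal; // becomes operand Idx of the new BUILD_VECTOR
  return Ops;
}

int main() {
  std::vector<int> V;
  V.push_back(10); V.push_back(11); V.push_back(12); V.push_back(13);
  std::vector<int> W = foldInsertElt(V, 99, 2);
  assert(W[0] == 10 && W[2] == 99 && W[3] == 13);
  return 0;
}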
+ // Insert the element + if (Elt < Ops.size()) { + // All the operands of BUILD_VECTOR must have the same type; + // we enforce that here. + EVT OpVT = Ops[0].getValueType(); + if (InVal.getValueType() != OpVT) + InVal = OpVT.bitsGT(InVal.getValueType()) ? + DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) : + DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal); + Ops[Elt] = InVal; + } + + // Return the new vector + return DAG.getNode(ISD::BUILD_VECTOR, dl, + VT, &Ops[0], Ops.size()); + } + // Extract_vec (Build_vector) generated by custom lowering // also needs to be custom combined case ISD::EXTRACT_VECTOR_ELT: { diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h index d4ba4c8..a033fcb 100644 --- a/lib/Target/R600/R600ISelLowering.h +++ b/lib/Target/R600/R600ISelLowering.h @@ -40,6 +40,7 @@ public: SmallVectorImpl<SDValue> &InVals) const; virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const; private: + unsigned Gen; /// Each OpenCL kernel has nine implicit parameters that are stored in the /// first nine dwords of a Vertex Buffer. These implicit parameters are /// lowered to load instructions which retrieve the values from the Vertex @@ -60,6 +61,7 @@ private: SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const; SDValue stackPtrToRegIndex(SDValue Ptr, unsigned StackWidth, SelectionDAG &DAG) const; diff --git a/lib/Target/R600/R600InstrFormats.td b/lib/Target/R600/R600InstrFormats.td new file mode 100644 index 0000000..2d72404 --- /dev/null +++ b/lib/Target/R600/R600InstrFormats.td @@ -0,0 +1,490 @@ +//===-- R600InstrFormats.td - R600 Instruction Encodings ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// R600 Instruction format definitions.
+// +//===----------------------------------------------------------------------===// + +class InstR600 <dag outs, dag ins, string asm, list<dag> pattern, + InstrItinClass itin> + : AMDGPUInst <outs, ins, asm, pattern> { + + field bits<64> Inst; + bit TransOnly = 0; + bit Trig = 0; + bit Op3 = 0; + bit isVector = 0; + bits<2> FlagOperandIdx = 0; + bit Op1 = 0; + bit Op2 = 0; + bit LDS_1A = 0; + bit LDS_1A1D = 0; + bit HasNativeOperands = 0; + bit VTXInst = 0; + bit TEXInst = 0; + bit ALUInst = 0; + + let Namespace = "AMDGPU"; + let OutOperandList = outs; + let InOperandList = ins; + let AsmString = asm; + let Pattern = pattern; + let Itinerary = itin; + + let TSFlags{0} = TransOnly; + let TSFlags{4} = Trig; + let TSFlags{5} = Op3; + + // Vector instructions are instructions that must fill all slots in an + // instruction group + let TSFlags{6} = isVector; + let TSFlags{8-7} = FlagOperandIdx; + let TSFlags{9} = HasNativeOperands; + let TSFlags{10} = Op1; + let TSFlags{11} = Op2; + let TSFlags{12} = VTXInst; + let TSFlags{13} = TEXInst; + let TSFlags{14} = ALUInst; + let TSFlags{15} = LDS_1A; + let TSFlags{16} = LDS_1A1D; +} + +//===----------------------------------------------------------------------===// +// ALU instructions +//===----------------------------------------------------------------------===// + +class R600_ALU_LDS_Word0 { + field bits<32> Word0; + + bits<11> src0; + bits<1> src0_rel; + bits<11> src1; + bits<1> src1_rel; + bits<3> index_mode = 0; + bits<2> pred_sel; + bits<1> last; + + bits<9> src0_sel = src0{8-0}; + bits<2> src0_chan = src0{10-9}; + bits<9> src1_sel = src1{8-0}; + bits<2> src1_chan = src1{10-9}; + + let Word0{8-0} = src0_sel; + let Word0{9} = src0_rel; + let Word0{11-10} = src0_chan; + let Word0{21-13} = src1_sel; + let Word0{22} = src1_rel; + let Word0{24-23} = src1_chan; + let Word0{28-26} = index_mode; + let Word0{30-29} = pred_sel; + let Word0{31} = last; +} + +class R600ALU_Word0 : R600_ALU_LDS_Word0 { + + bits<1> src0_neg; + bits<1> src1_neg; + + let Word0{12} = src0_neg; + let Word0{25} = src1_neg; +} + +class R600ALU_Word1 { + field bits<32> Word1; + + bits<11> dst; + bits<3> bank_swizzle; + bits<1> dst_rel; + bits<1> clamp; + + bits<7> dst_sel = dst{6-0}; + bits<2> dst_chan = dst{10-9}; + + let Word1{20-18} = bank_swizzle; + let Word1{27-21} = dst_sel; + let Word1{28} = dst_rel; + let Word1{30-29} = dst_chan; + let Word1{31} = clamp; +} + +class R600ALU_Word1_OP2 <bits<11> alu_inst> : R600ALU_Word1{ + + bits<1> src0_abs; + bits<1> src1_abs; + bits<1> update_exec_mask; + bits<1> update_pred; + bits<1> write; + bits<2> omod; + + let Word1{0} = src0_abs; + let Word1{1} = src1_abs; + let Word1{2} = update_exec_mask; + let Word1{3} = update_pred; + let Word1{4} = write; + let Word1{6-5} = omod; + let Word1{17-7} = alu_inst; +} + +class R600ALU_Word1_OP3 <bits<5> alu_inst> : R600ALU_Word1{ + + bits<11> src2; + bits<1> src2_rel; + bits<1> src2_neg; + + bits<9> src2_sel = src2{8-0}; + bits<2> src2_chan = src2{10-9}; + + let Word1{8-0} = src2_sel; + let Word1{9} = src2_rel; + let Word1{11-10} = src2_chan; + let Word1{12} = src2_neg; + let Word1{17-13} = alu_inst; +} + +class R600LDS_Word1 { + field bits<32> Word1; + + bits<11> src2; + bits<9> src2_sel = src2{8-0}; + bits<2> src2_chan = src2{10-9}; + bits<1> src2_rel; + // offset specifies the stride offset to the second set of data to be read + // from. This is a dword offset. 
+ bits<5> alu_inst = 17; // OP3_INST_LDS_IDX_OP + bits<3> bank_swizzle; + bits<6> lds_op; + bits<2> dst_chan = 0; + + let Word1{8-0} = src2_sel; + let Word1{9} = src2_rel; + let Word1{11-10} = src2_chan; + let Word1{17-13} = alu_inst; + let Word1{20-18} = bank_swizzle; + let Word1{26-21} = lds_op; + let Word1{30-29} = dst_chan; +} + + +/* +XXX: R600 subtarget uses a slightly different encoding than the other +subtargets. We currently handle this in R600MCCodeEmitter, but we may +want to use these instruction classes in the future. + +class R600ALU_Word1_OP2_r600 : R600ALU_Word1_OP2 { + + bits<1> fog_merge; + bits<10> alu_inst; + + let Inst{37} = fog_merge; + let Inst{39-38} = omod; + let Inst{49-40} = alu_inst; +} + +class R600ALU_Word1_OP2_r700 : R600ALU_Word1_OP2 { + + bits<11> alu_inst; + + let Inst{38-37} = omod; + let Inst{49-39} = alu_inst; +} +*/ + +//===----------------------------------------------------------------------===// +// Vertex Fetch instructions +//===----------------------------------------------------------------------===// + +class VTX_WORD0 { + field bits<32> Word0; + bits<7> src_gpr; + bits<5> VC_INST; + bits<2> FETCH_TYPE; + bits<1> FETCH_WHOLE_QUAD; + bits<8> BUFFER_ID; + bits<1> SRC_REL; + bits<2> SRC_SEL_X; + + let Word0{4-0} = VC_INST; + let Word0{6-5} = FETCH_TYPE; + let Word0{7} = FETCH_WHOLE_QUAD; + let Word0{15-8} = BUFFER_ID; + let Word0{22-16} = src_gpr; + let Word0{23} = SRC_REL; + let Word0{25-24} = SRC_SEL_X; +} + +class VTX_WORD0_eg : VTX_WORD0 { + + bits<6> MEGA_FETCH_COUNT; + + let Word0{31-26} = MEGA_FETCH_COUNT; +} + +class VTX_WORD0_cm : VTX_WORD0 { + + bits<2> SRC_SEL_Y; + bits<2> STRUCTURED_READ; + bits<1> LDS_REQ; + bits<1> COALESCED_READ; + + let Word0{27-26} = SRC_SEL_Y; + let Word0{29-28} = STRUCTURED_READ; + let Word0{30} = LDS_REQ; + let Word0{31} = COALESCED_READ; +} + +class VTX_WORD1_GPR { + field bits<32> Word1; + bits<7> dst_gpr; + bits<1> DST_REL; + bits<3> DST_SEL_X; + bits<3> DST_SEL_Y; + bits<3> DST_SEL_Z; + bits<3> DST_SEL_W; + bits<1> USE_CONST_FIELDS; + bits<6> DATA_FORMAT; + bits<2> NUM_FORMAT_ALL; + bits<1> FORMAT_COMP_ALL; + bits<1> SRF_MODE_ALL; + + let Word1{6-0} = dst_gpr; + let Word1{7} = DST_REL; + let Word1{8} = 0; // Reserved + let Word1{11-9} = DST_SEL_X; + let Word1{14-12} = DST_SEL_Y; + let Word1{17-15} = DST_SEL_Z; + let Word1{20-18} = DST_SEL_W; + let Word1{21} = USE_CONST_FIELDS; + let Word1{27-22} = DATA_FORMAT; + let Word1{29-28} = NUM_FORMAT_ALL; + let Word1{30} = FORMAT_COMP_ALL; + let Word1{31} = SRF_MODE_ALL; +} + +//===----------------------------------------------------------------------===// +// Texture fetch instructions +//===----------------------------------------------------------------------===// + +class TEX_WORD0 { + field bits<32> Word0; + + bits<5> TEX_INST; + bits<2> INST_MOD; + bits<1> FETCH_WHOLE_QUAD; + bits<8> RESOURCE_ID; + bits<7> SRC_GPR; + bits<1> SRC_REL; + bits<1> ALT_CONST; + bits<2> RESOURCE_INDEX_MODE; + bits<2> SAMPLER_INDEX_MODE; + + let Word0{4-0} = TEX_INST; + let Word0{6-5} = INST_MOD; + let Word0{7} = FETCH_WHOLE_QUAD; + let Word0{15-8} = RESOURCE_ID; + let Word0{22-16} = SRC_GPR; + let Word0{23} = SRC_REL; + let Word0{24} = ALT_CONST; + let Word0{26-25} = RESOURCE_INDEX_MODE; + let Word0{28-27} = SAMPLER_INDEX_MODE; +} + +class TEX_WORD1 { + field bits<32> Word1; + + bits<7> DST_GPR; + bits<1> DST_REL; + bits<3> DST_SEL_X; + bits<3> DST_SEL_Y; + bits<3> DST_SEL_Z; + bits<3> DST_SEL_W; + bits<7> LOD_BIAS; + bits<1> COORD_TYPE_X; + bits<1> COORD_TYPE_Y; + bits<1> 
COORD_TYPE_Z; + bits<1> COORD_TYPE_W; + + let Word1{6-0} = DST_GPR; + let Word1{7} = DST_REL; + let Word1{11-9} = DST_SEL_X; + let Word1{14-12} = DST_SEL_Y; + let Word1{17-15} = DST_SEL_Z; + let Word1{20-18} = DST_SEL_W; + let Word1{27-21} = LOD_BIAS; + let Word1{28} = COORD_TYPE_X; + let Word1{29} = COORD_TYPE_Y; + let Word1{30} = COORD_TYPE_Z; + let Word1{31} = COORD_TYPE_W; +} + +class TEX_WORD2 { + field bits<32> Word2; + + bits<5> OFFSET_X; + bits<5> OFFSET_Y; + bits<5> OFFSET_Z; + bits<5> SAMPLER_ID; + bits<3> SRC_SEL_X; + bits<3> SRC_SEL_Y; + bits<3> SRC_SEL_Z; + bits<3> SRC_SEL_W; + + let Word2{4-0} = OFFSET_X; + let Word2{9-5} = OFFSET_Y; + let Word2{14-10} = OFFSET_Z; + let Word2{19-15} = SAMPLER_ID; + let Word2{22-20} = SRC_SEL_X; + let Word2{25-23} = SRC_SEL_Y; + let Word2{28-26} = SRC_SEL_Z; + let Word2{31-29} = SRC_SEL_W; +} + +//===----------------------------------------------------------------------===// +// Control Flow Instructions +//===----------------------------------------------------------------------===// + +class CF_WORD1_R600 { + field bits<32> Word1; + + bits<3> POP_COUNT; + bits<5> CF_CONST; + bits<2> COND; + bits<3> COUNT; + bits<6> CALL_COUNT; + bits<1> COUNT_3; + bits<1> END_OF_PROGRAM; + bits<1> VALID_PIXEL_MODE; + bits<7> CF_INST; + bits<1> WHOLE_QUAD_MODE; + bits<1> BARRIER; + + let Word1{2-0} = POP_COUNT; + let Word1{7-3} = CF_CONST; + let Word1{9-8} = COND; + let Word1{12-10} = COUNT; + let Word1{18-13} = CALL_COUNT; + let Word1{19} = COUNT_3; + let Word1{21} = END_OF_PROGRAM; + let Word1{22} = VALID_PIXEL_MODE; + let Word1{29-23} = CF_INST; + let Word1{30} = WHOLE_QUAD_MODE; + let Word1{31} = BARRIER; +} + +class CF_WORD0_EG { + field bits<32> Word0; + + bits<24> ADDR; + bits<3> JUMPTABLE_SEL; + + let Word0{23-0} = ADDR; + let Word0{26-24} = JUMPTABLE_SEL; +} + +class CF_WORD1_EG { + field bits<32> Word1; + + bits<3> POP_COUNT; + bits<5> CF_CONST; + bits<2> COND; + bits<6> COUNT; + bits<1> VALID_PIXEL_MODE; + bits<1> END_OF_PROGRAM; + bits<8> CF_INST; + bits<1> BARRIER; + + let Word1{2-0} = POP_COUNT; + let Word1{7-3} = CF_CONST; + let Word1{9-8} = COND; + let Word1{15-10} = COUNT; + let Word1{20} = VALID_PIXEL_MODE; + let Word1{21} = END_OF_PROGRAM; + let Word1{29-22} = CF_INST; + let Word1{31} = BARRIER; +} + +class CF_ALU_WORD0 { + field bits<32> Word0; + + bits<22> ADDR; + bits<4> KCACHE_BANK0; + bits<4> KCACHE_BANK1; + bits<2> KCACHE_MODE0; + + let Word0{21-0} = ADDR; + let Word0{25-22} = KCACHE_BANK0; + let Word0{29-26} = KCACHE_BANK1; + let Word0{31-30} = KCACHE_MODE0; +} + +class CF_ALU_WORD1 { + field bits<32> Word1; + + bits<2> KCACHE_MODE1; + bits<8> KCACHE_ADDR0; + bits<8> KCACHE_ADDR1; + bits<7> COUNT; + bits<1> ALT_CONST; + bits<4> CF_INST; + bits<1> WHOLE_QUAD_MODE; + bits<1> BARRIER; + + let Word1{1-0} = KCACHE_MODE1; + let Word1{9-2} = KCACHE_ADDR0; + let Word1{17-10} = KCACHE_ADDR1; + let Word1{24-18} = COUNT; + let Word1{25} = ALT_CONST; + let Word1{29-26} = CF_INST; + let Word1{30} = WHOLE_QUAD_MODE; + let Word1{31} = BARRIER; +} + +class CF_ALLOC_EXPORT_WORD0_RAT { + field bits<32> Word0; + + bits<4> rat_id; + bits<6> rat_inst; + bits<2> rim; + bits<2> type; + bits<7> rw_gpr; + bits<1> rw_rel; + bits<7> index_gpr; + bits<2> elem_size; + + let Word0{3-0} = rat_id; + let Word0{9-4} = rat_inst; + let Word0{10} = 0; // Reserved + let Word0{12-11} = rim; + let Word0{14-13} = type; + let Word0{21-15} = rw_gpr; + let Word0{22} = rw_rel; + let Word0{29-23} = index_gpr; + let Word0{31-30} = elem_size; +} + +class CF_ALLOC_EXPORT_WORD1_BUF 
{ + field bits<32> Word1; + + bits<12> array_size; + bits<4> comp_mask; + bits<4> burst_count; + bits<1> vpm; + bits<1> eop; + bits<8> cf_inst; + bits<1> mark; + bits<1> barrier; + + let Word1{11-0} = array_size; + let Word1{15-12} = comp_mask; + let Word1{19-16} = burst_count; + let Word1{20} = vpm; + let Word1{21} = eop; + let Word1{29-22} = cf_inst; + let Word1{30} = mark; + let Word1{31} = barrier; +} diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp index 4f5cfcd..4e7eff9 100644 --- a/lib/Target/R600/R600InstrInfo.cpp +++ b/lib/Target/R600/R600InstrInfo.cpp @@ -51,9 +51,17 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, DebugLoc DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const { - if (AMDGPU::R600_Reg128RegClass.contains(DestReg) - && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) { - for (unsigned I = 0; I < 4; I++) { + unsigned VectorComponents = 0; + if (AMDGPU::R600_Reg128RegClass.contains(DestReg) && + AMDGPU::R600_Reg128RegClass.contains(SrcReg)) { + VectorComponents = 4; + } else if(AMDGPU::R600_Reg64RegClass.contains(DestReg) && + AMDGPU::R600_Reg64RegClass.contains(SrcReg)) { + VectorComponents = 2; + } + + if (VectorComponents > 0) { + for (unsigned I = 0; I < VectorComponents; I++) { unsigned SubRegIndex = RI.getSubRegFromChannel(I); buildDefaultInstruction(MBB, MI, AMDGPU::MOV, RI.getSubReg(DestReg, SubRegIndex), @@ -62,14 +70,9 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB, RegState::Define | RegState::Implicit); } } else { - - // We can't copy vec4 registers - assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg) - && !AMDGPU::R600_Reg128RegClass.contains(SrcReg)); - MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV, DestReg, SrcReg); - NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0)) + NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0)) .setIsKill(KillSrc); } } @@ -114,9 +117,7 @@ bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const { } bool R600InstrInfo::isReductionOp(unsigned Opcode) const { - switch(Opcode) { - default: return false; - } + return false; } bool R600InstrInfo::isCubeOp(unsigned Opcode) const { @@ -133,11 +134,24 @@ bool R600InstrInfo::isCubeOp(unsigned Opcode) const { bool R600InstrInfo::isALUInstr(unsigned Opcode) const { unsigned TargetFlags = get(Opcode).TSFlags; + return (TargetFlags & R600_InstFlag::ALU_INST); +} + +bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const { + unsigned TargetFlags = get(Opcode).TSFlags; + return ((TargetFlags & R600_InstFlag::OP1) | (TargetFlags & R600_InstFlag::OP2) | (TargetFlags & R600_InstFlag::OP3)); } +bool R600InstrInfo::isLDSInstr(unsigned Opcode) const { + unsigned TargetFlags = get(Opcode).TSFlags; + + return ((TargetFlags & R600_InstFlag::LDS_1A) | + (TargetFlags & R600_InstFlag::LDS_1A1D)); +} + bool R600InstrInfo::isTransOnly(unsigned Opcode) const { return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY); } @@ -165,27 +179,75 @@ bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const { usesTextureCache(MI->getOpcode()); } +bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const { + switch (Opcode) { + case AMDGPU::KILLGT: + case AMDGPU::GROUP_BARRIER: + return true; + default: + return false; + } +} + +int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const { + static const unsigned OpTable[] = { + AMDGPU::OpName::src0, + AMDGPU::OpName::src1, + AMDGPU::OpName::src2 + }; + + assert (SrcNum < 3); + return getOperandIdx(Opcode, 
OpTable[SrcNum]); +} + +#define SRC_SEL_ROWS 11 +int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const { + static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = { + {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel}, + {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel}, + {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel}, + {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X}, + {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y}, + {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z}, + {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W}, + {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X}, + {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y}, + {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z}, + {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W} + }; + + for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) { + if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) { + return getOperandIdx(Opcode, SrcSelTable[i][1]); + } + } + return -1; +} +#undef SRC_SEL_ROWS + SmallVector<std::pair<MachineOperand *, int64_t>, 3> R600InstrInfo::getSrcs(MachineInstr *MI) const { SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result; if (MI->getOpcode() == AMDGPU::DOT_4) { - static const R600Operands::VecOps OpTable[8][2] = { - {R600Operands::SRC0_X, R600Operands::SRC0_SEL_X}, - {R600Operands::SRC0_Y, R600Operands::SRC0_SEL_Y}, - {R600Operands::SRC0_Z, R600Operands::SRC0_SEL_Z}, - {R600Operands::SRC0_W, R600Operands::SRC0_SEL_W}, - {R600Operands::SRC1_X, R600Operands::SRC1_SEL_X}, - {R600Operands::SRC1_Y, R600Operands::SRC1_SEL_Y}, - {R600Operands::SRC1_Z, R600Operands::SRC1_SEL_Z}, - {R600Operands::SRC1_W, R600Operands::SRC1_SEL_W}, + static const unsigned OpTable[8][2] = { + {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X}, + {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y}, + {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z}, + {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W}, + {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X}, + {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y}, + {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z}, + {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}, }; for (unsigned j = 0; j < 8; j++) { - MachineOperand &MO = MI->getOperand(OpTable[j][0] + 1); + MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(), + OpTable[j][0])); unsigned Reg = MO.getReg(); if (Reg == AMDGPU::ALU_CONST) { - unsigned Sel = MI->getOperand(OpTable[j][1] + 1).getImm(); + unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(), + OpTable[j][1])).getImm(); Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel)); continue; } @@ -194,10 +256,10 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const { return Result; } - static const R600Operands::Ops OpTable[3][2] = { - {R600Operands::SRC0, R600Operands::SRC0_SEL}, - {R600Operands::SRC1, R600Operands::SRC1_SEL}, - {R600Operands::SRC2, R600Operands::SRC2_SEL}, + static const unsigned OpTable[3][2] = { + {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel}, + {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel}, + {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel}, }; for (unsigned j = 0; j < 3; j++) { @@ -214,7 +276,7 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const { } if (Reg == AMDGPU::ALU_LITERAL_X) { unsigned Imm = MI->getOperand( - getOperandIdx(MI->getOpcode(), R600Operands::IMM)).getImm(); + getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm(); Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm)); continue; } @@ -225,8 +287,9 @@ 
R600InstrInfo::getSrcs(MachineInstr *MI) const {
std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
- const DenseMap<unsigned, unsigned> &PV)
- const {
+ const DenseMap<unsigned, unsigned> &PV,
+ unsigned &ConstCount) const {
+ ConstCount = 0;
const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
const std::pair<int, unsigned> DummyPair(-1, 0);
std::vector<std::pair<int, unsigned> > Result;
@@ -234,15 +297,20 @@ R600InstrInfo::ExtractSrcs(MachineInstr *MI,
for (unsigned n = Srcs.size(); i < n; ++i) {
unsigned Reg = Srcs[i].first->getReg();
unsigned Index = RI.getEncodingValue(Reg) & 0xff;
- unsigned Chan = RI.getHWRegChan(Reg);
- if (Index > 127) {
- Result.push_back(DummyPair);
+ if (Reg == AMDGPU::OQAP) {
+ Result.push_back(std::pair<int, unsigned>(Index, 0));
+ }
+ if (PV.find(Reg) != PV.end()) {
+ // 255 is used to tell it's a PS/PV reg
+ Result.push_back(std::pair<int, unsigned>(255, 0));
continue;
}
- if (PV.find(Index) != PV.end()) {
+ if (Index > 127) {
+ ConstCount++;
Result.push_back(DummyPair);
continue;
}
+ unsigned Chan = RI.getHWRegChan(Reg);
Result.push_back(std::pair<int, unsigned>(Index, Chan));
}
for (; i < 3; ++i)
@@ -254,15 +322,15 @@ static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
R600InstrInfo::BankSwizzle Swz) {
switch (Swz) {
- case R600InstrInfo::ALU_VEC_012:
+ case R600InstrInfo::ALU_VEC_012_SCL_210:
break;
- case R600InstrInfo::ALU_VEC_021:
+ case R600InstrInfo::ALU_VEC_021_SCL_122:
std::swap(Src[1], Src[2]);
break;
- case R600InstrInfo::ALU_VEC_102:
+ case R600InstrInfo::ALU_VEC_102_SCL_221:
std::swap(Src[0], Src[1]);
break;
- case R600InstrInfo::ALU_VEC_120:
+ case R600InstrInfo::ALU_VEC_120_SCL_212:
std::swap(Src[0], Src[1]);
std::swap(Src[0], Src[2]);
break;
@@ -277,66 +345,182 @@ Swizzle(std::vector<std::pair<int, unsigned> > Src,
return Src;
}
-static bool
-isLegal(const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+static unsigned
+getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
+ switch (Swz) {
+ case R600InstrInfo::ALU_VEC_012_SCL_210: {
+ unsigned Cycles[3] = { 2, 1, 0};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_021_SCL_122: {
+ unsigned Cycles[3] = { 1, 2, 2};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_120_SCL_212: {
+ unsigned Cycles[3] = { 2, 1, 2};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_102_SCL_221: {
+ unsigned Cycles[3] = { 2, 2, 1};
+ return Cycles[Op];
+ }
+ default:
+ llvm_unreachable("Wrong Swizzle for Trans Slot");
+ return 0;
+ }
+}
+
+/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
+/// in the same Instruction Group while meeting read port limitations given a
+/// Swz swizzle sequence.
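+/// (Sketch of the model used below: each source occupies a (channel, cycle)
+/// read-port slot in the Vector[4][3] table; two different register indices
+/// that need the same slot conflict, and the returned count is the number of
+/// MIs that pack before the first conflict.)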
+unsigned R600InstrInfo::isLegalUpTo( + const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs, const std::vector<R600InstrInfo::BankSwizzle> &Swz, - unsigned CheckedSize) { + const std::vector<std::pair<int, unsigned> > &TransSrcs, + R600InstrInfo::BankSwizzle TransSwz) const { int Vector[4][3]; memset(Vector, -1, sizeof(Vector)); - for (unsigned i = 0; i < CheckedSize; i++) { + for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) { const std::vector<std::pair<int, unsigned> > &Srcs = Swizzle(IGSrcs[i], Swz[i]); for (unsigned j = 0; j < 3; j++) { const std::pair<int, unsigned> &Src = Srcs[j]; - if (Src.first < 0) + if (Src.first < 0 || Src.first == 255) continue; + if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) { + if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 && + Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) { + // The value from output queue A (denoted by register OQAP) can + // only be fetched during the first cycle. + return false; + } + // OQAP does not count towards the normal read port restrictions + continue; + } if (Vector[Src.second][j] < 0) Vector[Src.second][j] = Src.first; if (Vector[Src.second][j] != Src.first) - return false; + return i; } } - return true; + // Now check Trans Alu + for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) { + const std::pair<int, unsigned> &Src = TransSrcs[i]; + unsigned Cycle = getTransSwizzle(TransSwz, i); + if (Src.first < 0) + continue; + if (Src.first == 255) + continue; + if (Vector[Src.second][Cycle] < 0) + Vector[Src.second][Cycle] = Src.first; + if (Vector[Src.second][Cycle] != Src.first) + return IGSrcs.size() - 1; + } + return IGSrcs.size(); } -static bool recursiveFitsFPLimitation( -const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs, -std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate, -unsigned Depth = 0) { - if (!isLegal(IGSrcs, SwzCandidate, Depth)) +/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next +/// (in lexicographic term) swizzle sequence assuming that all swizzles after +/// Idx can be skipped +static bool +NextPossibleSolution( + std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate, + unsigned Idx) { + assert(Idx < SwzCandidate.size()); + int ResetIdx = Idx; + while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210) + ResetIdx --; + for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) { + SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210; + } + if (ResetIdx == -1) return false; - if (IGSrcs.size() == Depth) - return true; - unsigned i = SwzCandidate[Depth]; - for (; i < 6; i++) { - SwzCandidate[Depth] = (R600InstrInfo::BankSwizzle) i; - if (recursiveFitsFPLimitation(IGSrcs, SwzCandidate, Depth + 1)) + int NextSwizzle = SwzCandidate[ResetIdx] + 1; + SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle; + return true; +} + +/// Enumerate all possible Swizzle sequence to find one that can meet all +/// read port requirements. 
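+/// The candidates are visited in lexicographic order: isLegalUpTo reports the
+/// first MI whose sources cannot be satisfied, and NextPossibleSolution then
+/// advances past every sequence sharing that failing prefix in one step.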
+bool R600InstrInfo::FindSwizzleForVectorSlot( + const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs, + std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate, + const std::vector<std::pair<int, unsigned> > &TransSrcs, + R600InstrInfo::BankSwizzle TransSwz) const { + unsigned ValidUpTo = 0; + do { + ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz); + if (ValidUpTo == IGSrcs.size()) return true; - } - SwzCandidate[Depth] = R600InstrInfo::ALU_VEC_012; + } while (NextPossibleSolution(SwzCandidate, ValidUpTo)); return false; } +/// Instructions in Trans slot can't read gpr at cycle 0 if they also read +/// a const, and can't read a gpr at cycle 1 if they read 2 const. +static bool +isConstCompatible(R600InstrInfo::BankSwizzle TransSwz, + const std::vector<std::pair<int, unsigned> > &TransOps, + unsigned ConstCount) { + for (unsigned i = 0, e = TransOps.size(); i < e; ++i) { + const std::pair<int, unsigned> &Src = TransOps[i]; + unsigned Cycle = getTransSwizzle(TransSwz, i); + if (Src.first < 0) + continue; + if (ConstCount > 0 && Cycle == 0) + return false; + if (ConstCount > 1 && Cycle == 1) + return false; + } + return true; +} + bool R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG, - const DenseMap<unsigned, unsigned> &PV, - std::vector<BankSwizzle> &ValidSwizzle) + const DenseMap<unsigned, unsigned> &PV, + std::vector<BankSwizzle> &ValidSwizzle, + bool isLastAluTrans) const { //Todo : support shared src0 - src1 operand std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs; ValidSwizzle.clear(); + unsigned ConstCount; + BankSwizzle TransBS = ALU_VEC_012_SCL_210; for (unsigned i = 0, e = IG.size(); i < e; ++i) { - IGSrcs.push_back(ExtractSrcs(IG[i], PV)); + IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount)); unsigned Op = getOperandIdx(IG[i]->getOpcode(), - R600Operands::BANK_SWIZZLE); + AMDGPU::OpName::bank_swizzle); ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle) IG[i]->getOperand(Op).getImm()); } - bool Result = recursiveFitsFPLimitation(IGSrcs, ValidSwizzle); - if (!Result) - return false; - return true; + std::vector<std::pair<int, unsigned> > TransOps; + if (!isLastAluTrans) + return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS); + + TransOps = IGSrcs.back(); + IGSrcs.pop_back(); + ValidSwizzle.pop_back(); + + static const R600InstrInfo::BankSwizzle TransSwz[] = { + ALU_VEC_012_SCL_210, + ALU_VEC_021_SCL_122, + ALU_VEC_120_SCL_212, + ALU_VEC_102_SCL_221 + }; + for (unsigned i = 0; i < 4; i++) { + TransBS = TransSwz[i]; + if (!isConstCompatible(TransBS, TransOps, ConstCount)) + continue; + bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, + TransBS); + if (Result) { + ValidSwizzle.push_back(TransBS); + return true; + } + } + + return false; } @@ -366,18 +550,24 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts) } bool -R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const { +R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs) + const { std::vector<unsigned> Consts; + SmallSet<int64_t, 4> Literals; for (unsigned i = 0, n = MIs.size(); i < n; i++) { MachineInstr *MI = MIs[i]; if (!isALUInstr(MI->getOpcode())) continue; - const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Srcs = + const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs = getSrcs(MI); for (unsigned j = 0, e = Srcs.size(); j < e; j++) { std::pair<MachineOperand *, unsigned> Src = Srcs[j]; + if (Src.first->getReg() == 
AMDGPU::ALU_LITERAL_X)
+ Literals.insert(Src.second);
+ if (Literals.size() > 4)
+ return false;
if (Src.first->getReg() == AMDGPU::ALU_CONST)
Consts.push_back(Src.second);
if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
@@ -503,6 +693,17 @@ int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
};
}
+static
+MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
+ for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
+ It != E; ++It) {
+ if (It->getOpcode() == AMDGPU::CF_ALU ||
+ It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
+ return llvm::prior(It.base());
+ }
+ return MBB.end();
+}
+
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
@@ -524,6 +725,11 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
.addMBB(TBB)
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ return 1;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
return 1;
}
} else {
@@ -535,6 +741,11 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
.addMBB(TBB)
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ return 2;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
return 2;
}
}
@@ -558,6 +769,11 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ break;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU));
break;
}
case AMDGPU::JUMP:
@@ -578,6 +794,11 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ break;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU));
break;
}
case AMDGPU::JUMP:
@@ -612,6 +833,15 @@ R600InstrInfo::isPredicable(MachineInstr *MI) const {
if (MI->getOpcode() == AMDGPU::KILLGT) {
return false;
+ } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
+ // If the clause starts in the middle of the MBB, the MBB contains more
+ // than one clause and we are unable to predicate several clauses.
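+ // (Operands 3 and 4 are presumably the CF_ALU kcache mode fields;
+ // predicating a clause that uses the kcache would require merging
+ // kcache setups, which is the unsupported KC merging noted below.)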
+ if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI)) + return false; + // TODO: We don't support KC merging atm + if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0) + return false; + return true; } else if (isVector(*MI)) { return false; } else { @@ -707,6 +937,11 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI, const SmallVectorImpl<MachineOperand> &Pred) const { int PIdx = MI->findFirstPredOperandIdx(); + if (MI->getOpcode() == AMDGPU::CF_ALU) { + MI->getOperand(8).setImm(0); + return true; + } + if (PIdx != -1) { MachineOperand &PMO = MI->getOperand(PIdx); PMO.setReg(Pred[2].getReg()); @@ -812,13 +1047,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB, unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg, AMDGPU::AR_X, OffsetReg); - setImmOperand(MOVA, R600Operands::WRITE, 0); + setImmOperand(MOVA, AMDGPU::OpName::write, 0); MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV, AddrReg, ValueReg) .addReg(AMDGPU::AR_X, RegState::Implicit | RegState::Kill); - setImmOperand(Mov, R600Operands::DST_REL, 1); + setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1); return Mov; } @@ -830,13 +1065,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB, MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg, AMDGPU::AR_X, OffsetReg); - setImmOperand(MOVA, R600Operands::WRITE, 0); + setImmOperand(MOVA, AMDGPU::OpName::write, 0); MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV, ValueReg, AddrReg) .addReg(AMDGPU::AR_X, RegState::Implicit | RegState::Kill); - setImmOperand(Mov, R600Operands::SRC0_REL, 1); + setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1); return Mov; } @@ -892,7 +1127,7 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB #define OPERAND_CASE(Label) \ case Label: { \ - static const R600Operands::VecOps Ops[] = \ + static const unsigned Ops[] = \ { \ Label##_X, \ Label##_Y, \ @@ -902,26 +1137,25 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB return Ops[Slot]; \ } -static R600Operands::VecOps -getSlotedOps(R600Operands::Ops Op, unsigned Slot) { +static unsigned getSlotedOps(unsigned Op, unsigned Slot) { switch (Op) { - OPERAND_CASE(R600Operands::UPDATE_EXEC_MASK) - OPERAND_CASE(R600Operands::UPDATE_PREDICATE) - OPERAND_CASE(R600Operands::WRITE) - OPERAND_CASE(R600Operands::OMOD) - OPERAND_CASE(R600Operands::DST_REL) - OPERAND_CASE(R600Operands::CLAMP) - OPERAND_CASE(R600Operands::SRC0) - OPERAND_CASE(R600Operands::SRC0_NEG) - OPERAND_CASE(R600Operands::SRC0_REL) - OPERAND_CASE(R600Operands::SRC0_ABS) - OPERAND_CASE(R600Operands::SRC0_SEL) - OPERAND_CASE(R600Operands::SRC1) - OPERAND_CASE(R600Operands::SRC1_NEG) - OPERAND_CASE(R600Operands::SRC1_REL) - OPERAND_CASE(R600Operands::SRC1_ABS) - OPERAND_CASE(R600Operands::SRC1_SEL) - OPERAND_CASE(R600Operands::PRED_SEL) + OPERAND_CASE(AMDGPU::OpName::update_exec_mask) + OPERAND_CASE(AMDGPU::OpName::update_pred) + OPERAND_CASE(AMDGPU::OpName::write) + OPERAND_CASE(AMDGPU::OpName::omod) + OPERAND_CASE(AMDGPU::OpName::dst_rel) + OPERAND_CASE(AMDGPU::OpName::clamp) + OPERAND_CASE(AMDGPU::OpName::src0) + OPERAND_CASE(AMDGPU::OpName::src0_neg) + OPERAND_CASE(AMDGPU::OpName::src0_rel) + OPERAND_CASE(AMDGPU::OpName::src0_abs) + OPERAND_CASE(AMDGPU::OpName::src0_sel) + OPERAND_CASE(AMDGPU::OpName::src1) + OPERAND_CASE(AMDGPU::OpName::src1_neg) 
+ OPERAND_CASE(AMDGPU::OpName::src1_rel) + OPERAND_CASE(AMDGPU::OpName::src1_abs) + OPERAND_CASE(AMDGPU::OpName::src1_sel) + OPERAND_CASE(AMDGPU::OpName::pred_sel) default: llvm_unreachable("Wrong Operand"); } @@ -929,12 +1163,6 @@ getSlotedOps(R600Operands::Ops Op, unsigned Slot) { #undef OPERAND_CASE -static int -getVecOperandIdx(R600Operands::VecOps Op) { - return 1 + Op; -} - - MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg) const { @@ -947,31 +1175,31 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( Opcode = AMDGPU::DOT4_eg; MachineBasicBlock::iterator I = MI; MachineOperand &Src0 = MI->getOperand( - getVecOperandIdx(getSlotedOps(R600Operands::SRC0, Slot))); + getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot))); MachineOperand &Src1 = MI->getOperand( - getVecOperandIdx(getSlotedOps(R600Operands::SRC1, Slot))); + getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot))); MachineInstr *MIB = buildDefaultInstruction( MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg()); - static const R600Operands::Ops Operands[14] = { - R600Operands::UPDATE_EXEC_MASK, - R600Operands::UPDATE_PREDICATE, - R600Operands::WRITE, - R600Operands::OMOD, - R600Operands::DST_REL, - R600Operands::CLAMP, - R600Operands::SRC0_NEG, - R600Operands::SRC0_REL, - R600Operands::SRC0_ABS, - R600Operands::SRC0_SEL, - R600Operands::SRC1_NEG, - R600Operands::SRC1_REL, - R600Operands::SRC1_ABS, - R600Operands::SRC1_SEL, + static const unsigned Operands[14] = { + AMDGPU::OpName::update_exec_mask, + AMDGPU::OpName::update_pred, + AMDGPU::OpName::write, + AMDGPU::OpName::omod, + AMDGPU::OpName::dst_rel, + AMDGPU::OpName::clamp, + AMDGPU::OpName::src0_neg, + AMDGPU::OpName::src0_rel, + AMDGPU::OpName::src0_abs, + AMDGPU::OpName::src0_sel, + AMDGPU::OpName::src1_neg, + AMDGPU::OpName::src1_rel, + AMDGPU::OpName::src1_abs, + AMDGPU::OpName::src1_sel, }; for (unsigned i = 0; i < 14; i++) { MachineOperand &MO = MI->getOperand( - getVecOperandIdx(getSlotedOps(Operands[i], Slot))); + getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot))); assert (MO.isImm()); setImmOperand(MIB, Operands[i], MO.getImm()); } @@ -985,56 +1213,19 @@ MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB, uint64_t Imm) const { MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg, AMDGPU::ALU_LITERAL_X); - setImmOperand(MovImm, R600Operands::IMM, Imm); + setImmOperand(MovImm, AMDGPU::OpName::literal, Imm); return MovImm; } -int R600InstrInfo::getOperandIdx(const MachineInstr &MI, - R600Operands::Ops Op) const { +int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const { return getOperandIdx(MI.getOpcode(), Op); } -int R600InstrInfo::getOperandIdx(const MachineInstr &MI, - R600Operands::VecOps Op) const { - return getOperandIdx(MI.getOpcode(), Op); -} - -int R600InstrInfo::getOperandIdx(unsigned Opcode, - R600Operands::Ops Op) const { - unsigned TargetFlags = get(Opcode).TSFlags; - unsigned OpTableIdx; - - if (!HAS_NATIVE_OPERANDS(TargetFlags)) { - switch (Op) { - case R600Operands::DST: return 0; - case R600Operands::SRC0: return 1; - case R600Operands::SRC1: return 2; - case R600Operands::SRC2: return 3; - default: - assert(!"Unknown operand type for instruction"); - return -1; - } - } - - if (TargetFlags & R600_InstFlag::OP1) { - OpTableIdx = 0; - } else if (TargetFlags & R600_InstFlag::OP2) { - OpTableIdx = 1; - } else { - assert((TargetFlags & R600_InstFlag::OP3) 
&& "OP1, OP2, or OP3 not defined " - "for this instruction"); - OpTableIdx = 2; - } - - return R600Operands::ALUOpTable[OpTableIdx][Op]; -} - -int R600InstrInfo::getOperandIdx(unsigned Opcode, - R600Operands::VecOps Op) const { - return Op + 1; +int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const { + return AMDGPU::getNamedOperandIdx(Opcode, Op); } -void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op, +void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const { int Idx = getOperandIdx(*MI, Op); assert(Idx != -1 && "Operand not supported for this instruction."); @@ -1062,20 +1253,20 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx, bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3; switch (Flag) { case MO_FLAG_CLAMP: - FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP); + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp); break; case MO_FLAG_MASK: - FlagIndex = getOperandIdx(*MI, R600Operands::WRITE); + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write); break; case MO_FLAG_NOT_LAST: case MO_FLAG_LAST: - FlagIndex = getOperandIdx(*MI, R600Operands::LAST); + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last); break; case MO_FLAG_NEG: switch (SrcIdx) { - case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break; - case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break; - case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break; + case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break; + case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break; + case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break; } break; @@ -1084,8 +1275,8 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx, "instructions."); (void)IsOP3; switch (SrcIdx) { - case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break; - case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break; + case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break; + case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break; } break; diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h index 6a11c63..cdaa2fb 100644 --- a/lib/Target/R600/R600InstrInfo.h +++ b/lib/Target/R600/R600InstrInfo.h @@ -36,14 +36,14 @@ namespace llvm { int getBranchInstr(const MachineOperand &op) const; std::vector<std::pair<int, unsigned> > - ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV) const; + ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV, unsigned &ConstCount) const; public: enum BankSwizzle { - ALU_VEC_012 = 0, - ALU_VEC_021, - ALU_VEC_120, - ALU_VEC_102, + ALU_VEC_012_SCL_210 = 0, + ALU_VEC_021_SCL_122, + ALU_VEC_120_SCL_212, + ALU_VEC_102_SCL_221, ALU_VEC_201, ALU_VEC_210 }; @@ -63,6 +63,8 @@ namespace llvm { /// \returns true if this \p Opcode represents an ALU instruction. bool isALUInstr(unsigned Opcode) const; + bool hasInstrModifiers(unsigned Opcode) const; + bool isLDSInstr(unsigned Opcode) const; bool isTransOnly(unsigned Opcode) const; bool isTransOnly(const MachineInstr *MI) const; @@ -72,6 +74,15 @@ namespace llvm { bool usesTextureCache(unsigned Opcode) const; bool usesTextureCache(const MachineInstr *MI) const; + bool mustBeLastInClause(unsigned Opcode) const; + + /// \returns The operand index for the given source number. Legal values + /// for SrcNum are 0, 1, and 2. 
+ int getSrcIdx(unsigned Opcode, unsigned SrcNum) const;
+ /// \returns The operand index for the Sel operand given an index to one
+ /// of the instruction's src operands.
+ int getSelIdx(unsigned Opcode, unsigned SrcIdx) const;
+
/// \returns a pair for each src of an ALU instruction.
/// The first member of a pair is the register id.
/// If register is ALU_CONST, second member is SEL.
@@ -80,17 +91,38 @@ namespace llvm {
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
getSrcs(MachineInstr *MI) const;
+ unsigned isLegalUpTo(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ const std::vector<R600InstrInfo::BankSwizzle> &Swz,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const;
+
+ bool FindSwizzleForVectorSlot(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const;
+
/// Given the order VEC_012 < VEC_021 < VEC_120 < VEC_102 < VEC_201 < VEC_210
/// returns true and the first (in lexical order) BankSwizzle affectation
/// starting from the one already provided in the Instruction Group MIs that
/// fits Read Port limitations in BS if available. Otherwise returns false
/// and undefined content in BS.
+ /// isLastAluTrans should be set if the last Alu of MIs will be executed on
+ /// Trans ALU. In this case, ValidTSwizzle returns the BankSwizzle value to
+ /// apply to the last instruction.
/// PV holds GPR to PV registers in the Instruction Group MIs.
bool fitsReadPortLimitations(const std::vector<MachineInstr *> &MIs,
const DenseMap<unsigned, unsigned> &PV,
- std::vector<BankSwizzle> &BS) const;
+ std::vector<BankSwizzle> &BS,
+ bool isLastAluTrans) const;
+
+ /// An instruction group can only access two channel pairs (either [XY] or
+ /// [ZW]) from a KCache bank on R700+. This function checks whether the MIs
+ /// given as input meet this limitation.
+ bool fitsConstReadLimitations(const std::vector<MachineInstr *> &) const;
+ /// Same but using const index set instead of MI set.
bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
- bool canBundle(const std::vector<MachineInstr *> &) const;
/// \brief Vector instructions are instructions that must fill all
/// instruction slots within an instruction group.
@@ -210,17 +242,15 @@ namespace llvm {
/// \brief Get the index of Op in the MachineInstr.
///
/// \returns -1 if the Instruction does not contain the specified \p Op.
- int getOperandIdx(const MachineInstr &MI, R600Operands::Ops Op) const;
- int getOperandIdx(const MachineInstr &MI, R600Operands::VecOps Op) const;
+ int getOperandIdx(const MachineInstr &MI, unsigned Op) const;
/// \brief Get the index of \p Op for the given Opcode.
///
/// \returns -1 if the Instruction does not contain the specified \p Op.
- int getOperandIdx(unsigned Opcode, R600Operands::Ops Op) const;
- int getOperandIdx(unsigned Opcode, R600Operands::VecOps Op) const;
+ int getOperandIdx(unsigned Opcode, unsigned Op) const;
/// \brief Helper function for setting instruction flag values.
- void setImmOperand(MachineInstr *MI, R600Operands::Ops Op, int64_t Imm) const;
+ void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
/// \returns true if this instruction has an operand for storing target flags.
bool hasFlagOperand(const MachineInstr &MI) const; diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td index b4131be..7e61b18 100644 --- a/lib/Target/R600/R600Instructions.td +++ b/lib/Target/R600/R600Instructions.td @@ -12,44 +12,7 @@ //===----------------------------------------------------------------------===// include "R600Intrinsics.td" - -class InstR600 <dag outs, dag ins, string asm, list<dag> pattern, - InstrItinClass itin> - : AMDGPUInst <outs, ins, asm, pattern> { - - field bits<64> Inst; - bit TransOnly = 0; - bit Trig = 0; - bit Op3 = 0; - bit isVector = 0; - bits<2> FlagOperandIdx = 0; - bit Op1 = 0; - bit Op2 = 0; - bit HasNativeOperands = 0; - bit VTXInst = 0; - bit TEXInst = 0; - - let Namespace = "AMDGPU"; - let OutOperandList = outs; - let InOperandList = ins; - let AsmString = asm; - let Pattern = pattern; - let Itinerary = itin; - - let TSFlags{0} = TransOnly; - let TSFlags{4} = Trig; - let TSFlags{5} = Op3; - - // Vector instructions are instructions that must fill all slots in an - // instruction group - let TSFlags{6} = isVector; - let TSFlags{8-7} = FlagOperandIdx; - let TSFlags{9} = HasNativeOperands; - let TSFlags{10} = Op1; - let TSFlags{11} = Op2; - let TSFlags{12} = VTXInst; - let TSFlags{13} = TEXInst; -} +include "R600InstrFormats.td" class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> : InstR600 <outs, ins, asm, pattern, NullALU> { @@ -114,235 +77,6 @@ def ADDRGA_CONST_OFFSET : ComplexPattern<i32, 1, "SelectGlobalValueConstantOffse def ADDRGA_VAR_OFFSET : ComplexPattern<i32, 2, "SelectGlobalValueVariableOffset", [], []>; def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>; -class R600ALU_Word0 { - field bits<32> Word0; - - bits<11> src0; - bits<1> src0_neg; - bits<1> src0_rel; - bits<11> src1; - bits<1> src1_rel; - bits<1> src1_neg; - bits<3> index_mode = 0; - bits<2> pred_sel; - bits<1> last; - - bits<9> src0_sel = src0{8-0}; - bits<2> src0_chan = src0{10-9}; - bits<9> src1_sel = src1{8-0}; - bits<2> src1_chan = src1{10-9}; - - let Word0{8-0} = src0_sel; - let Word0{9} = src0_rel; - let Word0{11-10} = src0_chan; - let Word0{12} = src0_neg; - let Word0{21-13} = src1_sel; - let Word0{22} = src1_rel; - let Word0{24-23} = src1_chan; - let Word0{25} = src1_neg; - let Word0{28-26} = index_mode; - let Word0{30-29} = pred_sel; - let Word0{31} = last; -} - -class R600ALU_Word1 { - field bits<32> Word1; - - bits<11> dst; - bits<3> bank_swizzle; - bits<1> dst_rel; - bits<1> clamp; - - bits<7> dst_sel = dst{6-0}; - bits<2> dst_chan = dst{10-9}; - - let Word1{20-18} = bank_swizzle; - let Word1{27-21} = dst_sel; - let Word1{28} = dst_rel; - let Word1{30-29} = dst_chan; - let Word1{31} = clamp; -} - -class R600ALU_Word1_OP2 <bits<11> alu_inst> : R600ALU_Word1{ - - bits<1> src0_abs; - bits<1> src1_abs; - bits<1> update_exec_mask; - bits<1> update_pred; - bits<1> write; - bits<2> omod; - - let Word1{0} = src0_abs; - let Word1{1} = src1_abs; - let Word1{2} = update_exec_mask; - let Word1{3} = update_pred; - let Word1{4} = write; - let Word1{6-5} = omod; - let Word1{17-7} = alu_inst; -} - -class R600ALU_Word1_OP3 <bits<5> alu_inst> : R600ALU_Word1{ - - bits<11> src2; - bits<1> src2_rel; - bits<1> src2_neg; - - bits<9> src2_sel = src2{8-0}; - bits<2> src2_chan = src2{10-9}; - - let Word1{8-0} = src2_sel; - let Word1{9} = src2_rel; - let Word1{11-10} = src2_chan; - let Word1{12} = src2_neg; - let Word1{17-13} = alu_inst; -} - -class VTX_WORD0 { - field bits<32> Word0; - bits<7> SRC_GPR; - bits<5> VC_INST; 
- bits<2> FETCH_TYPE; - bits<1> FETCH_WHOLE_QUAD; - bits<8> BUFFER_ID; - bits<1> SRC_REL; - bits<2> SRC_SEL_X; - bits<6> MEGA_FETCH_COUNT; - - let Word0{4-0} = VC_INST; - let Word0{6-5} = FETCH_TYPE; - let Word0{7} = FETCH_WHOLE_QUAD; - let Word0{15-8} = BUFFER_ID; - let Word0{22-16} = SRC_GPR; - let Word0{23} = SRC_REL; - let Word0{25-24} = SRC_SEL_X; - let Word0{31-26} = MEGA_FETCH_COUNT; -} - -class VTX_WORD1_GPR { - field bits<32> Word1; - bits<7> DST_GPR; - bits<1> DST_REL; - bits<3> DST_SEL_X; - bits<3> DST_SEL_Y; - bits<3> DST_SEL_Z; - bits<3> DST_SEL_W; - bits<1> USE_CONST_FIELDS; - bits<6> DATA_FORMAT; - bits<2> NUM_FORMAT_ALL; - bits<1> FORMAT_COMP_ALL; - bits<1> SRF_MODE_ALL; - - let Word1{6-0} = DST_GPR; - let Word1{7} = DST_REL; - let Word1{8} = 0; // Reserved - let Word1{11-9} = DST_SEL_X; - let Word1{14-12} = DST_SEL_Y; - let Word1{17-15} = DST_SEL_Z; - let Word1{20-18} = DST_SEL_W; - let Word1{21} = USE_CONST_FIELDS; - let Word1{27-22} = DATA_FORMAT; - let Word1{29-28} = NUM_FORMAT_ALL; - let Word1{30} = FORMAT_COMP_ALL; - let Word1{31} = SRF_MODE_ALL; -} - -class TEX_WORD0 { - field bits<32> Word0; - - bits<5> TEX_INST; - bits<2> INST_MOD; - bits<1> FETCH_WHOLE_QUAD; - bits<8> RESOURCE_ID; - bits<7> SRC_GPR; - bits<1> SRC_REL; - bits<1> ALT_CONST; - bits<2> RESOURCE_INDEX_MODE; - bits<2> SAMPLER_INDEX_MODE; - - let Word0{4-0} = TEX_INST; - let Word0{6-5} = INST_MOD; - let Word0{7} = FETCH_WHOLE_QUAD; - let Word0{15-8} = RESOURCE_ID; - let Word0{22-16} = SRC_GPR; - let Word0{23} = SRC_REL; - let Word0{24} = ALT_CONST; - let Word0{26-25} = RESOURCE_INDEX_MODE; - let Word0{28-27} = SAMPLER_INDEX_MODE; -} - -class TEX_WORD1 { - field bits<32> Word1; - - bits<7> DST_GPR; - bits<1> DST_REL; - bits<3> DST_SEL_X; - bits<3> DST_SEL_Y; - bits<3> DST_SEL_Z; - bits<3> DST_SEL_W; - bits<7> LOD_BIAS; - bits<1> COORD_TYPE_X; - bits<1> COORD_TYPE_Y; - bits<1> COORD_TYPE_Z; - bits<1> COORD_TYPE_W; - - let Word1{6-0} = DST_GPR; - let Word1{7} = DST_REL; - let Word1{11-9} = DST_SEL_X; - let Word1{14-12} = DST_SEL_Y; - let Word1{17-15} = DST_SEL_Z; - let Word1{20-18} = DST_SEL_W; - let Word1{27-21} = LOD_BIAS; - let Word1{28} = COORD_TYPE_X; - let Word1{29} = COORD_TYPE_Y; - let Word1{30} = COORD_TYPE_Z; - let Word1{31} = COORD_TYPE_W; -} - -class TEX_WORD2 { - field bits<32> Word2; - - bits<5> OFFSET_X; - bits<5> OFFSET_Y; - bits<5> OFFSET_Z; - bits<5> SAMPLER_ID; - bits<3> SRC_SEL_X; - bits<3> SRC_SEL_Y; - bits<3> SRC_SEL_Z; - bits<3> SRC_SEL_W; - - let Word2{4-0} = OFFSET_X; - let Word2{9-5} = OFFSET_Y; - let Word2{14-10} = OFFSET_Z; - let Word2{19-15} = SAMPLER_ID; - let Word2{22-20} = SRC_SEL_X; - let Word2{25-23} = SRC_SEL_Y; - let Word2{28-26} = SRC_SEL_Z; - let Word2{31-29} = SRC_SEL_W; -} - -/* -XXX: R600 subtarget uses a slightly different encoding than the other -subtargets. We currently handle this in R600MCCodeEmitter, but we may -want to use these instruction classes in the future. 
- -class R600ALU_Word1_OP2_r600 : R600ALU_Word1_OP2 { - - bits<1> fog_merge; - bits<10> alu_inst; - - let Inst{37} = fog_merge; - let Inst{39-38} = omod; - let Inst{49-40} = alu_inst; -} - -class R600ALU_Word1_OP2_r700 : R600ALU_Word1_OP2 { - - bits<11> alu_inst; - - let Inst{38-37} = omod; - let Inst{49-39} = alu_inst; -} -*/ def R600_Pred : PredicateOperand<i32, (ops R600_Predicate), (ops PRED_SEL_OFF)>; @@ -380,7 +114,9 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern, let update_pred = 0; let HasNativeOperands = 1; let Op1 = 1; + let ALUInst = 1; let DisableEncoding = "$literal"; + let UseNamedOperandTable = 1; let Inst{31-0} = Word0; let Inst{63-32} = Word1; @@ -392,7 +128,7 @@ class R600_1OP_Helper <bits<11> inst, string opName, SDPatternOperator node, [(set R600_Reg32:$dst, (node R600_Reg32:$src0))] >; -// If you add our change the operands for R600_2OP instructions, you must +// If you add or change the operands for R600_2OP instructions, you must // also update the R600Op2OperandIndex::ROI enum in R600Defines.h, // R600InstrInfo::buildDefaultInstruction(), and R600InstrInfo::getOperandIdx(). class R600_2OP <bits<11> inst, string opName, list<dag> pattern, @@ -416,7 +152,9 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern, let HasNativeOperands = 1; let Op2 = 1; + let ALUInst = 1; let DisableEncoding = "$literal"; + let UseNamedOperandTable = 1; let Inst{31-0} = Word0; let Inst{63-32} = Word1; @@ -456,6 +194,8 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern, let HasNativeOperands = 1; let DisableEncoding = "$literal"; let Op3 = 1; + let UseNamedOperandTable = 1; + let ALUInst = 1; let Inst{31-0} = Word0; let Inst{63-32} = Word1; @@ -501,55 +241,84 @@ def TEX_SHADOW_ARRAY : PatLeaf< }] >; -class EG_CF_RAT <bits <8> cf_inst, bits <6> rat_inst, bits<4> rat_id, dag outs, +class EG_CF_RAT <bits <8> cfinst, bits <6> ratinst, bits<4> mask, dag outs, dag ins, string asm, list<dag> pattern> : - InstR600ISA <outs, ins, asm, pattern> { - bits<7> RW_GPR; - bits<7> INDEX_GPR; - - bits<2> RIM; - bits<2> TYPE; - bits<1> RW_REL; - bits<2> ELEM_SIZE; - - bits<12> ARRAY_SIZE; - bits<4> COMP_MASK; - bits<4> BURST_COUNT; - bits<1> VPM; - bits<1> eop; - bits<1> MARK; - bits<1> BARRIER; - - // CF_ALLOC_EXPORT_WORD0_RAT - let Inst{3-0} = rat_id; - let Inst{9-4} = rat_inst; - let Inst{10} = 0; // Reserved - let Inst{12-11} = RIM; - let Inst{14-13} = TYPE; - let Inst{21-15} = RW_GPR; - let Inst{22} = RW_REL; - let Inst{29-23} = INDEX_GPR; - let Inst{31-30} = ELEM_SIZE; - - // CF_ALLOC_EXPORT_WORD1_BUF - let Inst{43-32} = ARRAY_SIZE; - let Inst{47-44} = COMP_MASK; - let Inst{51-48} = BURST_COUNT; - let Inst{52} = VPM; - let Inst{53} = eop; - let Inst{61-54} = cf_inst; - let Inst{62} = MARK; - let Inst{63} = BARRIER; + InstR600ISA <outs, ins, asm, pattern>, + CF_ALLOC_EXPORT_WORD0_RAT, CF_ALLOC_EXPORT_WORD1_BUF { + + let rat_id = 0; + let rat_inst = ratinst; + let rim = 0; + // XXX: Have a separate instruction for non-indexed writes. 
+ let type = 1; + let rw_rel = 0; + let elem_size = 0; + + let array_size = 0; + let comp_mask = mask; + let burst_count = 0; + let vpm = 0; + let cf_inst = cfinst; + let mark = 0; + let barrier = 1; + + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; + +} + +class VTX_READ <string name, bits<8> buffer_id, dag outs, list<dag> pattern> + : InstR600ISA <outs, (ins MEMxi:$src_gpr), name, pattern>, + VTX_WORD1_GPR { + + // Static fields + let DST_REL = 0; + // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL, + // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored, + // however, based on my testing if USE_CONST_FIELDS is set, then all + // these fields need to be set to 0. + let USE_CONST_FIELDS = 0; + let NUM_FORMAT_ALL = 1; + let FORMAT_COMP_ALL = 0; + let SRF_MODE_ALL = 0; + + let Inst{63-32} = Word1; + // LLVM can only encode 64-bit instructions, so these fields are manually + // encoded in R600CodeEmitter + // + // bits<16> OFFSET; + // bits<2> ENDIAN_SWAP = 0; + // bits<1> CONST_BUF_NO_STRIDE = 0; + // bits<1> MEGA_FETCH = 0; + // bits<1> ALT_CONST = 0; + // bits<2> BUFFER_INDEX_MODE = 0; + + // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding + // is done in R600CodeEmitter + // + // Inst{79-64} = OFFSET; + // Inst{81-80} = ENDIAN_SWAP; + // Inst{82} = CONST_BUF_NO_STRIDE; + // Inst{83} = MEGA_FETCH; + // Inst{84} = ALT_CONST; + // Inst{86-85} = BUFFER_INDEX_MODE; + // Inst{95-86} = 0; Reserved + + // VTX_WORD3 (Padding) + // + // Inst{127-96} = 0; + + let VTXInst = 1; } class LoadParamFrag <PatFrag load_type> : PatFrag < (ops node:$ptr), (load_type node:$ptr), - [{ return isParamLoad(dyn_cast<LoadSDNode>(N)); }] + [{ return isConstantLoad(dyn_cast<LoadSDNode>(N), 0); }] >; def load_param : LoadParamFrag<load>; -def load_param_zexti8 : LoadParamFrag<zextloadi8>; -def load_param_zexti16 : LoadParamFrag<zextloadi16>; +def load_param_exti8 : LoadParamFrag<az_extloadi8>; +def load_param_exti16 : LoadParamFrag<az_extloadi16>; def isR600 : Predicate<"Subtarget.getGeneration() <= AMDGPUSubtarget::R700">; def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">; @@ -595,6 +364,14 @@ def DOT4 : SDNode<"AMDGPUISD::DOT4", [] >; +def COS_HW : SDNode<"AMDGPUISD::COS_HW", + SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]> +>; + +def SIN_HW : SDNode<"AMDGPUISD::SIN_HW", + SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]> +>; + def TEXTURE_FETCH_Type : SDTypeProfile<1, 19, [SDTCisFP<0>]>; def TEXTURE_FETCH: SDNode<"AMDGPUISD::TEXTURE_FETCH", TEXTURE_FETCH_Type, []>; @@ -753,9 +530,9 @@ let usesCustomInserter = 1, isNotDuplicable = 1 in { class ExportSwzInst : InstR600ISA<( outs), (ins R600_Reg128:$gpr, i32imm:$type, i32imm:$arraybase, - i32imm:$sw_x, i32imm:$sw_y, i32imm:$sw_z, i32imm:$sw_w, i32imm:$inst, + RSel:$sw_x, RSel:$sw_y, RSel:$sw_z, RSel:$sw_w, i32imm:$inst, i32imm:$eop), - !strconcat("EXPORT", " $gpr"), + !strconcat("EXPORT", " $gpr.$sw_x$sw_y$sw_z$sw_w"), []>, ExportWord0, ExportSwzWord1 { let elem_size = 3; let Inst{31-0} = Word0; @@ -779,41 +556,6 @@ class ExportBufInst : InstR600ISA<( // Control Flow Instructions //===----------------------------------------------------------------------===// -class CF_ALU_WORD0 { - field bits<32> Word0; - - bits<22> ADDR; - bits<4> KCACHE_BANK0; - bits<4> KCACHE_BANK1; - bits<2> KCACHE_MODE0; - - let Word0{21-0} = ADDR; - let Word0{25-22} = KCACHE_BANK0; - let Word0{29-26} = KCACHE_BANK1; - let Word0{31-30} = KCACHE_MODE0; -} - -class CF_ALU_WORD1 { - field bits<32> 
Word1; - - bits<2> KCACHE_MODE1; - bits<8> KCACHE_ADDR0; - bits<8> KCACHE_ADDR1; - bits<7> COUNT; - bits<1> ALT_CONST; - bits<4> CF_INST; - bits<1> WHOLE_QUAD_MODE; - bits<1> BARRIER; - - let Word1{1-0} = KCACHE_MODE1; - let Word1{9-2} = KCACHE_ADDR0; - let Word1{17-10} = KCACHE_ADDR1; - let Word1{24-18} = COUNT; - let Word1{25} = ALT_CONST; - let Word1{29-26} = CF_INST; - let Word1{30} = WHOLE_QUAD_MODE; - let Word1{31} = BARRIER; -} def KCACHE : InstFlag<"printKCache">; @@ -821,7 +563,7 @@ class ALU_CLAUSE<bits<4> inst, string OpName> : AMDGPUInst <(outs), (ins i32imm:$ADDR, i32imm:$KCACHE_BANK0, i32imm:$KCACHE_BANK1, KCACHE:$KCACHE_MODE0, KCACHE:$KCACHE_MODE1, i32imm:$KCACHE_ADDR0, i32imm:$KCACHE_ADDR1, -i32imm:$COUNT), +i32imm:$COUNT, i32imm:$Enabled), !strconcat(OpName, " $COUNT, @$ADDR, " "KC0[$KCACHE_MODE0], KC1[$KCACHE_MODE1]"), [] >, CF_ALU_WORD0, CF_ALU_WORD1 { @@ -844,45 +586,19 @@ class CF_WORD0_R600 { let Word0 = ADDR; } -class CF_WORD1_R600 { - field bits<32> Word1; - - bits<3> POP_COUNT; - bits<5> CF_CONST; - bits<2> COND; - bits<3> COUNT; - bits<6> CALL_COUNT; - bits<1> COUNT_3; - bits<1> END_OF_PROGRAM; - bits<1> VALID_PIXEL_MODE; - bits<7> CF_INST; - bits<1> WHOLE_QUAD_MODE; - bits<1> BARRIER; - - let Word1{2-0} = POP_COUNT; - let Word1{7-3} = CF_CONST; - let Word1{9-8} = COND; - let Word1{12-10} = COUNT; - let Word1{18-13} = CALL_COUNT; - let Word1{19} = COUNT_3; - let Word1{21} = END_OF_PROGRAM; - let Word1{22} = VALID_PIXEL_MODE; - let Word1{29-23} = CF_INST; - let Word1{30} = WHOLE_QUAD_MODE; - let Word1{31} = BARRIER; -} - class CF_CLAUSE_R600 <bits<7> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs), ins, AsmPrint, [] >, CF_WORD0_R600, CF_WORD1_R600 { field bits<64> Inst; + bits<4> CNT; let CF_INST = inst; let BARRIER = 1; let CF_CONST = 0; let VALID_PIXEL_MODE = 0; let COND = 0; + let COUNT = CNT{2-0}; let CALL_COUNT = 0; - let COUNT_3 = 0; + let COUNT_3 = CNT{3}; let END_OF_PROGRAM = 0; let WHOLE_QUAD_MODE = 0; @@ -890,38 +606,6 @@ ins, AsmPrint, [] >, CF_WORD0_R600, CF_WORD1_R600 { let Inst{63-32} = Word1; } -class CF_WORD0_EG { - field bits<32> Word0; - - bits<24> ADDR; - bits<3> JUMPTABLE_SEL; - - let Word0{23-0} = ADDR; - let Word0{26-24} = JUMPTABLE_SEL; -} - -class CF_WORD1_EG { - field bits<32> Word1; - - bits<3> POP_COUNT; - bits<5> CF_CONST; - bits<2> COND; - bits<6> COUNT; - bits<1> VALID_PIXEL_MODE; - bits<1> END_OF_PROGRAM; - bits<8> CF_INST; - bits<1> BARRIER; - - let Word1{2-0} = POP_COUNT; - let Word1{7-3} = CF_CONST; - let Word1{9-8} = COND; - let Word1{15-10} = COUNT; - let Word1{20} = VALID_PIXEL_MODE; - let Word1{21} = END_OF_PROGRAM; - let Word1{29-22} = CF_INST; - let Word1{31} = BARRIER; -} - class CF_CLAUSE_EG <bits<8> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs), ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG { field bits<64> Inst; @@ -940,6 +624,7 @@ ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG { def CF_ALU : ALU_CLAUSE<8, "ALU">; def CF_ALU_PUSH_BEFORE : ALU_CLAUSE<9, "ALU_PUSH_BEFORE">; +def CF_ALU_POP_AFTER : ALU_CLAUSE<10, "ALU_POP_AFTER">; def FETCH_CLAUSE : AMDGPUInst <(outs), (ins i32imm:$addr), "Fetch clause starting at $addr:", [] > { @@ -1261,7 +946,11 @@ class R600_VEC2OP<list<dag> pattern> : InstR600 <(outs R600_Reg32:$dst), (ins LITERAL:$literal0, LITERAL:$literal1), "", pattern, - AnyALU> {} + AnyALU> { + + let UseNamedOperandTable = 1; + +} } def DOT_4 : R600_VEC2OP<[(set R600_Reg32:$dst, (DOT4 @@ -1279,12 +968,13 @@ multiclass CUBE_Common <bits<11> inst> { def _pseudo : InstR600 < (outs R600_Reg128:$dst), - (ins 
R600_Reg128:$src), - "CUBE $dst $src", - [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src))], + (ins R600_Reg128:$src0), + "CUBE $dst $src0", + [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src0))], VecALU > { let isPseudo = 1; + let UseNamedOperandTable = 1; } def _real : R600_2OP <inst, "CUBE", []>; @@ -1399,14 +1089,14 @@ class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP < } class SIN_Common <bits<11> inst> : R600_1OP < - inst, "SIN", []>{ + inst, "SIN", [(set f32:$dst, (SIN_HW f32:$src0))]>{ let Trig = 1; let TransOnly = 1; let Itinerary = TransALU; } class COS_Common <bits<11> inst> : R600_1OP < - inst, "COS", []> { + inst, "COS", [(set f32:$dst, (COS_HW f32:$src0))]> { let Trig = 1; let TransOnly = 1; let Itinerary = TransALU; @@ -1494,52 +1184,52 @@ let Predicates = [isR600] in { } defm : SteamOutputExportPattern<R600_ExportBuf, 0x20, 0x21, 0x22, 0x23>; - def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$COUNT), - "TEX $COUNT @$ADDR"> { + def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$CNT), + "TEX $CNT @$ADDR"> { let POP_COUNT = 0; } - def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$COUNT), - "VTX $COUNT @$ADDR"> { + def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$CNT), + "VTX $CNT @$ADDR"> { let POP_COUNT = 0; } def WHILE_LOOP_R600 : CF_CLAUSE_R600<6, (ins i32imm:$ADDR), "LOOP_START_DX10 @$ADDR"> { let POP_COUNT = 0; - let COUNT = 0; + let CNT = 0; } def END_LOOP_R600 : CF_CLAUSE_R600<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> { let POP_COUNT = 0; - let COUNT = 0; + let CNT = 0; } def LOOP_BREAK_R600 : CF_CLAUSE_R600<9, (ins i32imm:$ADDR), "LOOP_BREAK @$ADDR"> { let POP_COUNT = 0; - let COUNT = 0; + let CNT = 0; } def CF_CONTINUE_R600 : CF_CLAUSE_R600<8, (ins i32imm:$ADDR), "CONTINUE @$ADDR"> { let POP_COUNT = 0; - let COUNT = 0; + let CNT = 0; } def CF_JUMP_R600 : CF_CLAUSE_R600<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT), "JUMP @$ADDR POP:$POP_COUNT"> { - let COUNT = 0; + let CNT = 0; } def CF_ELSE_R600 : CF_CLAUSE_R600<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT), "ELSE @$ADDR POP:$POP_COUNT"> { - let COUNT = 0; + let CNT = 0; } def CF_CALL_FS_R600 : CF_CLAUSE_R600<19, (ins), "CALL_FS"> { let ADDR = 0; - let COUNT = 0; + let CNT = 0; let POP_COUNT = 0; } def POP_R600 : CF_CLAUSE_R600<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT), "POP @$ADDR POP:$POP_COUNT"> { - let COUNT = 0; + let CNT = 0; } def CF_END_R600 : CF_CLAUSE_R600<0, (ins), "CF_END"> { - let COUNT = 0; + let CNT = 0; let POP_COUNT = 0; let ADDR = 0; let END_OF_PROGRAM = 1; @@ -1547,18 +1237,6 @@ let Predicates = [isR600] in { } -// Helper pattern for normalizing inputs to triginomic instructions for R700+ -// cards. 
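The removed SIN_PAT/COS_PAT helpers below capture the usual R700+/EG trig convention: the hardware SIN/COS units expect an angle pre-scaled by 1/(2*pi), so fsin/fcos were wrapped in a MUL_IEEE by CONST.TWO_PI_INV. A host-side sketch of that normalization, assuming the constant is exactly 1/(2*pi) and using hw_sin as a hypothetical stand-in for the hardware op (with these patterns deleted, the scaling presumably moves into the SIN_HW/COS_HW lowering, which this hunk does not show):

// sin(x) lowered as HW_SIN(x * (1/2pi)): the hardware evaluates the
// angle in full revolutions rather than radians.
float hw_sin(float revolutions);                 // hypothetical stand-in
float sin_via_hw(float x) {
  const float TwoPiInv = 0.15915494309189535f;   // CONST.TWO_PI_INV
  return hw_sin(x * TwoPiInv);
}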
-class COS_PAT <InstR600 trig> : Pat< - (fcos f32:$src), - (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src)) ->; - -class SIN_PAT <InstR600 trig> : Pat< - (fsin f32:$src), - (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src)) ->; - //===----------------------------------------------------------------------===// // R700 Only instructions //===----------------------------------------------------------------------===// @@ -1566,10 +1244,6 @@ class SIN_PAT <InstR600 trig> : Pat< let Predicates = [isR700] in { def SIN_r700 : SIN_Common<0x6E>; def COS_r700 : COS_Common<0x6F>; - - // R700 normalizes inputs to SIN/COS the same as EG - def : SIN_PAT <SIN_r700>; - def : COS_PAT <COS_r700>; } //===----------------------------------------------------------------------===// @@ -1595,9 +1269,183 @@ def SIN_eg : SIN_Common<0x8D>; def COS_eg : COS_Common<0x8E>; def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>; -def : SIN_PAT <SIN_eg>; -def : COS_PAT <COS_eg>; def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>; + +//===----------------------------------------------------------------------===// +// Memory read/write instructions +//===----------------------------------------------------------------------===// +let usesCustomInserter = 1 in { + +class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> mask, string name, + list<dag> pattern> + : EG_CF_RAT <0x57, 0x2, mask, (outs), ins, name, pattern> { +} + +} // End usesCustomInserter = 1 + +// 32-bit store +def RAT_WRITE_CACHELESS_32_eg : RAT_WRITE_CACHELESS_eg < + (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), + 0x1, "RAT_WRITE_CACHELESS_32_eg $rw_gpr, $index_gpr, $eop", + [(global_store i32:$rw_gpr, i32:$index_gpr)] +>; + +// 64-bit store +def RAT_WRITE_CACHELESS_64_eg : RAT_WRITE_CACHELESS_eg < + (ins R600_Reg64:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), + 0x3, "RAT_WRITE_CACHELESS_64_eg $rw_gpr.XY, $index_gpr, $eop", + [(global_store v2i32:$rw_gpr, i32:$index_gpr)] +>; + +//128-bit store +def RAT_WRITE_CACHELESS_128_eg : RAT_WRITE_CACHELESS_eg < + (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), + 0xf, "RAT_WRITE_CACHELESS_128 $rw_gpr.XYZW, $index_gpr, $eop", + [(global_store v4i32:$rw_gpr, i32:$index_gpr)] +>; + +class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern> + : VTX_WORD0_eg, VTX_READ<name, buffer_id, outs, pattern> { + + // Static fields + let VC_INST = 0; + let FETCH_TYPE = 2; + let FETCH_WHOLE_QUAD = 0; + let BUFFER_ID = buffer_id; + let SRC_REL = 0; + // XXX: We can infer this field based on the SRC_GPR. This would allow us + // to store vertex addresses in any channel, not just X. 
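The XXX note above suggests deriving the source select from SRC_GPR instead of hard-wiring it to 0, as the next line does. A hypothetical sketch of that inference, reusing the getHWRegChan helper that the R600Packetizer.cpp hunk further down already relies on:

// Hypothetical: pick the source-select channel from the register's
// hardware channel, so vertex addresses need not live in X.
unsigned inferSrcSel(const R600RegisterInfo &TRI, unsigned SrcGPR) {
  return TRI.getHWRegChan(SrcGPR); // 0 = X, 1 = Y, 2 = Z, 3 = W
}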
+ let SRC_SEL_X = 0; + + let Inst{31-0} = Word0; +} + +class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_eg <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id, + (outs R600_TReg32_X:$dst_gpr), pattern> { + + let MEGA_FETCH_COUNT = 1; + let DST_SEL_X = 0; + let DST_SEL_Y = 7; // Masked + let DST_SEL_Z = 7; // Masked + let DST_SEL_W = 7; // Masked + let DATA_FORMAT = 1; // FMT_8 +} + +class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_eg <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id, + (outs R600_TReg32_X:$dst_gpr), pattern> { + let MEGA_FETCH_COUNT = 2; + let DST_SEL_X = 0; + let DST_SEL_Y = 7; // Masked + let DST_SEL_Z = 7; // Masked + let DST_SEL_W = 7; // Masked + let DATA_FORMAT = 5; // FMT_16 + +} + +class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_eg <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id, + (outs R600_TReg32_X:$dst_gpr), pattern> { + + let MEGA_FETCH_COUNT = 4; + let DST_SEL_X = 0; + let DST_SEL_Y = 7; // Masked + let DST_SEL_Z = 7; // Masked + let DST_SEL_W = 7; // Masked + let DATA_FORMAT = 0xD; // COLOR_32 + + // This is not really necessary, but there were some GPU hangs that appeared + // to be caused by ALU instructions in the next instruction group that wrote + // to the $src_gpr registers of the VTX_READ. + // e.g. + // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24 + // %T2_X<def> = MOV %ZERO + //Adding this constraint prevents this from happening. + let Constraints = "$src_gpr.ptr = $dst_gpr"; +} + +class VTX_READ_64_eg <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_eg <"VTX_READ_64 $dst_gpr.XY, $src_gpr", buffer_id, + (outs R600_Reg64:$dst_gpr), pattern> { + + let MEGA_FETCH_COUNT = 8; + let DST_SEL_X = 0; + let DST_SEL_Y = 1; + let DST_SEL_Z = 7; + let DST_SEL_W = 7; + let DATA_FORMAT = 0x1D; // COLOR_32_32 +} + +class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_eg <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id, + (outs R600_Reg128:$dst_gpr), pattern> { + + let MEGA_FETCH_COUNT = 16; + let DST_SEL_X = 0; + let DST_SEL_Y = 1; + let DST_SEL_Z = 2; + let DST_SEL_W = 3; + let DATA_FORMAT = 0x22; // COLOR_32_32_32_32 + + // XXX: Need to force VTX_READ_128 instructions to write to the same register + // that holds its buffer address to avoid potential hangs. We can't use + // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst + // registers are different sizes. 
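The "$src_gpr.ptr = $dst_gpr" constraint above is ordinary LLVM tied-operand machinery; roughly the programmatic equivalent, as a sketch rather than code from the patch:

// Tying the address operand to the destination makes the register
// allocator assign both the same register, so no instruction in a
// following group can clobber the address while the fetch is in
// flight. MachineInstr::tieOperands is the in-code counterpart of a
// TableGen Constraints string.
void tieAddressToDest(MachineInstr &MI, unsigned DefIdx, unsigned UseIdx) {
  MI.tieOperands(DefIdx, UseIdx);
}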
+} + +//===----------------------------------------------------------------------===// +// VTX Read from parameter memory space +//===----------------------------------------------------------------------===// + +def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0, + [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))] +>; + +def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0, + [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))] +>; + +def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0, + [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))] +>; + +def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <0, + [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))] +>; + +def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0, + [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))] +>; + +//===----------------------------------------------------------------------===// +// VTX Read from global memory space +//===----------------------------------------------------------------------===// + +// 8-bit reads +def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1, + [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))] +>; + +def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1, + [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))] +>; + +// 32-bit reads +def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1, + [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))] +>; + +// 64-bit reads +def VTX_READ_GLOBAL_64_eg : VTX_READ_64_eg <1, + [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))] +>; + +// 128-bit reads +def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1, + [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))] +>; + } // End Predicates = [isEG] //===----------------------------------------------------------------------===// @@ -1630,6 +1478,9 @@ let Predicates = [isEGorCayman] in { def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", [], VecALU>; defm : BFIPatterns <BFI_INT_eg>; + def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24", + [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))], VecALU + >; def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>; def : ROTRPattern <BIT_ALIGN_INT_eg>; @@ -1643,6 +1494,9 @@ let Predicates = [isEGorCayman] in { def CNDGE_eg : CNDGE_Common<0x1B>; def MUL_LIT_eg : MUL_LIT_Common<0x1F>; def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>; + def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24", + [(set i32:$dst, (mul U24:$src0, U24:$src1))], VecALU + >; def DOT4_eg : DOT4_Common<0xBE>; defm CUBE_eg : CUBE_Common<0xC0>; @@ -1654,6 +1508,8 @@ let hasSideEffects = 1 in { def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> { let Pattern = []; + let TransOnly = 0; + let Itinerary = AnyALU; } def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>; @@ -1664,6 +1520,111 @@ let hasSideEffects = 1 in { def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>; +def GROUP_BARRIER : InstR600 < + (outs), (ins), " GROUP_BARRIER", [(int_AMDGPU_barrier_local)], AnyALU>, + R600ALU_Word0, + R600ALU_Word1_OP2 <0x54> { + + let dst = 0; + let dst_rel = 0; + let src0 = 0; + let src0_rel = 0; + let src0_neg = 0; + let src0_abs = 0; + let src1 = 0; + let src1_rel = 0; + let src1_neg = 0; + let src1_abs = 0; + let write = 0; + let omod = 0; + let clamp = 0; + let last = 1; + let bank_swizzle = 0; + let pred_sel = 0; + let update_exec_mask = 0; + let update_pred = 0; + + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; + + let ALUInst = 1; +} + +//===----------------------------------------------------------------------===// +// LDS Instructions 
+//===----------------------------------------------------------------------===// +class R600_LDS <bits<6> op, dag outs, dag ins, string asm, + list<dag> pattern = []> : + + InstR600 <outs, ins, asm, pattern, XALU>, + R600_ALU_LDS_Word0, + R600LDS_Word1 { + + bits<6> offset = 0; + let lds_op = op; + + let Word1{27} = offset{0}; + let Word1{12} = offset{1}; + let Word1{28} = offset{2}; + let Word1{31} = offset{3}; + let Word0{12} = offset{4}; + let Word0{25} = offset{5}; + + + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; + + let ALUInst = 1; + let HasNativeOperands = 1; + let UseNamedOperandTable = 1; +} + +class R600_LDS_1A <bits<6> lds_op, string name, list<dag> pattern> : R600_LDS < + lds_op, + (outs R600_Reg32:$dst), + (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel, + LAST:$last, R600_Pred:$pred_sel, + BANK_SWIZZLE:$bank_swizzle), + " "#name#" $last OQAP, $src0$src0_rel $pred_sel", + pattern + > { + + let src1 = 0; + let src1_rel = 0; + let src2 = 0; + let src2_rel = 0; + + let Defs = [OQAP]; + let usesCustomInserter = 1; + let LDS_1A = 1; + let DisableEncoding = "$dst"; +} + +class R600_LDS_1A1D <bits<6> lds_op, string name, list<dag> pattern> : + R600_LDS < + lds_op, + (outs), + (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel, + R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel, + LAST:$last, R600_Pred:$pred_sel, + BANK_SWIZZLE:$bank_swizzle), + " "#name#" $last $src0$src0_rel, $src1$src1_rel, $pred_sel", + pattern + > { + + let src2 = 0; + let src2_rel = 0; + let LDS_1A1D = 1; +} + +def LDS_READ_RET : R600_LDS_1A <0x32, "LDS_READ_RET", + [(set (i32 R600_Reg32:$dst), (local_load R600_Reg32:$src0))] +>; + +def LDS_WRITE : R600_LDS_1A1D <0xD, "LDS_WRITE", + [(local_store (i32 R600_Reg32:$src1), R600_Reg32:$src0)] +>; + // TRUNC is used for the FLT_TO_INT instructions to work around a // perceived problem where the rounding modes are applied differently // depending on the instruction and the slot they are in. @@ -1752,47 +1713,84 @@ let hasSideEffects = 1 in { let END_OF_PROGRAM = 1; } +} // End Predicates = [isEGorCayman] + //===----------------------------------------------------------------------===// -// Memory read/write instructions +// Regist loads and stores - for indirect addressing //===----------------------------------------------------------------------===// -let usesCustomInserter = 1 in { -class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> comp_mask, string name, - list<dag> pattern> - : EG_CF_RAT <0x57, 0x2, 0, (outs), ins, name, pattern> { - let RIM = 0; - // XXX: Have a separate instruction for non-indexed writes. 
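Returning to the new R600_LDS format earlier in this hunk: note that the 6-bit LDS offset is scattered across non-contiguous bits of both encoding words. In plain C++ the packing implied by those let-assignments looks like this (a sketch mirroring the TableGen, not the generated encoder):

// Scatter a 6-bit LDS offset into the two 32-bit encoding words
// exactly as the R600_LDS let-assignments specify.
void packLDSOffset(uint32_t &Word0, uint32_t &Word1, unsigned Offset) {
  Word1 |= ((Offset >> 0) & 1u) << 27;
  Word1 |= ((Offset >> 1) & 1u) << 12;
  Word1 |= ((Offset >> 2) & 1u) << 28;
  Word1 |= ((Offset >> 3) & 1u) << 31;
  Word0 |= ((Offset >> 4) & 1u) << 12;
  Word0 |= ((Offset >> 5) & 1u) << 25;
}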
- let TYPE = 1; - let RW_REL = 0; - let ELEM_SIZE = 0; +defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>; - let ARRAY_SIZE = 0; - let COMP_MASK = comp_mask; - let BURST_COUNT = 0; - let VPM = 0; - let MARK = 0; - let BARRIER = 1; -} +//===----------------------------------------------------------------------===// +// Cayman Instructions +//===----------------------------------------------------------------------===// -} // End usesCustomInserter = 1 +let Predicates = [isCayman] in { -// 32-bit store -def RAT_WRITE_CACHELESS_32_eg : RAT_WRITE_CACHELESS_eg < - (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), - 0x1, "RAT_WRITE_CACHELESS_32_eg $rw_gpr, $index_gpr, $eop", +def MULADD_INT24_cm : R600_3OP <0x08, "MULADD_INT24", + [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))], VecALU +>; +def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24", + [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU +>; + +let isVector = 1 in { + +def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>; + +def MULLO_INT_cm : MULLO_INT_Common<0x8F>; +def MULHI_INT_cm : MULHI_INT_Common<0x90>; +def MULLO_UINT_cm : MULLO_UINT_Common<0x91>; +def MULHI_UINT_cm : MULHI_UINT_Common<0x92>; +def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>; +def EXP_IEEE_cm : EXP_IEEE_Common<0x81>; +def LOG_IEEE_cm : LOG_IEEE_Common<0x83>; +def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>; +def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>; +def SIN_cm : SIN_Common<0x8D>; +def COS_cm : COS_Common<0x8E>; +} // End isVector = 1 + +def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>; + +defm DIV_cm : DIV_Common<RECIP_IEEE_cm>; + +// RECIP_UINT emulation for Cayman +// The multiplication scales from [0,1] to the unsigned integer range +def : Pat < + (AMDGPUurecip i32:$src0), + (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)), + (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1))) +>; + + def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> { + let ADDR = 0; + let POP_COUNT = 0; + let COUNT = 0; + } + +def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>; + + +class RAT_STORE_DWORD_cm <bits<4> mask, dag ins, list<dag> pat> : EG_CF_RAT < + 0x57, 0x14, mask, (outs), ins, + "EXPORT_RAT_INST_STORE_DWORD $rw_gpr, $index_gpr", pat +> { + let eop = 0; // This bit is not used on Cayman. +} + +def RAT_STORE_DWORD32_cm : RAT_STORE_DWORD_cm <0x1, + (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr), [(global_store i32:$rw_gpr, i32:$index_gpr)] >; -//128-bit store -def RAT_WRITE_CACHELESS_128_eg : RAT_WRITE_CACHELESS_eg < - (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), - 0xf, "RAT_WRITE_CACHELESS_128 $rw_gpr.XYZW, $index_gpr, $eop", - [(global_store v4i32:$rw_gpr, i32:$index_gpr)] +def RAT_STORE_DWORD64_cm : RAT_STORE_DWORD_cm <0x3, + (ins R600_Reg64:$rw_gpr, R600_TReg32_X:$index_gpr), + [(global_store v2i32:$rw_gpr, i32:$index_gpr)] >; -class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern> - : InstR600ISA <outs, (ins MEMxi:$ptr), name, pattern>, - VTX_WORD1_GPR, VTX_WORD0 { +class VTX_READ_cm <string name, bits<8> buffer_id, dag outs, list<dag> pattern> + : VTX_WORD0_cm, VTX_READ<name, buffer_id, outs, pattern> { // Static fields let VC_INST = 0; @@ -1803,53 +1801,18 @@ class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern> // XXX: We can infer this field based on the SRC_GPR. This would allow us // to store vertex addresses in any channel, not just X. 
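Back to the Cayman RECIP_UINT emulation above: the reciprocal is computed in floating point and rescaled by 2^32 (CONST.FP_UINT_MAX_PLUS_1) back into the unsigned integer range. A host-side sketch of the same arithmetic:

#include <cstdint>
// urecip(x) ~= FLT_TO_UINT(RECIP_IEEE(UINT_TO_FLT(x)) * 2^32).
// Illustrative only: the hardware FLT_TO_UINT saturates, whereas this
// C++ cast is undefined for x <= 1, where r * 2^32 exceeds UINT32_MAX.
uint32_t urecip(uint32_t x) {
  float r = 1.0f / (float)x;            // RECIP_IEEE(UINT_TO_FLT(x))
  return (uint32_t)(r * 4294967296.0f); // scale by FP_UINT_MAX_PLUS_1
}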
let SRC_SEL_X = 0; - let DST_REL = 0; - // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL, - // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored, - // however, based on my testing if USE_CONST_FIELDS is set, then all - // these fields need to be set to 0. - let USE_CONST_FIELDS = 0; - let NUM_FORMAT_ALL = 1; - let FORMAT_COMP_ALL = 0; - let SRF_MODE_ALL = 0; + let SRC_SEL_Y = 0; + let STRUCTURED_READ = 0; + let LDS_REQ = 0; + let COALESCED_READ = 0; let Inst{31-0} = Word0; - let Inst{63-32} = Word1; - // LLVM can only encode 64-bit instructions, so these fields are manually - // encoded in R600CodeEmitter - // - // bits<16> OFFSET; - // bits<2> ENDIAN_SWAP = 0; - // bits<1> CONST_BUF_NO_STRIDE = 0; - // bits<1> MEGA_FETCH = 0; - // bits<1> ALT_CONST = 0; - // bits<2> BUFFER_INDEX_MODE = 0; - - - - // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding - // is done in R600CodeEmitter - // - // Inst{79-64} = OFFSET; - // Inst{81-80} = ENDIAN_SWAP; - // Inst{82} = CONST_BUF_NO_STRIDE; - // Inst{83} = MEGA_FETCH; - // Inst{84} = ALT_CONST; - // Inst{86-85} = BUFFER_INDEX_MODE; - // Inst{95-86} = 0; Reserved - - // VTX_WORD3 (Padding) - // - // Inst{127-96} = 0; - - let VTXInst = 1; } -class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_8 $dst, $ptr", buffer_id, (outs R600_TReg32_X:$dst), - pattern> { +class VTX_READ_8_cm <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_cm <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id, + (outs R600_TReg32_X:$dst_gpr), pattern> { - let MEGA_FETCH_COUNT = 1; let DST_SEL_X = 0; let DST_SEL_Y = 7; // Masked let DST_SEL_Z = 7; // Masked @@ -1857,10 +1820,9 @@ class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern> let DATA_FORMAT = 1; // FMT_8 } -class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_16 $dst, $ptr", buffer_id, (outs R600_TReg32_X:$dst), - pattern> { - let MEGA_FETCH_COUNT = 2; +class VTX_READ_16_cm <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_cm <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id, + (outs R600_TReg32_X:$dst_gpr), pattern> { let DST_SEL_X = 0; let DST_SEL_Y = 7; // Masked let DST_SEL_Z = 7; // Masked @@ -1869,11 +1831,10 @@ class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern> } -class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_32 $dst, $ptr", buffer_id, (outs R600_TReg32_X:$dst), - pattern> { +class VTX_READ_32_cm <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_cm <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id, + (outs R600_TReg32_X:$dst_gpr), pattern> { - let MEGA_FETCH_COUNT = 4; let DST_SEL_X = 0; let DST_SEL_Y = 7; // Masked let DST_SEL_Z = 7; // Masked @@ -1882,19 +1843,29 @@ class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern> // This is not really necessary, but there were some GPU hangs that appeared // to be caused by ALU instructions in the next instruction group that wrote - // to the $ptr registers of the VTX_READ. + // to the $src_gpr registers of the VTX_READ. // e.g. // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24 // %T2_X<def> = MOV %ZERO //Adding this constraint prevents this from happening. 
- let Constraints = "$ptr.ptr = $dst"; + let Constraints = "$src_gpr.ptr = $dst_gpr"; } -class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_128 $dst.XYZW, $ptr", buffer_id, (outs R600_Reg128:$dst), - pattern> { +class VTX_READ_64_cm <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_cm <"VTX_READ_64 $dst_gpr, $src_gpr", buffer_id, + (outs R600_Reg64:$dst_gpr), pattern> { + + let DST_SEL_X = 0; + let DST_SEL_Y = 1; + let DST_SEL_Z = 7; + let DST_SEL_W = 7; + let DATA_FORMAT = 0x1D; // COLOR_32_32 +} + +class VTX_READ_128_cm <bits<8> buffer_id, list<dag> pattern> + : VTX_READ_cm <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id, + (outs R600_Reg128:$dst_gpr), pattern> { - let MEGA_FETCH_COUNT = 16; let DST_SEL_X = 0; let DST_SEL_Y = 1; let DST_SEL_Z = 2; @@ -1903,28 +1874,31 @@ class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern> // XXX: Need to force VTX_READ_128 instructions to write to the same register // that holds its buffer address to avoid potential hangs. We can't use - // the same constraint as VTX_READ_32_eg, because the $ptr.ptr and $dst + // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst // registers are different sizes. } //===----------------------------------------------------------------------===// // VTX Read from parameter memory space //===----------------------------------------------------------------------===// +def VTX_READ_PARAM_8_cm : VTX_READ_8_cm <0, + [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))] +>; -def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0, - [(set i32:$dst, (load_param_zexti8 ADDRVTX_READ:$ptr))] +def VTX_READ_PARAM_16_cm : VTX_READ_16_cm <0, + [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))] >; -def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0, - [(set i32:$dst, (load_param_zexti16 ADDRVTX_READ:$ptr))] +def VTX_READ_PARAM_32_cm : VTX_READ_32_cm <0, + [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))] >; -def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0, - [(set i32:$dst, (load_param ADDRVTX_READ:$ptr))] +def VTX_READ_PARAM_64_cm : VTX_READ_64_cm <0, + [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))] >; -def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0, - [(set v4i32:$dst, (load_param ADDRVTX_READ:$ptr))] +def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0, + [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))] >; //===----------------------------------------------------------------------===// @@ -1932,78 +1906,29 @@ def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0, //===----------------------------------------------------------------------===// // 8-bit reads -def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1, - [(set i32:$dst, (zextloadi8_global ADDRVTX_READ:$ptr))] +def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1, + [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))] >; -// 32-bit reads -def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1, - [(set i32:$dst, (global_load ADDRVTX_READ:$ptr))] +def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1, + [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))] >; -// 128-bit reads -def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1, - [(set v4i32:$dst, (global_load ADDRVTX_READ:$ptr))] +// 32-bit reads +def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1, + [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))] >; -//===----------------------------------------------------------------------===// -// Constant Loads -// XXX: We are currently storing all constants in the global address space. 
-//===----------------------------------------------------------------------===// - -def CONSTANT_LOAD_eg : VTX_READ_32_eg <1, - [(set i32:$dst, (constant_load ADDRVTX_READ:$ptr))] +// 64-bit reads +def VTX_READ_GLOBAL_64_cm : VTX_READ_64_cm <1, + [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))] >; -} - -//===----------------------------------------------------------------------===// -// Regist loads and stores - for indirect addressing -//===----------------------------------------------------------------------===// - -defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>; - -let Predicates = [isCayman] in { - -let isVector = 1 in { - -def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>; - -def MULLO_INT_cm : MULLO_INT_Common<0x8F>; -def MULHI_INT_cm : MULHI_INT_Common<0x90>; -def MULLO_UINT_cm : MULLO_UINT_Common<0x91>; -def MULHI_UINT_cm : MULHI_UINT_Common<0x92>; -def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>; -def EXP_IEEE_cm : EXP_IEEE_Common<0x81>; -def LOG_IEEE_cm : LOG_IEEE_Common<0x83>; -def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>; -def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>; -def SIN_cm : SIN_Common<0x8D>; -def COS_cm : COS_Common<0x8E>; -} // End isVector = 1 - -def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>; -def : SIN_PAT <SIN_cm>; -def : COS_PAT <COS_cm>; - -defm DIV_cm : DIV_Common<RECIP_IEEE_cm>; - -// RECIP_UINT emulation for Cayman -// The multiplication scales from [0,1] to the unsigned integer range -def : Pat < - (AMDGPUurecip i32:$src0), - (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)), - (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1))) +// 128-bit reads +def VTX_READ_GLOBAL_128_cm : VTX_READ_128_cm <1, + [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))] >; - def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> { - let ADDR = 0; - let POP_COUNT = 0; - let COUNT = 0; - } - -def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>; - } // End isCayman //===----------------------------------------------------------------------===// @@ -2014,9 +1939,6 @@ def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>; def IF_PREDICATE_SET : ILFormat<(outs), (ins GPRI32:$src), "IF_PREDICATE_SET $src", []>; -def PREDICATED_BREAK : ILFormat<(outs), (ins GPRI32:$src), - "PREDICATED_BREAK $src", []>; - //===----------------------------------------------------------------------===// // Pseudo instructions //===----------------------------------------------------------------------===// @@ -2124,7 +2046,7 @@ def CONST_COPY : Instruction { def TEX_VTX_CONSTBUF : InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "VTX_READ_eg $dst, $ptr", [(set v4i32:$dst, (CONST_ADDRESS ADDRGA_VAR_OFFSET:$ptr, (i32 imm:$BUFFER_ID)))]>, - VTX_WORD1_GPR, VTX_WORD0 { + VTX_WORD1_GPR, VTX_WORD0_eg { let VC_INST = 0; let FETCH_TYPE = 2; @@ -2178,7 +2100,7 @@ def TEX_VTX_CONSTBUF : def TEX_VTX_TEXBUF: InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "TEX_VTX_EXPLICIT_READ $dst, $ptr", [(set v4f32:$dst, (int_R600_load_texbuf ADDRGA_VAR_OFFSET:$ptr, imm:$BUFFER_ID))]>, -VTX_WORD1_GPR, VTX_WORD0 { +VTX_WORD1_GPR, VTX_WORD0_eg { let VC_INST = 0; let FETCH_TYPE = 2; @@ -2431,10 +2353,24 @@ def : Insert_Element <i32, v4i32, 3, sub3>; def : Vector4_Build <v4f32, f32>; def : Vector4_Build <v4i32, i32>; +def : Extract_Element <f32, v2f32, 0, sub0>; +def : Extract_Element <f32, v2f32, 1, sub1>; + +def : Insert_Element <f32, v2f32, 0, sub0>; +def : Insert_Element 
<f32, v2f32, 1, sub1>; + +def : Extract_Element <i32, v2i32, 0, sub0>; +def : Extract_Element <i32, v2i32, 1, sub1>; + +def : Insert_Element <i32, v2i32, 0, sub0>; +def : Insert_Element <i32, v2i32, 1, sub1>; + // bitconvert patterns def : BitConvert <i32, f32, R600_Reg32>; def : BitConvert <f32, i32, R600_Reg32>; +def : BitConvert <v2f32, v2i32, R600_Reg64>; +def : BitConvert <v2i32, v2f32, R600_Reg64>; def : BitConvert <v4f32, v4i32, R600_Reg128>; def : BitConvert <v4i32, v4f32, R600_Reg128>; diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp index a330d88..0dc0365 100644 --- a/lib/Target/R600/R600MachineScheduler.cpp +++ b/lib/Target/R600/R600MachineScheduler.cpp @@ -32,7 +32,7 @@ void R600SchedStrategy::initialize(ScheduleDAGMI *dag) { MRI = &DAG->MRI; CurInstKind = IDOther; CurEmitted = 0; - OccupedSlotsMask = 15; + OccupedSlotsMask = 31; InstKindLimit[IDAlu] = TII->getMaxAlusPerClause(); InstKindLimit[IDOther] = 32; @@ -160,7 +160,7 @@ void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) { if (NextInstKind != CurInstKind) { DEBUG(dbgs() << "Instruction Type Switch\n"); if (NextInstKind != IDAlu) - OccupedSlotsMask = 15; + OccupedSlotsMask |= 31; CurEmitted = 0; CurInstKind = NextInstKind; } @@ -251,6 +251,9 @@ bool R600SchedStrategy::regBelongsToClass(unsigned Reg, R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const { MachineInstr *MI = SU->getInstr(); + if (TII->isTransOnly(MI)) + return AluTrans; + switch (MI->getOpcode()) { case AMDGPU::PRED_X: return AluPredX; @@ -269,10 +272,18 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const { } // Does the instruction take a whole IG ? + // XXX: Is it possible to add a helper function in R600InstrInfo that can + // be used here and in R600PacketizerList::isSoloInstruction() ? if(TII->isVector(*MI) || TII->isCubeOp(MI->getOpcode()) || - TII->isReductionOp(MI->getOpcode())) + TII->isReductionOp(MI->getOpcode()) || + MI->getOpcode() == AMDGPU::GROUP_BARRIER) { return AluT_XYZW; + } + + if (TII->isLDSInstr(MI->getOpcode())) { + return AluT_X; + } // Is the result already assigned to a channel ? 
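A quick legend for the OccupedSlotsMask values that change throughout this file: the mask gains a fifth bit for the trans slot, so "every slot busy" becomes 31 rather than the old 15. The implied layout (a sketch; this enum does not appear in the patch):

// Bits 0-3: the X/Y/Z/W vector slots (set via 1 << Chan below);
// bit 4 (value 16): the trans slot, tested as OccupedSlotsMask & 16.
enum SlotMaskBits {
  SlotX = 1 << 0, SlotY = 1 << 1, SlotZ = 1 << 2, SlotW = 1 << 3,
  SlotTrans = 1 << 4,
  AllVectorSlots = 15,
  AllSlots = 31
};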
unsigned DestSubReg = MI->getOperand(0).getSubReg(); @@ -338,7 +349,7 @@ SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q) { It != E; ++It) { SUnit *SU = *It; InstructionsGroupCandidate.push_back(SU->getInstr()); - if (TII->canBundle(InstructionsGroupCandidate)) { + if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)) { InstructionsGroupCandidate.pop_back(); Q.erase((It + 1).base()); return SU; @@ -367,14 +378,18 @@ void R600SchedStrategy::PrepareNextSlot() { } void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) { - unsigned DestReg = MI->getOperand(0).getReg(); + int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst); + if (DstIndex == -1) { + return; + } + unsigned DestReg = MI->getOperand(DstIndex).getReg(); // PressureRegister crashes if an operand is def and used in the same inst // and we try to constraint its regclass for (MachineInstr::mop_iterator It = MI->operands_begin(), E = MI->operands_end(); It != E; ++It) { MachineOperand &MO = *It; if (MO.isReg() && !MO.isDef() && - MO.getReg() == MI->getOperand(0).getReg()) + MO.getReg() == DestReg) return; } // Constrains the regclass of DestReg to assign it to Slot @@ -409,7 +424,8 @@ unsigned R600SchedStrategy::AvailablesAluCount() const { return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() + AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() + AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() + - AvailableAlus[AluDiscarded].size() + AvailableAlus[AluPredX].size(); + AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() + + AvailableAlus[AluPredX].size(); } SUnit* R600SchedStrategy::pickAlu() { @@ -417,20 +433,27 @@ SUnit* R600SchedStrategy::pickAlu() { if (!OccupedSlotsMask) { // Bottom up scheduling : predX must comes first if (!AvailableAlus[AluPredX].empty()) { - OccupedSlotsMask = 15; + OccupedSlotsMask |= 31; return PopInst(AvailableAlus[AluPredX]); } // Flush physical reg copies (RA will discard them) if (!AvailableAlus[AluDiscarded].empty()) { - OccupedSlotsMask = 15; + OccupedSlotsMask |= 31; return PopInst(AvailableAlus[AluDiscarded]); } // If there is a T_XYZW alu available, use it if (!AvailableAlus[AluT_XYZW].empty()) { - OccupedSlotsMask = 15; + OccupedSlotsMask |= 15; return PopInst(AvailableAlus[AluT_XYZW]); } } + bool TransSlotOccuped = OccupedSlotsMask & 16; + if (!TransSlotOccuped) { + if (!AvailableAlus[AluTrans].empty()) { + OccupedSlotsMask |= 16; + return PopInst(AvailableAlus[AluTrans]); + } + } for (int Chan = 3; Chan > -1; --Chan) { bool isOccupied = OccupedSlotsMask & (1 << Chan); if (!isOccupied) { diff --git a/lib/Target/R600/R600MachineScheduler.h b/lib/Target/R600/R600MachineScheduler.h index aae8b3f..f8965d8 100644 --- a/lib/Target/R600/R600MachineScheduler.h +++ b/lib/Target/R600/R600MachineScheduler.h @@ -46,6 +46,7 @@ class R600SchedStrategy : public MachineSchedStrategy { AluT_W, AluT_XYZW, AluPredX, + AluTrans, AluDiscarded, // LLVM Instructions that are going to be eliminated AluLast }; diff --git a/lib/Target/R600/R600OptimizeVectorRegisters.cpp b/lib/Target/R600/R600OptimizeVectorRegisters.cpp index 4636426..acacffa 100644 --- a/lib/Target/R600/R600OptimizeVectorRegisters.cpp +++ b/lib/Target/R600/R600OptimizeVectorRegisters.cpp @@ -183,10 +183,6 @@ MachineInstr *R600VectorRegMerger::RebuildVector( std::vector<unsigned> UpdatedUndef = BaseRSI->UndefReg; for (DenseMap<unsigned, unsigned>::iterator It = RSI->RegToChan.begin(), E = RSI->RegToChan.end(); It != E; ++It) { - if 
(BaseRSI->RegToChan.find((*It).first) != BaseRSI->RegToChan.end()) { - UpdatedRegToChan[(*It).first] = (*It).second; - continue; - } unsigned DstReg = MRI->createVirtualRegister(&AMDGPU::R600_Reg128RegClass); unsigned SubReg = (*It).first; unsigned Swizzle = (*It).second; @@ -326,8 +322,17 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) { for (MachineBasicBlock::iterator MII = MB->begin(), MIIE = MB->end(); MII != MIIE; ++MII) { MachineInstr *MI = MII; - if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) + if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) { + if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TEX_INST) { + unsigned Reg = MI->getOperand(1).getReg(); + for (MachineRegisterInfo::def_iterator It = MRI->def_begin(Reg), + E = MRI->def_end(); It != E; ++It) { + RemoveMI(&(*It)); + } + } continue; + } + RegSeqInfo RSI(*MRI, MI); diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp index da614c7..5cf1fd3 100644 --- a/lib/Target/R600/R600Packetizer.cpp +++ b/lib/Target/R600/R600Packetizer.cpp @@ -77,17 +77,26 @@ private: do { if (TII->isPredicated(BI)) continue; - if (TII->isTransOnly(BI)) - continue; - int OperandIdx = TII->getOperandIdx(BI->getOpcode(), R600Operands::WRITE); + int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write); if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0) continue; - unsigned Dst = BI->getOperand(0).getReg(); + int DstIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::dst); + if (DstIdx == -1) { + continue; + } + unsigned Dst = BI->getOperand(DstIdx).getReg(); + if (TII->isTransOnly(BI)) { + Result[Dst] = AMDGPU::PS; + continue; + } if (BI->getOpcode() == AMDGPU::DOT4_r600 || BI->getOpcode() == AMDGPU::DOT4_eg) { Result[Dst] = AMDGPU::PV_X; continue; } + if (Dst == AMDGPU::OQAP) { + continue; + } unsigned PVReg = 0; switch (TRI.getHWRegChan(Dst)) { case 0: @@ -112,10 +121,10 @@ private: void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs) const { - R600Operands::Ops Ops[] = { - R600Operands::SRC0, - R600Operands::SRC1, - R600Operands::SRC2 + unsigned Ops[] = { + AMDGPU::OpName::src0, + AMDGPU::OpName::src1, + AMDGPU::OpName::src2 }; for (unsigned i = 0; i < 3; i++) { int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]); @@ -150,9 +159,11 @@ public: return true; if (!TII->isALUInstr(MI->getOpcode())) return true; - if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TRANS_ONLY) + if (MI->getOpcode() == AMDGPU::GROUP_BARRIER) return true; - if (TII->isTransOnly(MI)) + // XXX: This can be removed once the packetizer properly handles all the + // LDS instruction group restrictions. + if (TII->isLDSInstr(MI->getOpcode())) return true; return false; } @@ -161,11 +172,11 @@ public: // together. bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { MachineInstr *MII = SUI->getInstr(), *MIJ = SUJ->getInstr(); - if (getSlot(MII) <= getSlot(MIJ)) + if (getSlot(MII) <= getSlot(MIJ) && !TII->isTransOnly(MII)) return false; // Does MII and MIJ share the same pred_sel ? 
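To summarize the bookkeeping in getPreviousVector above: results produced by the previous instruction group are read back through the PV.X..PV.W forwarding registers, keyed by the writer's hardware channel, while trans-slot results now come back through the new PS register. A condensed sketch of that mapping (hypothetical helper; the real code also special-cases DOT4 and skips OQAP):

// Which forwarding register exposes a value written by the previous
// instruction group.
unsigned previousVectorReg(const R600RegisterInfo &TRI, unsigned Dst,
                           bool TransOnly) {
  if (TransOnly)
    return AMDGPU::PS; // trans results are forwarded through PS
  switch (TRI.getHWRegChan(Dst)) {
  case 0:  return AMDGPU::PV_X;
  case 1:  return AMDGPU::PV_Y;
  case 2:  return AMDGPU::PV_Z;
  default: return AMDGPU::PV_W;
  }
}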
- int OpI = TII->getOperandIdx(MII->getOpcode(), R600Operands::PRED_SEL), - OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600Operands::PRED_SEL); + int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel), + OpJ = TII->getOperandIdx(MIJ->getOpcode(), AMDGPU::OpName::pred_sel); unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0, PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0; if (PredI != PredJ) @@ -191,15 +202,20 @@ public: bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {return false;} void setIsLastBit(MachineInstr *MI, unsigned Bit) const { - unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), R600Operands::LAST); + unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last); MI->getOperand(LastOp).setImm(Bit); } - MachineBasicBlock::iterator addToPacket(MachineInstr *MI) { + bool isBundlableWithCurrentPMI(MachineInstr *MI, + const DenseMap<unsigned, unsigned> &PV, + std::vector<R600InstrInfo::BankSwizzle> &BS, + bool &isTransSlot) { + isTransSlot = TII->isTransOnly(MI); + + // Are the Constants limitations met ? CurrentPacketMIs.push_back(MI); - bool FitsConstLimits = TII->canBundle(CurrentPacketMIs); - DEBUG( - if (!FitsConstLimits) { + if (!TII->fitsConstReadLimitations(CurrentPacketMIs)) { + DEBUG( dbgs() << "Couldn't pack :\n"; MI->dump(); dbgs() << "with the following packets :\n"; @@ -208,14 +224,15 @@ public: dbgs() << "\n"; } dbgs() << "because of Consts read limitations\n"; - }); - const DenseMap<unsigned, unsigned> &PV = - getPreviousVector(CurrentPacketMIs.front()); - std::vector<R600InstrInfo::BankSwizzle> BS; - bool FitsReadPortLimits = - TII->fitsReadPortLimitations(CurrentPacketMIs, PV, BS); - DEBUG( - if (!FitsReadPortLimits) { + ); + CurrentPacketMIs.pop_back(); + return false; + } + + // Is there a BankSwizzle set that meet Read Port limitations ? + if (!TII->fitsReadPortLimitations(CurrentPacketMIs, + PV, BS, isTransSlot)) { + DEBUG( dbgs() << "Couldn't pack :\n"; MI->dump(); dbgs() << "with the following packets :\n"; @@ -224,25 +241,43 @@ public: dbgs() << "\n"; } dbgs() << "because of Read port limitations\n"; - }); - bool isBundlable = FitsConstLimits && FitsReadPortLimits; - if (isBundlable) { + ); + CurrentPacketMIs.pop_back(); + return false; + } + + CurrentPacketMIs.pop_back(); + return true; + } + + MachineBasicBlock::iterator addToPacket(MachineInstr *MI) { + MachineBasicBlock::iterator FirstInBundle = + CurrentPacketMIs.empty() ? 
MI : CurrentPacketMIs.front(); + const DenseMap<unsigned, unsigned> &PV = + getPreviousVector(FirstInBundle); + std::vector<R600InstrInfo::BankSwizzle> BS; + bool isTransSlot; + + if (isBundlableWithCurrentPMI(MI, PV, BS, isTransSlot)) { for (unsigned i = 0, e = CurrentPacketMIs.size(); i < e; i++) { MachineInstr *MI = CurrentPacketMIs[i]; - unsigned Op = TII->getOperandIdx(MI->getOpcode(), - R600Operands::BANK_SWIZZLE); - MI->getOperand(Op).setImm(BS[i]); + unsigned Op = TII->getOperandIdx(MI->getOpcode(), + AMDGPU::OpName::bank_swizzle); + MI->getOperand(Op).setImm(BS[i]); } + unsigned Op = TII->getOperandIdx(MI->getOpcode(), + AMDGPU::OpName::bank_swizzle); + MI->getOperand(Op).setImm(BS.back()); + if (!CurrentPacketMIs.empty()) + setIsLastBit(CurrentPacketMIs.back(), 0); + substitutePV(MI, PV); + MachineBasicBlock::iterator It = VLIWPacketizerList::addToPacket(MI); + if (isTransSlot) { + endPacket(llvm::next(It)->getParent(), llvm::next(It)); + } + return It; } - CurrentPacketMIs.pop_back(); - if (!isBundlable) { - endPacket(MI->getParent(), MI); - substitutePV(MI, getPreviousVector(MI)); - return VLIWPacketizerList::addToPacket(MI); - } - if (!CurrentPacketMIs.empty()) - setIsLastBit(CurrentPacketMIs.back(), 0); - substitutePV(MI, PV); + endPacket(MI->getParent(), MI); return VLIWPacketizerList::addToPacket(MI); } }; @@ -273,7 +308,8 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) { MachineBasicBlock::iterator End = MBB->end(); MachineBasicBlock::iterator MI = MBB->begin(); while (MI != End) { - if (MI->isKill()) { + if (MI->isKill() || + (MI->getOpcode() == AMDGPU::CF_ALU && !MI->getOperand(8).getImm())) { MachineBasicBlock::iterator DeleteMI = MI; ++MI; MBB->erase(DeleteMI); diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/R600/R600RegisterInfo.td index a8b9b70..fa987cf 100644 --- a/lib/Target/R600/R600RegisterInfo.td +++ b/lib/Target/R600/R600RegisterInfo.td @@ -23,6 +23,14 @@ class R600Reg_128<string n, list<Register> subregs, bits<16> encoding> : let HWEncoding = encoding; } +class R600Reg_64<string n, list<Register> subregs, bits<16> encoding> : + RegisterWithSubRegs<n, subregs> { + let Namespace = "AMDGPU"; + let SubRegIndices = [sub0, sub1]; + let HWEncoding = encoding; +} + + foreach Index = 0-127 in { foreach Chan = [ "X", "Y", "Z", "W" ] in { // 32-bit Temporary Registers @@ -41,16 +49,21 @@ foreach Index = 0-127 in { !cast<Register>("T"#Index#"_Z"), !cast<Register>("T"#Index#"_W")], Index>; + + def T#Index#_XY : R600Reg_64 <"T"#Index#"", + [!cast<Register>("T"#Index#"_X"), + !cast<Register>("T"#Index#"_Y")], + Index>; } // KCACHE_BANK0 foreach Index = 159-128 in { foreach Chan = [ "X", "Y", "Z", "W" ] in { // 32-bit Temporary Registers - def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#Index#"-128]."#Chan, Index, Chan>; + def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#!add(Index,-128)#"]."#Chan, Index, Chan>; } // 128-bit Temporary Registers - def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#Index#"-128].XYZW", + def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#!add(Index, -128)#"].XYZW", [!cast<Register>("KC0_"#Index#"_X"), !cast<Register>("KC0_"#Index#"_Y"), !cast<Register>("KC0_"#Index#"_Z"), @@ -62,10 +75,10 @@ foreach Index = 159-128 in { foreach Index = 191-160 in { foreach Chan = [ "X", "Y", "Z", "W" ] in { // 32-bit Temporary Registers - def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#Index#"-160]."#Chan, Index, Chan>; + def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#!add(Index,-160)#"]."#Chan, Index, Chan>; } // 128-bit Temporary Registers - 
def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#Index#"-160].XYZW", + def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#!add(Index, -160)#"].XYZW", [!cast<Register>("KC1_"#Index#"_X"), !cast<Register>("KC1_"#Index#"_Y"), !cast<Register>("KC1_"#Index#"_Z"), @@ -96,11 +109,13 @@ def PV_X : R600RegWithChan<"PV.X", 254, "X">; def PV_Y : R600RegWithChan<"PV.Y", 254, "Y">; def PV_Z : R600RegWithChan<"PV.Z", 254, "Z">; def PV_W : R600RegWithChan<"PV.W", 254, "W">; +def PS: R600Reg<"PS", 255>; def PREDICATE_BIT : R600Reg<"PredicateBit", 0>; def PRED_SEL_OFF: R600Reg<"Pred_sel_off", 0>; def PRED_SEL_ZERO : R600Reg<"Pred_sel_zero", 2>; def PRED_SEL_ONE : R600Reg<"Pred_sel_one", 3>; def AR_X : R600Reg<"AR.x", 0>; +def OQAP : R600Reg<"OQAP", 221>; def R600_ArrayBase : RegisterClass <"AMDGPU", [f32, i32], 32, (add (sequence "ArrayBase%u", 448, 480))>; @@ -170,7 +185,7 @@ def R600_Reg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add R600_ArrayBase, R600_Addr, ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF, - ALU_CONST, ALU_PARAM + ALU_CONST, ALU_PARAM, OQAP )>; def R600_Predicate : RegisterClass <"AMDGPU", [i32], 32, (add @@ -184,6 +199,9 @@ def R600_Reg128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128, let CopyCost = -1; } +def R600_Reg64 : RegisterClass<"AMDGPU", [v2f32, v2i32], 64, + (add (sequence "T%u_XY", 0, 63))>; + //===----------------------------------------------------------------------===// // Register classes for indirect addressing //===----------------------------------------------------------------------===// diff --git a/lib/Target/R600/R600Schedule.td b/lib/Target/R600/R600Schedule.td index 78a460a..df62bf8 100644 --- a/lib/Target/R600/R600Schedule.td +++ b/lib/Target/R600/R600Schedule.td @@ -23,14 +23,16 @@ def TRANS : FuncUnit; def AnyALU : InstrItinClass; def VecALU : InstrItinClass; def TransALU : InstrItinClass; +def XALU : InstrItinClass; def R600_VLIW5_Itin : ProcessorItineraries < [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS, ALU_NULL], [], [ InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS]>]>, - InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>, + InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>, InstrItinData<TransALU, [InstrStage<1, [TRANS]>]>, + InstrItinData<XALU, [InstrStage<1, [ALU_X]>]>, InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]> ] >; @@ -40,7 +42,7 @@ def R600_VLIW4_Itin : ProcessorItineraries < [], [ InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>, - InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>, + InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>, InstrItinData<TransALU, [InstrStage<1, [ALU_NULL]>]>, InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]> ] diff --git a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/R600/SIAnnotateControlFlow.cpp index 9791ef4..6bbdf59 100644 --- a/lib/Target/R600/SIAnnotateControlFlow.cpp +++ b/lib/Target/R600/SIAnnotateControlFlow.cpp @@ -31,13 +31,13 @@ typedef std::pair<BasicBlock *, Value *> StackEntry; typedef SmallVector<StackEntry, 16> StackVector; // Intrinsic names the control flow is annotated with -static const char *IfIntrinsic = "llvm.SI.if"; -static const char *ElseIntrinsic = "llvm.SI.else"; -static const char *BreakIntrinsic = "llvm.SI.break"; -static const char *IfBreakIntrinsic = "llvm.SI.if.break"; -static const char *ElseBreakIntrinsic = "llvm.SI.else.break"; -static const char *LoopIntrinsic = "llvm.SI.loop"; -static const char *EndCfIntrinsic = "llvm.SI.end.cf"; +static const 
char *const IfIntrinsic = "llvm.SI.if"; +static const char *const ElseIntrinsic = "llvm.SI.else"; +static const char *const BreakIntrinsic = "llvm.SI.break"; +static const char *const IfBreakIntrinsic = "llvm.SI.if.break"; +static const char *const ElseBreakIntrinsic = "llvm.SI.else.break"; +static const char *const LoopIntrinsic = "llvm.SI.loop"; +static const char *const EndCfIntrinsic = "llvm.SI.end.cf"; class SIAnnotateControlFlow : public FunctionPass { diff --git a/lib/Target/R600/SIDefines.h b/lib/Target/R600/SIDefines.h index 716b093..147578c 100644 --- a/lib/Target/R600/SIDefines.h +++ b/lib/Target/R600/SIDefines.h @@ -12,11 +12,15 @@ #define SIDEFINES_H_ #define R_00B028_SPI_SHADER_PGM_RSRC1_PS 0x00B028 +#define R_00B02C_SPI_SHADER_PGM_RSRC2_PS 0x00B02C +#define S_00B02C_EXTRA_LDS_SIZE(x) (((x) & 0xFF) << 8) #define R_00B128_SPI_SHADER_PGM_RSRC1_VS 0x00B128 #define R_00B228_SPI_SHADER_PGM_RSRC1_GS 0x00B228 #define R_00B848_COMPUTE_PGM_RSRC1 0x00B848 #define S_00B028_VGPRS(x) (((x) & 0x3F) << 0) #define S_00B028_SGPRS(x) (((x) & 0x0F) << 6) +#define R_00B84C_COMPUTE_PGM_RSRC2 0x00B84C +#define S_00B84C_LDS_SIZE(x) (((x) & 0x1FF) << 15) #define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC #endif // SIDEFINES_H_ diff --git a/lib/Target/R600/SIFixSGPRCopies.cpp b/lib/Target/R600/SIFixSGPRCopies.cpp new file mode 100644 index 0000000..435172a --- /dev/null +++ b/lib/Target/R600/SIFixSGPRCopies.cpp @@ -0,0 +1,152 @@ +//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file +/// Copies from VGPR to SGPR registers are illegal and the register coalescer +/// will sometimes generate these illegal copies in situations like this: +/// +/// Register Class <vsrc> is the union of <vgpr> and <sgpr> +/// +/// BB0: +/// %vreg0 <sgpr> = SCALAR_INST +/// %vreg1 <vsrc> = COPY %vreg0 <sgpr> +/// ... +/// BRANCH %cond BB1, BB2 +/// BB1: +/// %vreg2 <vgpr> = VECTOR_INST +/// %vreg3 <vsrc> = COPY %vreg2 <vgpr> +/// BB2: +/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vrsc>, <BB#1> +/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc> +/// +/// +/// The coalescer will begin at BB0 and eliminate its copy, then the resulting +/// code will look like this: +/// +/// BB0: +/// %vreg0 <sgpr> = SCALAR_INST +/// ... +/// BRANCH %cond BB1, BB2 +/// BB1: +/// %vreg2 <vgpr> = VECTOR_INST +/// %vreg3 <vsrc> = COPY %vreg2 <vgpr> +/// BB2: +/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1> +/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr> +/// +/// Now that the result of the PHI instruction is an SGPR, the register +/// allocator is now forced to constrain the register class of %vreg3 to +/// <sgpr> so we end up with final code like this: +/// +/// BB0: +/// %vreg0 <sgpr> = SCALAR_INST +/// ... +/// BRANCH %cond BB1, BB2 +/// BB1: +/// %vreg2 <vgpr> = VECTOR_INST +/// %vreg3 <sgpr> = COPY %vreg2 <vgpr> +/// BB2: +/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1> +/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr> +/// +/// Now this code contains an illegal copy from a VGPR to an SGPR. 
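(An aside on why that copy is illegal, not stated in the patch: on SI there is no plain move from a VGPR to an SGPR — v_readfirstlane_b32 is the closest, and it is not a per-lane copy — while the reverse direction is fine via v_mov_b32. A hypothetical legality check capturing this; isSGPRClass/isVGPRClass are assumed helpers:)

// Hypothetical: only the VGPR-destination direction is directly
// encodable as a copy.
bool isEncodableCopy(const TargetRegisterClass *DstRC,
                     const TargetRegisterClass *SrcRC) {
  return !(isSGPRClass(DstRC) && isVGPRClass(SrcRC));
}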
+/// +/// In order to avoid this problem, this pass searches for PHI instructions +/// which define a <vsrc> register and constrains its definition class to +/// <vgpr> if the user of the PHI's definition register is a vector instruction. +/// If the PHI's definition class is constrained to <vgpr> then the coalescer +/// will be unable to perform the COPY removal from the above example which +/// ultimately led to the creation of an illegal COPY. +//===----------------------------------------------------------------------===// + +#include "AMDGPU.h" +#include "SIInstrInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +namespace { + +class SIFixSGPRCopies : public MachineFunctionPass { + +private: + static char ID; + const TargetRegisterClass *inferRegClass(const TargetRegisterInfo *TRI, + const MachineRegisterInfo &MRI, + unsigned Reg) const; + +public: + SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { } + + virtual bool runOnMachineFunction(MachineFunction &MF); + + const char *getPassName() const { + return "SI Fix SGPR copies"; + } + +}; + +} // End anonymous namespace + +char SIFixSGPRCopies::ID = 0; + +FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) { + return new SIFixSGPRCopies(tm); +} + +/// This functions walks the use/def chains starting with the definition of +/// \p Reg until it finds an Instruction that isn't a COPY returns +/// the register class of that instruction. +const TargetRegisterClass *SIFixSGPRCopies::inferRegClass( + const TargetRegisterInfo *TRI, + const MachineRegisterInfo &MRI, + unsigned Reg) const { + // The Reg parameter to the function must always be defined by either a PHI + // or a COPY, therefore it cannot be a physical register. 
+ assert(TargetRegisterInfo::isVirtualRegister(Reg) && + "Reg cannot be a physical register"); + + const TargetRegisterClass *RC = MRI.getRegClass(Reg); + for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg), + E = MRI.use_end(); I != E; ++I) { + switch (I->getOpcode()) { + case AMDGPU::COPY: + RC = TRI->getCommonSubClass(RC, inferRegClass(TRI, MRI, + I->getOperand(0).getReg())); + break; + } + } + + return RC; +} + +bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) { + MachineRegisterInfo &MRI = MF.getRegInfo(); + const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo(); + for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); + BI != BE; ++BI) { + + MachineBasicBlock &MBB = *BI; + for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); + I != E; ++I) { + MachineInstr &MI = *I; + if (MI.getOpcode() != AMDGPU::PHI) { + continue; + } + unsigned Reg = MI.getOperand(0).getReg(); + const TargetRegisterClass *RC = inferRegClass(TRI, MRI, Reg); + if (RC == &AMDGPU::VSrc_32RegClass) { + MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass); + } + } + } + return false; +} diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp index d74f401..c64027f 100644 --- a/lib/Target/R600/SIISelLowering.cpp +++ b/lib/Target/R600/SIISelLowering.cpp @@ -32,19 +32,23 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) : AMDGPUTargetLowering(TM) { addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass); - addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); + addRegisterClass(MVT::i64, &AMDGPU::VSrc_64RegClass); + + addRegisterClass(MVT::v2i1, &AMDGPU::VReg_64RegClass); + addRegisterClass(MVT::v4i1, &AMDGPU::VReg_128RegClass); addRegisterClass(MVT::v16i8, &AMDGPU::SReg_128RegClass); addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass); addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass); - addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass); - addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass); + addRegisterClass(MVT::i32, &AMDGPU::VSrc_32RegClass); + addRegisterClass(MVT::f32, &AMDGPU::VSrc_32RegClass); - addRegisterClass(MVT::v1i32, &AMDGPU::VReg_32RegClass); + addRegisterClass(MVT::v1i32, &AMDGPU::VSrc_32RegClass); - addRegisterClass(MVT::v2i32, &AMDGPU::VReg_64RegClass); - addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass); + addRegisterClass(MVT::f64, &AMDGPU::VSrc_64RegClass); + addRegisterClass(MVT::v2i32, &AMDGPU::VSrc_64RegClass); + addRegisterClass(MVT::v2f32, &AMDGPU::VSrc_64RegClass); addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass); addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); @@ -71,10 +75,18 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) : setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + setOperationAction(ISD::SETCC, MVT::v2i1, Expand); + setOperationAction(ISD::SETCC, MVT::v4i1, Expand); + setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom); + setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); + setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Expand); + + setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); + setTargetDAGCombine(ISD::SELECT_CC); setTargetDAGCombine(ISD::SETCC); @@ -82,6 +94,18 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) : setSchedulingPreference(Sched::RegPressure); } +//===----------------------------------------------------------------------===// +// TargetLowering queries +//===----------------------------------------------------------------------===// + +bool 
SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT, + bool *IsFast) const { + // XXX: This depends on the address space and also we may want to revist + // the alignment values we specify in the DataLayout. + return VT.bitsGT(MVT::i32); +} + + SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, SDLoc DL, SDValue Chain, unsigned Offset) const { @@ -93,9 +117,9 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64); SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, DAG.getConstant(Offset, MVT::i64)); - return DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, Chain, Ptr, + return DAG.getLoad(VT, DL, Chain, Ptr, MachinePointerInfo(UndefValue::get(PtrTy)), - VT, false, false, ArgVT.getSizeInBits() >> 3); + false, false, false, ArgVT.getSizeInBits() >> 3); } @@ -281,12 +305,30 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter( MI->eraseFromParent(); break; } + case AMDGPU::V_SUB_F64: { + const SIInstrInfo *TII = + static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo()); + BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64), + MI->getOperand(0).getReg()) + .addReg(MI->getOperand(1).getReg()) + .addReg(MI->getOperand(2).getReg()) + .addImm(0) /* src2 */ + .addImm(0) /* ABS */ + .addImm(0) /* CLAMP */ + .addImm(0) /* OMOD */ + .addImm(2); /* NEG */ + MI->eraseFromParent(); + break; + } } return BB; } EVT SITargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { - return MVT::i1; + if (!VT.isVector()) { + return MVT::i1; + } + return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); } MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const { @@ -298,11 +340,15 @@ MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const { //===----------------------------------------------------------------------===// SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); switch (Op.getOpcode()) { default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG); + case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG); + case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG); case ISD::INTRINSIC_WO_CHAIN: { unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); @@ -483,6 +529,19 @@ SDValue SITargetLowering::LowerSIGN_EXTEND(SDValue Op, return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), Hi); } +SDValue SITargetLowering::LowerZERO_EXTEND(SDValue Op, + SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + SDLoc DL(Op); + + if (VT != MVT::i64) { + return SDValue(); + } + + return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), + DAG.getConstant(0, MVT::i32)); +} + //===----------------------------------------------------------------------===// // Custom DAG optimizations //===----------------------------------------------------------------------===// @@ -614,43 +673,67 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate, return false; } +const TargetRegisterClass *SITargetLowering::getRegClassForNode( + SelectionDAG &DAG, const SDValue &Op) const { + const SIInstrInfo *TII = + static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo()); + const SIRegisterInfo &TRI = TII->getRegisterInfo(); + + if 
(!Op->isMachineOpcode()) { + switch(Op->getOpcode()) { + case ISD::CopyFromReg: { + MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); + unsigned Reg = cast<RegisterSDNode>(Op->getOperand(1))->getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + return MRI.getRegClass(Reg); + } + return TRI.getPhysRegClass(Reg); + } + default: return NULL; + } + } + const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode()); + int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass; + if (OpClassID != -1) { + return TRI.getRegClass(OpClassID); + } + switch(Op.getMachineOpcode()) { + case AMDGPU::COPY_TO_REGCLASS: + // Operand 1 is the register class id for COPY_TO_REGCLASS instructions. + OpClassID = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue(); + + // If the COPY_TO_REGCLASS instruction is copying to a VSrc register + // class, then the register class for the value could be either a + // VReg or an SReg. In order to get a more accurate class, recurse on + // the source of the copy. + if (OpClassID == AMDGPU::VSrc_32RegClassID || + OpClassID == AMDGPU::VSrc_64RegClassID) { + return getRegClassForNode(DAG, Op.getOperand(0)); + } + return TRI.getRegClass(OpClassID); + case AMDGPU::EXTRACT_SUBREG: { + int SubIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); + const TargetRegisterClass *SuperClass = + getRegClassForNode(DAG, Op.getOperand(0)); + return TRI.getSubClassWithSubReg(SuperClass, SubIdx); + } + case AMDGPU::REG_SEQUENCE: + // Operand 0 is the register class id for REG_SEQUENCE instructions. + return TRI.getRegClass( + cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()); + default: + return getRegClassFor(Op.getSimpleValueType()); + } +} + /// \brief Does "Op" fit into register class "RegClass" ? bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, const SDValue &Op, unsigned RegClass) const { - - MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); - SDNode *Node = Op.getNode(); - - const TargetRegisterClass *OpClass; const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); - if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) { - const SIInstrInfo *TII = - static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo()); - const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode()); - int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass; - if (OpClassID == -1) { - switch (MN->getMachineOpcode()) { - case AMDGPU::REG_SEQUENCE: - // Operand 0 is the register class id for REG_SEQUENCE instructions. - OpClass = TRI->getRegClass( - cast<ConstantSDNode>(MN->getOperand(0))->getZExtValue()); - break; - default: - OpClass = getRegClassFor(Op.getSimpleValueType()); - break; - } - } else { - OpClass = TRI->getRegClass(OpClassID); - } - - } else if (Node->getOpcode() == ISD::CopyFromReg) { - RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode()); - OpClass = MRI.getRegClass(Reg->getReg()); - - } else + const TargetRegisterClass *RC = getRegClassForNode(DAG, Op); + if (!RC) { return false; - - return TRI->getRegClass(RegClass)->hasSubClassEq(OpClass); + } + return TRI->getRegClass(RegClass)->hasSubClassEq(RC); } /// \brief Make sure that we don't exceed the number of allowed scalars
- if (N->getValueType(0) == MVT::i128) { - return N; - } - const SDValue Ops[] = { - DAG.getTargetConstant(AMDGPU::VReg_64RegClassID, MVT::i32), - N->getOperand(1) , N->getOperand(2), - N->getOperand(3), N->getOperand(4) - }; - return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::i64, Ops); - } - case AMDGPU::S_LOAD_DWORD_IMM: NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64; // Fall-through diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h index 78ae6a1..b4202c4 100644 --- a/lib/Target/R600/SIISelLowering.h +++ b/lib/Target/R600/SIISelLowering.h @@ -25,10 +25,13 @@ class SITargetLowering : public AMDGPUTargetLowering { SDValue Chain, unsigned Offset) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; bool foldImm(SDValue &Operand, int32_t &Immediate, bool &ScalarSlotUsed) const; + const TargetRegisterClass *getRegClassForNode(SelectionDAG &DAG, + const SDValue &Op) const; bool fitsRegClass(SelectionDAG &DAG, const SDValue &Op, unsigned RegClass) const; void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, @@ -40,6 +43,7 @@ class SITargetLowering : public AMDGPUTargetLowering { public: SITargetLowering(TargetMachine &tm); + bool allowsUnalignedMemoryAccesses(EVT VT, bool *IsFast) const; SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, diff --git a/lib/Target/R600/SIInsertWaits.cpp b/lib/Target/R600/SIInsertWaits.cpp index c36e1dc..ba202e3 100644 --- a/lib/Target/R600/SIInsertWaits.cpp +++ b/lib/Target/R600/SIInsertWaits.cpp @@ -98,7 +98,8 @@ public: SIInsertWaits(TargetMachine &tm) : MachineFunctionPass(ID), TII(0), - TRI(0) { } + TRI(0), + ExpInstrTypesSeen(0) { } virtual bool runOnMachineFunction(MachineFunction &MF); @@ -134,6 +135,8 @@ Counters SIInsertWaits::getHwCounts(MachineInstr &MI) { if (TSFlags & SIInstrFlags::LGKM_CNT) { MachineOperand &Op = MI.getOperand(0); + if (!Op.isReg()) + Op = MI.getOperand(1); assert(Op.isReg() && "First LGKM operand must be a register!"); unsigned Reg = Op.getReg(); diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td index 51f323d..434aa7e 100644 --- a/lib/Target/R600/SIInstrFormats.td +++ b/lib/Target/R600/SIInstrFormats.td @@ -281,6 +281,30 @@ class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> : let Uses = [EXEC] in { +class DS <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> : + Enc64 <outs, ins, asm, pattern> { + + bits<8> vdst; + bits<1> gds; + bits<8> addr; + bits<8> data0; + bits<8> data1; + bits<8> offset0; + bits<8> offset1; + + let Inst{7-0} = offset0; + let Inst{15-8} = offset1; + let Inst{17} = gds; + let Inst{25-18} = op; + let Inst{31-26} = 0x36; //encoding + let Inst{39-32} = addr; + let Inst{47-40} = data0; + let Inst{55-48} = data1; + let Inst{63-56} = vdst; + + let LGKM_CNT = 1; +} + class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> : Enc64<outs, ins, asm, pattern> { diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp index cb582a6..551ae86 100644 --- a/lib/Target/R600/SIInstrInfo.cpp +++ b/lib/Target/R600/SIInstrInfo.cpp @@ -42,27 +42,27 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, // never be necessary. 
assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC); - const int16_t Sub0_15[] = { + static const int16_t Sub0_15[] = { AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0 }; - const int16_t Sub0_7[] = { + static const int16_t Sub0_7[] = { AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0 }; - const int16_t Sub0_3[] = { + static const int16_t Sub0_3[] = { AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0 }; - const int16_t Sub0_2[] = { + static const int16_t Sub0_2[] = { AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0 }; - const int16_t Sub0_1[] = { + static const int16_t Sub0_1[] = { AMDGPU::sub0, AMDGPU::sub1, 0 }; diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td index 42fa95f..52af79c 100644 --- a/lib/Target/R600/SIInstrInfo.td +++ b/lib/Target/R600/SIInstrInfo.td @@ -21,11 +21,21 @@ def LO32 : SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, MVT::i32); }]>; +def LO32f : SDNodeXForm<fpimm, [{ + APInt V = N->getValueAPF().bitcastToAPInt().trunc(32); + return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32); +}]>; + // Transformation function, extract the upper 32bit of a 64bit immediate def HI32 : SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(N->getZExtValue() >> 32, MVT::i32); }]>; +def HI32f : SDNodeXForm<fpimm, [{ + APInt V = N->getValueAPF().bitcastToAPInt().lshr(32).trunc(32); + return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32); +}]>; + def IMM8bitDWORD : ImmLeaf < i32, [{ return (Imm & ~0x3FC) == 0; @@ -44,7 +54,8 @@ def IMM12bit : PatLeaf <(imm), >; class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{ - return (*(const SITargetLowering *)TLI).analyzeImmediate(N) == 0; + return + (*(const SITargetLowering *)getTargetLowering()).analyzeImmediate(N) == 0; }]>; //===----------------------------------------------------------------------===// @@ -286,6 +297,29 @@ class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 < // Vector I/O classes //===----------------------------------------------------------------------===// +class DS_Load_Helper <bits<8> op, string asm, RegisterClass regClass> : DS < + op, + (outs regClass:$vdst), + (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1, + i8imm:$offset0, i8imm:$offset1), + asm#" $vdst, $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]", + []> { + let mayLoad = 1; + let mayStore = 0; +} + +class DS_Store_Helper <bits<8> op, string asm, RegisterClass regClass> : DS < + op, + (outs), + (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1, + i8imm:$offset0, i8imm:$offset1), + asm#" $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]", + []> { + let mayStore = 1; + let mayLoad = 0; + let vdst = 0; +} + class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF < op, (outs), diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td index e8ed2dd..500d15e 100644 --- a/lib/Target/R600/SIInstructions.td +++ b/lib/Target/R600/SIInstructions.td @@ -176,19 +176,19 @@ defm V_CMPX_TRU_F32 : VOPC_32 <0x0000001f, "V_CMPX_TRU_F32">; } // End hasSideEffects = 1, Defs = [EXEC] defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64">; -defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64">; -defm 
V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64">; -defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64">; -defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64">; +defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64", f64, COND_LT>; +defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64", f64, COND_EQ>; +defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64", f64, COND_LE>; +defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64", f64, COND_GT>; defm V_CMP_LG_F64 : VOPC_64 <0x00000025, "V_CMP_LG_F64">; -defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64">; +defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64", f64, COND_GE>; defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64">; defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64">; defm V_CMP_NGE_F64 : VOPC_64 <0x00000029, "V_CMP_NGE_F64">; defm V_CMP_NLG_F64 : VOPC_64 <0x0000002a, "V_CMP_NLG_F64">; defm V_CMP_NGT_F64 : VOPC_64 <0x0000002b, "V_CMP_NGT_F64">; defm V_CMP_NLE_F64 : VOPC_64 <0x0000002c, "V_CMP_NLE_F64">; -defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64">; +defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64", f64, COND_NE>; defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64">; defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64">; @@ -391,6 +391,9 @@ defm V_CMPX_CLASS_F64 : VOPC_64 <0x000000b8, "V_CMPX_CLASS_F64">; } // End isCompare = 1 +def DS_WRITE_B32 : DS_Store_Helper <0x0000000d, "DS_WRITE_B32", VReg_32>; +def DS_READ_B32 : DS_Load_Helper <0x00000036, "DS_READ_B32", VReg_32>; + //def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>; //def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>; //def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "BUFFER_LOAD_FORMAT_XYZ", []>; @@ -400,9 +403,9 @@ defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMA //def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>; //def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>; defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <0x00000008, "BUFFER_LOAD_UBYTE", VReg_32>; -//def BUFFER_LOAD_SBYTE : MUBUF_ <0x00000009, "BUFFER_LOAD_SBYTE", []>; -//def BUFFER_LOAD_USHORT : MUBUF_ <0x0000000a, "BUFFER_LOAD_USHORT", []>; -//def BUFFER_LOAD_SSHORT : MUBUF_ <0x0000000b, "BUFFER_LOAD_SSHORT", []>; +defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <0x00000009, "BUFFER_LOAD_SBYTE", VReg_32>; +defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <0x0000000a, "BUFFER_LOAD_USHORT", VReg_32>; +defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <0x0000000b, "BUFFER_LOAD_SSHORT", VReg_32>; defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>; defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>; defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>; @@ -535,7 +538,7 @@ def IMAGE_SAMPLE_B : MIMG_Sampler_Helper <0x00000025, "IMAGE_SAMPLE_B">; //def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>; def IMAGE_SAMPLE_C : MIMG_Sampler_Helper <0x00000028, "IMAGE_SAMPLE_C">; //def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>; -//def IMAGE_SAMPLE_C_D : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D", 0x0000002a>; +def IMAGE_SAMPLE_C_D : MIMG_Sampler_Helper <0x0000002a, "IMAGE_SAMPLE_C_D">; //def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>; def IMAGE_SAMPLE_C_L : MIMG_Sampler_Helper <0x0000002c, "IMAGE_SAMPLE_C_L">; def IMAGE_SAMPLE_C_B : MIMG_Sampler_Helper 
<0x0000002d, "IMAGE_SAMPLE_C_B">; @@ -660,12 +663,18 @@ defm V_RSQ_LEGACY_F32 : VOP1_32 < [(set f32:$dst, (int_AMDGPU_rsq f32:$src0))] >; defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32", []>; -defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64", []>; +defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64", + [(set f64:$dst, (fdiv FP_ONE, f64:$src0))] +>; defm V_RCP_CLAMP_F64 : VOP1_64 <0x00000030, "V_RCP_CLAMP_F64", []>; defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64", []>; defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64", []>; -defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32", []>; -defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64", []>; +defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32", + [(set f32:$dst, (fsqrt f32:$src0))] +>; +defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64", + [(set f64:$dst, (fsqrt f64:$src0))] +>; defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32", []>; defm V_COS_F32 : VOP1_32 <0x00000036, "V_COS_F32", []>; defm V_NOT_B32 : VOP1_32 <0x00000037, "V_NOT_B32", []>; @@ -771,8 +780,17 @@ def S_CBRANCH_EXECNZ : SOPP < } // End isBranch = 1 } // End isTerminator = 1 -//def S_BARRIER : SOPP_ <0x0000000a, "S_BARRIER", []>; let hasSideEffects = 1 in { +def S_BARRIER : SOPP <0x0000000a, (ins), "S_BARRIER", + [(int_AMDGPU_barrier_local)] +> { + let SIMM16 = 0; + let isBarrier = 1; + let hasCtrlDep = 1; + let mayLoad = 1; + let mayStore = 1; +} + def S_WAITCNT : SOPP <0x0000000c, (ins i32imm:$simm16), "S_WAITCNT $simm16", [] >; @@ -809,6 +827,18 @@ def : Pat < (V_CNDMASK_B32_e64 $src0, $src1, $src2) >; +// Use two V_CNDMASK_B32_e64 instructions for f64 +def : Pat < + (f64 (select i1:$src2, f64:$src1, f64:$src0)), + (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)), + (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub0), + (EXTRACT_SUBREG $src1, sub0), + $src2), sub0), + (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub1), + (EXTRACT_SUBREG $src1, sub1), + $src2), sub1) +>; + defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>; defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>; @@ -836,14 +866,16 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32", [(set f32:$dst, (fmul f32:$src0, f32:$src1))] >; -} // End isCommutable = 1 -//defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>; +defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", + [(set i32:$dst, (mul I24:$src0, I24:$src1))] +>; //defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>; -//defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>; +defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", + [(set i32:$dst, (mul U24:$src0, U24:$src1))] +>; //defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>; -let isCommutable = 1 in { defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32", [(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))] >; @@ -900,8 +932,8 @@ defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>; defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>; defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>; //defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>; -//defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>; -//defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>; +defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>; +defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>; let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32", @@ -951,8
+983,12 @@ let neverHasSideEffects = 1 in { def V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>; def V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32", []>; -//def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24", []>; -//def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24", []>; +def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24", + [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))] +>; +def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24", + [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))] +>; } // End neverHasSideEffects def V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>; @@ -994,12 +1030,29 @@ def V_LSHL_B64 : VOP3_64_Shift <0x00000161, "V_LSHL_B64", def V_LSHR_B64 : VOP3_64_Shift <0x00000162, "V_LSHR_B64", [(set i64:$dst, (srl i64:$src0, i32:$src1))] >; -def V_ASHR_I64 : VOP3_64_Shift <0x00000163, "V_ASHR_I64", []>; +def V_ASHR_I64 : VOP3_64_Shift <0x00000163, "V_ASHR_I64", + [(set i64:$dst, (sra i64:$src0, i32:$src1))] +>; + +let isCommutable = 1 in { def V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>; def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>; def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>; def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>; + +} // isCommutable = 1 + +def : Pat < + (fadd f64:$src0, f64:$src1), + (V_ADD_F64 $src0, $src1, (i64 0)) +>; + +def : Pat < + (fmul f64:$src0, f64:$src1), + (V_MUL_F64 $src0, $src1, (i64 0)) +>; + def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>; let isCommutable = 1 in { @@ -1203,17 +1256,23 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>; } // Uses = [EXEC,VCC,M0], Defs = [EXEC,VCC,M0] -// This psuedo instruction takes a pointer as input and outputs a resource -// constant that can be used with the ADDR64 MUBUF instructions. - let usesCustomInserter = 1 in { +// This pseudo instruction takes a pointer as input and outputs a resource +// constant that can be used with the ADDR64 MUBUF instructions.
def SI_ADDR64_RSRC : InstSI < (outs SReg_128:$srsrc), (ins SReg_64:$ptr), "", [] >; +def V_SUB_F64 : InstSI < + (outs VReg_64:$dst), + (ins VReg_64:$src0, VReg_64:$src1), + "V_SUB_F64 $dst, $src0, $src1", + [] +>; + } // end usesCustomInserter } // end IsCodeGenOnly, isPseudo @@ -1242,6 +1301,11 @@ def : Pat < $src0, $src1, $src2, $src3) >; +def : Pat < + (f64 (fsub f64:$src0, f64:$src1)), + (V_SUB_F64 $src0, $src1) +>; + /********** ======================= **********/ /********** Image sampling patterns **********/ /********** ======================= **********/ @@ -1296,6 +1360,11 @@ multiclass SamplePatterns<ValueType addr_type> { def : SampleArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>; def : SampleShadowPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>; def : SampleShadowArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>; + + def : SamplePattern <int_SI_sampled, IMAGE_SAMPLE_D, addr_type>; + def : SampleArrayPattern <int_SI_sampled, IMAGE_SAMPLE_D, addr_type>; + def : SampleShadowPattern <int_SI_sampled, IMAGE_SAMPLE_C_D, addr_type>; + def : SampleShadowArrayPattern <int_SI_sampled, IMAGE_SAMPLE_C_D, addr_type>; } defm : SamplePatterns<v2i32>; @@ -1417,6 +1486,16 @@ def : BitConvert <i32, f32, VReg_32>; def : BitConvert <f32, i32, SReg_32>; def : BitConvert <f32, i32, VReg_32>; +def : BitConvert <i64, f64, VReg_64>; + +def : BitConvert <f64, i64, VReg_64>; + +def : BitConvert <v2f32, v2i32, VReg_64>; +def : BitConvert <v2i32, v2f32, VReg_64>; + +def : BitConvert <v4f32, v4i32, VReg_128>; +def : BitConvert <v4i32, v4f32, VReg_128>; + /********** =================== **********/ /********** Src & Dst modifiers **********/ /********** =================== **********/ @@ -1471,6 +1550,13 @@ def : Pat < (S_MOV_B32 (i32 (HI32 imm:$imm))), sub1) >; +def : Pat < + (f64 fpimm:$imm), + (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)), + (V_MOV_B32_e32 (f32 (LO32f fpimm:$imm))), sub0), + (V_MOV_B32_e32 (f32 (HI32f fpimm:$imm))), sub1) +>; + /********** ===================== **********/ /********** Interpolation Patterns **********/ /********** ===================== **********/ @@ -1505,6 +1591,11 @@ def : Pat< (V_MUL_F32_e32 $src0, (V_RCP_F32_e32 $src1)) >; +def : Pat< + (fdiv f64:$src0, f64:$src1), + (V_MUL_F64 $src0, (V_RCP_F64_e32 $src1), (i64 0)) +>; + def : Pat < (fcos f32:$src0), (V_COS_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV))) @@ -1567,6 +1658,12 @@ def : Pat < (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0)))) >; +def : Pat < + (int_SI_tid), + (V_MBCNT_HI_U32_B32_e32 0xffffffff, + (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0, 0, 0, 0, 0)) +>; + /********** ================== **********/ /********** VOP3 Patterns **********/ /********** ================== **********/ @@ -1576,6 +1673,21 @@ def : Pat < (V_MAD_F32 $src0, $src1, $src2) >; +/********** ======================= **********/ +/********** Load/Store Patterns **********/ +/********** ======================= **********/ + +def : Pat < + (local_load i64:$src0), + (i32 (DS_READ_B32 0, (EXTRACT_SUBREG $src0, sub0), + (EXTRACT_SUBREG $src0, sub0), (EXTRACT_SUBREG $src0, sub0), 0, 0)) +>; + +def : Pat < + (local_store i32:$src1, i64:$src0), + (DS_WRITE_B32 0, (EXTRACT_SUBREG $src0, sub0), $src1, $src1, 0, 0) +>; + /********** ================== **********/ /********** SMRD Patterns **********/ /********** ================== **********/ @@ -1604,6 +1716,7 @@ multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> { defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>; defm :
SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>; defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, i64>; +defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>; defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v16i8>; defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>; @@ -1634,10 +1747,24 @@ multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt, >; } +defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, + sextloadi8_global, sextloadi8_constant>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, + az_extloadi8_global, az_extloadi8_constant>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, + sextloadi16_global, sextloadi16_constant>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, + az_extloadi16_global, az_extloadi16_constant>; defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32, global_load, constant_load>; -defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, - zextloadi8_global, zextloadi8_constant>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64, + global_load, constant_load>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64, + az_extloadi32_global, az_extloadi32_constant>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32, + global_load, constant_load>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32, + global_load, constant_load>; multiclass MUBUFStore_Pattern <MUBUF Instr, ValueType vt> { @@ -1654,6 +1781,7 @@ multiclass MUBUFStore_Pattern <MUBUF Instr, ValueType vt> { defm : MUBUFStore_Pattern <BUFFER_STORE_DWORD, i32>; defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, i64>; +defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, v2i32>; defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4, v4i32>; /********** ====================== **********/ @@ -1664,25 +1792,25 @@ multiclass SI_INDIRECT_Pattern <ValueType vt, SI_INDIRECT_DST IndDst> { // 1. Extract with offset def : Pat< - (vector_extract vt:$vec, (i64 (zext (add i32:$idx, imm:$off)))), + (vector_extract vt:$vec, (add i32:$idx, imm:$off)), (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, imm:$off)) >; // 2. Extract without offset def : Pat< - (vector_extract vt:$vec, (i64 (zext i32:$idx))), + (vector_extract vt:$vec, i32:$idx), (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, 0)) >; // 3. Insert with offset def : Pat< - (vector_insert vt:$vec, f32:$val, (i64 (zext (add i32:$idx, imm:$off)))), + (vector_insert vt:$vec, f32:$val, (add i32:$idx, imm:$off)), (IndDst (IMPLICIT_DEF), $vec, $idx, imm:$off, $val) >; // 4. 
Insert without offset def : Pat< - (vector_insert vt:$vec, f32:$val, (i64 (zext i32:$idx))), + (vector_insert vt:$vec, f32:$val, i32:$idx), (IndDst (IMPLICIT_DEF), $vec, $idx, 0, $val) >; } diff --git a/lib/Target/R600/SIIntrinsics.td b/lib/Target/R600/SIIntrinsics.td index 224cd2f..2fa073e 100644 --- a/lib/Target/R600/SIIntrinsics.td +++ b/lib/Target/R600/SIIntrinsics.td @@ -14,6 +14,7 @@ let TargetPrefix = "SI", isTarget = 1 in { + def int_SI_tid : Intrinsic <[llvm_i32_ty], [], [IntrNoMem]>; def int_SI_packf16 : Intrinsic <[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; def int_SI_export : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], []>; def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>; @@ -23,6 +24,7 @@ let TargetPrefix = "SI", isTarget = 1 in { def int_SI_sample : Sample; def int_SI_sampleb : Sample; + def int_SI_sampled : Sample; def int_SI_samplel : Sample; def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>; diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp index 5b434fb..c2e8f02 100644 --- a/lib/Target/R600/SILowerControlFlow.cpp +++ b/lib/Target/R600/SILowerControlFlow.cpp @@ -411,6 +411,7 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) { TRI = MF.getTarget().getRegisterInfo(); bool HaveKill = false; + bool NeedM0 = false; bool NeedWQM = false; unsigned Depth = 0; @@ -482,6 +483,13 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) { IndirectDst(MI); break; + case AMDGPU::DS_READ_B32: + NeedWQM = true; + // Fall through + case AMDGPU::DS_WRITE_B32: + NeedM0 = true; + break; + case AMDGPU::V_INTERP_P1_F32: case AMDGPU::V_INTERP_P2_F32: case AMDGPU::V_INTERP_MOV_F32: @@ -492,6 +500,14 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) { } } + if (NeedM0) { + MachineBasicBlock &MBB = MF.front(); + // Initialize M0 to a value that won't cause LDS access to be discarded + // due to offset clamping + BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_MOV_B32), + AMDGPU::M0).addImm(0xffffffff); + } + if (NeedWQM) { MachineBasicBlock &MBB = MF.front(); BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64), diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp index ddfc54e..50fd4c7 100644 --- a/lib/Target/R600/SIRegisterInfo.cpp +++ b/lib/Target/R600/SIRegisterInfo.cpp @@ -49,3 +49,24 @@ const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass( case MVT::i32: return &AMDGPU::VReg_32RegClass; } } + +const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const { + assert(!TargetRegisterInfo::isVirtualRegister(Reg)); + + const TargetRegisterClass *BaseClasses[] = { + &AMDGPU::VReg_32RegClass, + &AMDGPU::SReg_32RegClass, + &AMDGPU::VReg_64RegClass, + &AMDGPU::SReg_64RegClass, + &AMDGPU::SReg_128RegClass, + &AMDGPU::SReg_256RegClass + }; + + for (unsigned i = 0, e = sizeof(BaseClasses) / + sizeof(const TargetRegisterClass*); i != e; ++i) { + if (BaseClasses[i]->contains(Reg)) { + return BaseClasses[i]; + } + } + return NULL; +} diff --git a/lib/Target/R600/SIRegisterInfo.h b/lib/Target/R600/SIRegisterInfo.h index c322f94..d0df4f9 100644 --- a/lib/Target/R600/SIRegisterInfo.h +++ b/lib/Target/R600/SIRegisterInfo.h @@ -41,6 +41,10 @@ struct SIRegisterInfo : public 
AMDGPURegisterInfo { /// \brief get the register class of the specified type to use in the /// CFGStructurizer virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const; + + /// \brief Return the 'base' register class for this register. + /// e.g. SGPR0 => SReg_32, VGPR0 => VReg_32, SGPR0_SGPR1 => SReg_64, etc. + const TargetRegisterClass *getPhysRegClass(unsigned Reg) const; }; } // End namespace llvm diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td index 244d4c0..292b9d2 100644 --- a/lib/Target/R600/SIRegisterInfo.td +++ b/lib/Target/R600/SIRegisterInfo.td @@ -153,7 +153,7 @@ def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32, (add SGPR_32, M0Reg) >; -def SReg_64 : RegisterClass<"AMDGPU", [i64, i1], 64, +def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, i1], 64, (add SGPR_64, VCCReg, EXECReg) >; diff --git a/lib/Target/Sparc/CMakeLists.txt b/lib/Target/Sparc/CMakeLists.txt index 0ab7a1c..acf7496 100644 --- a/lib/Target/Sparc/CMakeLists.txt +++ b/lib/Target/Sparc/CMakeLists.txt @@ -22,7 +22,7 @@ add_llvm_target(SparcCodeGen SparcSelectionDAGInfo.cpp ) -add_dependencies(LLVMSparcCodeGen intrinsics_gen) +add_dependencies(LLVMSparcCodeGen SparcCommonTableGen intrinsics_gen) add_subdirectory(TargetInfo) add_subdirectory(MCTargetDesc) diff --git a/lib/Target/Sparc/DelaySlotFiller.cpp b/lib/Target/Sparc/DelaySlotFiller.cpp index b93f5e4..b101751 100644 --- a/lib/Target/Sparc/DelaySlotFiller.cpp +++ b/lib/Target/Sparc/DelaySlotFiller.cpp @@ -137,7 +137,7 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) { MachineBasicBlock::iterator J = MI; ++J; // skip the delay filler. assert (J != MBB.end() && "MI needs a delay instruction."); - BuildMI(MBB, ++J, I->getDebugLoc(), + BuildMI(MBB, ++J, MI->getDebugLoc(), TII->get(SP::UNIMP)).addImm(structSize); } } diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp index b538d5c..3fe2b44 100644 --- a/lib/Target/Sparc/SparcAsmPrinter.cpp +++ b/lib/Target/Sparc/SparcAsmPrinter.cpp @@ -63,8 +63,6 @@ namespace { virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const; - - virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const; }; } // end of anonymous namespace @@ -266,15 +264,6 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const { return I == Pred->end() || !I->isBarrier(); } -MachineLocation SparcAsmPrinter:: -getDebugValueLocation(const MachineInstr *MI) const { - assert(MI->getNumOperands() == 4 && "Invalid number of operands!"); - assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() && - "Unexpected MachineOperand types"); - return MachineLocation(MI->getOperand(0).getReg(), - MI->getOperand(1).getImm()); -} - // Force static initialization. extern "C" void LLVMInitializeSparcAsmPrinter() { RegisterAsmPrinter<SparcAsmPrinter> X(TheSparcTarget); diff --git a/lib/Target/Sparc/SparcFrameLowering.cpp b/lib/Target/Sparc/SparcFrameLowering.cpp index 7e91bc3..536e466 100644 --- a/lib/Target/Sparc/SparcFrameLowering.cpp +++ b/lib/Target/Sparc/SparcFrameLowering.cpp @@ -188,6 +188,17 @@ void SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const { MRI.setPhysRegUnused(reg); } + // Rewrite MBB's Live-ins.
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); + MBB != E; ++MBB) { + for (unsigned reg = SP::I0; reg <= SP::I7; ++reg) { + if (!MBB->isLiveIn(reg)) + continue; + MBB->removeLiveIn(reg); + MBB->addLiveIn(reg - SP::I0 + SP::O0); + } + } + assert(verifyLeafProcRegUse(&MRI)); #ifdef XDEBUG MF.verify(0, "After LeafProc Remapping"); diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp index e85cf74..db62151 100644 --- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp +++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp @@ -33,7 +33,7 @@ class SparcDAGToDAGISel : public SelectionDAGISel { /// Subtarget - Keep a pointer to the Sparc Subtarget around so that we can /// make the right decision when generating code for different targets. const SparcSubtarget &Subtarget; - SparcTargetMachine& TM; + SparcTargetMachine &TM; public: explicit SparcDAGToDAGISel(SparcTargetMachine &tm) : SelectionDAGISel(tm), @@ -67,13 +67,15 @@ private: SDNode* SparcDAGToDAGISel::getGlobalBaseReg() { unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF); - return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode(); + return CurDAG->getRegister(GlobalBaseReg, + getTargetLowering()->getPointerTy()).getNode(); } bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base, SDValue &Offset) { if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { - Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), TLI->getPointerTy()); + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), + getTargetLowering()->getPointerTy()); Offset = CurDAG->getTargetConstant(0, MVT::i32); return true; } @@ -88,7 +90,7 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr, dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) { // Constant offset from frame ref. 
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), - TLI->getPointerTy()); + getTargetLowering()->getPointerTy()); } else { Base = Addr.getOperand(0); } @@ -131,7 +133,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) { } R1 = Addr; - R2 = CurDAG->getRegister(SP::G0, TLI->getPointerTy()); + R2 = CurDAG->getRegister(SP::G0, getTargetLowering()->getPointerTy()); return true; } diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 1d765f2..4b0fa67 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -654,9 +654,9 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -1722,20 +1722,22 @@ static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { return FrameAddr; } -static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) { - MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); +static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, + const SparcTargetLowering &TLI) { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); MFI->setReturnAddressIsTaken(true); EVT VT = Op.getValueType(); SDLoc dl(Op); - unsigned RetReg = SP::I7; - uint64_t depth = Op.getConstantOperandVal(0); SDValue RetAddr; - if (depth == 0) + if (depth == 0) { + unsigned RetReg = MF.addLiveIn(SP::I7, + TLI.getRegClassFor(TLI.getPointerTy())); RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT); - else { + } else { // Need frame address to find return address of the caller. 
MFI->setFrameAddressIsTaken(true); @@ -1793,7 +1795,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::FNEG: case ISD::FABS: return LowerF64Op(Op, DAG); - case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); + case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::GlobalTLSAddress: llvm_unreachable("TLS not implemented for Sparc."); @@ -1906,7 +1908,7 @@ SparcTargetLowering::getConstraintType(const std::string &Constraint) const { std::pair<unsigned, const TargetRegisterClass*> SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { case 'r': diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h index 7137171..261c25a 100644 --- a/lib/Target/Sparc/SparcISelLowering.h +++ b/lib/Target/Sparc/SparcISelLowering.h @@ -68,7 +68,7 @@ namespace llvm { ConstraintType getConstraintType(const std::string &Constraint) const; std::pair<unsigned, const TargetRegisterClass*> - getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const; + getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const; virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; } diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp index 626bc40..6c14bc9 100644 --- a/lib/Target/Sparc/SparcInstrInfo.cpp +++ b/lib/Target/Sparc/SparcInstrInfo.cpp @@ -17,7 +17,9 @@ #include "SparcSubtarget.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/TargetRegistry.h" @@ -114,18 +116,6 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC) llvm_unreachable("Invalid cond code"); } -MachineInstr * -SparcInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, - uint64_t Offset, - const MDNode *MDPtr, - DebugLoc dl) const { - MachineInstrBuilder MIB = BuildMI(MF, dl, get(SP::DBG_VALUE)) - .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr); - return &*MIB; -} - - bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, @@ -322,19 +312,27 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); + MachineFunction *MF = MBB.getParent(); + const MachineFrameInfo &MFI = *MF->getFrameInfo(); + MachineMemOperand *MMO = + MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), + MachineMemOperand::MOStore, + MFI.getObjectSize(FI), + MFI.getObjectAlignment(FI)); + // On the order of operands here: think "[FrameIdx + 0] = SrcReg". 
if (RC == &SP::I64RegsRegClass) BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0) - .addReg(SrcReg, getKillRegState(isKill)); + .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); else if (RC == &SP::IntRegsRegClass) BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0) - .addReg(SrcReg, getKillRegState(isKill)); + .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); else if (RC == &SP::FPRegsRegClass) BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0) - .addReg(SrcReg, getKillRegState(isKill)); + .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); else if (RC == &SP::DFPRegsRegClass) BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0) - .addReg(SrcReg, getKillRegState(isKill)); + .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); else llvm_unreachable("Can't store this register to stack slot"); } @@ -347,14 +345,26 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); + MachineFunction *MF = MBB.getParent(); + const MachineFrameInfo &MFI = *MF->getFrameInfo(); + MachineMemOperand *MMO = + MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), + MachineMemOperand::MOLoad, + MFI.getObjectSize(FI), + MFI.getObjectAlignment(FI)); + if (RC == &SP::I64RegsRegClass) - BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0); + BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0) + .addMemOperand(MMO); else if (RC == &SP::IntRegsRegClass) - BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0); + BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0) + .addMemOperand(MMO); else if (RC == &SP::FPRegsRegClass) - BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0); + BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0) + .addMemOperand(MMO); else if (RC == &SP::DFPRegsRegClass) - BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0); + BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0) + .addMemOperand(MMO); else llvm_unreachable("Can't load this register from stack slot"); } diff --git a/lib/Target/Sparc/SparcInstrInfo.h b/lib/Target/Sparc/SparcInstrInfo.h index a0a0ffd8..d0b220b 100644 --- a/lib/Target/Sparc/SparcInstrInfo.h +++ b/lib/Target/Sparc/SparcInstrInfo.h @@ -62,14 +62,6 @@ public: virtual unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const; - /// emitFrameIndexDebugValue - Emit a target-dependent form of - /// DBG_VALUE encoding the address of a frame index. 
- virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, - uint64_t Offset, - const MDNode *MDPtr, - DebugLoc dl) const; - virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl<MachineOperand> &Cond, diff --git a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp index 7c28abd..58af2c4 100644 --- a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp +++ b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp @@ -8,6 +8,7 @@ //===----------------------------------------------------------------------===// #include "MCTargetDesc/SystemZMCTargetDesc.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" @@ -29,19 +30,25 @@ static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) { } namespace { +enum RegisterKind { + GR32Reg, + GR64Reg, + GR128Reg, + ADDR32Reg, + ADDR64Reg, + FP32Reg, + FP64Reg, + FP128Reg +}; + +enum MemoryKind { + BDMem, + BDXMem, + BDLMem +}; + class SystemZOperand : public MCParsedAsmOperand { public: - enum RegisterKind { - GR32Reg, - GR64Reg, - GR128Reg, - ADDR32Reg, - ADDR64Reg, - FP32Reg, - FP64Reg, - FP128Reg - }; - private: enum OperandKind { KindInvalid, @@ -77,12 +84,15 @@ private: // Base + Disp + Index, where Base and Index are LLVM registers or 0. // RegKind says what type the registers have (ADDR32Reg or ADDR64Reg). + // Length is the operand length for D(L,B)-style operands, otherwise + // it is null. struct MemOp { unsigned Base : 8; unsigned Index : 8; unsigned RegKind : 8; unsigned Unused : 8; const MCExpr *Disp; + const MCExpr *Length; }; union { @@ -139,12 +149,14 @@ public: } static SystemZOperand *createMem(RegisterKind RegKind, unsigned Base, const MCExpr *Disp, unsigned Index, - SMLoc StartLoc, SMLoc EndLoc) { + const MCExpr *Length, SMLoc StartLoc, + SMLoc EndLoc) { SystemZOperand *Op = new SystemZOperand(KindMem, StartLoc, EndLoc); Op->Mem.RegKind = RegKind; Op->Mem.Base = Base; Op->Mem.Index = Index; Op->Mem.Disp = Disp; + Op->Mem.Length = Length; return Op; } @@ -191,16 +203,20 @@ public: virtual bool isMem() const LLVM_OVERRIDE { return Kind == KindMem; } - bool isMem(RegisterKind RegKind, bool HasIndex) const { + bool isMem(RegisterKind RegKind, MemoryKind MemKind) const { return (Kind == KindMem && Mem.RegKind == RegKind && - (HasIndex || !Mem.Index)); + (MemKind == BDXMem || !Mem.Index) && + (MemKind == BDLMem) == (Mem.Length != 0)); + } + bool isMemDisp12(RegisterKind RegKind, MemoryKind MemKind) const { + return isMem(RegKind, MemKind) && inRange(Mem.Disp, 0, 0xfff); } - bool isMemDisp12(RegisterKind RegKind, bool HasIndex) const { - return isMem(RegKind, HasIndex) && inRange(Mem.Disp, 0, 0xfff); + bool isMemDisp20(RegisterKind RegKind, MemoryKind MemKind) const { + return isMem(RegKind, MemKind) && inRange(Mem.Disp, -524288, 524287); } - bool isMemDisp20(RegisterKind RegKind, bool HasIndex) const { - return isMem(RegKind, HasIndex) && inRange(Mem.Disp, -524288, 524287); + bool isMemDisp12Len8(RegisterKind RegKind) const { + return isMemDisp12(RegKind, BDLMem) && inRange(Mem.Length, 1, 0x100); } // Override MCParsedAsmOperand. 
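The D(L,B) forms being wired up here carry an operand length where an indexed address would carry an index register. As a rough standalone sketch of the decomposition the parser builds — the struct and helper below are illustrative only, not part of this patch:

#include <cstdint>

// A parsed D(L,B) memory operand such as "100(8,%r3)": base %r3,
// displacement 100, length 8.  This mirrors the Base/Disp/Length that
// SystemZOperand::createMem above now records for length-style operands.
struct BDLOperand {
  unsigned Base;    // LLVM register number; 0 when no base is written
  uint64_t Disp;    // unsigned 12-bit displacement, 0..0xfff
  uint64_t Length;  // operand length as written in the assembly, 1..256
};

// Range check matching the isMemDisp12Len8 predicate below.
static bool isValidBDL(const BDLOperand &Op) {
  return Op.Disp <= 0xfff && Op.Length >= 1 && Op.Length <= 0x100;
}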
@@ -236,6 +252,13 @@ public: addExpr(Inst, Mem.Disp); Inst.addOperand(MCOperand::CreateReg(Mem.Index)); } + void addBDLAddrOperands(MCInst &Inst, unsigned N) const { + assert(N == 3 && "Invalid number of operands"); + assert(Kind == KindMem && "Invalid operand type"); + Inst.addOperand(MCOperand::CreateReg(Mem.Base)); + addExpr(Inst, Mem.Disp); + addExpr(Inst, Mem.Length); + } // Used by the TableGen code to check for particular operand types. bool isGR32() const { return isReg(GR32Reg); } @@ -247,12 +270,13 @@ public: bool isFP32() const { return isReg(FP32Reg); } bool isFP64() const { return isReg(FP64Reg); } bool isFP128() const { return isReg(FP128Reg); } - bool isBDAddr32Disp12() const { return isMemDisp12(ADDR32Reg, false); } - bool isBDAddr32Disp20() const { return isMemDisp20(ADDR32Reg, false); } - bool isBDAddr64Disp12() const { return isMemDisp12(ADDR64Reg, false); } - bool isBDAddr64Disp20() const { return isMemDisp20(ADDR64Reg, false); } - bool isBDXAddr64Disp12() const { return isMemDisp12(ADDR64Reg, true); } - bool isBDXAddr64Disp20() const { return isMemDisp20(ADDR64Reg, true); } + bool isBDAddr32Disp12() const { return isMemDisp12(ADDR32Reg, BDMem); } + bool isBDAddr32Disp20() const { return isMemDisp20(ADDR32Reg, BDMem); } + bool isBDAddr64Disp12() const { return isMemDisp12(ADDR64Reg, BDMem); } + bool isBDAddr64Disp20() const { return isMemDisp20(ADDR64Reg, BDMem); } + bool isBDXAddr64Disp12() const { return isMemDisp12(ADDR64Reg, BDXMem); } + bool isBDXAddr64Disp20() const { return isMemDisp20(ADDR64Reg, BDXMem); } + bool isBDLAddr64Disp12Len8() const { return isMemDisp12Len8(ADDR64Reg); } bool isU4Imm() const { return isImm(0, 15); } bool isU6Imm() const { return isImm(0, 63); } bool isU8Imm() const { return isImm(0, 255); } @@ -288,19 +312,16 @@ private: OperandMatchResultTy parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands, - RegisterGroup Group, const unsigned *Regs, - SystemZOperand::RegisterKind Kind, - bool IsAddress = false); + RegisterGroup Group, const unsigned *Regs, RegisterKind Kind); bool parseAddress(unsigned &Base, const MCExpr *&Disp, - unsigned &Index, const unsigned *Regs, - SystemZOperand::RegisterKind RegKind, - bool HasIndex); + unsigned &Index, const MCExpr *&Length, + const unsigned *Regs, RegisterKind RegKind); OperandMatchResultTy parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands, - const unsigned *Regs, SystemZOperand::RegisterKind RegKind, - bool HasIndex); + const unsigned *Regs, RegisterKind RegKind, + MemoryKind MemKind); bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Mnemonic); @@ -331,28 +352,23 @@ public: // Used by the TableGen code to parse particular operand types. 
OperandMatchResultTy parseGR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, - SystemZOperand::GR32Reg); + return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, GR32Reg); } OperandMatchResultTy parseGR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, - SystemZOperand::GR64Reg); + return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, GR64Reg); } OperandMatchResultTy parseGR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegGR, SystemZMC::GR128Regs, - SystemZOperand::GR128Reg); + return parseRegister(Operands, RegGR, SystemZMC::GR128Regs, GR128Reg); } OperandMatchResultTy parseADDR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, - SystemZOperand::ADDR32Reg, true); + return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, ADDR32Reg); } OperandMatchResultTy parseADDR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, - SystemZOperand::ADDR64Reg, true); + return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, ADDR64Reg); } OperandMatchResultTy parseADDR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { @@ -360,33 +376,31 @@ public: } OperandMatchResultTy parseFP32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegFP, SystemZMC::FP32Regs, - SystemZOperand::FP32Reg); + return parseRegister(Operands, RegFP, SystemZMC::FP32Regs, FP32Reg); } OperandMatchResultTy parseFP64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegFP, SystemZMC::FP64Regs, - SystemZOperand::FP64Reg); + return parseRegister(Operands, RegFP, SystemZMC::FP64Regs, FP64Reg); } OperandMatchResultTy parseFP128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseRegister(Operands, RegFP, SystemZMC::FP128Regs, - SystemZOperand::FP128Reg); + return parseRegister(Operands, RegFP, SystemZMC::FP128Regs, FP128Reg); } OperandMatchResultTy parseBDAddr32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseAddress(Operands, SystemZMC::GR32Regs, - SystemZOperand::ADDR32Reg, false); + return parseAddress(Operands, SystemZMC::GR32Regs, ADDR32Reg, BDMem); } OperandMatchResultTy parseBDAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseAddress(Operands, SystemZMC::GR64Regs, - SystemZOperand::ADDR64Reg, false); + return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDMem); } OperandMatchResultTy parseBDXAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - return parseAddress(Operands, SystemZMC::GR64Regs, - SystemZOperand::ADDR64Reg, true); + return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDXMem); + } + OperandMatchResultTy + parseBDLAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDLMem); } OperandMatchResultTy parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands); @@ -474,12 +488,12 @@ bool SystemZAsmParser::parseRegister(Register &Reg, RegisterGroup Group, SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands, RegisterGroup Group, const unsigned *Regs, - SystemZOperand::RegisterKind Kind, - bool IsAddress) { + RegisterKind Kind) { if (Parser.getTok().isNot(AsmToken::Percent)) return MatchOperand_NoMatch; Register Reg; + bool IsAddress = (Kind == 
ADDR32Reg || Kind == ADDR64Reg); if (parseRegister(Reg, Group, Regs, IsAddress)) return MatchOperand_ParseFail; @@ -488,14 +502,13 @@ SystemZAsmParser::parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands, return MatchOperand_Success; } -// Parse a memory operand into Base, Disp and Index. Regs maps asm -// register numbers to LLVM register numbers and RegKind says what kind -// of address register we're using (ADDR32Reg or ADDR64Reg). HasIndex -// says whether the address allows index registers. +// Parse a memory operand into Base, Disp, Index and Length. +// Regs maps asm register numbers to LLVM register numbers and RegKind +// says what kind of address register we're using (ADDR32Reg or ADDR64Reg). bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp, - unsigned &Index, const unsigned *Regs, - SystemZOperand::RegisterKind RegKind, - bool HasIndex) { + unsigned &Index, const MCExpr *&Length, + const unsigned *Regs, + RegisterKind RegKind) { // Parse the displacement, which must always be present. if (getParser().parseExpression(Disp)) return true; @@ -503,27 +516,33 @@ bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp, // Parse the optional base and index. Index = 0; Base = 0; + Length = 0; if (getLexer().is(AsmToken::LParen)) { Parser.Lex(); - // Parse the first register. - Register Reg; - if (parseRegister(Reg, RegGR, Regs, RegKind)) - return true; + if (getLexer().is(AsmToken::Percent)) { + // Parse the first register and decide whether it's a base or an index. + Register Reg; + if (parseRegister(Reg, RegGR, Regs, RegKind)) + return true; + if (getLexer().is(AsmToken::Comma)) + Index = Reg.Num; + else + Base = Reg.Num; + } else { + // Parse the length. + if (getParser().parseExpression(Length)) + return true; + } - // Check whether there's a second register. If so, the one that we - // just parsed was the index. + // Check whether there's a second register. It's the base if so. if (getLexer().is(AsmToken::Comma)) { Parser.Lex(); - - if (!HasIndex) - return Error(Reg.StartLoc, "invalid use of indexed addressing"); - - Index = Reg.Num; + Register Reg; if (parseRegister(Reg, RegGR, Regs, RegKind)) return true; + Base = Reg.Num; } - Base = Reg.Num; // Consume the closing bracket. if (getLexer().isNot(AsmToken::RParen)) @@ -537,19 +556,37 @@ bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp, // are as above. 
SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands, - const unsigned *Regs, - SystemZOperand::RegisterKind RegKind, - bool HasIndex) { + const unsigned *Regs, RegisterKind RegKind, + MemoryKind MemKind) { SMLoc StartLoc = Parser.getTok().getLoc(); unsigned Base, Index; const MCExpr *Disp; - if (parseAddress(Base, Disp, Index, Regs, RegKind, HasIndex)) + const MCExpr *Length; + if (parseAddress(Base, Disp, Index, Length, Regs, RegKind)) return MatchOperand_ParseFail; + if (Index && MemKind != BDXMem) + { + Error(StartLoc, "invalid use of indexed addressing"); + return MatchOperand_ParseFail; + } + + if (Length && MemKind != BDLMem) + { + Error(StartLoc, "invalid use of length addressing"); + return MatchOperand_ParseFail; + } + + if (!Length && MemKind == BDLMem) + { + Error(StartLoc, "missing length in address"); + return MatchOperand_ParseFail; + } + SMLoc EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); Operands.push_back(SystemZOperand::createMem(RegKind, Base, Disp, Index, - StartLoc, EndLoc)); + Length, StartLoc, EndLoc)); return MatchOperand_Success; } @@ -639,14 +676,13 @@ parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, // so we treat any plain expression as an immediate. SMLoc StartLoc = Parser.getTok().getLoc(); unsigned Base, Index; - const MCExpr *Expr; - if (parseAddress(Base, Expr, Index, SystemZMC::GR64Regs, - SystemZOperand::ADDR64Reg, true)) + const MCExpr *Expr, *Length; + if (parseAddress(Base, Expr, Index, Length, SystemZMC::GR64Regs, ADDR64Reg)) return true; SMLoc EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); - if (Base || Index) + if (Base || Index || Length) Operands.push_back(SystemZOperand::createInvalid(StartLoc, EndLoc)); else Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc)); diff --git a/lib/Target/SystemZ/CMakeLists.txt b/lib/Target/SystemZ/CMakeLists.txt index edb679d..ab657f6 100644 --- a/lib/Target/SystemZ/CMakeLists.txt +++ b/lib/Target/SystemZ/CMakeLists.txt @@ -15,6 +15,7 @@ add_llvm_target(SystemZCodeGen SystemZAsmPrinter.cpp SystemZCallingConv.cpp SystemZConstantPoolValue.cpp + SystemZElimCompare.cpp SystemZFrameLowering.cpp SystemZISelDAGToDAG.cpp SystemZISelLowering.cpp @@ -22,11 +23,12 @@ add_llvm_target(SystemZCodeGen SystemZLongBranch.cpp SystemZMCInstLower.cpp SystemZRegisterInfo.cpp + SystemZSelectionDAGInfo.cpp SystemZSubtarget.cpp SystemZTargetMachine.cpp ) -add_dependencies(LLVMSystemZCodeGen intrinsics_gen) +add_dependencies(LLVMSystemZCodeGen SystemZCommonTableGen intrinsics_gen) add_subdirectory(AsmParser) add_subdirectory(Disassembler) diff --git a/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp b/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp index 4e4816b..79469b6 100644 --- a/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp +++ b/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp @@ -226,6 +226,18 @@ static DecodeStatus decodeBDXAddr20Operand(MCInst &Inst, uint64_t Field, return MCDisassembler::Success; } +static DecodeStatus decodeBDLAddr12Len8Operand(MCInst &Inst, uint64_t Field, + const unsigned *Regs) { + uint64_t Length = Field >> 16; + uint64_t Base = (Field >> 12) & 0xf; + uint64_t Disp = Field & 0xfff; + assert(Length < 256 && "Invalid BDLAddr12Len8"); + Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 
0 : Regs[Base])); + Inst.addOperand(MCOperand::CreateImm(Disp)); + Inst.addOperand(MCOperand::CreateImm(Length + 1)); + return MCDisassembler::Success; +} + static DecodeStatus decodeBDAddr32Disp12Operand(MCInst &Inst, uint64_t Field, uint64_t Address, const void *Decoder) { @@ -262,6 +274,13 @@ static DecodeStatus decodeBDXAddr64Disp20Operand(MCInst &Inst, uint64_t Field, return decodeBDXAddr20Operand(Inst, Field, SystemZMC::GR64Regs); } +static DecodeStatus decodeBDLAddr64Disp12Len8Operand(MCInst &Inst, + uint64_t Field, + uint64_t Address, + const void *Decoder) { + return decodeBDLAddr12Len8Operand(Inst, Field, SystemZMC::GR64Regs); +} + #include "SystemZGenDisassemblerTables.inc" DecodeStatus SystemZDisassembler::getInstruction(MCInst &MI, uint64_t &Size, diff --git a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp index 369802b..37ebff3 100644 --- a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp +++ b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp @@ -154,6 +154,17 @@ void SystemZInstPrinter::printBDXAddrOperand(const MCInst *MI, int OpNum, MI->getOperand(OpNum + 2).getReg(), O); } +void SystemZInstPrinter::printBDLAddrOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + unsigned Base = MI->getOperand(OpNum).getReg(); + uint64_t Disp = MI->getOperand(OpNum + 1).getImm(); + uint64_t Length = MI->getOperand(OpNum + 2).getImm(); + O << Disp << '(' << Length; + if (Base) + O << ",%" << getRegisterName(Base); + O << ')'; +} + void SystemZInstPrinter::printCond4Operand(const MCInst *MI, int OpNum, raw_ostream &O) { static const char *const CondNames[] = { diff --git a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h index f77282e..30cdee5 100644 --- a/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h +++ b/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h @@ -48,6 +48,7 @@ private: void printOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printBDAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printBDXAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printBDLAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printU4ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printU6ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printS8ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O); diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp index 7721b1f..bda7714 100644 --- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp +++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp @@ -49,7 +49,7 @@ private: SmallVectorImpl<MCFixup> &Fixups) const; // Called by the TableGen code to get the binary encoding of an address. - // The index, if any, is encoded first, followed by the base, + // The index or length, if any, is encoded first, followed by the base, // followed by the displacement. In a 20-bit displacement, // the low 12 bits are encoded before the high 8 bits. 
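// Illustrative sketch (added for exposition, not in the patch): a helper
// mirroring the BDLAddr12Len8 layout produced by getBDLAddr12Len8Encoding
// below and consumed by decodeBDLAddr12Len8Operand in the disassembler.
// The 24-bit field is length-minus-one (8 bits), base (4 bits), then
// displacement (12 bits).
static uint64_t packBDLAddr12Len8(uint64_t Length, uint64_t Base,
                                  uint64_t Disp) {
  assert(Length >= 1 && Length <= 256 && Base < 16 && Disp < 4096);
  return ((Length - 1) << 16) | (Base << 12) | Disp;
}
// For example, packBDLAddr12Len8(8, 2, 0x10) == 0x072010, which decodes
// back to Base = %r2, Disp = 0x10, Length = 8.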
uint64_t getBDAddr12Encoding(const MCInst &MI, unsigned OpNum, @@ -60,6 +60,8 @@ private: SmallVectorImpl<MCFixup> &Fixups) const; uint64_t getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum, SmallVectorImpl<MCFixup> &Fixups) const; + uint64_t getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const; // Operand OpNum of MI needs a PC-relative fixup of kind Kind at // Offset bytes from the start of MI. Add the fixup to Fixups @@ -112,7 +114,7 @@ uint64_t SystemZMCCodeEmitter:: getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups) const { if (MO.isReg()) - return Ctx.getRegisterInfo().getEncodingValue(MO.getReg()); + return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()); if (MO.isImm()) return static_cast<uint64_t>(MO.getImm()); llvm_unreachable("Unexpected operand type!"); @@ -157,6 +159,16 @@ getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum, | ((Disp & 0xff000) >> 12); } +uint64_t SystemZMCCodeEmitter:: +getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const { + uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups); + uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups); + uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups) - 1; + assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<8>(Len)); + return (Len << 16) | (Base << 12) | Disp; +} + uint64_t SystemZMCCodeEmitter::getPCRelEncoding(const MCInst &MI, unsigned OpNum, SmallVectorImpl<MCFixup> &Fixups, diff --git a/lib/Target/SystemZ/README.txt b/lib/Target/SystemZ/README.txt index 55e9fc0..2782b63 100644 --- a/lib/Target/SystemZ/README.txt +++ b/lib/Target/SystemZ/README.txt @@ -118,11 +118,6 @@ such as ICM and STCM. -- -We could make more use of the ROTATE AND ... SELECTED BITS instructions. -At the moment we only use RISBG, and only then for subword atomic operations. - --- - DAGCombiner can detect integer absolute, but there's not yet an associated ISD opcode. We could add one and implement it using LOAD POSITIVE. Negated absolutes could use LOAD NEGATIVE. diff --git a/lib/Target/SystemZ/SystemZ.h b/lib/Target/SystemZ/SystemZ.h index 24612bb..eccc2aa 100644 --- a/lib/Target/SystemZ/SystemZ.h +++ b/lib/Target/SystemZ/SystemZ.h @@ -30,16 +30,28 @@ namespace llvm { const unsigned CCMASK_3 = 1 << 0; const unsigned CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3; - // Condition-code mask assignments for floating-point comparisons. + // Condition-code mask assignments for integer and floating-point + // comparisons. const unsigned CCMASK_CMP_EQ = CCMASK_0; const unsigned CCMASK_CMP_LT = CCMASK_1; const unsigned CCMASK_CMP_GT = CCMASK_2; - const unsigned CCMASK_CMP_UO = CCMASK_3; const unsigned CCMASK_CMP_NE = CCMASK_CMP_LT | CCMASK_CMP_GT; const unsigned CCMASK_CMP_LE = CCMASK_CMP_EQ | CCMASK_CMP_LT; const unsigned CCMASK_CMP_GE = CCMASK_CMP_EQ | CCMASK_CMP_GT; + + // Condition-code mask assignments for floating-point comparisons only. + const unsigned CCMASK_CMP_UO = CCMASK_3; const unsigned CCMASK_CMP_O = CCMASK_ANY ^ CCMASK_CMP_UO; + // All condition-code values produced by comparisons. + const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2; + const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3; + + // Condition-code mask assignments for CS. 
+ const unsigned CCMASK_CS_EQ = CCMASK_0; + const unsigned CCMASK_CS_NE = CCMASK_1; + const unsigned CCMASK_CS = CCMASK_0 | CCMASK_1; + // Return true if Val fits an LLILL operand. static inline bool isImmLL(uint64_t Val) { return (Val & ~0x000000000000ffffULL) == 0; @@ -73,6 +85,7 @@ namespace llvm { FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel); + FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM); FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM); } // end namespace llvm; #endif diff --git a/lib/Target/SystemZ/SystemZ.td b/lib/Target/SystemZ/SystemZ.td index e03c32f..abf5c8e 100644 --- a/lib/Target/SystemZ/SystemZ.td +++ b/lib/Target/SystemZ/SystemZ.td @@ -14,13 +14,10 @@ include "llvm/Target/Target.td" //===----------------------------------------------------------------------===// -// SystemZ supported processors +// SystemZ supported processors and features //===----------------------------------------------------------------------===// -class Proc<string Name, list<SubtargetFeature> Features> - : Processor<Name, NoItineraries, Features>; - -def : Proc<"z10", []>; +include "SystemZProcessors.td" //===----------------------------------------------------------------------===// // Register file description diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/lib/Target/SystemZ/SystemZAsmPrinter.cpp index 1e15ab1..3a57ea0 100644 --- a/lib/Target/SystemZ/SystemZAsmPrinter.cpp +++ b/lib/Target/SystemZ/SystemZAsmPrinter.cpp @@ -100,7 +100,7 @@ void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) { for (unsigned i = 0, e = Stubs.size(); i != e; ++i) { OutStreamer.EmitLabel(Stubs[i].first); OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(), - TD->getPointerSize(0), 0); + TD->getPointerSize(0)); } Stubs.clear(); } diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp new file mode 100644 index 0000000..b8a77db --- /dev/null +++ b/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -0,0 +1,471 @@ +//===-- SystemZElimCompare.cpp - Eliminate comparison instructions --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass: +// (1) tries to remove compares if CC already contains the required information +// (2) fuses compares and branches into COMPARE AND BRANCH instructions +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "systemz-elim-compare" + +#include "SystemZTargetMachine.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/IR/Function.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetRegisterInfo.h" + +using namespace llvm; + +STATISTIC(BranchOnCounts, "Number of branch-on-count instructions"); +STATISTIC(EliminatedComparisons, "Number of eliminated comparisons"); +STATISTIC(FusedComparisons, "Number of fused compare-and-branch instructions"); + +namespace { + // Represents the references to a particular register in one or more + // instructions. 
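// (Editorial illustration, not part of the patch.)  A Reference is filled
// in operand-by-operand by getRegReferences() and accumulated across
// instructions with operator|= as processBlock() walks backwards, e.g.:
//
//   Reference CCRefs;
//   CCRefs |= getRegReferences(MI, SystemZ::CC);
//   if (CCRefs.Use && CCRefs.Def)  // CC both read and clobbered below here
//     ...stop the backward search...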
+ struct Reference { + Reference() + : Def(false), Use(false), IndirectDef(false), IndirectUse(false) {} + + Reference &operator|=(const Reference &Other) { + Def |= Other.Def; + IndirectDef |= Other.IndirectDef; + Use |= Other.Use; + IndirectUse |= Other.IndirectUse; + return *this; + } + + operator bool() const { return Def || Use; } + + // True if the register is defined or used in some form, either directly or + // via a sub- or super-register. + bool Def; + bool Use; + + // True if the register is defined or used indirectly, by a sub- or + // super-register. + bool IndirectDef; + bool IndirectUse; + }; + + class SystemZElimCompare : public MachineFunctionPass { + public: + static char ID; + SystemZElimCompare(const SystemZTargetMachine &tm) + : MachineFunctionPass(ID), TII(0), TRI(0) {} + + virtual const char *getPassName() const { + return "SystemZ Comparison Elimination"; + } + + bool processBlock(MachineBasicBlock *MBB); + bool runOnMachineFunction(MachineFunction &F); + + private: + Reference getRegReferences(MachineInstr *MI, unsigned Reg); + bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers); + bool convertToLoadAndTest(MachineInstr *MI); + bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers); + bool optimizeCompareZero(MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers); + bool fuseCompareAndBranch(MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers); + + const SystemZInstrInfo *TII; + const TargetRegisterInfo *TRI; + }; + + char SystemZElimCompare::ID = 0; +} // end of anonymous namespace + +FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) { + return new SystemZElimCompare(TM); +} + +// Return true if CC is live out of MBB. +static bool isCCLiveOut(MachineBasicBlock *MBB) { + for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(), + SE = MBB->succ_end(); SI != SE; ++SI) + if ((*SI)->isLiveIn(SystemZ::CC)) + return true; + return false; +} + +// Return true if any CC result of MI would reflect the value of subreg +// SubReg of Reg. +static bool resultTests(MachineInstr *MI, unsigned Reg, unsigned SubReg) { + if (MI->getNumOperands() > 0 && + MI->getOperand(0).isReg() && + MI->getOperand(0).isDef() && + MI->getOperand(0).getReg() == Reg && + MI->getOperand(0).getSubReg() == SubReg) + return true; + + switch (MI->getOpcode()) { + case SystemZ::LR: + case SystemZ::LGR: + case SystemZ::LGFR: + case SystemZ::LTR: + case SystemZ::LTGR: + case SystemZ::LTGFR: + case SystemZ::LER: + case SystemZ::LDR: + case SystemZ::LXR: + case SystemZ::LTEBR: + case SystemZ::LTDBR: + case SystemZ::LTXBR: + if (MI->getOperand(1).getReg() == Reg && + MI->getOperand(1).getSubReg() == SubReg) + return true; + } + + return false; +} + +// Describe the references to Reg in MI, including sub- and super-registers. +Reference SystemZElimCompare::getRegReferences(MachineInstr *MI, unsigned Reg) { + Reference Ref; + for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { + const MachineOperand &MO = MI->getOperand(I); + if (MO.isReg()) { + if (unsigned MOReg = MO.getReg()) { + if (MOReg == Reg || TRI->regsOverlap(MOReg, Reg)) { + if (MO.isUse()) { + Ref.Use = true; + Ref.IndirectUse |= (MOReg != Reg); + } + if (MO.isDef()) { + Ref.Def = true; + Ref.IndirectDef |= (MOReg != Reg); + } + } + } + } + } + return Ref; +} + +// Compare compares the result of MI against zero. 
If MI is an addition +// of -1 and if CCUsers is a single branch on nonzero, eliminate the addition +// and convert the branch to a BRCT(G). Return true on success. +bool +SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers) { + // Check whether we have an addition of -1. + unsigned Opcode = MI->getOpcode(); + unsigned BRCT; + if (Opcode == SystemZ::AHI) + BRCT = SystemZ::BRCT; + else if (Opcode == SystemZ::AGHI) + BRCT = SystemZ::BRCTG; + else + return false; + if (MI->getOperand(2).getImm() != -1) + return false; + + // Check whether we have a single JLH. + if (CCUsers.size() != 1) + return false; + MachineInstr *Branch = CCUsers[0]; + if (Branch->getOpcode() != SystemZ::BRC || + Branch->getOperand(0).getImm() != SystemZ::CCMASK_ICMP || + Branch->getOperand(1).getImm() != SystemZ::CCMASK_CMP_NE) + return false; + + // We already know that there are no references to the register between + // MI and Compare. Make sure that there are also no references between + // Compare and Branch. + unsigned SrcReg = Compare->getOperand(0).getReg(); + MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch; + for (++MBBI; MBBI != MBBE; ++MBBI) + if (getRegReferences(MBBI, SrcReg)) + return false; + + // The transformation is OK. Rebuild Branch as a BRCT(G). + MachineOperand Target(Branch->getOperand(2)); + Branch->RemoveOperand(2); + Branch->RemoveOperand(1); + Branch->RemoveOperand(0); + Branch->setDesc(TII->get(BRCT)); + MachineInstrBuilder(*Branch->getParent()->getParent(), Branch) + .addOperand(MI->getOperand(0)) + .addOperand(MI->getOperand(1)) + .addOperand(Target) + .addReg(SystemZ::CC, RegState::ImplicitDefine); + MI->removeFromParent(); + return true; +} + +// If MI is a load instruction, try to convert it into a LOAD AND TEST. +// Return true on success. +bool SystemZElimCompare::convertToLoadAndTest(MachineInstr *MI) { + unsigned Opcode = TII->getLoadAndTest(MI->getOpcode()); + if (!Opcode) + return false; + + MI->setDesc(TII->get(Opcode)); + MachineInstrBuilder(*MI->getParent()->getParent(), MI) + .addReg(SystemZ::CC, RegState::ImplicitDefine); + return true; +} + +// The CC users in CCUsers are testing the result of a comparison of some +// value X against zero and we know that any CC value produced by MI +// would also reflect the value of X. Try to adjust CCUsers so that +// they test the result of MI directly, returning true on success. +// Leave everything unchanged on failure. +bool SystemZElimCompare:: +adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers) { + int Opcode = MI->getOpcode(); + const MCInstrDesc &Desc = TII->get(Opcode); + unsigned MIFlags = Desc.TSFlags; + + // See which compare-style condition codes are available. + unsigned ReusableCCMask = SystemZII::getCompareZeroCCMask(MIFlags); + + // For unsigned comparisons with zero, only equality makes sense. + unsigned CompareFlags = Compare->getDesc().TSFlags; + if (CompareFlags & SystemZII::IsLogical) + ReusableCCMask &= SystemZ::CCMASK_CMP_EQ; + + if (ReusableCCMask == 0) + return false; + + unsigned CCValues = SystemZII::getCCValues(MIFlags); + assert((ReusableCCMask & ~CCValues) == 0 && "Invalid CCValues"); + + // Now check whether these flags are enough for all users. + SmallVector<MachineOperand *, 4> AlterMasks; + for (unsigned int I = 0, E = CCUsers.size(); I != E; ++I) { + MachineInstr *MI = CCUsers[I]; + + // Fail if this isn't a use of CC that we understand. 
+ unsigned Flags = MI->getDesc().TSFlags; + unsigned FirstOpNum; + if (Flags & SystemZII::CCMaskFirst) + FirstOpNum = 0; + else if (Flags & SystemZII::CCMaskLast) + FirstOpNum = MI->getNumExplicitOperands() - 2; + else + return false; + + // Check whether the instruction predicate treats all CC values + // outside of ReusableCCMask in the same way. In that case it + // doesn't matter what those CC values mean. + unsigned CCValid = MI->getOperand(FirstOpNum).getImm(); + unsigned CCMask = MI->getOperand(FirstOpNum + 1).getImm(); + unsigned OutValid = ~ReusableCCMask & CCValid; + unsigned OutMask = ~ReusableCCMask & CCMask; + if (OutMask != 0 && OutMask != OutValid) + return false; + + AlterMasks.push_back(&MI->getOperand(FirstOpNum)); + AlterMasks.push_back(&MI->getOperand(FirstOpNum + 1)); + } + + // All users are OK. Adjust the masks for MI. + for (unsigned I = 0, E = AlterMasks.size(); I != E; I += 2) { + AlterMasks[I]->setImm(CCValues); + unsigned CCMask = AlterMasks[I + 1]->getImm(); + if (CCMask & ~ReusableCCMask) + AlterMasks[I + 1]->setImm((CCMask & ReusableCCMask) | + (CCValues & ~ReusableCCMask)); + } + + // CC is now live after MI. + int CCDef = MI->findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI); + assert(CCDef >= 0 && "Couldn't find CC set"); + MI->getOperand(CCDef).setIsDead(false); + + // Clear any intervening kills of CC. + MachineBasicBlock::iterator MBBI = MI, MBBE = Compare; + for (++MBBI; MBBI != MBBE; ++MBBI) + MBBI->clearRegisterKills(SystemZ::CC, TRI); + + return true; +} + +// Return true if Compare is a comparison against zero. +static bool isCompareZero(MachineInstr *Compare) { + switch (Compare->getOpcode()) { + case SystemZ::LTEBRCompare: + case SystemZ::LTDBRCompare: + case SystemZ::LTXBRCompare: + return true; + + default: + return (Compare->getNumExplicitOperands() == 2 && + Compare->getOperand(1).isImm() && + Compare->getOperand(1).getImm() == 0); + } +} + +// Try to optimize cases where comparison instruction Compare is testing +// a value against zero. Return true on success and if Compare should be +// deleted as dead. CCUsers is the list of instructions that use the CC +// value produced by Compare. +bool SystemZElimCompare:: +optimizeCompareZero(MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers) { + if (!isCompareZero(Compare)) + return false; + + // Search back for CC results that are based on the first operand. + unsigned SrcReg = Compare->getOperand(0).getReg(); + unsigned SrcSubReg = Compare->getOperand(0).getSubReg(); + MachineBasicBlock *MBB = Compare->getParent(); + MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB->begin(); + Reference CCRefs; + Reference SrcRefs; + while (MBBI != MBBE) { + --MBBI; + MachineInstr *MI = MBBI; + if (resultTests(MI, SrcReg, SrcSubReg)) { + // Try to remove both MI and Compare by converting a branch to BRCT(G). + // We don't care in this case whether CC is modified between MI and + // Compare. + if (!CCRefs.Use && !SrcRefs && convertToBRCT(MI, Compare, CCUsers)) { + BranchOnCounts += 1; + return true; + } + // Try to eliminate Compare by reusing a CC result from MI. 
+ if ((!CCRefs && convertToLoadAndTest(MI)) || + (!CCRefs.Def && adjustCCMasksForInstr(MI, Compare, CCUsers))) { + EliminatedComparisons += 1; + return true; + } + } + SrcRefs |= getRegReferences(MI, SrcReg); + if (SrcRefs.Def) + return false; + CCRefs |= getRegReferences(MI, SystemZ::CC); + if (CCRefs.Use && CCRefs.Def) + return false; + } + return false; +} + +// Try to fuse comparison instruction Compare into a later branch. +// Return true on success and if Compare is therefore redundant. +bool SystemZElimCompare:: +fuseCompareAndBranch(MachineInstr *Compare, + SmallVectorImpl<MachineInstr *> &CCUsers) { + // See whether we have a comparison that can be fused. + unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(), + Compare); + if (!FusedOpcode) + return false; + + // See whether we have a single branch with which to fuse. + if (CCUsers.size() != 1) + return false; + MachineInstr *Branch = CCUsers[0]; + if (Branch->getOpcode() != SystemZ::BRC) + return false; + + // Make sure that the operands are available at the branch. + unsigned SrcReg = Compare->getOperand(0).getReg(); + unsigned SrcReg2 = (Compare->getOperand(1).isReg() ? + Compare->getOperand(1).getReg() : 0); + MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch; + for (++MBBI; MBBI != MBBE; ++MBBI) + if (MBBI->modifiesRegister(SrcReg, TRI) || + (SrcReg2 && MBBI->modifiesRegister(SrcReg2, TRI))) + return false; + + // Read the branch mask and target. + MachineOperand CCMask(MBBI->getOperand(1)); + MachineOperand Target(MBBI->getOperand(2)); + assert((CCMask.getImm() & ~SystemZ::CCMASK_ICMP) == 0 && + "Invalid condition-code mask for integer comparison"); + + // Clear out all current operands. + int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, false, TRI); + assert(CCUse >= 0 && "BRC must use CC"); + Branch->RemoveOperand(CCUse); + Branch->RemoveOperand(2); + Branch->RemoveOperand(1); + Branch->RemoveOperand(0); + + // Rebuild Branch as a fused compare and branch. + Branch->setDesc(TII->get(FusedOpcode)); + MachineInstrBuilder(*Branch->getParent()->getParent(), Branch) + .addOperand(Compare->getOperand(0)) + .addOperand(Compare->getOperand(1)) + .addOperand(CCMask) + .addOperand(Target) + .addReg(SystemZ::CC, RegState::ImplicitDefine); + + // Clear any intervening kills of SrcReg and SrcReg2. + MBBI = Compare; + for (++MBBI; MBBI != MBBE; ++MBBI) { + MBBI->clearRegisterKills(SrcReg, TRI); + if (SrcReg2) + MBBI->clearRegisterKills(SrcReg2, TRI); + } + FusedComparisons += 1; + return true; +} + +// Process all comparison instructions in MBB. Return true if something +// changed. +bool SystemZElimCompare::processBlock(MachineBasicBlock *MBB) { + bool Changed = false; + + // Walk backwards through the block looking for comparisons, recording + // all CC users as we go. The subroutines can delete Compare and + // instructions before it. 
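  // (Editorial note.)  The block is scanned backwards so that, by the time
  // a compare is visited, CCUsers already lists every later instruction
  // that reads the CC value it defines.  CompleteCCUsers starts out false
  // when CC is live out of the block, since users in successor blocks can
  // be neither collected nor adjusted.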
+ bool CompleteCCUsers = !isCCLiveOut(MBB); + SmallVector<MachineInstr *, 4> CCUsers; + MachineBasicBlock::iterator MBBI = MBB->end(); + while (MBBI != MBB->begin()) { + MachineInstr *MI = --MBBI; + if (CompleteCCUsers && + MI->isCompare() && + (optimizeCompareZero(MI, CCUsers) || + fuseCompareAndBranch(MI, CCUsers))) { + ++MBBI; + MI->removeFromParent(); + Changed = true; + CCUsers.clear(); + CompleteCCUsers = true; + continue; + } + + Reference CCRefs(getRegReferences(MI, SystemZ::CC)); + if (CCRefs.Def) { + CCUsers.clear(); + CompleteCCUsers = !CCRefs.IndirectDef; + } + if (CompleteCCUsers && CCRefs.Use) + CCUsers.push_back(MI); + } + return Changed; +} + +bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) { + TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo()); + TRI = &TII->getRegisterInfo(); + + bool Changed = false; + for (MachineFunction::iterator MFI = F.begin(), MFE = F.end(); + MFI != MFE; ++MFI) + Changed |= processBlock(MFI); + + return Changed; +} diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp index c0d72c3..a58da90 100644 --- a/lib/Target/SystemZ/SystemZFrameLowering.cpp +++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp @@ -14,19 +14,15 @@ #include "SystemZTargetMachine.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" #include "llvm/IR/Function.h" using namespace llvm; -SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm, - const SystemZSubtarget &sti) - : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, - -SystemZMC::CallFrameSize), - TM(tm), - STI(sti) { +namespace { // The ABI-defined register save slots, relative to the incoming stack // pointer. - static const unsigned SpillOffsetTable[][2] = { + static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = { { SystemZ::R2D, 0x10 }, { SystemZ::R3D, 0x18 }, { SystemZ::R4D, 0x20 }, @@ -46,11 +42,23 @@ SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm, { SystemZ::F4D, 0x90 }, { SystemZ::F6D, 0x98 } }; +} +SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm, + const SystemZSubtarget &sti) + : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, + -SystemZMC::CallFrameSize, 8), + TM(tm), STI(sti) { // Create a mapping from register number to save slot offset. RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS); for (unsigned I = 0, E = array_lengthof(SpillOffsetTable); I != E; ++I) - RegSpillOffsets[SpillOffsetTable[I][0]] = SpillOffsetTable[I][1]; + RegSpillOffsets[SpillOffsetTable[I].Reg] = SpillOffsetTable[I].Offset; +} + +const TargetFrameLowering::SpillSlot * +SystemZFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const { + NumEntries = array_lengthof(SpillOffsetTable); + return SpillOffsetTable; } void SystemZFrameLowering:: @@ -127,14 +135,12 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB, DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); // Scan the call-saved GPRs and find the bounds of the register spill area. 
- unsigned SavedGPRFrameSize = 0; unsigned LowGPR = 0; unsigned HighGPR = SystemZ::R15D; unsigned StartOffset = -1U; for (unsigned I = 0, E = CSI.size(); I != E; ++I) { unsigned Reg = CSI[I].getReg(); if (SystemZ::GR64BitRegClass.contains(Reg)) { - SavedGPRFrameSize += 8; unsigned Offset = RegSpillOffsets[Reg]; assert(Offset && "Unexpected GPR save"); if (StartOffset > Offset) { @@ -144,9 +150,7 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB, } } - // Save information about the range and location of the call-saved - // registers, for use by the epilogue inserter. - ZFI->setSavedGPRFrameSize(SavedGPRFrameSize); + // Save the range of call-saved registers, for use by the epilogue inserter. ZFI->setLowSavedGPR(LowGPR); ZFI->setHighSavedGPR(HighGPR); @@ -260,6 +264,22 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB, return true; } +void SystemZFrameLowering:: +processFunctionBeforeFrameFinalized(MachineFunction &MF, + RegScavenger *RS) const { + MachineFrameInfo *MFFrame = MF.getFrameInfo(); + uint64_t MaxReach = (MFFrame->estimateStackSize(MF) + + SystemZMC::CallFrameSize * 2); + if (!isUInt<12>(MaxReach)) { + // We may need register scavenging slots if some parts of the frame + // are outside the reach of an unsigned 12-bit displacement. + // Create 2 for the case where both addresses in an MVC are + // out of range. + RS->addScavengingFrameIndex(MFFrame->CreateStackObject(8, 8, false)); + RS->addScavengingFrameIndex(MFFrame->CreateStackObject(8, 8, false)); + } +} + // Emit instructions before MBBI (in MBB) to add NumBytes to Reg. static void emitIncrement(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, @@ -297,7 +317,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const { SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); MachineBasicBlock::iterator MBBI = MBB.begin(); MachineModuleInfo &MMI = MF.getMMI(); - const MCRegisterInfo &MRI = MMI.getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); const std::vector<CalleeSavedInfo> &CSI = MFFrame->getCalleeSavedInfo(); bool HasFP = hasFP(MF); DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); @@ -322,7 +342,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const { if (SystemZ::GR64BitRegClass.contains(Reg)) { int64_t Offset = SPOffsetFromCFA + RegSpillOffsets[Reg]; MMI.addFrameInst(MCCFIInstruction::createOffset( - GPRSaveLabel, MRI.getDwarfRegNum(Reg, true), Offset)); + GPRSaveLabel, MRI->getDwarfRegNum(Reg, true), Offset)); } } } @@ -351,7 +371,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const { MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol(); BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL)) .addSym(SetFPLabel); - unsigned HardFP = MRI.getDwarfRegNum(SystemZ::R11D, true); + unsigned HardFP = MRI->getDwarfRegNum(SystemZ::R11D, true); MMI.addFrameInst( MCCFIInstruction::createDefCfaRegister(SetFPLabel, HardFP)); @@ -379,7 +399,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const { // Add CFI for the this save. 
if (!FPRSaveLabel) FPRSaveLabel = MMI.getContext().CreateTempSymbol(); - unsigned Reg = MRI.getDwarfRegNum(I->getReg(), true); + unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true); int64_t Offset = getFrameIndexOffset(MF, I->getFrameIdx()); MMI.addFrameInst(MCCFIInstruction::createOffset( FPRSaveLabel, Reg, SPOffsetFromCFA + Offset)); @@ -449,11 +469,6 @@ int SystemZFrameLowering::getFrameIndexOffset(const MachineFunction &MF, // offset is therefore negative. int64_t Offset = (MFFrame->getObjectOffset(FI) + MFFrame->getOffsetAdjustment()); - if (FI >= 0) - // Non-fixed objects are allocated below the incoming stack pointer. - // Account for the space at the top of the frame that we choose not - // to allocate. - Offset += getUnallocatedTopBytes(MF); // Make the offset relative to the incoming stack pointer. Offset -= getOffsetOfLocalArea(); @@ -465,23 +480,12 @@ int SystemZFrameLowering::getFrameIndexOffset(const MachineFunction &MF, } uint64_t SystemZFrameLowering:: -getUnallocatedTopBytes(const MachineFunction &MF) const { - return MF.getInfo<SystemZMachineFunctionInfo>()->getSavedGPRFrameSize(); -} - -uint64_t SystemZFrameLowering:: getAllocatedStackSize(const MachineFunction &MF) const { const MachineFrameInfo *MFFrame = MF.getFrameInfo(); // Start with the size of the local variables and spill slots. uint64_t StackSize = MFFrame->getStackSize(); - // Remove any bytes that we choose not to allocate. - StackSize -= getUnallocatedTopBytes(MF); - - // Include space for an emergency spill slot, if one might be needed. - StackSize += getEmergencySpillSlotSize(MF); - // We need to allocate the ABI-defined 160-byte base area whenever // we allocate stack space for our own use and whenever we call another // function. @@ -491,19 +495,6 @@ getAllocatedStackSize(const MachineFunction &MF) const { return StackSize; } -unsigned SystemZFrameLowering:: -getEmergencySpillSlotSize(const MachineFunction &MF) const { - const MachineFrameInfo *MFFrame = MF.getFrameInfo(); - uint64_t MaxReach = MFFrame->getStackSize() + SystemZMC::CallFrameSize * 2; - return isUInt<12>(MaxReach) ? 0 : 8; -} - -unsigned SystemZFrameLowering:: -getEmergencySpillSlotOffset(const MachineFunction &MF) const { - assert(getEmergencySpillSlotSize(MF) && "No emergency spill slot"); - return SystemZMC::CallFrameSize; -} - bool SystemZFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { // The ABI requires us to allocate 160 bytes of stack space for the callee, diff --git a/lib/Target/SystemZ/SystemZFrameLowering.h b/lib/Target/SystemZ/SystemZFrameLowering.h index 5ca049c..9b0a1d5 100644 --- a/lib/Target/SystemZ/SystemZFrameLowering.h +++ b/lib/Target/SystemZ/SystemZFrameLowering.h @@ -29,7 +29,10 @@ public: SystemZFrameLowering(const SystemZTargetMachine &tm, const SystemZSubtarget &sti); - // Override FrameLowering. + // Override TargetFrameLowering. 
+ virtual bool isFPCloseToIncomingSP() const LLVM_OVERRIDE { return false; } + virtual const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const + LLVM_OVERRIDE; virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, RegScavenger *RS) const LLVM_OVERRIDE; @@ -45,6 +48,8 @@ public: const std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI) const LLVM_OVERRIDE; + virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF, + RegScavenger *RS) const; virtual void emitPrologue(MachineFunction &MF) const LLVM_OVERRIDE; virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const LLVM_OVERRIDE; @@ -59,29 +64,9 @@ public: MachineBasicBlock::iterator MI) const LLVM_OVERRIDE; - // The target-independent code automatically allocates save slots for - // call-saved GPRs. However, we don't need those slots for SystemZ, - // because the ABI sets aside GPR save slots in the caller-allocated part - // of the frame. Since the target-independent code puts this unneeded - // area at the top of the callee-allocated part of frame, we choose not - // to allocate it and adjust the offsets accordingly. Return the - // size of this unallocated area. - // FIXME: seems a bit hackish. - uint64_t getUnallocatedTopBytes(const MachineFunction &MF) const; - // Return the number of bytes in the callee-allocated part of the frame. uint64_t getAllocatedStackSize(const MachineFunction &MF) const; - // Return the number of frame bytes that should be reserved for - // an emergency spill slot, for use by the register scaveneger. - // Return 0 if register scaveging won't be needed. - unsigned getEmergencySpillSlotSize(const MachineFunction &MF) const; - - // Return the offset from the frame pointer of the emergency spill slot, - // which always fits within a 12-bit unsigned displacement field. - // Only valid if getEmergencySpillSlotSize(MF) returns nonzero. - unsigned getEmergencySpillSlotOffset(const MachineFunction &MF) const; - // Return the byte offset from the incoming stack pointer of Reg's // ABI-defined save slot. Return 0 if no slot is defined for Reg. unsigned getRegSpillOffset(unsigned Reg) const { diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index f10ba23..d9794b1 100644 --- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "SystemZTargetMachine.h" +#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" @@ -91,6 +92,37 @@ struct SystemZAddressingMode { } }; +// Return a mask with Count low bits set. +static uint64_t allOnes(unsigned int Count) { + return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1; +} + +// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation +// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and +// Rotate (I5). The combined operand value is effectively: +// +// (or (rotl Input, Rotate), ~Mask) +// +// for RNSBG and: +// +// (and (rotl Input, Rotate), Mask) +// +// otherwise. The value has BitSize bits. 
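// (Editorial example, not part of the patch.)  For a RISBG with
// Rotate == 8 and Mask == 0x0000000000ff0000, the tracked value is
//
//   (and (rotl Input, 8), 0x0000000000ff0000)
//
// i.e. the byte that started life in bits 8-15 of Input, selected at
// bits 16-23 of the rotated result.  expandRxSBG() below repeatedly folds
// ANDs, ORs, rotates and shifts of Input into Mask and Rotate.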
+struct RxSBGOperands { + RxSBGOperands(unsigned Op, SDValue N) + : Opcode(Op), BitSize(N.getValueType().getSizeInBits()), + Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63), + Rotate(0) {} + + unsigned Opcode; + unsigned BitSize; + uint64_t Mask; + SDValue Input; + unsigned Start; + unsigned End; + unsigned Rotate; +}; + class SystemZDAGToDAGISel : public SelectionDAGISel { const SystemZTargetLowering &Lowering; const SystemZSubtarget &Subtarget; @@ -100,6 +132,14 @@ class SystemZDAGToDAGISel : public SelectionDAGISel { return CurDAG->getTargetConstant(Imm, Node->getValueType(0)); } + const SystemZTargetMachine &getTargetMachine() const { + return static_cast<const SystemZTargetMachine &>(TM); + } + + const SystemZInstrInfo *getInstrInfo() const { + return getTargetMachine().getInstrInfo(); + } + // Try to fold more of the base or index of AM into AM, where IsBase // selects between the base and index. bool expandAddress(SystemZAddressingMode &AM, bool IsBase); @@ -199,6 +239,33 @@ class SystemZDAGToDAGISel : public SelectionDAGISel { Addr, Base, Disp, Index); } + // Check whether (or Op (and X InsertMask)) is effectively an insertion + // of X into bits InsertMask of some Y != Op. Return true if so and + // set Op to that Y. + bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask); + + // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used. + // Return true on success. + bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask); + + // Try to fold some of RxSBG.Input into other fields of RxSBG. + // Return true on success. + bool expandRxSBG(RxSBGOperands &RxSBG); + + // Return an undefined i64 value. + SDValue getUNDEF64(SDLoc DL); + + // Convert N to VT, if it isn't already. + SDValue convertTo(SDLoc DL, EVT VT, SDValue N); + + // Try to implement AND or shift node N using RISBG with the zero flag set. + // Return the selected node on success, otherwise return null. + SDNode *tryRISBGZero(SDNode *N); + + // Try to use RISBG or Opcode to implement OR or XOR node N. + // Return the selected node on success, otherwise return null. + SDNode *tryRxSBG(SDNode *N, unsigned Opcode); + // If Op0 is null, then Node is a constant that can be loaded using: // // (Opcode UpperVal LowerVal) @@ -209,6 +276,8 @@ class SystemZDAGToDAGISel : public SelectionDAGISel { SDNode *splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0, uint64_t UpperVal, uint64_t LowerVal); + bool storeLoadCanUseMVC(SDNode *N) const; + public: SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel) : SelectionDAGISel(TM, OptLevel), @@ -518,6 +587,304 @@ bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form, return true; } +bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op, + uint64_t InsertMask) { + // We're only interested in cases where the insertion is into some operand + // of Op, rather than into Op itself. The only useful case is an AND. + if (Op.getOpcode() != ISD::AND) + return false; + + // We need a constant mask. + ConstantSDNode *MaskNode = + dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode()); + if (!MaskNode) + return false; + + // It's not an insertion of Op.getOperand(0) if the two masks overlap. + uint64_t AndMask = MaskNode->getZExtValue(); + if (InsertMask & AndMask) + return false; + + // It's only an insertion if all bits are covered or are known to be zero. + // The inner check covers all cases but is more expensive. 
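  // (Editorial example.)  For an i32 insertion with InsertMask == 0xff000000,
  // an AndMask of 0x00ffffff covers the remaining bits outright, so the
  // cheap test below succeeds; with AndMask == 0x0000ffff the uncovered
  // byte 0x00ff0000 must instead be provably zero in Op.getOperand(0),
  // which is what the ComputeMaskedBits fallback establishes.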
+ uint64_t Used = allOnes(Op.getValueType().getSizeInBits()); + if (Used != (AndMask | InsertMask)) { + APInt KnownZero, KnownOne; + CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne); + if (Used != (AndMask | InsertMask | KnownZero.getZExtValue())) + return false; + } + + Op = Op.getOperand(0); + return true; +} + +bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) { + const SystemZInstrInfo *TII = getInstrInfo(); + if (RxSBG.Rotate != 0) + Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)); + Mask &= RxSBG.Mask; + if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) { + RxSBG.Mask = Mask; + return true; + } + return false; +} + +// RxSBG.Input is a shift of Count bits in the direction given by IsLeft. +// Return true if the result depends on the signs or zeros that are +// shifted in. +static bool shiftedInBitsMatter(RxSBGOperands &RxSBG, uint64_t Count, + bool IsLeft) { + // Work out which bits of the shift result are zeros or sign copies. + uint64_t ShiftedIn = allOnes(Count); + if (!IsLeft) + ShiftedIn <<= RxSBG.BitSize - Count; + + // Rotate that mask in the same way as RxSBG.Input is rotated. + if (RxSBG.Rotate != 0) + ShiftedIn = ((ShiftedIn << RxSBG.Rotate) | + (ShiftedIn >> (64 - RxSBG.Rotate))); + + // Fail if any of the zero or sign bits are used. + return (ShiftedIn & RxSBG.Mask) != 0; +} + +bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) { + SDValue N = RxSBG.Input; + unsigned Opcode = N.getOpcode(); + switch (Opcode) { + case ISD::AND: { + if (RxSBG.Opcode == SystemZ::RNSBG) + return false; + + ConstantSDNode *MaskNode = + dyn_cast<ConstantSDNode>(N.getOperand(1).getNode()); + if (!MaskNode) + return false; + + SDValue Input = N.getOperand(0); + uint64_t Mask = MaskNode->getZExtValue(); + if (!refineRxSBGMask(RxSBG, Mask)) { + // If some bits of Input are already known zeros, those bits will have + // been removed from the mask. See if adding them back in makes the + // mask suitable. + APInt KnownZero, KnownOne; + CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne); + Mask |= KnownZero.getZExtValue(); + if (!refineRxSBGMask(RxSBG, Mask)) + return false; + } + RxSBG.Input = Input; + return true; + } + + case ISD::OR: { + if (RxSBG.Opcode != SystemZ::RNSBG) + return false; + + ConstantSDNode *MaskNode = + dyn_cast<ConstantSDNode>(N.getOperand(1).getNode()); + if (!MaskNode) + return false; + + SDValue Input = N.getOperand(0); + uint64_t Mask = ~MaskNode->getZExtValue(); + if (!refineRxSBGMask(RxSBG, Mask)) { + // If some bits of Input are already known ones, those bits will have + // been removed from the mask. See if adding them back in makes the + // mask suitable. + APInt KnownZero, KnownOne; + CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne); + Mask &= ~KnownOne.getZExtValue(); + if (!refineRxSBGMask(RxSBG, Mask)) + return false; + } + RxSBG.Input = Input; + return true; + } + + case ISD::ROTL: { + // Any 64-bit rotate left can be merged into the RxSBG. 
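    // (Editorial note.)  Only full 64-bit rotates can be absorbed: a 32-bit
    // ROTL wraps bits from position 31 back to position 0, which the single
    // 64-bit Rotate field cannot express.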
+ if (RxSBG.BitSize != 64) + return false; + ConstantSDNode *CountNode + = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode()); + if (!CountNode) + return false; + + RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63; + RxSBG.Input = N.getOperand(0); + return true; + } + + case ISD::SHL: { + ConstantSDNode *CountNode = + dyn_cast<ConstantSDNode>(N.getOperand(1).getNode()); + if (!CountNode) + return false; + + uint64_t Count = CountNode->getZExtValue(); + if (Count < 1 || Count >= RxSBG.BitSize) + return false; + + if (RxSBG.Opcode == SystemZ::RNSBG) { + // Treat (shl X, count) as (rotl X, size-count) as long as the bottom + // count bits from RxSBG.Input are ignored. + if (shiftedInBitsMatter(RxSBG, Count, true)) + return false; + } else { + // Treat (shl X, count) as (and (rotl X, count), ~0<<count). + if (!refineRxSBGMask(RxSBG, allOnes(RxSBG.BitSize - Count) << Count)) + return false; + } + + RxSBG.Rotate = (RxSBG.Rotate + Count) & 63; + RxSBG.Input = N.getOperand(0); + return true; + } + + case ISD::SRL: + case ISD::SRA: { + ConstantSDNode *CountNode = + dyn_cast<ConstantSDNode>(N.getOperand(1).getNode()); + if (!CountNode) + return false; + + uint64_t Count = CountNode->getZExtValue(); + if (Count < 1 || Count >= RxSBG.BitSize) + return false; + + if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) { + // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top + // count bits from RxSBG.Input are ignored. + if (shiftedInBitsMatter(RxSBG, Count, false)) + return false; + } else { + // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count), + // which is similar to SLL above. + if (!refineRxSBGMask(RxSBG, allOnes(RxSBG.BitSize - Count))) + return false; + } + + RxSBG.Rotate = (RxSBG.Rotate - Count) & 63; + RxSBG.Input = N.getOperand(0); + return true; + } + default: + return false; + } +} + +SDValue SystemZDAGToDAGISel::getUNDEF64(SDLoc DL) { + SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64); + return SDValue(N, 0); +} + +SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) { + if (N.getValueType() == MVT::i32 && VT == MVT::i64) { + SDValue Index = CurDAG->getTargetConstant(SystemZ::subreg_32bit, MVT::i64); + SDNode *Insert = CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, + DL, VT, getUNDEF64(DL), N, Index); + return SDValue(Insert, 0); + } + if (N.getValueType() == MVT::i64 && VT == MVT::i32) { + SDValue Index = CurDAG->getTargetConstant(SystemZ::subreg_32bit, MVT::i64); + SDNode *Extract = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, + DL, VT, N, Index); + return SDValue(Extract, 0); + } + assert(N.getValueType() == VT && "Unexpected value types"); + return N; +} + +SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) { + EVT VT = N->getValueType(0); + RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0)); + unsigned Count = 0; + while (expandRxSBG(RISBG)) + Count += 1; + if (Count == 0) + return 0; + if (Count == 1) { + // Prefer to use normal shift instructions over RISBG, since they can handle + // all cases and are sometimes shorter. + if (N->getOpcode() != ISD::AND) + return 0; + + // Prefer register extensions like LLC over RISBG. Also prefer to start + // out with normal ANDs if one instruction would be enough. We can convert + // these ANDs into an RISBG later if a three-address instruction is useful.
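    // (Editorial note.)  The masks singled out below have single-instruction
    // alternatives at least as good as RISBG: 0xff and 0xffff correspond to
    // the LLC/LLH-style zero extensions mentioned above, while masks whose
    // complement satisfies isImmLF or isImmHF can be done with NILF or NIHF.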
+ if (VT == MVT::i32 || + RISBG.Mask == 0xff || + RISBG.Mask == 0xffff || + SystemZ::isImmLF(~RISBG.Mask) || + SystemZ::isImmHF(~RISBG.Mask)) { + // Force the new mask into the DAG, since it may include known-one bits. + ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode()); + if (MaskN->getZExtValue() != RISBG.Mask) { + SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT); + N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask); + return SelectCode(N); + } + return 0; + } + } + + SDValue Ops[5] = { + getUNDEF64(SDLoc(N)), + convertTo(SDLoc(N), MVT::i64, RISBG.Input), + CurDAG->getTargetConstant(RISBG.Start, MVT::i32), + CurDAG->getTargetConstant(RISBG.End | 128, MVT::i32), + CurDAG->getTargetConstant(RISBG.Rotate, MVT::i32) + }; + N = CurDAG->getMachineNode(SystemZ::RISBG, SDLoc(N), MVT::i64, Ops); + return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode(); +} + +SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) { + // Try treating each operand of N as the second operand of the RxSBG + // and see which goes deepest. + RxSBGOperands RxSBG[] = { + RxSBGOperands(Opcode, N->getOperand(0)), + RxSBGOperands(Opcode, N->getOperand(1)) + }; + unsigned Count[] = { 0, 0 }; + for (unsigned I = 0; I < 2; ++I) + while (expandRxSBG(RxSBG[I])) + Count[I] += 1; + + // Do nothing if neither operand is suitable. + if (Count[0] == 0 && Count[1] == 0) + return 0; + + // Pick the deepest second operand. + unsigned I = Count[0] > Count[1] ? 0 : 1; + SDValue Op0 = N->getOperand(I ^ 1); + + // Prefer IC for character insertions from memory. + if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0) + if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode())) + if (Load->getMemoryVT() == MVT::i8) + return 0; + + // See whether we can avoid an AND in the first operand by converting + // ROSBG to RISBG. + if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) + Opcode = SystemZ::RISBG; + + EVT VT = N->getValueType(0); + SDValue Ops[5] = { + convertTo(SDLoc(N), MVT::i64, Op0), + convertTo(SDLoc(N), MVT::i64, RxSBG[I].Input), + CurDAG->getTargetConstant(RxSBG[I].Start, MVT::i32), + CurDAG->getTargetConstant(RxSBG[I].End, MVT::i32), + CurDAG->getTargetConstant(RxSBG[I].Rotate, MVT::i32) + }; + N = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, Ops); + return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode(); +} + SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0, uint64_t UpperVal, uint64_t LowerVal) { @@ -533,6 +900,49 @@ SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node, return Or.getNode(); } +// N is a (store (load ...), ...) pattern. Return true if it can use MVC. +bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const { + StoreSDNode *Store = cast<StoreSDNode>(N); + LoadSDNode *Load = cast<LoadSDNode>(Store->getValue().getNode()); + + // MVC is logically a bytewise copy, so can't be used for volatile accesses. + if (Load->isVolatile() || Store->isVolatile()) + return false; + + // Prefer not to use MVC if either address can use ... RELATIVE LONG + // instructions. + assert(Load->getMemoryVT() == Store->getMemoryVT() && + "Should already have checked that the types match"); + uint64_t Size = Load->getMemoryVT().getStoreSize(); + if (Size > 1 && Size <= 8) { + // Prefer LHRL, LRL and LGRL. + if (Load->getBasePtr().getOpcode() == SystemZISD::PCREL_WRAPPER) + return false; + // Prefer STHRL, STRL and STGRL. 
+ if (Store->getBasePtr().getOpcode() == SystemZISD::PCREL_WRAPPER) + return false; + } + + // There's no chance of overlap if the load is invariant. + if (Load->isInvariant()) + return true; + + // If both operands are aligned, they must be equal or not overlap. + if (Load->getAlignment() >= Size && Store->getAlignment() >= Size) + return true; + + // Otherwise we need to check whether there's an alias. + const Value *V1 = Load->getSrcValue(); + const Value *V2 = Store->getSrcValue(); + if (!V1 || !V2) + return false; + + int64_t End1 = Load->getSrcValueOffset() + Size; + int64_t End2 = Store->getSrcValueOffset() + Size; + return !AA->alias(AliasAnalysis::Location(V1, End1, Load->getTBAAInfo()), + AliasAnalysis::Location(V2, End2, Store->getTBAAInfo())); +} + SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) { // Dump information about the Node being selected DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n"); @@ -544,12 +954,21 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) { } unsigned Opcode = Node->getOpcode(); + SDNode *ResNode = 0; switch (Opcode) { case ISD::OR: + if (Node->getOperand(1).getOpcode() != ISD::Constant) + ResNode = tryRxSBG(Node, SystemZ::ROSBG); + goto or_xor; + case ISD::XOR: + if (Node->getOperand(1).getOpcode() != ISD::Constant) + ResNode = tryRxSBG(Node, SystemZ::RXSBG); + // Fall through. + or_xor: // If this is a 64-bit operation in which both 32-bit halves are nonzero, // split the operation into two. - if (Node->getValueType(0) == MVT::i64) + if (!ResNode && Node->getValueType(0) == MVT::i64) if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) { uint64_t Val = Op1->getZExtValue(); if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) @@ -558,6 +977,17 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) { } break; + case ISD::AND: + if (Node->getOperand(1).getOpcode() != ISD::Constant) + ResNode = tryRxSBG(Node, SystemZ::RNSBG); + // Fall through. + case ISD::ROTL: + case ISD::SHL: + case ISD::SRL: + if (!ResNode) + ResNode = tryRISBGZero(Node); + break; + case ISD::Constant: // If this is a 64-bit constant that is out of the range of LLILF, // LLIHF and LGFI, split it into two 32-bit pieces. @@ -582,10 +1012,32 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) { } } break; + + case SystemZISD::SELECT_CCMASK: { + SDValue Op0 = Node->getOperand(0); + SDValue Op1 = Node->getOperand(1); + // Prefer to put any load first, so that it can be matched as a + // conditional load. + if (Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) { + SDValue CCValid = Node->getOperand(2); + SDValue CCMask = Node->getOperand(3); + uint64_t ConstCCValid = + cast<ConstantSDNode>(CCValid.getNode())->getZExtValue(); + uint64_t ConstCCMask = + cast<ConstantSDNode>(CCMask.getNode())->getZExtValue(); + // Invert the condition. 
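        // (Editorial note.)  CCMask is always a subset of CCValid, so
        // XOR-ing the two yields CCValid & ~CCMask: the complement of the
        // condition within the CC values this comparison can produce.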
+ CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask, + CCMask.getValueType()); + SDValue Op4 = Node->getOperand(4); + Node = CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4); + } + break; + } } // Select the default instruction - SDNode *ResNode = SelectCode(Node); + if (!ResNode) + ResNode = SelectCode(Node); DEBUG(errs() << "=> "; if (ResNode == NULL || ResNode == Node) diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 1dc187f..6acdcd4 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -200,11 +200,6 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) setOperationAction(ISD::STACKSAVE, MVT::Other, Custom); setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom); - // Expand these using getExceptionSelectorRegister() and - // getExceptionPointerRegister(). - setOperationAction(ISD::EXCEPTIONADDR, PtrVT, Expand); - setOperationAction(ISD::EHSELECTION, PtrVT, Expand); - // Handle floating-point types. for (unsigned I = MVT::FIRST_FP_VALUETYPE; I <= MVT::LAST_FP_VALUETYPE; @@ -246,6 +241,38 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) setOperationAction(ISD::VASTART, MVT::Other, Custom); setOperationAction(ISD::VACOPY, MVT::Other, Custom); setOperationAction(ISD::VAEND, MVT::Other, Expand); + + // We want to use MVC in preference to even a single load/store pair. + MaxStoresPerMemcpy = 0; + MaxStoresPerMemcpyOptSize = 0; + + // The main memset sequence is a byte store followed by an MVC. + // Two STC or MV..I stores win over that, but the kind of fused stores + // generated by target-independent code don't when the byte value is + // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better + // than "STC;MVC". Handle the choice in target-specific code instead. + MaxStoresPerMemset = 0; + MaxStoresPerMemsetOptSize = 0; +} + +bool +SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { + VT = VT.getScalarType(); + + if (!VT.isSimple()) + return false; + + switch (VT.getSimpleVT().SimpleTy) { + case MVT::f32: + case MVT::f64: + return true; + case MVT::f128: + return false; + default: + break; + } + + return false; } bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { @@ -263,6 +290,21 @@ bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, return true; } +bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM, + Type *Ty) const { + // Punt on globals for now, although they can be used in limited + // RELATIVE LONG cases. + if (AM.BaseGV) + return false; + + // Require a 20-bit signed offset. + if (!isInt<20>(AM.BaseOffs)) + return false; + + // Indexing is OK but no scale factor can be applied. + return AM.Scale == 0 || AM.Scale == 1; +} + //===----------------------------------------------------------------------===// // Inline asm support //===----------------------------------------------------------------------===// @@ -359,8 +401,24 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info, return weight; } +// Parse a "{tNNN}" register constraint for which the register type "t" +// has already been verified. MC is the class associated with "t" and +// Map maps 0-based register numbers to LLVM register numbers. 
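// (Editorial example.)  Given Constraint == "{r5}" and VT == MVT::i64, the
// helper below returns {SystemZ::R5D, &SystemZ::GR64BitRegClass}; an
// out-of-range number such as "{r17}" yields a null register/class pair,
// marking the constraint as invalid.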
+static std::pair<unsigned, const TargetRegisterClass *> +parseRegisterNumber(const std::string &Constraint, + const TargetRegisterClass *RC, const unsigned *Map) { + assert(*(Constraint.end()-1) == '}' && "Missing '}'"); + if (isdigit(Constraint[2])) { + std::string Suffix(Constraint.data() + 2, Constraint.size() - 2); + unsigned Index = atoi(Suffix.c_str()); + if (Index < 16 && Map[Index]) + return std::make_pair(Map[Index], RC); + } + return std::make_pair(0u, static_cast<TargetRegisterClass*>(0)); +} + std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering:: -getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const { +getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const { if (Constraint.size() == 1) { // GCC Constraint Letters switch (Constraint[0]) { @@ -388,6 +446,32 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const { return std::make_pair(0U, &SystemZ::FP32BitRegClass); } } + if (Constraint[0] == '{') { + // We need to override the default register parsing for GPRs and FPRs + // because the interpretation depends on VT. The internal names of + // the registers are also different from the external names + // (F0D and F0S instead of F0, etc.). + if (Constraint[1] == 'r') { + if (VT == MVT::i32) + return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass, + SystemZMC::GR32Regs); + if (VT == MVT::i128) + return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass, + SystemZMC::GR128Regs); + return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass, + SystemZMC::GR64Regs); + } + if (Constraint[1] == 'f') { + if (VT == MVT::f32) + return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass, + SystemZMC::FP32Regs); + if (VT == MVT::f128) + return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass, + SystemZMC::FP128Regs); + return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass, + SystemZMC::FP64Regs); + } + } return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } @@ -610,9 +694,9 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc &DL = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -827,6 +911,29 @@ static unsigned CCMaskForCondCode(ISD::CondCode CC) { } // If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1 +// can be converted to a comparison against zero, adjust the operands +// as necessary. 
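// (Editorial example.)  A signed "a > -1" arrives here with Value == -1 and
// CCMask == CCMASK_CMP_GT; toggling CCMASK_CMP_EQ turns that into
// CCMASK_CMP_GE against a new constant of zero, i.e. "a >= 0", which later
// passes can fold into instructions such as LOAD AND TEST.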
+static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned, + SDValue &CmpOp0, SDValue &CmpOp1, + unsigned &CCMask) { + if (IsUnsigned) + return; + + ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode()); + if (!ConstOp1) + return; + + int64_t Value = ConstOp1->getSExtValue(); + if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) || + (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) || + (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) || + (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) { + CCMask ^= SystemZ::CCMASK_CMP_EQ; + CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType()); + } +} + +// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1 // is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary. static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned, SDValue &CmpOp0, SDValue &CmpOp1, @@ -870,7 +977,7 @@ static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned, if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT) // Test whether the high bit of the byte is set. Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true; - else if (SignedValue == -1 && CCMask == SystemZ::CCMASK_CMP_GT) + else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE) // Test whether the high bit of the byte is clear. Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true; else @@ -946,15 +1053,22 @@ static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0, return false; } -// Return a target node that compares CmpOp0 and CmpOp1. Set CCMask to the -// 4-bit condition-code mask for CC. +// Return a target node that compares CmpOp0 with CmpOp1 and stores a +// 2-bit result in CC. Set CCValid to the CCMASK_* of all possible +// 2-bit results and CCMask to the subset of those results that are +// associated with Cond. 
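One consequence of carrying CCValid alongside CCMask, used repeatedly by the lowering code further down (e.g. the CCMask ^= CCValid in emitCondStore): inverting a condition is just the complement within the set of CC values the producer can actually generate. A minimal sketch (names invented; CCMASK_ICMP is the real mask for integer compares):

    // CCValid: mask of CC values the producing instruction can set.
    // CCMask:  the "condition holds" subset of CCValid.
    // The opposite condition is everything else the producer can set.
    unsigned invertWithinValid(unsigned CCValid, unsigned CCMask) {
      return CCMask ^ CCValid;
    }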
static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, - ISD::CondCode CC, unsigned &CCMask) { + ISD::CondCode Cond, unsigned &CCValid, + unsigned &CCMask) { bool IsUnsigned = false; - CCMask = CCMaskForCondCode(CC); - if (!CmpOp0.getValueType().isFloatingPoint()) { + CCMask = CCMaskForCondCode(Cond); + if (CmpOp0.getValueType().isFloatingPoint()) + CCValid = SystemZ::CCMASK_FCMP; + else { IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO; - CCMask &= ~SystemZ::CCMASK_CMP_UO; + CCValid = SystemZ::CCMASK_ICMP; + CCMask &= CCValid; + adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask); adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask); if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask)) IsUnsigned = true; @@ -996,10 +1110,11 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue Dest = Op.getOperand(4); SDLoc DL(Op); - unsigned CCMask; - SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask); + unsigned CCValid, CCMask; + SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCValid, CCMask); return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), - Chain, DAG.getConstant(CCMask, MVT::i32), Dest, Flags); + Chain, DAG.getConstant(CCValid, MVT::i32), + DAG.getConstant(CCMask, MVT::i32), Dest, Flags); } SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, @@ -1011,12 +1126,13 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); SDLoc DL(Op); - unsigned CCMask; - SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask); + unsigned CCValid, CCMask; + SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCValid, CCMask); - SmallVector<SDValue, 4> Ops; + SmallVector<SDValue, 5> Ops; Ops.push_back(TrueOp); Ops.push_back(FalseOp); + Ops.push_back(DAG.getConstant(CCValid, MVT::i32)); Ops.push_back(DAG.getConstant(CCMask, MVT::i32)); Ops.push_back(Flags); @@ -1269,18 +1385,23 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, SDValue Op1 = Op.getOperand(1); EVT VT = Op.getValueType(); SDLoc DL(Op); + unsigned Opcode; // We use DSGF for 32-bit division. if (is32Bit(VT)) { Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); - Op1 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op1); - } + Opcode = SystemZISD::SDIVREM32; + } else if (DAG.ComputeNumSignBits(Op1) > 32) { + Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); + Opcode = SystemZISD::SDIVREM32; + } else + Opcode = SystemZISD::SDIVREM64; // DSG(F) takes a 64-bit dividend, so the even register in the GR128 // input is "don't care". The instruction returns the remainder in // the even register and the quotient in the odd register. 
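The new ComputeNumSignBits(Op1) > 32 test above decides when the divisor can be truncated and the 64-by-32-bit divide (SDIVREM32/DSGF) used: more than 32 sign bits means the top 33 bits are all copies of the sign, so the value is exactly representable as an i32. The scalar equivalent of that predicate, for illustration:

    #include <cstdint>

    // True when sign-extending the low 32 bits reproduces the full value,
    // i.e. when the 64-bit divisor fits in an i32 -- the same condition as
    // "more than 32 sign bits" in ComputeNumSignBits' convention.
    bool divisorFitsInI32(int64_t Divisor) {
      return Divisor == static_cast<int64_t>(static_cast<int32_t>(Divisor));
    }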
SDValue Ops[2]; - lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::SDIVREM64, + lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, Op0, Op1, Ops[1], Ops[0]); return DAG.getMergeValues(Ops, 2, DL); } @@ -1579,6 +1700,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { OPCODE(SDIVREM64); OPCODE(UDIVREM32); OPCODE(UDIVREM64); + OPCODE(MVC); OPCODE(ATOMIC_SWAPW); OPCODE(ATOMIC_LOADW_ADD); OPCODE(ATOMIC_LOADW_SUB); @@ -1620,34 +1742,6 @@ static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, return NewMBB; } -bool SystemZTargetLowering:: -convertPrevCompareToBranch(MachineBasicBlock *MBB, - MachineBasicBlock::iterator MBBI, - unsigned CCMask, MachineBasicBlock *Target) const { - MachineBasicBlock::iterator Compare = MBBI; - MachineBasicBlock::iterator Begin = MBB->begin(); - do - { - if (Compare == Begin) - return false; - --Compare; - } - while (Compare->isDebugValue()); - - const SystemZInstrInfo *TII = TM.getInstrInfo(); - unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(), - Compare); - if (!FusedOpcode) - return false; - - DebugLoc DL = Compare->getDebugLoc(); - BuildMI(*MBB, MBBI, DL, TII->get(FusedOpcode)) - .addOperand(Compare->getOperand(0)).addOperand(Compare->getOperand(1)) - .addImm(CCMask).addMBB(Target); - Compare->removeFromParent(); - return true; -} - // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. MachineBasicBlock * SystemZTargetLowering::emitSelect(MachineInstr *MI, @@ -1657,7 +1751,8 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI, unsigned DestReg = MI->getOperand(0).getReg(); unsigned TrueReg = MI->getOperand(1).getReg(); unsigned FalseReg = MI->getOperand(2).getReg(); - unsigned CCMask = MI->getOperand(3).getImm(); + unsigned CCValid = MI->getOperand(3).getImm(); + unsigned CCMask = MI->getOperand(4).getImm(); DebugLoc DL = MI->getDebugLoc(); MachineBasicBlock *StartMBB = MBB; @@ -1667,15 +1762,9 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI, // StartMBB: // BRC CCMask, JoinMBB // # fallthrough to FalseMBB - // - // The original DAG glues comparisons to their uses, both to ensure - // that no CC-clobbering instructions are inserted between them, and - // to ensure that comparison results are not reused. This means that - // this Select is the sole user of any preceding comparison instruction - // and that we can try to use a fused compare and branch instead. MBB = StartMBB; - if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB)) - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); MBB->addSuccessor(JoinMBB); MBB->addSuccessor(FalseMBB); @@ -1696,6 +1785,69 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI, return JoinMBB; } +// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. +// StoreOpcode is the store to use and Invert says whether the store should +// happen when the condition is false rather than true. If a STORE ON +// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 
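In scalar terms, the conditional store that emitCondStore expands below behaves as follows (sketch only; the register, chain, and basic-block plumbing is omitted, and the mask convention shown is the usual z one of mask value 8 selecting CC0):

    // CC is the current condition code (0-3). The 4-bit mask selects which
    // CC values trigger the store: 8 -> CC0, 4 -> CC1, 2 -> CC2, 1 -> CC3.
    void conditionalStore(int *Addr, int Value, unsigned CC, unsigned CCMask) {
      if (CCMask & (8u >> CC))
        *Addr = Value;
    }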
+MachineBasicBlock * +SystemZTargetLowering::emitCondStore(MachineInstr *MI, + MachineBasicBlock *MBB, + unsigned StoreOpcode, unsigned STOCOpcode, + bool Invert) const { + const SystemZInstrInfo *TII = TM.getInstrInfo(); + + unsigned SrcReg = MI->getOperand(0).getReg(); + MachineOperand Base = MI->getOperand(1); + int64_t Disp = MI->getOperand(2).getImm(); + unsigned IndexReg = MI->getOperand(3).getReg(); + unsigned CCValid = MI->getOperand(4).getImm(); + unsigned CCMask = MI->getOperand(5).getImm(); + DebugLoc DL = MI->getDebugLoc(); + + StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); + + // Use STOCOpcode if possible. We could use different store patterns in + // order to avoid matching the index register, but the performance trade-offs + // might be more complicated in that case. + if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) { + if (Invert) + CCMask ^= CCValid; + BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) + .addReg(SrcReg).addOperand(Base).addImm(Disp) + .addImm(CCValid).addImm(CCMask); + MI->eraseFromParent(); + return MBB; + } + + // Get the condition needed to branch around the store. + if (!Invert) + CCMask ^= CCValid; + + MachineBasicBlock *StartMBB = MBB; + MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB); + MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); + + // StartMBB: + // BRC CCMask, JoinMBB + // # fallthrough to FalseMBB + MBB = StartMBB; + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); + MBB->addSuccessor(JoinMBB); + MBB->addSuccessor(FalseMBB); + + // FalseMBB: + // store %SrcReg, %Disp(%Index,%Base) + // # fallthrough to JoinMBB + MBB = FalseMBB; + BuildMI(MBB, DL, TII->get(StoreOpcode)) + .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); + MBB->addSuccessor(JoinMBB); + + MI->eraseFromParent(); + return JoinMBB; +} + // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. @@ -1712,7 +1864,6 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, const SystemZInstrInfo *TII = TM.getInstrInfo(); MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); bool IsSubWord = (BitSize < 32); // Extract the operands. Base can be a register or a frame index. @@ -1812,7 +1963,8 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); MBB->addSuccessor(DoneMBB); @@ -1835,7 +1987,6 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, const SystemZInstrInfo *TII = TM.getInstrInfo(); MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); bool IsSubWord = (BitSize < 32); // Extract the operands. Base can be a register or a frame index. 
@@ -1897,17 +2048,10 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, if (IsSubWord) BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) .addReg(OldVal).addReg(BitShift).addImm(0); - unsigned FusedOpcode = TII->getCompareAndBranch(CompareOpcode); - if (FusedOpcode) - BuildMI(MBB, DL, TII->get(FusedOpcode)) - .addReg(RotatedOldVal).addReg(Src2) - .addImm(KeepOldMask).addMBB(UpdateMBB); - else { - BuildMI(MBB, DL, TII->get(CompareOpcode)) - .addReg(RotatedOldVal).addReg(Src2); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)) - .addImm(KeepOldMask).addMBB(UpdateMBB); - } + BuildMI(MBB, DL, TII->get(CompareOpcode)) + .addReg(RotatedOldVal).addReg(Src2); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); MBB->addSuccessor(UpdateMBB); MBB->addSuccessor(UseAltMBB); @@ -1937,7 +2081,8 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); MBB->addSuccessor(DoneMBB); @@ -1953,7 +2098,6 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, const SystemZInstrInfo *TII = TM.getInstrInfo(); MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); // Extract the operands. Base can be a register or a frame index. unsigned Dest = MI->getOperand(0).getReg(); @@ -2009,7 +2153,8 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, // ^^ Replace the upper 32-BitSize bits of the // comparison value with those that we loaded, // so that we can use a full word comparison. 
- // CRJNE %Dest, %RetryCmpVal, DoneMBB + // CR %Dest, %RetryCmpVal + // JNE DoneMBB // # Fall through to SetMBB MBB = LoopMBB; BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) @@ -2025,9 +2170,11 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, .addReg(OldVal).addReg(BitShift).addImm(BitSize); BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); - BuildMI(MBB, DL, TII->get(SystemZ::CRJ)) - .addReg(Dest).addReg(RetryCmpVal) - .addImm(MaskNE).addMBB(DoneMBB); + BuildMI(MBB, DL, TII->get(SystemZ::CR)) + .addReg(Dest).addReg(RetryCmpVal); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ICMP) + .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); MBB->addSuccessor(DoneMBB); MBB->addSuccessor(SetMBB); @@ -2047,7 +2194,8 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); MBB->addSuccessor(DoneMBB); @@ -2090,6 +2238,26 @@ SystemZTargetLowering::emitExt128(MachineInstr *MI, return MBB; } +MachineBasicBlock * +SystemZTargetLowering::emitMVCWrapper(MachineInstr *MI, + MachineBasicBlock *MBB) const { + const SystemZInstrInfo *TII = TM.getInstrInfo(); + DebugLoc DL = MI->getDebugLoc(); + + MachineOperand DestBase = MI->getOperand(0); + uint64_t DestDisp = MI->getOperand(1).getImm(); + MachineOperand SrcBase = MI->getOperand(2); + uint64_t SrcDisp = MI->getOperand(3).getImm(); + uint64_t Length = MI->getOperand(4).getImm(); + + BuildMI(*MBB, MI, DL, TII->get(SystemZ::MVC)) + .addOperand(DestBase).addImm(DestDisp).addImm(Length) + .addOperand(SrcBase).addImm(SrcDisp); + + MI->eraseFromParent(); + return MBB; +} + MachineBasicBlock *SystemZTargetLowering:: EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { switch (MI->getOpcode()) { @@ -2100,6 +2268,43 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { case SystemZ::SelectF128: return emitSelect(MI, MBB); + case SystemZ::CondStore8_32: + return emitCondStore(MI, MBB, SystemZ::STC32, 0, false); + case SystemZ::CondStore8_32Inv: + return emitCondStore(MI, MBB, SystemZ::STC32, 0, true); + case SystemZ::CondStore16_32: + return emitCondStore(MI, MBB, SystemZ::STH32, 0, false); + case SystemZ::CondStore16_32Inv: + return emitCondStore(MI, MBB, SystemZ::STH32, 0, true); + case SystemZ::CondStore32_32: + return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, false); + case SystemZ::CondStore32_32Inv: + return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, true); + case SystemZ::CondStore8: + return emitCondStore(MI, MBB, SystemZ::STC, 0, false); + case SystemZ::CondStore8Inv: + return emitCondStore(MI, MBB, SystemZ::STC, 0, true); + case SystemZ::CondStore16: + return emitCondStore(MI, MBB, SystemZ::STH, 0, false); + case SystemZ::CondStore16Inv: + return emitCondStore(MI, MBB, SystemZ::STH, 0, true); + case SystemZ::CondStore32: + return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); + case SystemZ::CondStore32Inv: + return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); + case SystemZ::CondStore64: + return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, 
false); + case SystemZ::CondStore64Inv: + return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); + case SystemZ::CondStoreF32: + return emitCondStore(MI, MBB, SystemZ::STE, 0, false); + case SystemZ::CondStoreF32Inv: + return emitCondStore(MI, MBB, SystemZ::STE, 0, true); + case SystemZ::CondStoreF64: + return emitCondStore(MI, MBB, SystemZ::STD, 0, false); + case SystemZ::CondStoreF64Inv: + return emitCondStore(MI, MBB, SystemZ::STD, 0, true); + case SystemZ::AEXT128_64: return emitExt128(MI, MBB, false, SystemZ::subreg_low); case SystemZ::ZEXT128_32: @@ -2276,16 +2481,8 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { case SystemZ::ATOMIC_CMP_SWAPW: return emitAtomicCmpSwapW(MI, MBB); - case SystemZ::BRC: - // The original DAG glues comparisons to their uses, both to ensure - // that no CC-clobbering instructions are inserted between them, and - // to ensure that comparison results are not reused. This means that - // a BRC is the sole user of a preceding comparison and that we can - // try to use a fused compare and branch instead. - if (convertPrevCompareToBranch(MBB, MI, MI->getOperand(0).getImm(), - MI->getOperand(1).getMBB())) - MI->eraseFromParent(); - return MBB; + case SystemZ::MVCWrapper: + return emitMVCWrapper(MI, MBB); default: llvm_unreachable("Unexpected instr type to insert"); } diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h index f17e9e4..c0dbe49 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.h +++ b/lib/Target/SystemZ/SystemZISelLowering.h @@ -68,10 +68,18 @@ namespace SystemZISD { // first input operands are GR128s. The trailing numbers are the // widths of the second operand in bits. UMUL_LOHI64, + SDIVREM32, SDIVREM64, UDIVREM32, UDIVREM64, + // Use MVC to copy bytes from one memory location to another. + // The first operand is the target address, the second operand is the + // source address, and the third operand is the constant length. + // This isn't a memory opcode because we'd need to attach two + // MachineMemOperands rather than one. + MVC, + // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or // ATOMIC_LOAD_<op>. // @@ -118,18 +126,19 @@ public: virtual MVT getScalarShiftAmountTy(EVT LHSTy) const LLVM_OVERRIDE { return MVT::i32; } - virtual EVT getSetCCResultType(LLVMContext &, EVT) const { + virtual EVT getSetCCResultType(LLVMContext &, EVT) const LLVM_OVERRIDE { return MVT::i32; } - virtual bool isFMAFasterThanMulAndAdd(EVT) const LLVM_OVERRIDE { - return true; - } - virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; - virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const; + virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const LLVM_OVERRIDE; + virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const LLVM_OVERRIDE; + virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const + LLVM_OVERRIDE; + virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const + LLVM_OVERRIDE; virtual const char *getTargetNodeName(unsigned Opcode) const LLVM_OVERRIDE; virtual std::pair<unsigned, const TargetRegisterClass *> getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const LLVM_OVERRIDE; + MVT VT) const LLVM_OVERRIDE; virtual TargetLowering::ConstraintType getConstraintType(const std::string &Constraint) const LLVM_OVERRIDE; virtual TargetLowering::ConstraintWeight @@ -203,6 +212,10 @@ private: // Implement EmitInstrWithCustomInserter for individual operation types. 
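A note on the SystemZISD::MVC node documented in the header hunk above: its value semantics are simply a constant-length byte copy. In C terms (a sketch; as the comment explains, the real node also threads a chain and would need two MachineMemOperands to be a true memory opcode):

    #include <cstring>

    // Dest/Src correspond to the node's two address operands; Length is the
    // constant third operand, known at compile time.
    void mvcSemantics(void *Dest, const void *Src, unsigned Length) {
      std::memcpy(Dest, Src, Length);
    }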
MachineBasicBlock *emitSelect(MachineInstr *MI, MachineBasicBlock *BB) const; + MachineBasicBlock *emitCondStore(MachineInstr *MI, + MachineBasicBlock *BB, + unsigned StoreOpcode, unsigned STOCOpcode, + bool Invert) const; MachineBasicBlock *emitExt128(MachineInstr *MI, MachineBasicBlock *MBB, bool ClearEven, unsigned SubReg) const; @@ -217,6 +230,8 @@ private: unsigned BitSize) const; MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr *MI, MachineBasicBlock *BB) const; + MachineBasicBlock *emitMVCWrapper(MachineInstr *MI, + MachineBasicBlock *BB) const; }; } // end namespace llvm diff --git a/lib/Target/SystemZ/SystemZInstrFP.td b/lib/Target/SystemZ/SystemZInstrFP.td index 86ef14c..b903b51 100644 --- a/lib/Target/SystemZ/SystemZInstrFP.td +++ b/lib/Target/SystemZ/SystemZInstrFP.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// -// Control-flow instructions +// Select instructions //===----------------------------------------------------------------------===// // C's ?: operator for floating-point operands. @@ -16,32 +16,48 @@ def SelectF32 : SelectWrapper<FP32>; def SelectF64 : SelectWrapper<FP64>; def SelectF128 : SelectWrapper<FP128>; +defm CondStoreF32 : CondStores<FP32, nonvolatile_store, + nonvolatile_load, bdxaddr20only>; +defm CondStoreF64 : CondStores<FP64, nonvolatile_store, + nonvolatile_load, bdxaddr20only>; + //===----------------------------------------------------------------------===// // Move instructions //===----------------------------------------------------------------------===// // Load zero. let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { - def LZER : InherentRRE<"lzer", 0xB374, FP32, (fpimm0)>; - def LZDR : InherentRRE<"lzdr", 0xB375, FP64, (fpimm0)>; - def LZXR : InherentRRE<"lzxr", 0xB376, FP128, (fpimm0)>; + def LZER : InherentRRE<"lze", 0xB374, FP32, (fpimm0)>; + def LZDR : InherentRRE<"lzd", 0xB375, FP64, (fpimm0)>; + def LZXR : InherentRRE<"lzx", 0xB376, FP128, (fpimm0)>; } // Moves between two floating-point registers. let neverHasSideEffects = 1 in { - def LER : UnaryRR <"ler", 0x38, null_frag, FP32, FP32>; - def LDR : UnaryRR <"ldr", 0x28, null_frag, FP64, FP64>; - def LXR : UnaryRRE<"lxr", 0xB365, null_frag, FP128, FP128>; + def LER : UnaryRR <"le", 0x38, null_frag, FP32, FP32>; + def LDR : UnaryRR <"ld", 0x28, null_frag, FP64, FP64>; + def LXR : UnaryRRE<"lx", 0xB365, null_frag, FP128, FP128>; +} + +// Moves between two floating-point registers that also set the condition +// codes. +let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in { + defm LTEBR : LoadAndTestRRE<"lteb", 0xB302, FP32>; + defm LTDBR : LoadAndTestRRE<"ltdb", 0xB312, FP64>; + defm LTXBR : LoadAndTestRRE<"ltxb", 0xB342, FP128>; } +def : CompareZeroFP<LTEBRCompare, FP32>; +def : CompareZeroFP<LTDBRCompare, FP64>; +def : CompareZeroFP<LTXBRCompare, FP128>; // Moves between 64-bit integer and floating-point registers. -def LGDR : UnaryRRE<"lgdr", 0xB3CD, bitconvert, GR64, FP64>; -def LDGR : UnaryRRE<"ldgr", 0xB3C1, bitconvert, FP64, GR64>; +def LGDR : UnaryRRE<"lgd", 0xB3CD, bitconvert, GR64, FP64>; +def LDGR : UnaryRRE<"ldg", 0xB3C1, bitconvert, FP64, GR64>; // fcopysign with an FP32 result. 
let isCodeGenOnly = 1 in { - def CPSDRss : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP32, FP32>; - def CPSDRsd : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP32, FP64>; + def CPSDRss : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP32>; + def CPSDRsd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP64>; } // The sign of an FP128 is in the high register. @@ -50,8 +66,8 @@ def : Pat<(fcopysign FP32:$src1, FP128:$src2), // fcopysign with an FP64 result. let isCodeGenOnly = 1 in - def CPSDRds : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP64, FP32>; -def CPSDRdd : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP64, FP64>; + def CPSDRds : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP32>; +def CPSDRdd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP64>; // The sign of an FP128 is in the high register. def : Pat<(fcopysign FP64:$src1, FP128:$src2), @@ -70,13 +86,17 @@ def : CopySign128<FP64, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_high), def : CopySign128<FP128, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_high), (EXTRACT_SUBREG FP128:$src2, subreg_high))>; +defm LoadStoreF32 : MVCLoadStore<load, store, f32, MVCWrapper, 4>; +defm LoadStoreF64 : MVCLoadStore<load, store, f64, MVCWrapper, 8>; +defm LoadStoreF128 : MVCLoadStore<load, store, f128, MVCWrapper, 16>; + //===----------------------------------------------------------------------===// // Load instructions //===----------------------------------------------------------------------===// let canFoldAsLoad = 1, SimpleBDXLoad = 1 in { - defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32>; - defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64>; + defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32, 4>; + defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64, 8>; // These instructions are split after register allocation, so we don't // want a custom inserter. @@ -91,8 +111,8 @@ let canFoldAsLoad = 1, SimpleBDXLoad = 1 in { //===----------------------------------------------------------------------===// let SimpleBDXStore = 1 in { - defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32>; - defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64>; + defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32, 4>; + defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64, 8>; // These instructions are split after register allocation, so we don't // want a custom inserter. @@ -109,9 +129,9 @@ let SimpleBDXStore = 1 in { // Convert floating-point values to narrower representations, rounding // according to the current mode. The destination of LEXBR and LDXBR // is a 128-bit value, but only the first register of the pair is used. -def LEDBR : UnaryRRE<"ledbr", 0xB344, fround, FP32, FP64>; -def LEXBR : UnaryRRE<"lexbr", 0xB346, null_frag, FP128, FP128>; -def LDXBR : UnaryRRE<"ldxbr", 0xB345, null_frag, FP128, FP128>; +def LEDBR : UnaryRRE<"ledb", 0xB344, fround, FP32, FP64>; +def LEXBR : UnaryRRE<"lexb", 0xB346, null_frag, FP128, FP128>; +def LDXBR : UnaryRRE<"ldxb", 0xB345, null_frag, FP128, FP128>; def : Pat<(f32 (fround FP128:$src)), (EXTRACT_SUBREG (LEXBR FP128:$src), subreg_32bit)>; @@ -119,36 +139,34 @@ def : Pat<(f64 (fround FP128:$src)), (EXTRACT_SUBREG (LDXBR FP128:$src), subreg_high)>; // Extend register floating-point values to wider representations. 
-def LDEBR : UnaryRRE<"ldebr", 0xB304, fextend, FP64, FP32>; -def LXEBR : UnaryRRE<"lxebr", 0xB306, fextend, FP128, FP32>; -def LXDBR : UnaryRRE<"lxdbr", 0xB305, fextend, FP128, FP64>; +def LDEBR : UnaryRRE<"ldeb", 0xB304, fextend, FP64, FP32>; +def LXEBR : UnaryRRE<"lxeb", 0xB306, fextend, FP128, FP32>; +def LXDBR : UnaryRRE<"lxdb", 0xB305, fextend, FP128, FP64>; // Extend memory floating-point values to wider representations. -def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64>; -def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128>; -def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128>; +def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64, 4>; +def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128, 4>; +def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128, 8>; // Convert a signed integer register value to a floating-point one. -let Defs = [CC] in { - def CEFBR : UnaryRRE<"cefbr", 0xB394, sint_to_fp, FP32, GR32>; - def CDFBR : UnaryRRE<"cdfbr", 0xB395, sint_to_fp, FP64, GR32>; - def CXFBR : UnaryRRE<"cxfbr", 0xB396, sint_to_fp, FP128, GR32>; +def CEFBR : UnaryRRE<"cefb", 0xB394, sint_to_fp, FP32, GR32>; +def CDFBR : UnaryRRE<"cdfb", 0xB395, sint_to_fp, FP64, GR32>; +def CXFBR : UnaryRRE<"cxfb", 0xB396, sint_to_fp, FP128, GR32>; - def CEGBR : UnaryRRE<"cegbr", 0xB3A4, sint_to_fp, FP32, GR64>; - def CDGBR : UnaryRRE<"cdgbr", 0xB3A5, sint_to_fp, FP64, GR64>; - def CXGBR : UnaryRRE<"cxgbr", 0xB3A6, sint_to_fp, FP128, GR64>; -} +def CEGBR : UnaryRRE<"cegb", 0xB3A4, sint_to_fp, FP32, GR64>; +def CDGBR : UnaryRRE<"cdgb", 0xB3A5, sint_to_fp, FP64, GR64>; +def CXGBR : UnaryRRE<"cxgb", 0xB3A6, sint_to_fp, FP128, GR64>; // Convert a floating-point register value to a signed integer value, // with the second operand (modifier M3) specifying the rounding mode. let Defs = [CC] in { - def CFEBR : UnaryRRF<"cfebr", 0xB398, GR32, FP32>; - def CFDBR : UnaryRRF<"cfdbr", 0xB399, GR32, FP64>; - def CFXBR : UnaryRRF<"cfxbr", 0xB39A, GR32, FP128>; + def CFEBR : UnaryRRF<"cfeb", 0xB398, GR32, FP32>; + def CFDBR : UnaryRRF<"cfdb", 0xB399, GR32, FP64>; + def CFXBR : UnaryRRF<"cfxb", 0xB39A, GR32, FP128>; - def CGEBR : UnaryRRF<"cgebr", 0xB3A8, GR64, FP32>; - def CGDBR : UnaryRRF<"cgdbr", 0xB3A9, GR64, FP64>; - def CGXBR : UnaryRRF<"cgxbr", 0xB3AA, GR64, FP128>; + def CGEBR : UnaryRRF<"cgeb", 0xB3A8, GR64, FP32>; + def CGDBR : UnaryRRF<"cgdb", 0xB3A9, GR64, FP64>; + def CGXBR : UnaryRRF<"cgxb", 0xB3AA, GR64, FP128>; } // fp_to_sint always rounds towards zero, which is modifier value 5. @@ -165,33 +183,33 @@ def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>; //===----------------------------------------------------------------------===// // Negation (Load Complement). -let Defs = [CC] in { - def LCEBR : UnaryRRE<"lcebr", 0xB303, fneg, FP32, FP32>; - def LCDBR : UnaryRRE<"lcdbr", 0xB313, fneg, FP64, FP64>; - def LCXBR : UnaryRRE<"lcxbr", 0xB343, fneg, FP128, FP128>; +let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in { + def LCEBR : UnaryRRE<"lceb", 0xB303, fneg, FP32, FP32>; + def LCDBR : UnaryRRE<"lcdb", 0xB313, fneg, FP64, FP64>; + def LCXBR : UnaryRRE<"lcxb", 0xB343, fneg, FP128, FP128>; } // Absolute value (Load Positive). 
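The fp_to_sint patterns just above hard-code modifier value 5 ("round toward 0") rather than modifier 0 ("current mode") because the ISD node is defined to truncate, matching C's conversion semantics. An illustrative scalar equivalent:

    #include <cmath>

    long long fpToSint(double X) {
      return static_cast<long long>(X);               // truncates
    }
    long long fpToSintExplicit(double X) {
      return static_cast<long long>(std::trunc(X));   // same result
    }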
-let Defs = [CC] in {
-  def LPEBR : UnaryRRE<"lpebr", 0xB300, fabs, FP32,  FP32>;
-  def LPDBR : UnaryRRE<"lpdbr", 0xB310, fabs, FP64,  FP64>;
-  def LPXBR : UnaryRRE<"lpxbr", 0xB340, fabs, FP128, FP128>;
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+  def LPEBR : UnaryRRE<"lpeb", 0xB300, fabs, FP32,  FP32>;
+  def LPDBR : UnaryRRE<"lpdb", 0xB310, fabs, FP64,  FP64>;
+  def LPXBR : UnaryRRE<"lpxb", 0xB340, fabs, FP128, FP128>;
 }

 // Negative absolute value (Load Negative).
-let Defs = [CC] in {
-  def LNEBR : UnaryRRE<"lnebr", 0xB301, fnabs, FP32,  FP32>;
-  def LNDBR : UnaryRRE<"lndbr", 0xB311, fnabs, FP64,  FP64>;
-  def LNXBR : UnaryRRE<"lnxbr", 0xB341, fnabs, FP128, FP128>;
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+  def LNEBR : UnaryRRE<"lneb", 0xB301, fnabs, FP32,  FP32>;
+  def LNDBR : UnaryRRE<"lndb", 0xB311, fnabs, FP64,  FP64>;
+  def LNXBR : UnaryRRE<"lnxb", 0xB341, fnabs, FP128, FP128>;
 }

 // Square root.
-def SQEBR : UnaryRRE<"sqebr", 0xB314, fsqrt, FP32,  FP32>;
-def SQDBR : UnaryRRE<"sqdbr", 0xB315, fsqrt, FP64,  FP64>;
-def SQXBR : UnaryRRE<"sqxbr", 0xB316, fsqrt, FP128, FP128>;
+def SQEBR : UnaryRRE<"sqeb", 0xB314, fsqrt, FP32,  FP32>;
+def SQDBR : UnaryRRE<"sqdb", 0xB315, fsqrt, FP64,  FP64>;
+def SQXBR : UnaryRRE<"sqxb", 0xB316, fsqrt, FP128, FP128>;

-def SQEB : UnaryRXE<"sqeb", 0xED14, loadu<fsqrt>, FP32>;
-def SQDB : UnaryRXE<"sqdb", 0xED15, loadu<fsqrt>, FP64>;
+def SQEB : UnaryRXE<"sqeb", 0xED14, loadu<fsqrt>, FP32, 4>;
+def SQDB : UnaryRXE<"sqdb", 0xED15, loadu<fsqrt>, FP64, 8>;

 // Round to an integer, with the second operand (modifier M3) specifying
 // the rounding mode.
@@ -199,11 +217,9 @@ def SQDB : UnaryRXE<"sqdb", 0xED15, loadu<fsqrt>, FP64>;
 // These forms always check for inexact conditions.  z196 added versions
 // that allow this to be suppressed (as for fnearbyint), but we don't yet
 // support -march=z196.
-let Defs = [CC] in {
-  def FIEBR : UnaryRRF<"fiebr", 0xB357, FP32,  FP32>;
-  def FIDBR : UnaryRRF<"fidbr", 0xB35F, FP64,  FP64>;
-  def FIXBR : UnaryRRF<"fixbr", 0xB347, FP128, FP128>;
-}
+def FIEBR : UnaryRRF<"fieb", 0xB357, FP32,  FP32>;
+def FIDBR : UnaryRRF<"fidb", 0xB35F, FP64,  FP64>;
+def FIXBR : UnaryRRF<"fixb", 0xB347, FP128, FP128>;

 // frint rounds according to the current mode (modifier 0) and detects
 // inexact conditions.
@@ -216,94 +232,94 @@ def : Pat<(frint FP128:$src), (FIXBR 0, FP128:$src)>;
 //===----------------------------------------------------------------------===//

 // Addition.
-let Defs = [CC] in {
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
   let isCommutable = 1 in {
-    def AEBR : BinaryRRE<"aebr", 0xB30A, fadd, FP32,  FP32>;
-    def ADBR : BinaryRRE<"adbr", 0xB31A, fadd, FP64,  FP64>;
-    def AXBR : BinaryRRE<"axbr", 0xB34A, fadd, FP128, FP128>;
+    def AEBR : BinaryRRE<"aeb", 0xB30A, fadd, FP32,  FP32>;
+    def ADBR : BinaryRRE<"adb", 0xB31A, fadd, FP64,  FP64>;
+    def AXBR : BinaryRRE<"axb", 0xB34A, fadd, FP128, FP128>;
   }
-  def AEB : BinaryRXE<"aeb", 0xED0A, fadd, FP32, load>;
-  def ADB : BinaryRXE<"adb", 0xED1A, fadd, FP64, load>;
+  def AEB : BinaryRXE<"aeb", 0xED0A, fadd, FP32, load, 4>;
+  def ADB : BinaryRXE<"adb", 0xED1A, fadd, FP64, load, 8>;
 }

 // Subtraction.
-let Defs = [CC] in { - def SEBR : BinaryRRE<"sebr", 0xB30B, fsub, FP32, FP32>; - def SDBR : BinaryRRE<"sdbr", 0xB31B, fsub, FP64, FP64>; - def SXBR : BinaryRRE<"sxbr", 0xB34B, fsub, FP128, FP128>; +let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in { + def SEBR : BinaryRRE<"seb", 0xB30B, fsub, FP32, FP32>; + def SDBR : BinaryRRE<"sdb", 0xB31B, fsub, FP64, FP64>; + def SXBR : BinaryRRE<"sxb", 0xB34B, fsub, FP128, FP128>; - def SEB : BinaryRXE<"seb", 0xED0B, fsub, FP32, load>; - def SDB : BinaryRXE<"sdb", 0xED1B, fsub, FP64, load>; + def SEB : BinaryRXE<"seb", 0xED0B, fsub, FP32, load, 4>; + def SDB : BinaryRXE<"sdb", 0xED1B, fsub, FP64, load, 8>; } // Multiplication. let isCommutable = 1 in { - def MEEBR : BinaryRRE<"meebr", 0xB317, fmul, FP32, FP32>; - def MDBR : BinaryRRE<"mdbr", 0xB31C, fmul, FP64, FP64>; - def MXBR : BinaryRRE<"mxbr", 0xB34C, fmul, FP128, FP128>; + def MEEBR : BinaryRRE<"meeb", 0xB317, fmul, FP32, FP32>; + def MDBR : BinaryRRE<"mdb", 0xB31C, fmul, FP64, FP64>; + def MXBR : BinaryRRE<"mxb", 0xB34C, fmul, FP128, FP128>; } -def MEEB : BinaryRXE<"meeb", 0xED17, fmul, FP32, load>; -def MDB : BinaryRXE<"mdb", 0xED1C, fmul, FP64, load>; +def MEEB : BinaryRXE<"meeb", 0xED17, fmul, FP32, load, 4>; +def MDB : BinaryRXE<"mdb", 0xED1C, fmul, FP64, load, 8>; // f64 multiplication of two FP32 registers. -def MDEBR : BinaryRRE<"mdebr", 0xB30C, null_frag, FP64, FP32>; +def MDEBR : BinaryRRE<"mdeb", 0xB30C, null_frag, FP64, FP32>; def : Pat<(fmul (f64 (fextend FP32:$src1)), (f64 (fextend FP32:$src2))), (MDEBR (INSERT_SUBREG (f64 (IMPLICIT_DEF)), FP32:$src1, subreg_32bit), FP32:$src2)>; // f64 multiplication of an FP32 register and an f32 memory. -def MDEB : BinaryRXE<"mdeb", 0xED0C, null_frag, FP64, load>; +def MDEB : BinaryRXE<"mdeb", 0xED0C, null_frag, FP64, load, 4>; def : Pat<(fmul (f64 (fextend FP32:$src1)), (f64 (extloadf32 bdxaddr12only:$addr))), (MDEB (INSERT_SUBREG (f64 (IMPLICIT_DEF)), FP32:$src1, subreg_32bit), bdxaddr12only:$addr)>; // f128 multiplication of two FP64 registers. -def MXDBR : BinaryRRE<"mxdbr", 0xB307, null_frag, FP128, FP64>; +def MXDBR : BinaryRRE<"mxdb", 0xB307, null_frag, FP128, FP64>; def : Pat<(fmul (f128 (fextend FP64:$src1)), (f128 (fextend FP64:$src2))), (MXDBR (INSERT_SUBREG (f128 (IMPLICIT_DEF)), FP64:$src1, subreg_high), FP64:$src2)>; // f128 multiplication of an FP64 register and an f64 memory. -def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load>; +def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load, 8>; def : Pat<(fmul (f128 (fextend FP64:$src1)), (f128 (extloadf64 bdxaddr12only:$addr))), (MXDB (INSERT_SUBREG (f128 (IMPLICIT_DEF)), FP64:$src1, subreg_high), bdxaddr12only:$addr)>; // Fused multiply-add. -def MAEBR : TernaryRRD<"maebr", 0xB30E, z_fma, FP32>; -def MADBR : TernaryRRD<"madbr", 0xB31E, z_fma, FP64>; +def MAEBR : TernaryRRD<"maeb", 0xB30E, z_fma, FP32>; +def MADBR : TernaryRRD<"madb", 0xB31E, z_fma, FP64>; -def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load>; -def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load>; +def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load, 4>; +def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load, 8>; // Fused multiply-subtract. 
-def MSEBR : TernaryRRD<"msebr", 0xB30F, z_fms, FP32>; -def MSDBR : TernaryRRD<"msdbr", 0xB31F, z_fms, FP64>; +def MSEBR : TernaryRRD<"mseb", 0xB30F, z_fms, FP32>; +def MSDBR : TernaryRRD<"msdb", 0xB31F, z_fms, FP64>; -def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load>; -def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load>; +def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load, 4>; +def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load, 8>; // Division. -def DEBR : BinaryRRE<"debr", 0xB30D, fdiv, FP32, FP32>; -def DDBR : BinaryRRE<"ddbr", 0xB31D, fdiv, FP64, FP64>; -def DXBR : BinaryRRE<"dxbr", 0xB34D, fdiv, FP128, FP128>; +def DEBR : BinaryRRE<"deb", 0xB30D, fdiv, FP32, FP32>; +def DDBR : BinaryRRE<"ddb", 0xB31D, fdiv, FP64, FP64>; +def DXBR : BinaryRRE<"dxb", 0xB34D, fdiv, FP128, FP128>; -def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load>; -def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load>; +def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load, 4>; +def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load, 8>; //===----------------------------------------------------------------------===// // Comparisons //===----------------------------------------------------------------------===// -let Defs = [CC] in { - def CEBR : CompareRRE<"cebr", 0xB309, z_cmp, FP32, FP32>; - def CDBR : CompareRRE<"cdbr", 0xB319, z_cmp, FP64, FP64>; - def CXBR : CompareRRE<"cxbr", 0xB349, z_cmp, FP128, FP128>; +let Defs = [CC], CCValues = 0xF in { + def CEBR : CompareRRE<"ceb", 0xB309, z_cmp, FP32, FP32>; + def CDBR : CompareRRE<"cdb", 0xB319, z_cmp, FP64, FP64>; + def CXBR : CompareRRE<"cxb", 0xB349, z_cmp, FP128, FP128>; - def CEB : CompareRXE<"ceb", 0xED09, z_cmp, FP32, load>; - def CDB : CompareRXE<"cdb", 0xED19, z_cmp, FP64, load>; + def CEB : CompareRXE<"ceb", 0xED09, z_cmp, FP32, load, 4>; + def CDB : CompareRXE<"cdb", 0xED19, z_cmp, FP64, load, 8>; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td index ad050fd..954df11 100644 --- a/lib/Target/SystemZ/SystemZInstrFormats.td +++ b/lib/Target/SystemZ/SystemZInstrFormats.td @@ -21,12 +21,24 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr, let Pattern = pattern; let AsmString = asmstr; - // Used to identify a group of related instructions, such as ST and STY. - string Function = ""; - - // "12" for an instruction that has a ...Y equivalent, "20" for that - // ...Y equivalent. - string PairType = "none"; + // Some instructions come in pairs, one having a 12-bit displacement + // and the other having a 20-bit displacement. Both instructions in + // the pair have the same DispKey and their DispSizes are "12" and "20" + // respectively. + string DispKey = ""; + string DispSize = "none"; + + // Many register-based <INSN>R instructions have a memory-based <INSN> + // counterpart. OpKey uniquely identifies <INSN>, while OpType is + // "reg" for <INSN>R and "mem" for <INSN>. + string OpKey = ""; + string OpType = "none"; + + // Many distinct-operands instructions have older 2-operand equivalents. + // NumOpsKey uniquely identifies one of these 2-operand and 3-operand pairs, + // with NumOpsValue being "2" or "3" as appropriate. + string NumOpsKey = ""; + string NumOpsValue = "none"; // True if this instruction is a simple D(X,B) load of a register // (with no sign or zero extension). @@ -46,11 +58,40 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr, // operations. 
bit Is128Bit = 0; - let TSFlags{0} = SimpleBDXLoad; - let TSFlags{1} = SimpleBDXStore; - let TSFlags{2} = Has20BitOffset; - let TSFlags{3} = HasIndex; - let TSFlags{4} = Is128Bit; + // The access size of all memory operands in bytes, or 0 if not known. + bits<5> AccessBytes = 0; + + // If the instruction sets CC to a useful value, this gives the mask + // of all possible CC results. The mask has the same form as + // SystemZ::CCMASK_*. + bits<4> CCValues = 0; + + // The subset of CCValues that have the same meaning as they would after + // a comparison of the first operand against zero. + bits<4> CompareZeroCCMask = 0; + + // True if the instruction is conditional and if the CC mask operand + // comes first (as for BRC, etc.). + bit CCMaskFirst = 0; + + // Similar, but true if the CC mask operand comes last (as for LOC, etc.). + bit CCMaskLast = 0; + + // True if the instruction is the "logical" rather than "arithmetic" form, + // in cases where a distinction exists. + bit IsLogical = 0; + + let TSFlags{0} = SimpleBDXLoad; + let TSFlags{1} = SimpleBDXStore; + let TSFlags{2} = Has20BitOffset; + let TSFlags{3} = HasIndex; + let TSFlags{4} = Is128Bit; + let TSFlags{9-5} = AccessBytes; + let TSFlags{13-10} = CCValues; + let TSFlags{17-14} = CompareZeroCCMask; + let TSFlags{18} = CCMaskFirst; + let TSFlags{19} = CCMaskLast; + let TSFlags{20} = IsLogical; } //===----------------------------------------------------------------------===// @@ -61,8 +102,8 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr, // displacement. def getDisp12Opcode : InstrMapping { let FilterClass = "InstSystemZ"; - let RowFields = ["Function"]; - let ColFields = ["PairType"]; + let RowFields = ["DispKey"]; + let ColFields = ["DispSize"]; let KeyCol = ["20"]; let ValueCols = [["12"]]; } @@ -70,12 +111,30 @@ def getDisp12Opcode : InstrMapping { // Return the version of an instruction that has a signed 20-bit displacement. def getDisp20Opcode : InstrMapping { let FilterClass = "InstSystemZ"; - let RowFields = ["Function"]; - let ColFields = ["PairType"]; + let RowFields = ["DispKey"]; + let ColFields = ["DispSize"]; let KeyCol = ["12"]; let ValueCols = [["20"]]; } +// Return the memory form of a register instruction. +def getMemOpcode : InstrMapping { + let FilterClass = "InstSystemZ"; + let RowFields = ["OpKey"]; + let ColFields = ["OpType"]; + let KeyCol = ["reg"]; + let ValueCols = [["mem"]]; +} + +// Return the 3-operand form of a 2-operand instruction. 
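TableGen expands each InstrMapping into a generated lookup table, so the mapping defined just below is conceptually nothing more than "same NumOpsKey, move NumOpsValue from "2" to "3"". A hand-rolled sketch with invented opcode values (AR/ARK stand in for a 2-operand instruction and its distinct-operands twin):

    #include <unordered_map>

    enum { OP_AR = 1, OP_ARK = 2 };  // pretend opcode numbers

    // Rows share a NumOpsKey; the column lookup switches "2" -> "3".
    int getThreeOperandForm(int TwoOpOpcode) {
      static const std::unordered_map<int, int> Pairs = {{OP_AR, OP_ARK}};
      auto It = Pairs.find(TwoOpOpcode);
      return It == Pairs.end() ? -1 : It->second;
    }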
+def getThreeOperandOpcode : InstrMapping {
+  let FilterClass = "InstSystemZ";
+  let RowFields = ["NumOpsKey"];
+  let ColFields = ["NumOpsValue"];
+  let KeyCol = ["2"];
+  let ValueCols = [["3"]];
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction formats
 //===----------------------------------------------------------------------===//
@@ -147,6 +206,23 @@ class InstRIEc<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
   let Inst{7-0} = op{7-0};
 }

+class InstRIEd<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+  : InstSystemZ<6, outs, ins, asmstr, pattern> {
+  field bits<48> Inst;
+  field bits<48> SoftFail = 0;
+
+  bits<4> R1;
+  bits<4> R3;
+  bits<16> I2;
+
+  let Inst{47-40} = op{15-8};
+  let Inst{39-36} = R1;
+  let Inst{35-32} = R3;
+  let Inst{31-16} = I2;
+  let Inst{15-8} = 0;
+  let Inst{7-0} = op{7-0};
+}
+
 class InstRIEf<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
   : InstSystemZ<6, outs, ins, asmstr, pattern> {
   field bits<48> Inst;
@@ -383,17 +459,38 @@ class InstSIY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
   let Has20BitOffset = 1;
 }

+class InstSS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+  : InstSystemZ<6, outs, ins, asmstr, pattern> {
+  field bits<48> Inst;
+  field bits<48> SoftFail = 0;
+
+  bits<24> BDL1;
+  bits<16> BD2;
+
+  let Inst{47-40} = op;
+  let Inst{39-16} = BDL1;
+  let Inst{15-0} = BD2;
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction definitions with semantics
 //===----------------------------------------------------------------------===//
 //
-// These classes have the form <Category><Format>, where <Format> is one
+// These classes have the form [Cond]<Category><Format>, where <Format> is one
 // of the formats defined above and where <Category> describes the inputs
-// and outputs.  <Category> can be one of:
+// and outputs.  "Cond" is used if the instruction is conditional,
+// in which case the 4-bit condition-code mask is added as a final operand.
+// <Category> can be one of:
 //
 //   Inherent:
 //     One register output operand and no input operands.
 //
+//   BranchUnary:
+//     One register output operand, one register input operand and
+//     one branch displacement.  The instruction stores a modified
+//     form of the source register in the destination register and
+//     branches on the result.
+//
 //   Store:
 //     One register or immediate input operand and one address input operand.
 //     The instruction stores the first operand to the address.
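Looping back to the TSFlags layout earlier in this file: packing each new per-instruction property into a fixed bit range lets C++ code recover it from MCInstrDesc::TSFlags with a plain shift and mask. A minimal sketch of the round trip (field positions copied from the record; the helper names are invented):

    #include <cstdint>

    constexpr unsigned AccessBytesShift = 5;    // TSFlags{9-5}
    constexpr unsigned CCValuesShift = 10;      // TSFlags{13-10}
    constexpr unsigned CompareZeroShift = 14;   // TSFlags{17-14}

    uint32_t packTSFlags(uint32_t AccessBytes, uint32_t CCValues,
                         uint32_t CompareZeroCCMask) {
      return ((AccessBytes & 0x1Fu) << AccessBytesShift) |
             ((CCValues & 0xFu) << CCValuesShift) |
             ((CompareZeroCCMask & 0xFu) << CompareZeroShift);
    }

    uint32_t getAccessBytes(uint32_t TSFlags) {
      return (TSFlags >> AccessBytesShift) & 0x1Fu;
    }
    uint32_t getCCValues(uint32_t TSFlags) {
      return (TSFlags >> CCValuesShift) & 0xFu;
    }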
@@ -455,11 +552,20 @@ class InstSIY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> class InherentRRE<string mnemonic, bits<16> opcode, RegisterOperand cls, dag src> : InstRRE<opcode, (outs cls:$R1), (ins), - mnemonic#"\t$R1", + mnemonic#"r\t$R1", [(set cls:$R1, src)]> { let R2 = 0; } +class BranchUnaryRI<string mnemonic, bits<12> opcode, RegisterOperand cls> + : InstRI<opcode, (outs cls:$R1), (ins cls:$R1src, brtarget16:$I2), + mnemonic##"\t$R1, $I2", []> { + let isBranch = 1; + let isTerminator = 1; + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; +} + class LoadMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls> : InstRSY<opcode, (outs cls:$R1, cls:$R3), (ins bdaddr20only:$BD2), mnemonic#"\t$R1, $R3, $BD2", []> { @@ -479,28 +585,38 @@ class StoreRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator, } class StoreRX<string mnemonic, bits<8> opcode, SDPatternOperator operator, - RegisterOperand cls, AddressingMode mode = bdxaddr12only> + RegisterOperand cls, bits<5> bytes, + AddressingMode mode = bdxaddr12only> : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(operator cls:$R1, mode:$XBD2)]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let mayStore = 1; + let AccessBytes = bytes; } class StoreRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, AddressingMode mode = bdxaddr20only> + RegisterOperand cls, bits<5> bytes, + AddressingMode mode = bdxaddr20only> : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(operator cls:$R1, mode:$XBD2)]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let mayStore = 1; + let AccessBytes = bytes; } multiclass StoreRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode, - SDPatternOperator operator, RegisterOperand cls> { - let Function = mnemonic ## #cls in { - let PairType = "12" in - def "" : StoreRX<mnemonic, rxOpcode, operator, cls, bdxaddr12pair>; - let PairType = "20" in - def Y : StoreRXY<mnemonic#"y", rxyOpcode, operator, cls, bdxaddr20pair>; + SDPatternOperator operator, RegisterOperand cls, + bits<5> bytes> { + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in + def "" : StoreRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>; + let DispSize = "20" in + def Y : StoreRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes, + bdxaddr20pair>; } } @@ -536,30 +652,106 @@ class StoreSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator, multiclass StoreSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode, SDPatternOperator operator, Immediate imm> { - let Function = mnemonic in { - let PairType = "12" in + let DispKey = mnemonic in { + let DispSize = "12" in def "" : StoreSI<mnemonic, siOpcode, operator, imm, bdaddr12pair>; - let PairType = "20" in + let DispSize = "20" in def Y : StoreSIY<mnemonic#"y", siyOpcode, operator, imm, bdaddr20pair>; } } +class CondStoreRSY<string mnemonic, bits<16> opcode, + RegisterOperand cls, bits<5> bytes, + AddressingMode mode = bdaddr20only> + : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, cond4:$valid, cond4:$R3), + mnemonic#"$R3\t$R1, $BD2", []>, + Requires<[FeatureLoadStoreOnCond]> { + let mayStore = 1; + let AccessBytes = bytes; + let CCMaskLast = 1; +} + +// Like CondStoreRSY, but used for the raw assembly form. The condition-code +// mask is the third operand rather than being part of the mnemonic. 
+class AsmCondStoreRSY<string mnemonic, bits<16> opcode, + RegisterOperand cls, bits<5> bytes, + AddressingMode mode = bdaddr20only> + : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, uimm8zx4:$R3), + mnemonic#"\t$R1, $BD2, $R3", []>, + Requires<[FeatureLoadStoreOnCond]> { + let mayStore = 1; + let AccessBytes = bytes; +} + +// Like CondStoreRSY, but with a fixed CC mask. +class FixedCondStoreRSY<string mnemonic, bits<16> opcode, + RegisterOperand cls, bits<4> ccmask, bits<5> bytes, + AddressingMode mode = bdaddr20only> + : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2), + mnemonic#"\t$R1, $BD2", []>, + Requires<[FeatureLoadStoreOnCond]> { + let mayStore = 1; + let AccessBytes = bytes; + let R3 = ccmask; +} + class UnaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRR<opcode, (outs cls1:$R1), (ins cls2:$R2), - mnemonic#"\t$R1, $R2", - [(set cls1:$R1, (operator cls2:$R2))]>; + mnemonic#"r\t$R1, $R2", + [(set cls1:$R1, (operator cls2:$R2))]> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; +} class UnaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRRE<opcode, (outs cls1:$R1), (ins cls2:$R2), - mnemonic#"\t$R1, $R2", - [(set cls1:$R1, (operator cls2:$R2))]>; + mnemonic#"r\t$R1, $R2", + [(set cls1:$R1, (operator cls2:$R2))]> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; +} class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1, RegisterOperand cls2> : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2), - mnemonic#"\t$R1, $R3, $R2", []>; + mnemonic#"r\t$R1, $R3, $R2", []> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; +} + +// These instructions are generated by if conversion. The old value of R1 +// is added as an implicit use. +class CondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1, + RegisterOperand cls2> + : InstRRF<opcode, (outs cls1:$R1), (ins cls2:$R2, cond4:$valid, cond4:$R3), + mnemonic#"r$R3\t$R1, $R2", []>, + Requires<[FeatureLoadStoreOnCond]> { + let CCMaskLast = 1; +} + +// Like CondUnaryRRF, but used for the raw assembly form. The condition-code +// mask is the third operand rather than being part of the mnemonic. +class AsmCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1, + RegisterOperand cls2> + : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2, uimm8zx4:$R3), + mnemonic#"r\t$R1, $R2, $R3", []>, + Requires<[FeatureLoadStoreOnCond]> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; +} + +// Like CondUnaryRRF, but with a fixed CC mask. 
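The conditional register-register classes above implement a plain select: when the CC test fails, the destination keeps whatever it already held, which is why the old value of R1 is described as an implicit use. Scalar equivalent (sketch):

    // Selected is the CC test; on failure the register keeps its old value.
    int conditionalLoadRegister(int R1Old, int R2, bool Selected) {
      return Selected ? R2 : R1Old;
    }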
+class FixedCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1, + RegisterOperand cls2, bits<4> ccmask> + : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2), + mnemonic#"\t$R1, $R2", []>, + Requires<[FeatureLoadStoreOnCond]> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; + let R3 = ccmask; +} class UnaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, RegisterOperand cls, Immediate imm> @@ -585,45 +777,105 @@ class UnaryRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator, let AddedComplexity = 7; } +class CondUnaryRSY<string mnemonic, bits<16> opcode, + SDPatternOperator operator, RegisterOperand cls, + bits<5> bytes, AddressingMode mode = bdaddr20only> + : InstRSY<opcode, (outs cls:$R1), + (ins cls:$R1src, mode:$BD2, cond4:$valid, cond4:$R3), + mnemonic#"$R3\t$R1, $BD2", + [(set cls:$R1, + (z_select_ccmask (load bdaddr20only:$BD2), cls:$R1src, + cond4:$valid, cond4:$R3))]>, + Requires<[FeatureLoadStoreOnCond]> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; + let mayLoad = 1; + let AccessBytes = bytes; + let CCMaskLast = 1; +} + +// Like CondUnaryRSY, but used for the raw assembly form. The condition-code +// mask is the third operand rather than being part of the mnemonic. +class AsmCondUnaryRSY<string mnemonic, bits<16> opcode, + RegisterOperand cls, bits<5> bytes, + AddressingMode mode = bdaddr20only> + : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2, uimm8zx4:$R3), + mnemonic#"\t$R1, $BD2, $R3", []>, + Requires<[FeatureLoadStoreOnCond]> { + let mayLoad = 1; + let AccessBytes = bytes; + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; +} + +// Like CondUnaryRSY, but with a fixed CC mask. +class FixedCondUnaryRSY<string mnemonic, bits<16> opcode, + RegisterOperand cls, bits<4> ccmask, bits<5> bytes, + AddressingMode mode = bdaddr20only> + : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2), + mnemonic#"\t$R1, $BD2", []>, + Requires<[FeatureLoadStoreOnCond]> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; + let R3 = ccmask; + let mayLoad = 1; + let AccessBytes = bytes; +} + class UnaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator, - RegisterOperand cls, AddressingMode mode = bdxaddr12only> + RegisterOperand cls, bits<5> bytes, + AddressingMode mode = bdxaddr12only> : InstRX<opcode, (outs cls:$R1), (ins mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(set cls:$R1, (operator mode:$XBD2))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let mayLoad = 1; + let AccessBytes = bytes; } class UnaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls> + RegisterOperand cls, bits<5> bytes> : InstRXE<opcode, (outs cls:$R1), (ins bdxaddr12only:$XBD2), mnemonic#"\t$R1, $XBD2", [(set cls:$R1, (operator bdxaddr12only:$XBD2))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let mayLoad = 1; + let AccessBytes = bytes; } class UnaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, AddressingMode mode = bdxaddr20only> + RegisterOperand cls, bits<5> bytes, + AddressingMode mode = bdxaddr20only> : InstRXY<opcode, (outs cls:$R1), (ins mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(set cls:$R1, (operator mode:$XBD2))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let mayLoad = 1; + let AccessBytes = bytes; } multiclass UnaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode, - SDPatternOperator operator, RegisterOperand cls> { - 
let Function = mnemonic ## #cls in { - let PairType = "12" in - def "" : UnaryRX<mnemonic, rxOpcode, operator, cls, bdxaddr12pair>; - let PairType = "20" in - def Y : UnaryRXY<mnemonic#"y", rxyOpcode, operator, cls, bdxaddr20pair>; + SDPatternOperator operator, RegisterOperand cls, + bits<5> bytes> { + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in + def "" : UnaryRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>; + let DispSize = "20" in + def Y : UnaryRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes, + bdxaddr20pair>; } } class BinaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRR<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2), - mnemonic#"\t$R1, $R2", + mnemonic#"r\t$R1, $R2", [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; } @@ -631,8 +883,10 @@ class BinaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator, class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRRE<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2), - mnemonic#"\t$R1, $R2", + mnemonic#"r\t$R1, $R2", [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; } @@ -640,8 +894,41 @@ class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator, class BinaryRRF<string mnemonic, bits<16> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R3, cls2:$R2), - mnemonic#"\t$R1, $R3, $R2", - [(set cls1:$R1, (operator cls1:$R3, cls2:$R2))]>; + mnemonic#"r\t$R1, $R3, $R2", + [(set cls1:$R1, (operator cls1:$R3, cls2:$R2))]> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; +} + +class BinaryRRFK<string mnemonic, bits<16> opcode, SDPatternOperator operator, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R2, cls2:$R3), + mnemonic#"rk\t$R1, $R2, $R3", + [(set cls1:$R1, (operator cls1:$R2, cls2:$R3))]>; + +multiclass BinaryRRAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2, + SDPatternOperator operator, RegisterOperand cls1, + RegisterOperand cls2> { + let NumOpsKey = mnemonic in { + let NumOpsValue = "3" in + def K : BinaryRRFK<mnemonic, opcode2, null_frag, cls1, cls2>, + Requires<[FeatureDistinctOps]>; + let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in + def "" : BinaryRR<mnemonic, opcode1, operator, cls1, cls2>; + } +} + +multiclass BinaryRREAndK<string mnemonic, bits<16> opcode1, bits<16> opcode2, + SDPatternOperator operator, RegisterOperand cls1, + RegisterOperand cls2> { + let NumOpsKey = mnemonic in { + let NumOpsValue = "3" in + def K : BinaryRRFK<mnemonic, opcode2, null_frag, cls1, cls2>, + Requires<[FeatureDistinctOps]>; + let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in + def "" : BinaryRRE<mnemonic, opcode1, operator, cls1, cls2>; + } +} class BinaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, RegisterOperand cls, Immediate imm> @@ -652,6 +939,24 @@ class BinaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, let DisableEncoding = "$R1src"; } +class BinaryRIE<string mnemonic, bits<16> opcode, SDPatternOperator operator, + RegisterOperand cls, Immediate imm> + : InstRIEd<opcode, (outs cls:$R1), (ins 
cls:$R3, imm:$I2), + mnemonic#"\t$R1, $R3, $I2", + [(set cls:$R1, (operator cls:$R3, imm:$I2))]>; + +multiclass BinaryRIAndK<string mnemonic, bits<12> opcode1, bits<16> opcode2, + SDPatternOperator operator, RegisterOperand cls, + Immediate imm> { + let NumOpsKey = mnemonic in { + let NumOpsValue = "3" in + def K : BinaryRIE<mnemonic##"k", opcode2, null_frag, cls, imm>, + Requires<[FeatureDistinctOps]>; + let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in + def "" : BinaryRI<mnemonic, opcode1, operator, cls, imm>; + } +} + class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator, RegisterOperand cls, Immediate imm> : InstRIL<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2), @@ -662,46 +967,56 @@ class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator, } class BinaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load, + RegisterOperand cls, SDPatternOperator load, bits<5> bytes, AddressingMode mode = bdxaddr12only> : InstRX<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; let mayLoad = 1; + let AccessBytes = bytes; } class BinaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load> + RegisterOperand cls, SDPatternOperator load, bits<5> bytes> : InstRXE<opcode, (outs cls:$R1), (ins cls:$R1src, bdxaddr12only:$XBD2), mnemonic#"\t$R1, $XBD2", [(set cls:$R1, (operator cls:$R1src, (load bdxaddr12only:$XBD2)))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; let mayLoad = 1; + let AccessBytes = bytes; } class BinaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load, + RegisterOperand cls, SDPatternOperator load, bits<5> bytes, AddressingMode mode = bdxaddr20only> : InstRXY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; let mayLoad = 1; + let AccessBytes = bytes; } multiclass BinaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode, SDPatternOperator operator, RegisterOperand cls, - SDPatternOperator load> { - let Function = mnemonic ## #cls in { - let PairType = "12" in - def "" : BinaryRX<mnemonic, rxOpcode, operator, cls, load, bdxaddr12pair>; - let PairType = "20" in - def Y : BinaryRXY<mnemonic#"y", rxyOpcode, operator, cls, load, + SDPatternOperator load, bits<5> bytes> { + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in + def "" : BinaryRX<mnemonic, rxOpcode, operator, cls, load, bytes, + bdxaddr12pair>; + let DispSize = "20" in + def Y : BinaryRXY<mnemonic#"y", rxyOpcode, operator, cls, load, bytes, bdxaddr20pair>; } } @@ -727,59 +1042,83 @@ class BinarySIY<string mnemonic, bits<16> opcode, SDPatternOperator operator, multiclass BinarySIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode, SDPatternOperator operator, Operand imm> { - let Function = mnemonic ## #cls in { - let PairType = "12" in + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in def "" : BinarySI<mnemonic, siOpcode, operator, imm, bdaddr12pair>; - let PairType = "20" in + let 
DispSize = "20" in def Y : BinarySIY<mnemonic#"y", siyOpcode, operator, imm, bdaddr20pair>; } } class ShiftRS<string mnemonic, bits<8> opcode, SDPatternOperator operator, - RegisterOperand cls, AddressingMode mode> - : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2), + RegisterOperand cls> + : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, shift12only:$BD2), mnemonic#"\t$R1, $BD2", - [(set cls:$R1, (operator cls:$R1src, mode:$BD2))]> { + [(set cls:$R1, (operator cls:$R1src, shift12only:$BD2))]> { let R3 = 0; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; } class ShiftRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, AddressingMode mode> - : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, mode:$BD2), + RegisterOperand cls> + : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, shift20only:$BD2), mnemonic#"\t$R1, $R3, $BD2", - [(set cls:$R1, (operator cls:$R3, mode:$BD2))]>; + [(set cls:$R1, (operator cls:$R3, shift20only:$BD2))]>; + +multiclass ShiftRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2, + SDPatternOperator operator, RegisterOperand cls> { + let NumOpsKey = mnemonic in { + let NumOpsValue = "3" in + def K : ShiftRSY<mnemonic##"k", opcode2, null_frag, cls>, + Requires<[FeatureDistinctOps]>; + let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in + def "" : ShiftRS<mnemonic, opcode1, operator, cls>; + } +} class CompareRR<string mnemonic, bits<8> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRR<opcode, (outs), (ins cls1:$R1, cls2:$R2), - mnemonic#"\t$R1, $R2", - [(operator cls1:$R1, cls2:$R2)]>; + mnemonic#"r\t$R1, $R2", + [(operator cls1:$R1, cls2:$R2)]> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; + let isCompare = 1; +} class CompareRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRRE<opcode, (outs), (ins cls1:$R1, cls2:$R2), - mnemonic#"\t$R1, $R2", - [(operator cls1:$R1, cls2:$R2)]>; + mnemonic#"r\t$R1, $R2", + [(operator cls1:$R1, cls2:$R2)]> { + let OpKey = mnemonic ## cls1; + let OpType = "reg"; + let isCompare = 1; +} class CompareRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, RegisterOperand cls, Immediate imm> : InstRI<opcode, (outs), (ins cls:$R1, imm:$I2), mnemonic#"\t$R1, $I2", - [(operator cls:$R1, imm:$I2)]>; + [(operator cls:$R1, imm:$I2)]> { + let isCompare = 1; +} class CompareRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator, RegisterOperand cls, Immediate imm> : InstRIL<opcode, (outs), (ins cls:$R1, imm:$I2), mnemonic#"\t$R1, $I2", - [(operator cls:$R1, imm:$I2)]>; + [(operator cls:$R1, imm:$I2)]> { + let isCompare = 1; +} class CompareRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator, RegisterOperand cls, SDPatternOperator load> : InstRIL<opcode, (outs), (ins cls:$R1, pcrel32:$I2), mnemonic#"\t$R1, $I2", [(operator cls:$R1, (load pcrel32:$I2))]> { + let isCompare = 1; let mayLoad = 1; // We want PC-relative addresses to be tried ahead of BD and BDX addresses. 
// However, BDXs have two extra operands and are therefore 6 units more @@ -788,41 +1127,53 @@ class CompareRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator, } class CompareRX<string mnemonic, bits<8> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load, + RegisterOperand cls, SDPatternOperator load, bits<5> bytes, AddressingMode mode = bdxaddr12only> : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(operator cls:$R1, (load mode:$XBD2))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; + let isCompare = 1; let mayLoad = 1; + let AccessBytes = bytes; } class CompareRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load> + RegisterOperand cls, SDPatternOperator load, bits<5> bytes> : InstRXE<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2), mnemonic#"\t$R1, $XBD2", [(operator cls:$R1, (load bdxaddr12only:$XBD2))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; + let isCompare = 1; let mayLoad = 1; + let AccessBytes = bytes; } class CompareRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load, + RegisterOperand cls, SDPatternOperator load, bits<5> bytes, AddressingMode mode = bdxaddr20only> : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2), mnemonic#"\t$R1, $XBD2", [(operator cls:$R1, (load mode:$XBD2))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; + let isCompare = 1; let mayLoad = 1; + let AccessBytes = bytes; } multiclass CompareRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode, SDPatternOperator operator, RegisterOperand cls, - SDPatternOperator load> { - let Function = mnemonic ## #cls in { - let PairType = "12" in + SDPatternOperator load, bits<5> bytes> { + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in def "" : CompareRX<mnemonic, rxOpcode, operator, cls, - load, bdxaddr12pair>; - let PairType = "20" in + load, bytes, bdxaddr12pair>; + let DispSize = "20" in def Y : CompareRXY<mnemonic#"y", rxyOpcode, operator, cls, - load, bdxaddr20pair>; + load, bytes, bdxaddr20pair>; } } @@ -832,6 +1183,7 @@ class CompareSI<string mnemonic, bits<8> opcode, SDPatternOperator operator, : InstSI<opcode, (outs), (ins mode:$BD1, imm:$I2), mnemonic#"\t$BD1, $I2", [(operator (load mode:$BD1), imm:$I2)]> { + let isCompare = 1; let mayLoad = 1; } @@ -840,6 +1192,7 @@ class CompareSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator, : InstSIL<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2), mnemonic#"\t$BD1, $I2", [(operator (load bdaddr12only:$BD1), imm:$I2)]> { + let isCompare = 1; let mayLoad = 1; } @@ -849,16 +1202,17 @@ class CompareSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator, : InstSIY<opcode, (outs), (ins mode:$BD1, imm:$I2), mnemonic#"\t$BD1, $I2", [(operator (load mode:$BD1), imm:$I2)]> { + let isCompare = 1; let mayLoad = 1; } multiclass CompareSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode, SDPatternOperator operator, SDPatternOperator load, Immediate imm> { - let Function = mnemonic in { - let PairType = "12" in + let DispKey = mnemonic in { + let DispSize = "12" in def "" : CompareSI<mnemonic, siOpcode, operator, load, imm, bdaddr12pair>; - let PairType = "20" in + let DispSize = "20" in def Y : CompareSIY<mnemonic#"y", siyOpcode, operator, load, imm, bdaddr20pair>; } @@ -867,22 +1221,27 @@ multiclass CompareSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode, class TernaryRRD<string mnemonic, 
bits<16> opcode, SDPatternOperator operator, RegisterOperand cls> : InstRRD<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, cls:$R2), - mnemonic#"\t$R1, $R3, $R2", + mnemonic#"r\t$R1, $R3, $R2", [(set cls:$R1, (operator cls:$R1src, cls:$R3, cls:$R2))]> { + let OpKey = mnemonic ## cls; + let OpType = "reg"; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; } class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load> + RegisterOperand cls, SDPatternOperator load, bits<5> bytes> : InstRXF<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, bdxaddr12only:$XBD2), mnemonic#"\t$R1, $R3, $XBD2", [(set cls:$R1, (operator cls:$R1src, cls:$R3, (load bdxaddr12only:$XBD2)))]> { + let OpKey = mnemonic ## cls; + let OpType = "mem"; let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; let mayLoad = 1; + let AccessBytes = bytes; } class CmpSwapRS<string mnemonic, bits<8> opcode, SDPatternOperator operator, @@ -909,10 +1268,10 @@ class CmpSwapRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator, multiclass CmpSwapRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode, SDPatternOperator operator, RegisterOperand cls> { - let Function = mnemonic ## #cls in { - let PairType = "12" in + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in def "" : CmpSwapRS<mnemonic, rsOpcode, operator, cls, bdaddr12pair>; - let PairType = "20" in + let DispSize = "20" in def Y : CmpSwapRSY<mnemonic#"y", rsyOpcode, operator, cls, bdaddr20pair>; } } @@ -920,13 +1279,21 @@ multiclass CmpSwapRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode, class RotateSelectRIEf<string mnemonic, bits<16> opcode, RegisterOperand cls1, RegisterOperand cls2> : InstRIEf<opcode, (outs cls1:$R1), - (ins cls1:$R1src, cls2:$R2, - uimm8zx6:$I3, uimm8zx6:$I4, uimm8zx6:$I5), + (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5), mnemonic#"\t$R1, $R2, $I3, $I4, $I5", []> { let Constraints = "$R1 = $R1src"; let DisableEncoding = "$R1src"; } +// A floating-point load-and-test operation. Create both a normal unary +// operation and one that acts as a comparison against zero. +multiclass LoadAndTestRRE<string mnemonic, bits<16> opcode, + RegisterOperand cls> { + def "" : UnaryRRE<mnemonic, opcode, null_frag, cls, cls>; + let isCodeGenOnly = 1 in + def Compare : CompareRRE<mnemonic, opcode, null_frag, cls, cls>; +} + //===----------------------------------------------------------------------===// // Pseudo instructions //===----------------------------------------------------------------------===// @@ -946,8 +1313,10 @@ class Pseudo<dag outs, dag ins, list<dag> pattern> // Implements "$dst = $cc & (8 >> CC) ? $src1 : $src2", where CC is // the value of the PSW's 2-bit condition code field. class SelectWrapper<RegisterOperand cls> - : Pseudo<(outs cls:$dst), (ins cls:$src1, cls:$src2, i8imm:$cc), - [(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2, imm:$cc))]> { + : Pseudo<(outs cls:$dst), + (ins cls:$src1, cls:$src2, uimm8zx4:$valid, uimm8zx4:$cc), + [(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2, + uimm8zx4:$valid, uimm8zx4:$cc))]> { let usesCustomInserter = 1; // Although the instructions used by these nodes do not in themselves // change CC, the insertion requires new blocks, and CC cannot be live @@ -956,6 +1325,23 @@ class SelectWrapper<RegisterOperand cls> let Uses = [CC]; } +// Stores $new to $addr if $cc is true ("" case) or false (Inv case).
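For reference, the condition-code mask convention used by SelectWrapper above (and by the CondStores multiclass that follows) can be modelled in a few lines of standalone C++. This is an illustrative sketch only, not LLVM API; the helper name ccTaken is invented here:

    #include <cassert>

    // The PSW condition code CC is a 2-bit value (0..3). A 4-bit mask
    // selects which CC values count as "true": CC 0 is tested by bit 8,
    // CC 1 by bit 4, CC 2 by bit 2 and CC 3 by bit 1, i.e. by (8 >> CC).
    static bool ccTaken(unsigned ccMask, unsigned cc) {
      return (ccMask & (8 >> cc)) != 0;
    }

    int main() {
      assert(ccTaken(0x8, 0));   // mask 8 fires only on CC 0 ("equal")
      assert(!ccTaken(0x8, 1));
      assert(ccTaken(0x6, 2));   // mask 4|2 fires on CC 1 or CC 2
      return 0;
    }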
+multiclass CondStores<RegisterOperand cls, SDPatternOperator store, + SDPatternOperator load, AddressingMode mode> { + let Defs = [CC], Uses = [CC], usesCustomInserter = 1 in { + def "" : Pseudo<(outs), + (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc), + [(store (z_select_ccmask cls:$new, (load mode:$addr), + uimm8zx4:$valid, uimm8zx4:$cc), + mode:$addr)]>; + def Inv : Pseudo<(outs), + (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc), + [(store (z_select_ccmask (load mode:$addr), cls:$new, + uimm8zx4:$valid, uimm8zx4:$cc), + mode:$addr)]>; + } +} + // OPERATOR is ATOMIC_SWAP or an ATOMIC_LOAD_* operation. PAT and OPERAND // describe the second (non-memory) operand. class AtomicLoadBinary<SDPatternOperator operator, RegisterOperand cls, diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp index 0d30432..9ee60aa 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -12,8 +12,10 @@ //===----------------------------------------------------------------------===// #include "SystemZInstrInfo.h" +#include "SystemZTargetMachine.h" #include "SystemZInstrBuilder.h" -#include "llvm/Target/TargetMachine.h" +#include "llvm/CodeGen/LiveVariables.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #define GET_INSTRINFO_CTOR #define GET_INSTRMAP_INFO @@ -21,9 +23,14 @@ using namespace llvm; +// Return a mask with Count low bits set. +static uint64_t allOnes(unsigned int Count) { + return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1; +} + SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm) : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP), - RI(tm) { + RI(tm), TM(tm) { } // MI is a 128-bit load or store. Split it into two 64-bit loads or stores, @@ -80,7 +87,8 @@ void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const { // Return 0 otherwise. // // Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores. -static int isSimpleMove(const MachineInstr *MI, int &FrameIndex, int Flag) { +static int isSimpleMove(const MachineInstr *MI, int &FrameIndex, + unsigned Flag) { const MCInstrDesc &MCID = MI->getDesc(); if ((MCID.TSFlags & Flag) && MI->getOperand(1).isFI() && @@ -102,6 +110,31 @@ unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI, return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore); } +bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI, + int &DestFrameIndex, + int &SrcFrameIndex) const { + // Check for MVC 0(Length,FI1),0(FI2) + const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo(); + if (MI->getOpcode() != SystemZ::MVC || + !MI->getOperand(0).isFI() || + MI->getOperand(1).getImm() != 0 || + !MI->getOperand(3).isFI() || + MI->getOperand(4).getImm() != 0) + return false; + + // Check that Length covers the full slots. 
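To make the size check below concrete: an MVC only counts as a stack-slot copy when it moves the whole of one slot into the whole of another. With two 8-byte spill slots, mvc 160(8,%r15),168(%r15) would qualify, while mvc 160(4,%r15),168(%r15) copies only half of each object and must be left alone (the offsets and register here are purely illustrative).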
+ int64_t Length = MI->getOperand(2).getImm(); + unsigned FI1 = MI->getOperand(0).getIndex(); + unsigned FI2 = MI->getOperand(3).getIndex(); + if (MFI->getObjectSize(FI1) != Length || + MFI->getObjectSize(FI2) != Length) + return false; + + DestFrameIndex = FI1; + SrcFrameIndex = FI2; + return true; +} + bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, @@ -168,13 +201,13 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, // FIXME: add X86-style branch swap FBB = TBB; TBB = Branch.Target->getMBB(); + Cond.push_back(MachineOperand::CreateImm(Branch.CCValid)); Cond.push_back(MachineOperand::CreateImm(Branch.CCMask)); continue; } // Handle subsequent conditional branches. - assert(Cond.size() == 1); - assert(TBB); + assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch"); // Only handle the case where all conditional branches branch to the same // destination. @@ -182,11 +215,13 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, return true; // If the conditions are the same, we can leave them alone. - unsigned OldCond = Cond[0].getImm(); - if (OldCond == Branch.CCMask) + unsigned OldCCValid = Cond[0].getImm(); + unsigned OldCCMask = Cond[1].getImm(); + if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask) continue; // FIXME: Try combining conditions like X86 does. Should be easy on Z! + return false; } return false; @@ -214,6 +249,13 @@ unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { return Count; } +bool SystemZInstrInfo:: +ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { + assert(Cond.size() == 2 && "Invalid condition"); + Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm()); + return false; +} + unsigned SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, @@ -225,7 +267,7 @@ SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, // Shouldn't be a fall through. assert(TBB && "InsertBranch must not be told to insert a fallthrough"); - assert((Cond.size() == 1 || Cond.size() == 0) && + assert((Cond.size() == 2 || Cond.size() == 0) && "SystemZ branch conditions have two components!"); if (Cond.empty()) { @@ -237,8 +279,10 @@ SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, // Conditional branch. unsigned Count = 0; - unsigned CC = Cond[0].getImm(); - BuildMI(&MBB, DL, get(SystemZ::BRC)).addImm(CC).addMBB(TBB); + unsigned CCValid = Cond[0].getImm(); + unsigned CCMask = Cond[1].getImm(); + BuildMI(&MBB, DL, get(SystemZ::BRC)) + .addImm(CCValid).addImm(CCMask).addMBB(TBB); ++Count; if (FBB) { @@ -249,6 +293,62 @@ SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, return Count; } +// If Opcode is a move that has a conditional variant, return that variant, +// otherwise return 0. +static unsigned getConditionalMove(unsigned Opcode) { + switch (Opcode) { + case SystemZ::LR: return SystemZ::LOCR; + case SystemZ::LGR: return SystemZ::LOCGR; + default: return 0; + } +} + +bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const { + unsigned Opcode = MI->getOpcode(); + if (TM.getSubtargetImpl()->hasLoadStoreOnCond() && + getConditionalMove(Opcode)) + return true; + return false; +} + +bool SystemZInstrInfo:: +isProfitableToIfCvt(MachineBasicBlock &MBB, + unsigned NumCycles, unsigned ExtraPredCycles, + const BranchProbability &Probability) const { + // For now only convert single instructions.
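One detail from the hunk above is worth spelling out: ReverseBranchCondition can invert a branch with a single XOR only because CCMask is always a subset of CCValid. A standalone check with hypothetical mask values:

    #include <cassert>

    int main() {
      // After an integer comparison only CC 0, 1 and 2 can occur, so
      // CCValid is 0xE; a branch on "equal" tests CC 0, so CCMask is 0x8.
      unsigned ccValid = 0xE;
      unsigned ccMask = 0x8;

      // Reversing keeps CCValid and flips CCMask within the valid bits.
      unsigned reversed = ccMask ^ ccValid;
      assert(reversed == 0x6);  // taken on CC 1 or CC 2, i.e. "not equal"
      return 0;
    }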
+ return NumCycles == 1; +} + +bool SystemZInstrInfo:: +isProfitableToIfCvt(MachineBasicBlock &TMBB, + unsigned NumCyclesT, unsigned ExtraPredCyclesT, + MachineBasicBlock &FMBB, + unsigned NumCyclesF, unsigned ExtraPredCyclesF, + const BranchProbability &Probability) const { + // For now avoid converting mutually-exclusive cases. + return false; +} + +bool SystemZInstrInfo:: +PredicateInstruction(MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const { + assert(Pred.size() == 2 && "Invalid condition"); + unsigned CCValid = Pred[0].getImm(); + unsigned CCMask = Pred[1].getImm(); + assert(CCMask > 0 && CCMask < 15 && "Invalid predicate"); + unsigned Opcode = MI->getOpcode(); + if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) { + if (unsigned CondOpcode = getConditionalMove(Opcode)) { + MI->setDesc(get(CondOpcode)); + MachineInstrBuilder(*MI->getParent()->getParent(), MI) + .addImm(CCValid).addImm(CCMask) + .addReg(SystemZ::CC, RegState::Implicit); + return true; + } + } + return false; +} + void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL, @@ -315,6 +415,223 @@ SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, FrameIdx); } +// Return true if MI is a simple load or store with a 12-bit displacement +// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores. +static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) { + const MCInstrDesc &MCID = MI->getDesc(); + return ((MCID.TSFlags & Flag) && + isUInt<12>(MI->getOperand(2).getImm()) && + MI->getOperand(3).getReg() == 0); +} + +namespace { + struct LogicOp { + LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {} + LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize) + : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {} + + operator bool() const { return RegSize; } + + unsigned RegSize, ImmLSB, ImmSize; + }; +} + +static LogicOp interpretAndImmediate(unsigned Opcode) { + switch (Opcode) { + case SystemZ::NILL32: return LogicOp(32, 0, 16); + case SystemZ::NILH32: return LogicOp(32, 16, 16); + case SystemZ::NILL: return LogicOp(64, 0, 16); + case SystemZ::NILH: return LogicOp(64, 16, 16); + case SystemZ::NIHL: return LogicOp(64, 32, 16); + case SystemZ::NIHH: return LogicOp(64, 48, 16); + case SystemZ::NILF32: return LogicOp(32, 0, 32); + case SystemZ::NILF: return LogicOp(64, 0, 32); + case SystemZ::NIHF: return LogicOp(64, 32, 32); + default: return LogicOp(); + } +} + +// Used to return from convertToThreeAddress after replacing two-address +// instruction OldMI with three-address instruction NewMI. +static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI, + MachineInstr *NewMI, + LiveVariables *LV) { + if (LV) { + unsigned NumOps = OldMI->getNumOperands(); + for (unsigned I = 1; I < NumOps; ++I) { + MachineOperand &Op = OldMI->getOperand(I); + if (Op.isReg() && Op.isKill()) + LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI); + } + } + return NewMI; +} + +MachineInstr * +SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, + MachineBasicBlock::iterator &MBBI, + LiveVariables *LV) const { + MachineInstr *MI = MBBI; + MachineBasicBlock *MBB = MI->getParent(); + + unsigned Opcode = MI->getOpcode(); + unsigned NumOps = MI->getNumOperands(); + + // Try to convert something like SLL into SLLK, if supported. + // We prefer to keep the two-operand form where possible both + // because it tends to be shorter and because some instructions + // have memory forms that can be used during spilling.
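A concrete (hypothetical) case of the trade-off just described: if the second input of the two-address AR %r1,%r2 is spilled, the memory-folding code later in this file can absorb the reload as A %r1,160(%r15), whereas the three-address ARK has no memory form, so the two-address variant is kept until register allocation shows that a distinct destination is actually needed.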
+ if (TM.getSubtargetImpl()->hasDistinctOps()) { + int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode); + if (ThreeOperandOpcode >= 0) { + MachineOperand &Dest = MI->getOperand(0); + MachineOperand &Src = MI->getOperand(1); + MachineInstrBuilder MIB = + BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode)) + .addOperand(Dest); + // Keep the kill state, but drop the tied flag. + MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg()); + // Keep the remaining operands as-is. + for (unsigned I = 2; I < NumOps; ++I) + MIB.addOperand(MI->getOperand(I)); + return finishConvertToThreeAddress(MI, MIB, LV); + } + } + + // Try to convert an AND into an RISBG-type instruction. + if (LogicOp And = interpretAndImmediate(Opcode)) { + unsigned NewOpcode; + if (And.RegSize == 64) + NewOpcode = SystemZ::RISBG; + else if (TM.getSubtargetImpl()->hasHighWord()) + NewOpcode = SystemZ::RISBLG32; + else + // We can't use RISBG for 32-bit operations because it clobbers the + // high word of the destination too. + NewOpcode = 0; + if (NewOpcode) { + uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB; + // AND IMMEDIATE leaves the other bits of the register unchanged. + Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB); + unsigned Start, End; + if (isRxSBGMask(Imm, And.RegSize, Start, End)) { + if (NewOpcode == SystemZ::RISBLG32) { + Start &= 31; + End &= 31; + } + MachineOperand &Dest = MI->getOperand(0); + MachineOperand &Src = MI->getOperand(1); + MachineInstrBuilder MIB = + BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode)) + .addOperand(Dest).addReg(0) + .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg()) + .addImm(Start).addImm(End + 128).addImm(0); + return finishConvertToThreeAddress(MI, MIB, LV); + } + } + } + return 0; +} + +MachineInstr * +SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, + MachineInstr *MI, + const SmallVectorImpl<unsigned> &Ops, + int FrameIndex) const { + const MachineFrameInfo *MFI = MF.getFrameInfo(); + unsigned Size = MFI->getObjectSize(FrameIndex); + + // Early exit for cases we don't care about + if (Ops.size() != 1) + return 0; + + unsigned OpNum = Ops[0]; + assert(Size == MF.getRegInfo() + .getRegClass(MI->getOperand(OpNum).getReg())->getSize() && + "Invalid size combination"); + + unsigned Opcode = MI->getOpcode(); + if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) { + bool Op0IsGPR = (Opcode == SystemZ::LGDR); + bool Op1IsGPR = (Opcode == SystemZ::LDGR); + // If we're spilling the destination of an LDGR or LGDR, store the + // source register instead. + if (OpNum == 0) { + unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD; + return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode)) + .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex) + .addImm(0).addReg(0); + } + // If we're spilling the source of an LDGR or LGDR, load the + // destination register instead. + if (OpNum == 1) { + unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD; + unsigned Dest = MI->getOperand(0).getReg(); + return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest) + .addFrameIndex(FrameIndex).addImm(0).addReg(0); + } + } + + // Look for cases where the source of a simple store or the destination + // of a simple load is being spilled. Try to use MVC instead. + // + // Although MVC is in practice a fast choice in these cases, it is still + // logically a bytewise copy. This means that we cannot use it if the + // load or store is volatile.
It also means that the transformation is + // not valid in cases where the two memories partially overlap; however, + // that is not a problem here, because we know that one of the memories + // is a full frame index. + if (OpNum == 0 && MI->hasOneMemOperand()) { + MachineMemOperand *MMO = *MI->memoperands_begin(); + if (MMO->getSize() == Size && !MMO->isVolatile()) { + // Handle conversion of loads. + if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) { + return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC)) + .addFrameIndex(FrameIndex).addImm(0).addImm(Size) + .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm()) + .addMemOperand(MMO); + } + // Handle conversion of stores. + if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) { + return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC)) + .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm()) + .addImm(Size).addFrameIndex(FrameIndex).addImm(0) + .addMemOperand(MMO); + } + } + } + + // If the spilled operand is the final one, try to change <INSN>R + // into <INSN>. + int MemOpcode = SystemZ::getMemOpcode(Opcode); + if (MemOpcode >= 0) { + unsigned NumOps = MI->getNumExplicitOperands(); + if (OpNum == NumOps - 1) { + const MCInstrDesc &MemDesc = get(MemOpcode); + uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags); + assert(AccessBytes != 0 && "Size of access should be known"); + assert(AccessBytes <= Size && "Access outside the frame index"); + uint64_t Offset = Size - AccessBytes; + MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode)); + for (unsigned I = 0; I < OpNum; ++I) + MIB.addOperand(MI->getOperand(I)); + MIB.addFrameIndex(FrameIndex).addImm(Offset); + if (MemDesc.TSFlags & SystemZII::HasIndex) + MIB.addReg(0); + return MIB; + } + } + + return 0; +} + +MachineInstr * +SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + MachineInstr* LoadMI) const { + return 0; +} + bool SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { switch (MI->getOpcode()) { @@ -343,13 +660,6 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { } } -bool SystemZInstrInfo:: -ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { - assert(Cond.size() == 1 && "Invalid branch condition!"); - Cond[0].setImm(Cond[0].getImm() ^ SystemZ::CCMASK_ANY); - return false; -} - uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const { if (MI->getOpcode() == TargetOpcode::INLINEASM) { const MachineFunction *MF = MI->getParent()->getParent(); @@ -366,22 +676,31 @@ SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const { case SystemZ::J: case SystemZ::JG: return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY, - &MI->getOperand(0)); + SystemZ::CCMASK_ANY, &MI->getOperand(0)); case SystemZ::BRC: case SystemZ::BRCL: return SystemZII::Branch(SystemZII::BranchNormal, - MI->getOperand(0).getImm(), &MI->getOperand(1)); + MI->getOperand(0).getImm(), + MI->getOperand(1).getImm(), &MI->getOperand(2)); + + case SystemZ::BRCT: + return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP, + SystemZ::CCMASK_CMP_NE, &MI->getOperand(2)); + + case SystemZ::BRCTG: + return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP, + SystemZ::CCMASK_CMP_NE, &MI->getOperand(2)); case SystemZ::CIJ: case SystemZ::CRJ: - return SystemZII::Branch(SystemZII::BranchC, MI->getOperand(2).getImm(), - &MI->getOperand(3)); + return 
SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP, + MI->getOperand(2).getImm(), &MI->getOperand(3)); case SystemZ::CGIJ: case SystemZ::CGRJ: - return SystemZII::Branch(SystemZII::BranchCG, MI->getOperand(2).getImm(), - &MI->getOperand(3)); + return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP, + MI->getOperand(2).getImm(), &MI->getOperand(3)); default: llvm_unreachable("Unrecognized branch opcode"); @@ -442,6 +761,64 @@ unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode, return 0; } +unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const { + switch (Opcode) { + case SystemZ::L: return SystemZ::LT; + case SystemZ::LY: return SystemZ::LT; + case SystemZ::LG: return SystemZ::LTG; + case SystemZ::LGF: return SystemZ::LTGF; + case SystemZ::LR: return SystemZ::LTR; + case SystemZ::LGFR: return SystemZ::LTGFR; + case SystemZ::LGR: return SystemZ::LTGR; + case SystemZ::LER: return SystemZ::LTEBR; + case SystemZ::LDR: return SystemZ::LTDBR; + case SystemZ::LXR: return SystemZ::LTXBR; + default: return 0; + } +} + +// Return true if Mask matches the regexp 0*1+0*, given that zero masks +// have already been filtered out. Store the first set bit in LSB and +// the number of set bits in Length if so. +static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) { + unsigned First = findFirstSet(Mask); + uint64_t Top = (Mask >> First) + 1; + if ((Top & -Top) == Top) { + LSB = First; + Length = findFirstSet(Top); + return true; + } + return false; +} + +bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize, + unsigned &Start, unsigned &End) const { + // Reject trivial all-zero masks. + if (Mask == 0) + return false; + + // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of + // the msb and End specifies the index of the lsb. + unsigned LSB, Length; + if (isStringOfOnes(Mask, LSB, Length)) { + Start = 63 - (LSB + Length - 1); + End = 63 - LSB; + return true; + } + + // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb + // of the low 1s and End specifies the lsb of the high 1s. + if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) { + assert(LSB > 0 && "Bottom bit must be set"); + assert(LSB + Length < BitSize && "Top bit must be set"); + Start = 63 - (LSB - 1); + End = 63 - (LSB + Length); + return true; + } + + return false; +} + unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode, const MachineInstr *MI) const { switch (Opcode) { diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h index d6980f7..276fd3b 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.h +++ b/lib/Target/SystemZ/SystemZInstrInfo.h @@ -28,12 +28,31 @@ class SystemZTargetMachine; namespace SystemZII { enum { // See comments in SystemZInstrFormats.td. 
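The enum below packs several small fields into the instruction's TSFlags word. As a quick illustration of the encoding (a hypothetical standalone check using the shift and mask values defined in the enum):

    #include <cassert>

    int main() {
      // AccessSizeShift is 5 and the field mask is 31 << 5, so an
      // instruction that accesses 8 bytes carries 8 << 5 = 0x100.
      unsigned tsFlags = 8 << 5;
      unsigned accessSize = (tsFlags & (31 << 5)) >> 5;
      assert(accessSize == 8);
      return 0;
    }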
- SimpleBDXLoad = (1 << 0), - SimpleBDXStore = (1 << 1), - Has20BitOffset = (1 << 2), - HasIndex = (1 << 3), - Is128Bit = (1 << 4) + SimpleBDXLoad = (1 << 0), + SimpleBDXStore = (1 << 1), + Has20BitOffset = (1 << 2), + HasIndex = (1 << 3), + Is128Bit = (1 << 4), + AccessSizeMask = (31 << 5), + AccessSizeShift = 5, + CCValuesMask = (15 << 10), + CCValuesShift = 10, + CompareZeroCCMaskMask = (15 << 14), + CompareZeroCCMaskShift = 14, + CCMaskFirst = (1 << 18), + CCMaskLast = (1 << 19), + IsLogical = (1 << 20) }; + static inline unsigned getAccessSize(unsigned int Flags) { + return (Flags & AccessSizeMask) >> AccessSizeShift; + } + static inline unsigned getCCValues(unsigned int Flags) { + return (Flags & CCValuesMask) >> CCValuesShift; + } + static inline unsigned getCompareZeroCCMask(unsigned int Flags) { + return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift; + } + // SystemZ MachineOperand target flags. enum { // Masks out the bits for the access model. @@ -53,26 +72,39 @@ namespace SystemZII { // An instruction that performs a 64-bit signed comparison and branches // on the result. - BranchCG + BranchCG, + + // An instruction that decrements a 32-bit register and branches if + // the result is nonzero. + BranchCT, + + // An instruction that decrements a 64-bit register and branches if + // the result is nonzero. + BranchCTG }; // Information about a branch instruction. struct Branch { // The type of the branch. BranchType Type; + // CCMASK_<N> is set if CC might be equal to N. + unsigned CCValid; + // CCMASK_<N> is set if the branch should be taken when CC == N. unsigned CCMask; // The target of the branch. const MachineOperand *Target; - Branch(BranchType type, unsigned ccMask, const MachineOperand *target) - : Type(type), CCMask(ccMask), Target(target) {} + Branch(BranchType type, unsigned ccValid, unsigned ccMask, + const MachineOperand *target) + : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {} }; } class SystemZInstrInfo : public SystemZGenInstrInfo { const SystemZRegisterInfo RI; + SystemZTargetMachine &TM; void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const; void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const; @@ -85,6 +117,8 @@ public: int &FrameIndex) const LLVM_OVERRIDE; virtual unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const LLVM_OVERRIDE; + virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex, + int &SrcFrameIndex) const LLVM_OVERRIDE; virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, @@ -95,6 +129,23 @@ public: MachineBasicBlock *FBB, const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const LLVM_OVERRIDE; + virtual bool isPredicable(MachineInstr *MI) const LLVM_OVERRIDE; + virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, + unsigned ExtraPredCycles, + const BranchProbability &Probability) const + LLVM_OVERRIDE; + virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, + unsigned NumCyclesT, + unsigned ExtraPredCyclesT, + MachineBasicBlock &FMBB, + unsigned NumCyclesF, + unsigned ExtraPredCyclesF, + const BranchProbability &Probability) const + LLVM_OVERRIDE; + virtual bool + PredicateInstruction(MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const + LLVM_OVERRIDE; virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL, unsigned DestReg, unsigned SrcReg, @@ -111,6 +162,18 @@ public: unsigned DestReg, int FrameIdx, const
TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const LLVM_OVERRIDE; + virtual MachineInstr * + convertToThreeAddress(MachineFunction::iterator &MFI, + MachineBasicBlock::iterator &MBBI, + LiveVariables *LV) const; + virtual MachineInstr * + foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, + const SmallVectorImpl<unsigned> &Ops, + int FrameIndex) const; + virtual MachineInstr * + foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + MachineInstr* LoadMI) const; virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const LLVM_OVERRIDE; virtual bool @@ -141,6 +204,16 @@ public: // exists. unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset) const; + // If Opcode is a load instruction that has a LOAD AND TEST form, + // return the opcode for the testing form, otherwise return 0. + unsigned getLoadAndTest(unsigned Opcode) const; + + // Return true if ROTATE AND ... SELECTED BITS can be used to select bits + // Mask of the R2 operand, given that only the low BitSize bits of Mask are + // significant. Set Start and End to the I3 and I4 operands if so. + bool isRxSBGMask(uint64_t Mask, unsigned BitSize, + unsigned &Start, unsigned &End) const; + // If Opcode is a COMPARE opcode for which an associated COMPARE AND // BRANCH exists, return the opcode for the latter, otherwise return 0. // MI, if nonnull, is the compare instruction. diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td index c9ec6bc..b318d67 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.td +++ b/lib/Target/SystemZ/SystemZInstrInfo.td @@ -58,22 +58,19 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, R1 = 15 in { // in their raw BRC/BRCL form, with the 4-bit condition-code mask being // the first operand. It seems friendlier to use mnemonic forms like // JE and JLH when writing out the assembly though. -// -// Using a custom inserter for BRC gives us a chance to convert the BRC -// and a preceding compare into a single compare-and-branch instruction. -// The inserter makes no change in cases where a separate branch really -// is needed. -multiclass CondBranches<Operand ccmask, string short, string long> { - let isBranch = 1, isTerminator = 1, Uses = [CC] in { - def "" : InstRI<0xA74, (outs), (ins ccmask:$R1, brtarget16:$I2), short, []>; - def L : InstRIL<0xC04, (outs), (ins ccmask:$R1, brtarget32:$I2), long, []>; +let isBranch = 1, isTerminator = 1, Uses = [CC] in { + let isCodeGenOnly = 1, CCMaskFirst = 1 in { + def BRC : InstRI<0xA74, (outs), (ins cond4:$valid, cond4:$R1, + brtarget16:$I2), "j$R1\t$I2", + [(z_br_ccmask cond4:$valid, cond4:$R1, bb:$I2)]>; + def BRCL : InstRIL<0xC04, (outs), (ins cond4:$valid, cond4:$R1, + brtarget32:$I2), "jg$R1\t$I2", []>; } + def AsmBRC : InstRI<0xA74, (outs), (ins uimm8zx4:$R1, brtarget16:$I2), + "brc\t$R1, $I2", []>; + def AsmBRCL : InstRIL<0xC04, (outs), (ins uimm8zx4:$R1, brtarget32:$I2), + "brcl\t$R1, $I2", []>; } -let isCodeGenOnly = 1, usesCustomInserter = 1 in - defm BRC : CondBranches<cond4, "j$R1\t$I2", "jg$R1\t$I2">; -defm AsmBRC : CondBranches<uimm8zx4, "brc\t$R1, $I2", "brcl\t$R1, $I2">; - -def : Pat<(z_br_ccmask cond4:$cond, bb:$dst), (BRC cond4:$cond, bb:$dst)>; // Fused compare-and-branch instructions. 
As for normal branches, // we handle these instructions internally in their raw CRJ-like form, @@ -107,26 +104,32 @@ defm AsmC : CompareBranches<uimm8zx4, "", "$M3, ">; // (integer or floating-point) multiclass CondExtendedMnemonic<bits<4> ccmask, string name> { let R1 = ccmask in { - def "" : InstRI<0xA74, (outs), (ins brtarget16:$I2), - "j"##name##"\t$I2", []>; - def L : InstRIL<0xC04, (outs), (ins brtarget32:$I2), + def J : InstRI<0xA74, (outs), (ins brtarget16:$I2), + "j"##name##"\t$I2", []>; + def JG : InstRIL<0xC04, (outs), (ins brtarget32:$I2), "jg"##name##"\t$I2", []>; } + def LOCR : FixedCondUnaryRRF<"locr"##name, 0xB9F2, GR32, GR32, ccmask>; + def LOCGR : FixedCondUnaryRRF<"locgr"##name, 0xB9E2, GR64, GR64, ccmask>; + def LOC : FixedCondUnaryRSY<"loc"##name, 0xEBF2, GR32, ccmask, 4>; + def LOCG : FixedCondUnaryRSY<"locg"##name, 0xEBE2, GR64, ccmask, 8>; + def STOC : FixedCondStoreRSY<"stoc"##name, 0xEBF3, GR32, ccmask, 4>; + def STOCG : FixedCondStoreRSY<"stocg"##name, 0xEBE3, GR64, ccmask, 8>; } -defm AsmJO : CondExtendedMnemonic<1, "o">; -defm AsmJH : CondExtendedMnemonic<2, "h">; -defm AsmJNLE : CondExtendedMnemonic<3, "nle">; -defm AsmJL : CondExtendedMnemonic<4, "l">; -defm AsmJNHE : CondExtendedMnemonic<5, "nhe">; -defm AsmJLH : CondExtendedMnemonic<6, "lh">; -defm AsmJNE : CondExtendedMnemonic<7, "ne">; -defm AsmJE : CondExtendedMnemonic<8, "e">; -defm AsmJNLH : CondExtendedMnemonic<9, "nlh">; -defm AsmJHE : CondExtendedMnemonic<10, "he">; -defm AsmJNL : CondExtendedMnemonic<11, "nl">; -defm AsmJLE : CondExtendedMnemonic<12, "le">; -defm AsmJNH : CondExtendedMnemonic<13, "nh">; -defm AsmJNO : CondExtendedMnemonic<14, "no">; +defm AsmO : CondExtendedMnemonic<1, "o">; +defm AsmH : CondExtendedMnemonic<2, "h">; +defm AsmNLE : CondExtendedMnemonic<3, "nle">; +defm AsmL : CondExtendedMnemonic<4, "l">; +defm AsmNHE : CondExtendedMnemonic<5, "nhe">; +defm AsmLH : CondExtendedMnemonic<6, "lh">; +defm AsmNE : CondExtendedMnemonic<7, "ne">; +defm AsmE : CondExtendedMnemonic<8, "e">; +defm AsmNLH : CondExtendedMnemonic<9, "nlh">; +defm AsmHE : CondExtendedMnemonic<10, "he">; +defm AsmNL : CondExtendedMnemonic<11, "nl">; +defm AsmLE : CondExtendedMnemonic<12, "le">; +defm AsmNH : CondExtendedMnemonic<13, "nh">; +defm AsmNO : CondExtendedMnemonic<14, "no">; // Define AsmParser mnemonics for each integer condition-code mask. // This is like the list above, except that condition 3 is not possible @@ -163,16 +166,43 @@ defm AsmJE : IntCondExtendedMnemonic<8, "e", "nlh">; defm AsmJHE : IntCondExtendedMnemonic<10, "he", "nl">; defm AsmJLE : IntCondExtendedMnemonic<12, "le", "nh">; +// Decrement a register and branch if it is nonzero. These don't clobber CC, +// but we might need to split long branches into sequences that do. 
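For readers unfamiliar with these opcodes, a count-down loop shows the intended use (assembler sketch; the label and register choice are hypothetical):

    loop:                      # ... loop body ...
          brct %r2, loop       # %r2 -= 1, branch back while %r2 != 0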
+let Defs = [CC] in { + def BRCT : BranchUnaryRI<"brct", 0xA76, GR32>; + def BRCTG : BranchUnaryRI<"brctg", 0xA77, GR64>; +} + +//===----------------------------------------------------------------------===// +// Select instructions +//===----------------------------------------------------------------------===// + def Select32 : SelectWrapper<GR32>; def Select64 : SelectWrapper<GR64>; +defm CondStore8_32 : CondStores<GR32, nonvolatile_truncstorei8, + nonvolatile_anyextloadi8, bdxaddr20only>; +defm CondStore16_32 : CondStores<GR32, nonvolatile_truncstorei16, + nonvolatile_anyextloadi16, bdxaddr20only>; +defm CondStore32_32 : CondStores<GR32, nonvolatile_store, + nonvolatile_load, bdxaddr20only>; + +defm CondStore8 : CondStores<GR64, nonvolatile_truncstorei8, + nonvolatile_anyextloadi8, bdxaddr20only>; +defm CondStore16 : CondStores<GR64, nonvolatile_truncstorei16, + nonvolatile_anyextloadi16, bdxaddr20only>; +defm CondStore32 : CondStores<GR64, nonvolatile_truncstorei32, + nonvolatile_anyextloadi32, bdxaddr20only>; +defm CondStore64 : CondStores<GR64, nonvolatile_store, + nonvolatile_load, bdxaddr20only>; + //===----------------------------------------------------------------------===// // Call instructions //===----------------------------------------------------------------------===// // The definitions here are for the call-clobbered registers. let isCall = 1, Defs = [R0D, R1D, R2D, R3D, R4D, R5D, R14D, - F0D, F1D, F2D, F3D, F4D, F5D, F6D, F7D], + F0D, F1D, F2D, F3D, F4D, F5D, F6D, F7D, CC], R1 = 14, isCodeGenOnly = 1 in { def BRAS : InstRI<0xA75, (outs), (ins pcrel16call:$I2, variable_ops), "bras\t%r14, $I2", []>; @@ -197,12 +227,27 @@ def AsmBASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2), // Register moves. let neverHasSideEffects = 1 in { - def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>; - def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>; + def LR : UnaryRR <"l", 0x18, null_frag, GR32, GR32>; + def LGR : UnaryRRE<"lg", 0xB904, null_frag, GR64, GR64>; +} +let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in { + def LTR : UnaryRR <"lt", 0x12, null_frag, GR32, GR32>; + def LTGR : UnaryRRE<"ltg", 0xB902, null_frag, GR64, GR64>; +} + +// Move on condition. +let isCodeGenOnly = 1, Uses = [CC] in { + def LOCR : CondUnaryRRF<"loc", 0xB9F2, GR32, GR32>; + def LOCGR : CondUnaryRRF<"locg", 0xB9E2, GR64, GR64>; +} +let Uses = [CC] in { + def AsmLOCR : AsmCondUnaryRRF<"loc", 0xB9F2, GR32, GR32>; + def AsmLOCGR : AsmCondUnaryRRF<"locg", 0xB9E2, GR64, GR64>; } // Immediate moves. -let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { +let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1, + isReMaterializable = 1 in { // 16-bit sign-extended immediates. def LHI : UnaryRI<"lhi", 0xA78, bitconvert, GR32, imm32sx16>; def LGHI : UnaryRI<"lghi", 0xA79, bitconvert, GR64, imm64sx16>; @@ -221,11 +266,8 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { // Register loads. let canFoldAsLoad = 1, SimpleBDXLoad = 1 in { - defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32>; - def LRL : UnaryRILPC<"lrl", 0xC4D, aligned_load, GR32>; - - def LG : UnaryRXY<"lg", 0xE304, load, GR64>; - def LGRL : UnaryRILPC<"lgrl", 0xC48, aligned_load, GR64>; + defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32, 4>; + def LG : UnaryRXY<"lg", 0xE304, load, GR64, 8>; // These instructions are split after register allocation, so we don't // want a custom inserter. 
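The conditional moves defined in the hunk above are what let the if-converter collapse a branch-over-move. Schematically, using the locre extended mnemonic defined earlier (registers are hypothetical):

    # before if-conversion:
    cr    %r2, %r3
    jne   skip
    lr    %r1, %r4
    skip:

    # after, on a FeatureLoadStoreOnCond target:
    cr    %r2, %r3
    locre %r1, %r4        # copy %r4 into %r1 only when CC is 0 ("equal")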
@@ -234,16 +276,31 @@ let canFoldAsLoad = 1, SimpleBDXLoad = 1 in { [(set GR128:$dst, (load bdxaddr20only128:$src))]>; } } +let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in { + def LT : UnaryRXY<"lt", 0xE312, load, GR32, 4>; + def LTG : UnaryRXY<"ltg", 0xE302, load, GR64, 8>; +} + +let canFoldAsLoad = 1 in { + def LRL : UnaryRILPC<"lrl", 0xC4D, aligned_load, GR32>; + def LGRL : UnaryRILPC<"lgrl", 0xC48, aligned_load, GR64>; +} + +// Load on condition. +let isCodeGenOnly = 1, Uses = [CC] in { + def LOC : CondUnaryRSY<"loc", 0xEBF2, nonvolatile_load, GR32, 4>; + def LOCG : CondUnaryRSY<"locg", 0xEBE2, nonvolatile_load, GR64, 8>; +} +let Uses = [CC] in { + def AsmLOC : AsmCondUnaryRSY<"loc", 0xEBF2, GR32, 4>; + def AsmLOCG : AsmCondUnaryRSY<"locg", 0xEBE2, GR64, 8>; +} // Register stores. let SimpleBDXStore = 1 in { - let isCodeGenOnly = 1 in { - defm ST32 : StoreRXPair<"st", 0x50, 0xE350, store, GR32>; - def STRL32 : StoreRILPC<"strl", 0xC4F, aligned_store, GR32>; - } - - def STG : StoreRXY<"stg", 0xE324, store, GR64>; - def STGRL : StoreRILPC<"stgrl", 0xC4B, aligned_store, GR64>; + let isCodeGenOnly = 1 in + defm ST32 : StoreRXPair<"st", 0x50, 0xE350, store, GR32, 4>; + def STG : StoreRXY<"stg", 0xE324, store, GR64, 8>; // These instructions are split after register allocation, so we don't // want a custom inserter. @@ -252,6 +309,20 @@ let SimpleBDXStore = 1 in { [(store GR128:$src, bdxaddr20only128:$dst)]>; } } +let isCodeGenOnly = 1 in + def STRL32 : StoreRILPC<"strl", 0xC4F, aligned_store, GR32>; +def STGRL : StoreRILPC<"stgrl", 0xC4B, aligned_store, GR64>; + +// Store on condition. +let isCodeGenOnly = 1, Uses = [CC] in { + def STOC32 : CondStoreRSY<"stoc", 0xEBF3, GR32, 4>; + def STOC : CondStoreRSY<"stoc", 0xEBF3, GR64, 4>; + def STOCG : CondStoreRSY<"stocg", 0xEBE3, GR64, 8>; +} +let Uses = [CC] in { + def AsmSTOC : AsmCondStoreRSY<"stoc", 0xEBF3, GR32, 4>; + def AsmSTOCG : AsmCondStoreRSY<"stocg", 0xEBE3, GR64, 8>; +} // 8-bit immediate stores to 8-bit fields. defm MVI : StoreSIPair<"mvi", 0x92, 0xEB52, truncstorei8, imm32zx8trunc>; @@ -261,22 +332,50 @@ def MVHHI : StoreSIL<"mvhhi", 0xE544, truncstorei16, imm32sx16trunc>; def MVHI : StoreSIL<"mvhi", 0xE54C, store, imm32sx16>; def MVGHI : StoreSIL<"mvghi", 0xE548, store, imm64sx16>; +// Memory-to-memory moves. +let mayLoad = 1, mayStore = 1 in + def MVC : InstSS<0xD2, (outs), (ins bdladdr12onlylen8:$BDL1, + bdaddr12only:$BD2), + "mvc\t$BDL1, $BD2", []>; + +let mayLoad = 1, mayStore = 1, usesCustomInserter = 1 in + def MVCWrapper : Pseudo<(outs), (ins bdaddr12only:$dest, bdaddr12only:$src, + imm32len8:$length), + [(z_mvc bdaddr12only:$dest, bdaddr12only:$src, + imm32len8:$length)]>; + +defm LoadStore8_32 : MVCLoadStore<anyextloadi8, truncstorei8, i32, + MVCWrapper, 1>; +defm LoadStore16_32 : MVCLoadStore<anyextloadi16, truncstorei16, i32, + MVCWrapper, 2>; +defm LoadStore32_32 : MVCLoadStore<load, store, i32, MVCWrapper, 4>; + +defm LoadStore8 : MVCLoadStore<anyextloadi8, truncstorei8, i64, + MVCWrapper, 1>; +defm LoadStore16 : MVCLoadStore<anyextloadi16, truncstorei16, i64, + MVCWrapper, 2>; +defm LoadStore32 : MVCLoadStore<anyextloadi32, truncstorei32, i64, + MVCWrapper, 4>; +defm LoadStore64 : MVCLoadStore<load, store, i64, MVCWrapper, 8>; + //===----------------------------------------------------------------------===// // Sign extensions //===----------------------------------------------------------------------===// // 32-bit extensions from registers. 
let neverHasSideEffects = 1 in { - def LBR : UnaryRRE<"lbr", 0xB926, sext8, GR32, GR32>; - def LHR : UnaryRRE<"lhr", 0xB927, sext16, GR32, GR32>; + def LBR : UnaryRRE<"lb", 0xB926, sext8, GR32, GR32>; + def LHR : UnaryRRE<"lh", 0xB927, sext16, GR32, GR32>; } // 64-bit extensions from registers. let neverHasSideEffects = 1 in { - def LGBR : UnaryRRE<"lgbr", 0xB906, sext8, GR64, GR64>; - def LGHR : UnaryRRE<"lghr", 0xB907, sext16, GR64, GR64>; - def LGFR : UnaryRRE<"lgfr", 0xB914, sext32, GR64, GR32>; + def LGBR : UnaryRRE<"lgb", 0xB906, sext8, GR64, GR64>; + def LGHR : UnaryRRE<"lgh", 0xB907, sext16, GR64, GR64>; + def LGFR : UnaryRRE<"lgf", 0xB914, sext32, GR64, GR32>; } +let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in + def LTGFR : UnaryRRE<"ltgf", 0xB912, null_frag, GR64, GR64>; // Match 32-to-64-bit sign extensions in which the source is already // in a 64-bit register. @@ -284,16 +383,18 @@ def : Pat<(sext_inreg GR64:$src, i32), (LGFR (EXTRACT_SUBREG GR64:$src, subreg_32bit))>; // 32-bit extensions from memory. -def LB : UnaryRXY<"lb", 0xE376, sextloadi8, GR32>; -defm LH : UnaryRXPair<"lh", 0x48, 0xE378, sextloadi16, GR32>; +def LB : UnaryRXY<"lb", 0xE376, sextloadi8, GR32, 1>; +defm LH : UnaryRXPair<"lh", 0x48, 0xE378, sextloadi16, GR32, 2>; def LHRL : UnaryRILPC<"lhrl", 0xC45, aligned_sextloadi16, GR32>; // 64-bit extensions from memory. -def LGB : UnaryRXY<"lgb", 0xE377, sextloadi8, GR64>; -def LGH : UnaryRXY<"lgh", 0xE315, sextloadi16, GR64>; -def LGF : UnaryRXY<"lgf", 0xE314, sextloadi32, GR64>; +def LGB : UnaryRXY<"lgb", 0xE377, sextloadi8, GR64, 1>; +def LGH : UnaryRXY<"lgh", 0xE315, sextloadi16, GR64, 2>; +def LGF : UnaryRXY<"lgf", 0xE314, sextloadi32, GR64, 4>; def LGHRL : UnaryRILPC<"lghrl", 0xC44, aligned_sextloadi16, GR64>; def LGFRL : UnaryRILPC<"lgfrl", 0xC4C, aligned_sextloadi32, GR64>; +let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in + def LTGF : UnaryRXY<"ltgf", 0xE332, sextloadi32, GR64, 4>; // If the sign of a load-extend operation doesn't matter, use the signed ones. // There's not really much to choose between the sign and zero extensions, @@ -306,21 +407,29 @@ def : Pat<(i64 (extloadi8 bdxaddr20only:$src)), (LGB bdxaddr20only:$src)>; def : Pat<(i64 (extloadi16 bdxaddr20only:$src)), (LGH bdxaddr20only:$src)>; def : Pat<(i64 (extloadi32 bdxaddr20only:$src)), (LGF bdxaddr20only:$src)>; +// We want PC-relative addresses to be tried ahead of BD and BDX addresses. +// However, BDXs have two extra operands and are therefore 6 units more +// complex. +let AddedComplexity = 7 in { + def : Pat<(i32 (extloadi16 pcrel32:$src)), (LHRL pcrel32:$src)>; + def : Pat<(i64 (extloadi16 pcrel32:$src)), (LGHRL pcrel32:$src)>; +} + //===----------------------------------------------------------------------===// // Zero extensions //===----------------------------------------------------------------------===// // 32-bit extensions from registers. let neverHasSideEffects = 1 in { - def LLCR : UnaryRRE<"llcr", 0xB994, zext8, GR32, GR32>; - def LLHR : UnaryRRE<"llhr", 0xB995, zext16, GR32, GR32>; + def LLCR : UnaryRRE<"llc", 0xB994, zext8, GR32, GR32>; + def LLHR : UnaryRRE<"llh", 0xB995, zext16, GR32, GR32>; } // 64-bit extensions from registers. 
let neverHasSideEffects = 1 in { - def LLGCR : UnaryRRE<"llgcr", 0xB984, zext8, GR64, GR64>; - def LLGHR : UnaryRRE<"llghr", 0xB985, zext16, GR64, GR64>; - def LLGFR : UnaryRRE<"llgfr", 0xB916, zext32, GR64, GR32>; + def LLGCR : UnaryRRE<"llgc", 0xB984, zext8, GR64, GR64>; + def LLGHR : UnaryRRE<"llgh", 0xB985, zext16, GR64, GR64>; + def LLGFR : UnaryRRE<"llgf", 0xB916, zext32, GR64, GR32>; } // Match 32-to-64-bit zero extensions in which the source is already @@ -329,14 +438,14 @@ def : Pat<(and GR64:$src, 0xffffffff), (LLGFR (EXTRACT_SUBREG GR64:$src, subreg_32bit))>; // 32-bit extensions from memory. -def LLC : UnaryRXY<"llc", 0xE394, zextloadi8, GR32>; -def LLH : UnaryRXY<"llh", 0xE395, zextloadi16, GR32>; +def LLC : UnaryRXY<"llc", 0xE394, zextloadi8, GR32, 1>; +def LLH : UnaryRXY<"llh", 0xE395, zextloadi16, GR32, 2>; def LLHRL : UnaryRILPC<"llhrl", 0xC42, aligned_zextloadi16, GR32>; // 64-bit extensions from memory. -def LLGC : UnaryRXY<"llgc", 0xE390, zextloadi8, GR64>; -def LLGH : UnaryRXY<"llgh", 0xE391, zextloadi16, GR64>; -def LLGF : UnaryRXY<"llgf", 0xE316, zextloadi32, GR64>; +def LLGC : UnaryRXY<"llgc", 0xE390, zextloadi8, GR64, 1>; +def LLGH : UnaryRXY<"llgh", 0xE391, zextloadi16, GR64, 2>; +def LLGF : UnaryRXY<"llgf", 0xE316, zextloadi32, GR64, 4>; def LLGHRL : UnaryRILPC<"llghrl", 0xC46, aligned_zextloadi16, GR64>; def LLGFRL : UnaryRILPC<"llgfrl", 0xC4E, aligned_zextloadi32, GR64>; @@ -350,16 +459,16 @@ def : Pat<(i32 (trunc GR64:$src)), // Truncations of 32-bit registers to memory. let isCodeGenOnly = 1 in { - defm STC32 : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32>; - defm STH32 : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32>; + defm STC32 : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32, 1>; + defm STH32 : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32, 2>; def STHRL32 : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR32>; } // Truncations of 64-bit registers to memory. -defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR64>; -defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR64>; +defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR64, 1>; +defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR64, 2>; def STHRL : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR64>; -defm ST : StoreRXPair<"st", 0x50, 0xE350, truncstorei32, GR64>; +defm ST : StoreRXPair<"st", 0x50, 0xE350, truncstorei32, GR64, 4>; def STRL : StoreRILPC<"strl", 0xC4F, aligned_truncstorei32, GR64>; //===----------------------------------------------------------------------===// @@ -378,30 +487,32 @@ def STMG : StoreMultipleRSY<"stmg", 0xEB24, GR64>; // Byte-swapping register moves. let neverHasSideEffects = 1 in { - def LRVR : UnaryRRE<"lrvr", 0xB91F, bswap, GR32, GR32>; - def LRVGR : UnaryRRE<"lrvgr", 0xB90F, bswap, GR64, GR64>; + def LRVR : UnaryRRE<"lrv", 0xB91F, bswap, GR32, GR32>; + def LRVGR : UnaryRRE<"lrvg", 0xB90F, bswap, GR64, GR64>; } // Byte-swapping loads. Unlike normal loads, these instructions are // allowed to access storage more than once. -def LRV : UnaryRXY<"lrv", 0xE31E, loadu<bswap, nonvolatile_load>, GR32>; -def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu<bswap, nonvolatile_load>, GR64>; +def LRV : UnaryRXY<"lrv", 0xE31E, loadu<bswap, nonvolatile_load>, GR32, 4>; +def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu<bswap, nonvolatile_load>, GR64, 8>; // Likewise byte-swapping stores. 
-def STRV : StoreRXY<"strv", 0xE33E, storeu<bswap, nonvolatile_store>, GR32>; -def STRVG : StoreRXY<"strvg", 0xE32F, storeu<bswap, nonvolatile_store>, GR64>; +def STRV : StoreRXY<"strv", 0xE33E, storeu<bswap, nonvolatile_store>, GR32, 4>; +def STRVG : StoreRXY<"strvg", 0xE32F, storeu<bswap, nonvolatile_store>, + GR64, 8>; //===----------------------------------------------------------------------===// // Load address instructions //===----------------------------------------------------------------------===// // Load BDX-style addresses. -let neverHasSideEffects = 1, Function = "la" in { - let PairType = "12" in +let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isReMaterializable = 1, + DispKey = "la" in { + let DispSize = "12" in def LA : InstRX<0x41, (outs GR64:$R1), (ins laaddr12pair:$XBD2), "la\t$R1, $XBD2", [(set GR64:$R1, laaddr12pair:$XBD2)]>; - let PairType = "20" in + let DispSize = "20" in def LAY : InstRXY<0xE371, (outs GR64:$R1), (ins laaddr20pair:$XBD2), "lay\t$R1, $XBD2", [(set GR64:$R1, laaddr20pair:$XBD2)]>; @@ -409,7 +520,8 @@ let neverHasSideEffects = 1, Function = "la" in { // Load a PC-relative address. There's no version of this instruction // with a 16-bit offset, so there's no relaxation. -let neverHasSideEffects = 1 in { +let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1, + isReMaterializable = 1 in { def LARL : InstRIL<0xC00, (outs GR64:$R1), (ins pcrel32:$I2), "larl\t$R1, $I2", [(set GR64:$R1, pcrel32:$I2)]>; @@ -420,9 +532,12 @@ let neverHasSideEffects = 1 in { //===----------------------------------------------------------------------===// let Defs = [CC] in { - def LCR : UnaryRR <"lcr", 0x13, ineg, GR32, GR32>; - def LCGR : UnaryRRE<"lcgr", 0xB903, ineg, GR64, GR64>; - def LCGFR : UnaryRRE<"lcgfr", 0xB913, null_frag, GR64, GR32>; + let CCValues = 0xF, CompareZeroCCMask = 0x8 in { + def LCR : UnaryRR <"lc", 0x13, ineg, GR32, GR32>; + def LCGR : UnaryRRE<"lcg", 0xB903, ineg, GR64, GR64>; + } + let CCValues = 0xE, CompareZeroCCMask = 0xE in + def LCGFR : UnaryRRE<"lcgf", 0xB913, null_frag, GR64, GR32>; } defm : SXU<ineg, LCGFR>; @@ -431,8 +546,8 @@ defm : SXU<ineg, LCGFR>; //===----------------------------------------------------------------------===// let isCodeGenOnly = 1 in - defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, zextloadi8>; -defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, zextloadi8>; + defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, zextloadi8, 1>; +defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, zextloadi8, 1>; defm : InsertMem<"inserti8", IC32, GR32, zextloadi8, bdxaddr12pair>; defm : InsertMem<"inserti8", IC32Y, GR32, zextloadi8, bdxaddr20pair>; @@ -456,7 +571,8 @@ def IIHH : BinaryRI<"iihh", 0xA50, inserthh, GR64, imm64hh16>; // full-width move. (We use IILF rather than something like LLILF // for 32-bit moves because IILF leaves the upper 32 bits of the // GR64 unchanged.) -let isCodeGenOnly = 1 in { +let isCodeGenOnly = 1, isAsCheapAsAMove = 1, isMoveImm = 1, + isReMaterializable = 1 in { def IILF32 : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>; } def IILF : BinaryRIL<"iilf", 0xC09, insertlf, GR64, imm64lf32>; @@ -473,27 +589,27 @@ def : Pat<(or (zext32 GR32:$src), imm64hf32:$imm), //===----------------------------------------------------------------------===// // Plain addition. -let Defs = [CC] in { +let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in { // Addition of a register. 
let isCommutable = 1 in {
- def AR : BinaryRR <"ar", 0x1A, add, GR32, GR32>;
- def AGR : BinaryRRE<"agr", 0xB908, add, GR64, GR64>;
+ defm AR : BinaryRRAndK<"a", 0x1A, 0xB9F8, add, GR32, GR32>;
+ defm AGR : BinaryRREAndK<"ag", 0xB908, 0xB9E8, add, GR64, GR64>;
}
- def AGFR : BinaryRRE<"agfr", 0xB918, null_frag, GR64, GR32>;
+ def AGFR : BinaryRRE<"agf", 0xB918, null_frag, GR64, GR32>;

// Addition of signed 16-bit immediates.
- def AHI : BinaryRI<"ahi", 0xA7A, add, GR32, imm32sx16>;
- def AGHI : BinaryRI<"aghi", 0xA7B, add, GR64, imm64sx16>;
+ defm AHI : BinaryRIAndK<"ahi", 0xA7A, 0xECD8, add, GR32, imm32sx16>;
+ defm AGHI : BinaryRIAndK<"aghi", 0xA7B, 0xECD9, add, GR64, imm64sx16>;

// Addition of signed 32-bit immediates.
def AFI : BinaryRIL<"afi", 0xC29, add, GR32, simm32>;
def AGFI : BinaryRIL<"agfi", 0xC28, add, GR64, imm64sx32>;

// Addition of memory.
- defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, sextloadi16>;
- defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load>;
- def AGF : BinaryRXY<"agf", 0xE318, add, GR64, sextloadi32>;
- def AG : BinaryRXY<"ag", 0xE308, add, GR64, load>;
+ defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, sextloadi16, 2>;
+ defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load, 4>;
+ def AGF : BinaryRXY<"agf", 0xE318, add, GR64, sextloadi32, 4>;
+ def AG : BinaryRXY<"ag", 0xE308, add, GR64, load, 8>;

// Addition to memory.
def ASI : BinarySIY<"asi", 0xEB6A, add, imm32sx8>;
@@ -505,31 +621,37 @@ defm : SXB<add, GR64, AGFR>;
let Defs = [CC] in {
// Addition of a register.
let isCommutable = 1 in {
- def ALR : BinaryRR <"alr", 0x1E, addc, GR32, GR32>;
- def ALGR : BinaryRRE<"algr", 0xB90A, addc, GR64, GR64>;
+ defm ALR : BinaryRRAndK<"al", 0x1E, 0xB9FA, addc, GR32, GR32>;
+ defm ALGR : BinaryRREAndK<"alg", 0xB90A, 0xB9EA, addc, GR64, GR64>;
}
- def ALGFR : BinaryRRE<"algfr", 0xB91A, null_frag, GR64, GR32>;
+ def ALGFR : BinaryRRE<"algf", 0xB91A, null_frag, GR64, GR32>;
+
+ // Addition of signed 16-bit immediates.
+ def ALHSIK : BinaryRIE<"alhsik", 0xECDA, addc, GR32, imm32sx16>,
+ Requires<[FeatureDistinctOps]>;
+ def ALGHSIK : BinaryRIE<"alghsik", 0xECDB, addc, GR64, imm64sx16>,
+ Requires<[FeatureDistinctOps]>;

// Addition of unsigned 32-bit immediates.
def ALFI : BinaryRIL<"alfi", 0xC2B, addc, GR32, uimm32>;
def ALGFI : BinaryRIL<"algfi", 0xC2A, addc, GR64, imm64zx32>;

// Addition of memory.
- defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load>;
- def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, zextloadi32>;
- def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load>;
+ defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load, 4>;
+ def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, zextloadi32, 4>;
+ def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load, 8>;
}
defm : ZXB<addc, GR64, ALGFR>;

// Addition producing and using a carry.
let Defs = [CC], Uses = [CC] in {
// Addition of a register.
- def ALCR : BinaryRRE<"alcr", 0xB998, adde, GR32, GR32>;
- def ALCGR : BinaryRRE<"alcgr", 0xB988, adde, GR64, GR64>;
+ def ALCR : BinaryRRE<"alc", 0xB998, adde, GR32, GR32>;
+ def ALCGR : BinaryRRE<"alcg", 0xB988, adde, GR64, GR64>;

// Addition of memory.
- def ALC : BinaryRXY<"alc", 0xE398, adde, GR32, load>;
- def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load>;
+ def ALC : BinaryRXY<"alc", 0xE398, adde, GR32, load, 4>;
+ def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load, 8>;
}

//===----------------------------------------------------------------------===//
@@ -538,26 +660,26 @@ let Defs = [CC], Uses = [CC] in {
// Plain subtraction.
Although immediate forms exist, we use the // add-immediate instruction instead. -let Defs = [CC] in { +let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in { // Subtraction of a register. - def SR : BinaryRR <"sr", 0x1B, sub, GR32, GR32>; - def SGFR : BinaryRRE<"sgfr", 0xB919, null_frag, GR64, GR32>; - def SGR : BinaryRRE<"sgr", 0xB909, sub, GR64, GR64>; + defm SR : BinaryRRAndK<"s", 0x1B, 0xB9F9, sub, GR32, GR32>; + def SGFR : BinaryRRE<"sgf", 0xB919, null_frag, GR64, GR32>; + defm SGR : BinaryRREAndK<"sg", 0xB909, 0xB9E9, sub, GR64, GR64>; // Subtraction of memory. - defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, sextloadi16>; - defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load>; - def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, sextloadi32>; - def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load>; + defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, sextloadi16, 2>; + defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load, 4>; + def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, sextloadi32, 4>; + def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load, 8>; } defm : SXB<sub, GR64, SGFR>; // Subtraction producing a carry. let Defs = [CC] in { // Subtraction of a register. - def SLR : BinaryRR <"slr", 0x1F, subc, GR32, GR32>; - def SLGFR : BinaryRRE<"slgfr", 0xB91B, null_frag, GR64, GR32>; - def SLGR : BinaryRRE<"slgr", 0xB90B, subc, GR64, GR64>; + defm SLR : BinaryRRAndK<"sl", 0x1F, 0xB9FB, subc, GR32, GR32>; + def SLGFR : BinaryRRE<"slgf", 0xB91B, null_frag, GR64, GR32>; + defm SLGR : BinaryRREAndK<"slg", 0xB90B, 0xB9EB, subc, GR64, GR64>; // Subtraction of unsigned 32-bit immediates. These don't match // subc because we prefer addc for constants. @@ -565,21 +687,21 @@ let Defs = [CC] in { def SLGFI : BinaryRIL<"slgfi", 0xC24, null_frag, GR64, imm64zx32>; // Subtraction of memory. - defm SL : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load>; - def SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, zextloadi32>; - def SLG : BinaryRXY<"slg", 0xE30B, subc, GR64, load>; + defm SL : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load, 4>; + def SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, zextloadi32, 4>; + def SLG : BinaryRXY<"slg", 0xE30B, subc, GR64, load, 8>; } defm : ZXB<subc, GR64, SLGFR>; // Subtraction producing and using a carry. let Defs = [CC], Uses = [CC] in { // Subtraction of a register. - def SLBR : BinaryRRE<"slbr", 0xB999, sube, GR32, GR32>; - def SLGBR : BinaryRRE<"slbgr", 0xB989, sube, GR64, GR64>; + def SLBR : BinaryRRE<"slb", 0xB999, sube, GR32, GR32>; + def SLGBR : BinaryRRE<"slbg", 0xB989, sube, GR64, GR64>; // Subtraction of memory. - def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load>; - def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load>; + def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load, 4>; + def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load, 8>; } //===----------------------------------------------------------------------===// @@ -588,30 +710,37 @@ let Defs = [CC], Uses = [CC] in { let Defs = [CC] in { // ANDs of a register. - let isCommutable = 1 in { - def NR : BinaryRR <"nr", 0x14, and, GR32, GR32>; - def NGR : BinaryRRE<"ngr", 0xB980, and, GR64, GR64>; + let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in { + defm NR : BinaryRRAndK<"n", 0x14, 0xB9F4, and, GR32, GR32>; + defm NGR : BinaryRREAndK<"ng", 0xB980, 0xB9E4, and, GR64, GR64>; } - // ANDs of a 16-bit immediate, leaving other bits unaffected. 
- let isCodeGenOnly = 1 in { - def NILL32 : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>; - def NILH32 : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>; + let isConvertibleToThreeAddress = 1 in { + // ANDs of a 16-bit immediate, leaving other bits unaffected. + // The CC result only reflects the 16-bit field, not the full register. + let isCodeGenOnly = 1 in { + def NILL32 : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>; + def NILH32 : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>; + } + def NILL : BinaryRI<"nill", 0xA57, and, GR64, imm64ll16c>; + def NILH : BinaryRI<"nilh", 0xA56, and, GR64, imm64lh16c>; + def NIHL : BinaryRI<"nihl", 0xA55, and, GR64, imm64hl16c>; + def NIHH : BinaryRI<"nihh", 0xA54, and, GR64, imm64hh16c>; + + // ANDs of a 32-bit immediate, leaving other bits unaffected. + // The CC result only reflects the 32-bit field, which means we can + // use it as a zero indicator for i32 operations but not otherwise. + let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in + def NILF32 : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>; + def NILF : BinaryRIL<"nilf", 0xC0B, and, GR64, imm64lf32c>; + def NIHF : BinaryRIL<"nihf", 0xC0A, and, GR64, imm64hf32c>; } - def NILL : BinaryRI<"nill", 0xA57, and, GR64, imm64ll16c>; - def NILH : BinaryRI<"nilh", 0xA56, and, GR64, imm64lh16c>; - def NIHL : BinaryRI<"nihl", 0xA55, and, GR64, imm64hl16c>; - def NIHH : BinaryRI<"nihh", 0xA54, and, GR64, imm64hh16c>; - - // ANDs of a 32-bit immediate, leaving other bits unaffected. - let isCodeGenOnly = 1 in - def NILF32 : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>; - def NILF : BinaryRIL<"nilf", 0xC0B, and, GR64, imm64lf32c>; - def NIHF : BinaryRIL<"nihf", 0xC0A, and, GR64, imm64hf32c>; // ANDs of memory. - defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load>; - def NG : BinaryRXY<"ng", 0xE380, and, GR64, load>; + let CCValues = 0xC, CompareZeroCCMask = 0x8 in { + defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load, 4>; + def NG : BinaryRXY<"ng", 0xE380, and, GR64, load, 8>; + } // AND to memory defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, uimm8>; @@ -625,12 +754,13 @@ defm : RMWIByte<and, bdaddr20pair, NIY>; let Defs = [CC] in { // ORs of a register. - let isCommutable = 1 in { - def OR : BinaryRR <"or", 0x16, or, GR32, GR32>; - def OGR : BinaryRRE<"ogr", 0xB981, or, GR64, GR64>; + let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in { + defm OR : BinaryRRAndK<"o", 0x16, 0xB9F6, or, GR32, GR32>; + defm OGR : BinaryRREAndK<"og", 0xB981, 0xB9E6, or, GR64, GR64>; } // ORs of a 16-bit immediate, leaving other bits unaffected. + // The CC result only reflects the 16-bit field, not the full register. let isCodeGenOnly = 1 in { def OILL32 : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>; def OILH32 : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>; @@ -641,14 +771,18 @@ let Defs = [CC] in { def OIHH : BinaryRI<"oihh", 0xA58, or, GR64, imm64hh16>; // ORs of a 32-bit immediate, leaving other bits unaffected. - let isCodeGenOnly = 1 in + // The CC result only reflects the 32-bit field, which means we can + // use it as a zero indicator for i32 operations but not otherwise. + let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in def OILF32 : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>; def OILF : BinaryRIL<"oilf", 0xC0D, or, GR64, imm64lf32>; def OIHF : BinaryRIL<"oihf", 0xC0C, or, GR64, imm64hf32>; // ORs of memory. 
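// Editorial sketch, not part of the patch: the RX-form ORs below fold their
// memory operand into the operation, so something like
//
//   void f(unsigned *p, unsigned x) { *p |= x; }
//
// can compute the OR with a single O/OY that reads *p directly, followed by
// a plain store, rather than needing a separate load.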
- defm O : BinaryRXPair<"o", 0x56, 0xE356, or, GR32, load>; - def OG : BinaryRXY<"og", 0xE381, or, GR64, load>; + let CCValues = 0xC, CompareZeroCCMask = 0x8 in { + defm O : BinaryRXPair<"o", 0x56, 0xE356, or, GR32, load, 4>; + def OG : BinaryRXY<"og", 0xE381, or, GR64, load, 8>; + } // OR to memory defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, uimm8>; @@ -662,20 +796,24 @@ defm : RMWIByte<or, bdaddr20pair, OIY>; let Defs = [CC] in { // XORs of a register. - let isCommutable = 1 in { - def XR : BinaryRR <"xr", 0x17, xor, GR32, GR32>; - def XGR : BinaryRRE<"xgr", 0xB982, xor, GR64, GR64>; + let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in { + defm XR : BinaryRRAndK<"x", 0x17, 0xB9F7, xor, GR32, GR32>; + defm XGR : BinaryRREAndK<"xg", 0xB982, 0xB9E7, xor, GR64, GR64>; } // XORs of a 32-bit immediate, leaving other bits unaffected. - let isCodeGenOnly = 1 in + // The CC result only reflects the 32-bit field, which means we can + // use it as a zero indicator for i32 operations but not otherwise. + let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in def XILF32 : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>; def XILF : BinaryRIL<"xilf", 0xC07, xor, GR64, imm64lf32>; def XIHF : BinaryRIL<"xihf", 0xC06, xor, GR64, imm64hf32>; // XORs of memory. - defm X : BinaryRXPair<"x",0x57, 0xE357, xor, GR32, load>; - def XG : BinaryRXY<"xg", 0xE382, xor, GR64, load>; + let CCValues = 0xC, CompareZeroCCMask = 0x8 in { + defm X : BinaryRXPair<"x",0x57, 0xE357, xor, GR32, load, 4>; + def XG : BinaryRXY<"xg", 0xE382, xor, GR64, load, 8>; + } // XOR to memory defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, uimm8>; @@ -689,10 +827,10 @@ defm : RMWIByte<xor, bdaddr20pair, XIY>; // Multiplication of a register. let isCommutable = 1 in { - def MSR : BinaryRRE<"msr", 0xB252, mul, GR32, GR32>; - def MSGR : BinaryRRE<"msgr", 0xB90C, mul, GR64, GR64>; + def MSR : BinaryRRE<"ms", 0xB252, mul, GR32, GR32>; + def MSGR : BinaryRRE<"msg", 0xB90C, mul, GR64, GR64>; } -def MSGFR : BinaryRRE<"msgfr", 0xB91C, null_frag, GR64, GR32>; +def MSGFR : BinaryRRE<"msgf", 0xB91C, null_frag, GR64, GR32>; defm : SXB<mul, GR64, MSGFR>; // Multiplication of a signed 16-bit immediate. @@ -704,33 +842,32 @@ def MSFI : BinaryRIL<"msfi", 0xC21, mul, GR32, simm32>; def MSGFI : BinaryRIL<"msgfi", 0xC20, mul, GR64, imm64sx32>; // Multiplication of memory. -defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, sextloadi16>; -defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load>; -def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, sextloadi32>; -def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load>; +defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, sextloadi16, 2>; +defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load, 4>; +def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, sextloadi32, 4>; +def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load, 8>; // Multiplication of a register, producing two results. -def MLGR : BinaryRRE<"mlgr", 0xB986, z_umul_lohi64, GR128, GR64>; +def MLGR : BinaryRRE<"mlg", 0xB986, z_umul_lohi64, GR128, GR64>; // Multiplication of memory, producing two results. -def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load>; +def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>; //===----------------------------------------------------------------------===// // Division and remainder //===----------------------------------------------------------------------===// // Division and remainder, from registers. 
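// Editorial sketch, not part of the patch: GR128 here is an even/odd pair
// of 64-bit registers; the divide instructions leave the remainder in the
// even register and the quotient in the odd one, so
//
//   void qr(long a, long b, long *q, long *r) { *q = a / b; *r = a % b; }
//
// can be covered by one DSGR, with both results extracted as subregisters
// of the single 128-bit value produced by z_sdivrem64.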
-def DSGFR : BinaryRRE<"dsgfr", 0xB91D, null_frag, GR128, GR32>;
-def DSGR : BinaryRRE<"dsgr", 0xB90D, z_sdivrem64, GR128, GR64>;
-def DLR : BinaryRRE<"dlr", 0xB997, z_udivrem32, GR128, GR32>;
-def DLGR : BinaryRRE<"dlgr", 0xB987, z_udivrem64, GR128, GR64>;
-defm : SXB<z_sdivrem64, GR128, DSGFR>;
+def DSGFR : BinaryRRE<"dsgf", 0xB91D, z_sdivrem32, GR128, GR32>;
+def DSGR : BinaryRRE<"dsg", 0xB90D, z_sdivrem64, GR128, GR64>;
+def DLR : BinaryRRE<"dl", 0xB997, z_udivrem32, GR128, GR32>;
+def DLGR : BinaryRRE<"dlg", 0xB987, z_udivrem64, GR128, GR64>;

// Division and remainder, from memory.
-def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem64, GR128, sextloadi32>;
-def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load>;
-def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load>;
-def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load>;
+def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem32, GR128, load, 4>;
+def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load, 8>;
+def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load, 4>;
+def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;

//===----------------------------------------------------------------------===//
// Shifts
@@ -738,35 +875,55 @@ def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load>;

// Shift left.
let neverHasSideEffects = 1 in {
- def SLL : ShiftRS <"sll", 0x89, shl, GR32, shift12only>;
- def SLLG : ShiftRSY<"sllg", 0xEB0D, shl, GR64, shift20only>;
+ defm SLL : ShiftRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
+ def SLLG : ShiftRSY<"sllg", 0xEB0D, shl, GR64>;
}

// Logical shift right.
let neverHasSideEffects = 1 in {
- def SRL : ShiftRS <"srl", 0x88, srl, GR32, shift12only>;
- def SRLG : ShiftRSY<"srlg", 0xEB0C, srl, GR64, shift20only>;
+ defm SRL : ShiftRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
+ def SRLG : ShiftRSY<"srlg", 0xEB0C, srl, GR64>;
}

// Arithmetic shift right.
-let Defs = [CC] in {
- def SRA : ShiftRS <"sra", 0x8A, sra, GR32, shift12only>;
- def SRAG : ShiftRSY<"srag", 0xEB0A, sra, GR64, shift20only>;
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
+ defm SRA : ShiftRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>;
+ def SRAG : ShiftRSY<"srag", 0xEB0A, sra, GR64>;
}

// Rotate left.
let neverHasSideEffects = 1 in {
- def RLL : ShiftRSY<"rll", 0xEB1D, rotl, GR32, shift20only>;
- def RLLG : ShiftRSY<"rllg", 0xEB1C, rotl, GR64, shift20only>;
+ def RLL : ShiftRSY<"rll", 0xEB1D, rotl, GR32>;
+ def RLLG : ShiftRSY<"rllg", 0xEB1C, rotl, GR64>;
}

// Rotate second operand left and insert selected bits into first operand.
// These can act like 32-bit operands provided that the constant start and
-// end bits (operands 2 and 3) are in the range [32, 64)
+// end bits (operands 2 and 3) are in the range [32, 64).
let Defs = [CC] in {
  let isCodeGenOnly = 1 in
-    def RISBG32 : RotateSelectRIEf<"risbg", 0xEC55, GR32, GR32>;
-  def RISBG : RotateSelectRIEf<"risbg", 0xEC55, GR64, GR64>;
+    def RISBG32 : RotateSelectRIEf<"risbg", 0xEC55, GR32, GR32>;
+  let CCValues = 0xE, CompareZeroCCMask = 0xE in
+    def RISBG : RotateSelectRIEf<"risbg", 0xEC55, GR64, GR64>;
+}
+
+// Forms of RISBG that only affect one word of the destination register.
+// They do not set CC.
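// Editorial sketch, not part of the patch: RISBLG writes only bits 32-63 of
// the destination and RISBHG only bits 0-31, which is why both are gated on
// the z196 high-word facility (FeatureHighWord in SystemZProcessors.td).
// Because they leave CC untouched, a 32-bit rotate-and-mask such as
// (x >> 4) & 0xfff can sit between a comparison and the branch that
// consumes it.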
+let isCodeGenOnly = 1 in + def RISBLG32 : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR32>, + Requires<[FeatureHighWord]>; +def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GR64, GR64>, + Requires<[FeatureHighWord]>; +def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR64, GR64>, + Requires<[FeatureHighWord]>; + +// Rotate second operand left and perform a logical operation with selected +// bits of the first operand. The CC result only describes the selected bits, +// so isn't useful for a full comparison against zero. +let Defs = [CC] in { + def RNSBG : RotateSelectRIEf<"rnsbg", 0xEC54, GR64, GR64>; + def ROSBG : RotateSelectRIEf<"rosbg", 0xEC56, GR64, GR64>; + def RXSBG : RotateSelectRIEf<"rxsbg", 0xEC57, GR64, GR64>; } //===----------------------------------------------------------------------===// @@ -774,11 +931,11 @@ let Defs = [CC] in { //===----------------------------------------------------------------------===// // Signed comparisons. -let Defs = [CC] in { +let Defs = [CC], CCValues = 0xE in { // Comparison with a register. - def CR : CompareRR <"cr", 0x19, z_cmp, GR32, GR32>; - def CGFR : CompareRRE<"cgfr", 0xB930, null_frag, GR64, GR32>; - def CGR : CompareRRE<"cgr", 0xB920, z_cmp, GR64, GR64>; + def CR : CompareRR <"c", 0x19, z_cmp, GR32, GR32>; + def CGFR : CompareRRE<"cgf", 0xB930, null_frag, GR64, GR32>; + def CGR : CompareRRE<"cg", 0xB920, z_cmp, GR64, GR64>; // Comparison with a signed 16-bit immediate. def CHI : CompareRI<"chi", 0xA7E, z_cmp, GR32, imm32sx16>; @@ -789,11 +946,11 @@ let Defs = [CC] in { def CGFI : CompareRIL<"cgfi", 0xC2C, z_cmp, GR64, imm64sx32>; // Comparison with memory. - defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_cmp, GR32, sextloadi16>; - defm C : CompareRXPair<"c", 0x59, 0xE359, z_cmp, GR32, load>; - def CGH : CompareRXY<"cgh", 0xE334, z_cmp, GR64, sextloadi16>; - def CGF : CompareRXY<"cgf", 0xE330, z_cmp, GR64, sextloadi32>; - def CG : CompareRXY<"cg", 0xE320, z_cmp, GR64, load>; + defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_cmp, GR32, sextloadi16, 2>; + defm C : CompareRXPair<"c", 0x59, 0xE359, z_cmp, GR32, load, 4>; + def CGH : CompareRXY<"cgh", 0xE334, z_cmp, GR64, sextloadi16, 2>; + def CGF : CompareRXY<"cgf", 0xE330, z_cmp, GR64, sextloadi32, 4>; + def CG : CompareRXY<"cg", 0xE320, z_cmp, GR64, load, 8>; def CHRL : CompareRILPC<"chrl", 0xC65, z_cmp, GR32, aligned_sextloadi16>; def CRL : CompareRILPC<"crl", 0xC6D, z_cmp, GR32, aligned_load>; def CGHRL : CompareRILPC<"cghrl", 0xC64, z_cmp, GR64, aligned_sextloadi16>; @@ -808,20 +965,20 @@ let Defs = [CC] in { defm : SXB<z_cmp, GR64, CGFR>; // Unsigned comparisons. -let Defs = [CC] in { +let Defs = [CC], CCValues = 0xE, IsLogical = 1 in { // Comparison with a register. - def CLR : CompareRR <"clr", 0x15, z_ucmp, GR32, GR32>; - def CLGFR : CompareRRE<"clgfr", 0xB931, null_frag, GR64, GR32>; - def CLGR : CompareRRE<"clgr", 0xB921, z_ucmp, GR64, GR64>; + def CLR : CompareRR <"cl", 0x15, z_ucmp, GR32, GR32>; + def CLGFR : CompareRRE<"clgf", 0xB931, null_frag, GR64, GR32>; + def CLGR : CompareRRE<"clg", 0xB921, z_ucmp, GR64, GR64>; // Comparison with a signed 32-bit immediate. def CLFI : CompareRIL<"clfi", 0xC2F, z_ucmp, GR32, uimm32>; def CLGFI : CompareRIL<"clgfi", 0xC2E, z_ucmp, GR64, imm64zx32>; // Comparison with memory. 
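// Editorial sketch, not part of the patch: these compares fold an extending
// load into the comparison, e.g.
//
//   int lt(unsigned long x, const unsigned *p) { return x < *p; }
//
// zero-extends *p and compares it against x with a single CLGF.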
- defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load>; - def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, zextloadi32>; - def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load>; + defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load, 4>; + def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, zextloadi32, 4>; + def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load, 8>; def CLHRL : CompareRILPC<"clhrl", 0xC67, z_ucmp, GR32, aligned_zextloadi16>; def CLRL : CompareRILPC<"clrl", 0xC6F, z_ucmp, GR32, @@ -974,7 +1131,7 @@ def EAR : InstRRE<0xB24F, (outs GR32:$R1), (ins access_reg:$R2), // and the second giving a copy of the source with the leftmost one bit // cleared. We only use the first result here. let Defs = [CC] in { - def FLOGR : UnaryRRE<"flogr", 0xB983, null_frag, GR128, GR64>; + def FLOGR : UnaryRRE<"flog", 0xB983, null_frag, GR128, GR64>; } def : Pat<(ctlz GR64:$src), (EXTRACT_SUBREG (FLOGR GR64:$src), subreg_high)>; @@ -1015,3 +1172,15 @@ def : Pat<(add GR64:$src1, imm64zx32n:$src2), (SLGFI GR64:$src1, imm64zx32n:$src2)>; def : Pat<(sub GR64:$src1, (zextloadi32 bdxaddr20only:$addr)), (SLGF GR64:$src1, bdxaddr20only:$addr)>; + +// Optimize sign-extended 1/0 selects to -1/0 selects. This is important +// for vector legalization. +def : Pat<(sra (shl (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid, uimm8zx4:$cc)), + (i32 31)), + (i32 31)), + (Select32 (LHI -1), (LHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>; +def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid, + uimm8zx4:$cc)))), + (i32 63)), + (i32 63)), + (Select64 (LGHI -1), (LGHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>; diff --git a/lib/Target/SystemZ/SystemZLongBranch.cpp b/lib/Target/SystemZ/SystemZLongBranch.cpp index 2cb5823..114f74e 100644 --- a/lib/Target/SystemZ/SystemZLongBranch.cpp +++ b/lib/Target/SystemZ/SystemZLongBranch.cpp @@ -71,8 +71,6 @@ using namespace llvm; STATISTIC(LongBranches, "Number of long branches."); namespace { - typedef MachineBasicBlock::iterator Iter; - // Represents positional information about a basic block. struct MBBInfo { // The address that we currently assume the block has. @@ -150,6 +148,7 @@ namespace { bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address); bool mustRelaxABranch(); void setWorstCaseAddresses(); + void splitBranchOnCount(MachineInstr *MI, unsigned AddOpcode); void splitCompareBranch(MachineInstr *MI, unsigned CompareOpcode); void relaxBranch(TerminatorInfo &Terminator); void relaxBranches(); @@ -220,6 +219,11 @@ TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr *MI) { // Relaxes to BRCL, which is 2 bytes longer. Terminator.ExtraRelaxSize = 2; break; + case SystemZ::BRCT: + case SystemZ::BRCTG: + // Relaxes to A(G)HI and BRCL, which is 6 bytes longer. + Terminator.ExtraRelaxSize = 6; + break; case SystemZ::CRJ: // Relaxes to a CR/BRCL sequence, which is 2 bytes longer. Terminator.ExtraRelaxSize = 2; @@ -310,7 +314,7 @@ bool SystemZLongBranch::mustRelaxBranch(const TerminatorInfo &Terminator, // Return true if, under current assumptions, any terminator needs // to be relaxed. 
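// Editorial note, not part of the patch: a worked size check for the BRCT
// relaxation added above, using the standard SystemZ encoding lengths:
// BRCT is 4 bytes, while the split A(G)HI (4 bytes) plus BRCL (6 bytes)
// sequence is 10 bytes, hence ExtraRelaxSize = 6.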
bool SystemZLongBranch::mustRelaxABranch() {
- for (SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin(),
+ for (SmallVectorImpl<TerminatorInfo>::iterator TI = Terminators.begin(),
TE = Terminators.end(); TI != TE; ++TI)
if (mustRelaxBranch(*TI, TI->Address))
return true;
@@ -322,7 +326,7 @@ bool SystemZLongBranch::mustRelaxABranch() {
void SystemZLongBranch::setWorstCaseAddresses() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
BlockPosition Position(MF->getAlignment());
- for (SmallVector<MBBInfo, 16>::iterator BI = MBBs.begin(), BE = MBBs.end();
+ for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
BI != BE; ++BI) {
skipNonTerminators(Position, *BI);
for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
@@ -332,6 +336,25 @@ void SystemZLongBranch::setWorstCaseAddresses() {
}
}
+// Split BRANCH ON COUNT MI into the addition given by AddOpcode followed
+// by a BRCL on the result.
+void SystemZLongBranch::splitBranchOnCount(MachineInstr *MI,
+ unsigned AddOpcode) {
+ MachineBasicBlock *MBB = MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI(*MBB, MI, DL, TII->get(AddOpcode))
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(1))
+ .addImm(-1);
+ MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
+ .addImm(SystemZ::CCMASK_ICMP)
+ .addImm(SystemZ::CCMASK_CMP_NE)
+ .addOperand(MI->getOperand(2));
+ // The implicit use of CC is a killing use.
+ BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
+ MI->eraseFromParent();
+}
+
// Split MI into the comparison given by CompareOpcode followed by
// a BRCL on the result.
void SystemZLongBranch::splitCompareBranch(MachineInstr *MI,
@@ -342,10 +365,11 @@ void SystemZLongBranch::splitCompareBranch(MachineInstr *MI,
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1));
MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
+ .addImm(SystemZ::CCMASK_ICMP)
.addOperand(MI->getOperand(2))
.addOperand(MI->getOperand(3));
// The implicit use of CC is a killing use.
- BRCL->getOperand(2).setIsKill();
+ BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
MI->eraseFromParent();
}
@@ -359,6 +383,12 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
case SystemZ::BRC:
Branch->setDesc(TII->get(SystemZ::BRCL));
break;
+ case SystemZ::BRCT:
+ splitBranchOnCount(Branch, SystemZ::AHI);
+ break;
+ case SystemZ::BRCTG:
+ splitBranchOnCount(Branch, SystemZ::AGHI);
+ break;
case SystemZ::CRJ:
splitCompareBranch(Branch, SystemZ::CR);
break;
@@ -386,7 +416,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
void SystemZLongBranch::relaxBranches() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
BlockPosition Position(MF->getAlignment());
- for (SmallVector<MBBInfo, 16>::iterator BI = MBBs.begin(), BE = MBBs.end();
+ for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
BI != BE; ++BI) {
skipNonTerminators(Position, *BI);
for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
diff --git a/lib/Target/SystemZ/SystemZMCInstLower.cpp b/lib/Target/SystemZ/SystemZMCInstLower.cpp
index fd3f867..432a0d3 100644
--- a/lib/Target/SystemZ/SystemZMCInstLower.cpp
+++ b/lib/Target/SystemZ/SystemZMCInstLower.cpp
@@ -57,9 +57,6 @@ MCOperand SystemZMCInstLower::lowerOperand(const MachineOperand &MO) const {
llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
- // Ignore all implicit register operands.
- if (MO.isImplicit()) - return MCOperand(); return MCOperand::CreateReg(MO.getReg()); case MachineOperand::MO_Immediate: @@ -104,8 +101,8 @@ void SystemZMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { OutMI.setOpcode(Opcode); for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { const MachineOperand &MO = MI->getOperand(I); - MCOperand MCOp = lowerOperand(MO); - if (MCOp.isValid()) - OutMI.addOperand(MCOp); + // Ignore all implicit register operands. + if (!MO.isReg() || !MO.isImplicit()) + OutMI.addOperand(lowerOperand(MO)); } } diff --git a/lib/Target/SystemZ/SystemZMCInstLower.h b/lib/Target/SystemZ/SystemZMCInstLower.h index afa72f3..db5bdb0 100644 --- a/lib/Target/SystemZ/SystemZMCInstLower.h +++ b/lib/Target/SystemZ/SystemZMCInstLower.h @@ -35,7 +35,7 @@ public: // Lower MachineInstr MI to MCInst OutMI. void lower(const MachineInstr *MI, MCInst &OutMI) const; - // Return an MCOperand for MO. Return an empty operand if MO is implicit. + // Return an MCOperand for MO. MCOperand lowerOperand(const MachineOperand& MO) const; // Return an MCOperand for MO, given that it equals Symbol + Offset. diff --git a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h index 1dc05a7..69c2691 100644 --- a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h +++ b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h @@ -15,7 +15,6 @@ namespace llvm { class SystemZMachineFunctionInfo : public MachineFunctionInfo { - unsigned SavedGPRFrameSize; unsigned LowSavedGPR; unsigned HighSavedGPR; unsigned VarArgsFirstGPR; @@ -26,14 +25,8 @@ class SystemZMachineFunctionInfo : public MachineFunctionInfo { public: explicit SystemZMachineFunctionInfo(MachineFunction &MF) - : SavedGPRFrameSize(0), LowSavedGPR(0), HighSavedGPR(0), VarArgsFirstGPR(0), - VarArgsFirstFPR(0), VarArgsFrameIndex(0), RegSaveFrameIndex(0), - ManipulatesSP(false) {} - - // Get and set the number of bytes allocated by generic code to store - // call-saved GPRs. - unsigned getSavedGPRFrameSize() const { return SavedGPRFrameSize; } - void setSavedGPRFrameSize(unsigned bytes) { SavedGPRFrameSize = bytes; } + : LowSavedGPR(0), HighSavedGPR(0), VarArgsFirstGPR(0), VarArgsFirstFPR(0), + VarArgsFrameIndex(0), RegSaveFrameIndex(0), ManipulatesSP(false) {} // Get and set the first call-saved GPR that should be saved and restored // by this function. This is 0 if no GPRs need to be saved or restored. diff --git a/lib/Target/SystemZ/SystemZOperands.td b/lib/Target/SystemZ/SystemZOperands.td index 66d9c5f..9d79439 100644 --- a/lib/Target/SystemZ/SystemZOperands.td +++ b/lib/Target/SystemZ/SystemZOperands.td @@ -53,49 +53,63 @@ class PCRelAddress<ValueType vt, string self, AsmOperandClass asmop> // Constructs an AsmOperandClass for addressing mode FORMAT, treating the // registers as having BITSIZE bits and displacements as having DISPSIZE bits. -class AddressAsmOperand<string format, string bitsize, string dispsize> +// LENGTH is "LenN" for addresses with an N-bit length field, otherwise it +// is "". +class AddressAsmOperand<string format, string bitsize, string dispsize, + string length = ""> : AsmOperandClass { - let Name = format##bitsize##"Disp"##dispsize; + let Name = format##bitsize##"Disp"##dispsize##length; let ParserMethod = "parse"##format##bitsize; let RenderMethod = "add"##format##"Operands"; } // Constructs both a DAG pattern and instruction operand for an addressing mode. 
-// The mode is selected by custom code in select<TYPE><DISPSIZE><SUFFIX>(), -// encoded by custom code in get<FORMAT><DISPSIZE>Encoding() and decoded -// by custom code in decode<TYPE><BITSIZE>Disp<DISPSIZE>Operand(). -// The address registers have BITSIZE bits and displacements have -// DISPSIZE bits. NUMOPS is the number of operands that make up an -// address and OPERANDS lists the types of those operands using (ops ...). -// FORMAT is the type of addressing mode, which needs to match the names -// used in AddressAsmOperand. -class AddressingMode<string type, string bitsize, string dispsize, - string suffix, int numops, string format, dag operands> +// FORMAT, BITSIZE, DISPSIZE and LENGTH are the parameters to an associated +// AddressAsmOperand. OPERANDS is a list of NUMOPS individual operands +// (base register, displacement, etc.). SELTYPE is the type of the memory +// operand for selection purposes; sometimes we want different selection +// choices for the same underlying addressing mode. SUFFIX is similarly +// a suffix appended to the displacement for selection purposes; +// e.g. we want to reject small 20-bit displacements if a 12-bit form +// also exists, but we want to accept them otherwise. +class AddressingMode<string seltype, string bitsize, string dispsize, + string suffix, string length, int numops, string format, + dag operands> : ComplexPattern<!cast<ValueType>("i"##bitsize), numops, - "select"##type##dispsize##suffix, + "select"##seltype##dispsize##suffix##length, [add, sub, or, frameindex, z_adjdynalloc]>, Operand<!cast<ValueType>("i"##bitsize)> { let PrintMethod = "print"##format##"Operand"; - let EncoderMethod = "get"##format##dispsize##"Encoding"; - let DecoderMethod = "decode"##format##bitsize##"Disp"##dispsize##"Operand"; + let EncoderMethod = "get"##format##dispsize##length##"Encoding"; + let DecoderMethod = + "decode"##format##bitsize##"Disp"##dispsize##length##"Operand"; let MIOperandInfo = operands; let ParserMatchClass = - !cast<AddressAsmOperand>(format##bitsize##"Disp"##dispsize); + !cast<AddressAsmOperand>(format##bitsize##"Disp"##dispsize##length); } // An addressing mode with a base and displacement but no index. class BDMode<string type, string bitsize, string dispsize, string suffix> - : AddressingMode<type, bitsize, dispsize, suffix, 2, "BDAddr", + : AddressingMode<type, bitsize, dispsize, suffix, "", 2, "BDAddr", (ops !cast<RegisterOperand>("ADDR"##bitsize), !cast<Immediate>("disp"##dispsize##"imm"##bitsize))>; // An addressing mode with a base, displacement and index. class BDXMode<string type, string bitsize, string dispsize, string suffix> - : AddressingMode<type, bitsize, dispsize, suffix, 3, "BDXAddr", + : AddressingMode<type, bitsize, dispsize, suffix, "", 3, "BDXAddr", (ops !cast<RegisterOperand>("ADDR"##bitsize), !cast<Immediate>("disp"##dispsize##"imm"##bitsize), !cast<RegisterOperand>("ADDR"##bitsize))>; +// A BDMode paired with an immediate length operand of LENSIZE bits. 
+class BDLMode<string type, string bitsize, string dispsize, string suffix, + string lensize> + : AddressingMode<type, bitsize, dispsize, suffix, "Len"##lensize, 3, + "BDLAddr", + (ops !cast<RegisterOperand>("ADDR"##bitsize), + !cast<Immediate>("disp"##dispsize##"imm"##bitsize), + !cast<Immediate>("imm"##bitsize))>; + //===----------------------------------------------------------------------===// // Extracting immediate operands from nodes // These all create MVT::i64 nodes to ensure the value is not sign-extended @@ -205,6 +219,11 @@ def uimm8 : Immediate<i8, [{}], UIMM8, "U8Imm">; // i32 immediates //===----------------------------------------------------------------------===// +// Immediates for 8-bit lengths. +def imm32len8 : Immediate<i32, [{ + return isUInt<8>(N->getZExtValue() - 1); +}], NOOP_SDNodeXForm, "U32Imm">; + // Immediates for the lower and upper 16 bits of an i32, with the other // bits of the i32 being zero. def imm32ll16 : Immediate<i32, [{ @@ -402,15 +421,16 @@ def disp12imm64 : Operand<i64>; def disp20imm32 : Operand<i32>; def disp20imm64 : Operand<i64>; -def BDAddr32Disp12 : AddressAsmOperand<"BDAddr", "32", "12">; -def BDAddr32Disp20 : AddressAsmOperand<"BDAddr", "32", "20">; -def BDAddr64Disp12 : AddressAsmOperand<"BDAddr", "64", "12">; -def BDAddr64Disp20 : AddressAsmOperand<"BDAddr", "64", "20">; -def BDXAddr64Disp12 : AddressAsmOperand<"BDXAddr", "64", "12">; -def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">; +def BDAddr32Disp12 : AddressAsmOperand<"BDAddr", "32", "12">; +def BDAddr32Disp20 : AddressAsmOperand<"BDAddr", "32", "20">; +def BDAddr64Disp12 : AddressAsmOperand<"BDAddr", "64", "12">; +def BDAddr64Disp20 : AddressAsmOperand<"BDAddr", "64", "20">; +def BDXAddr64Disp12 : AddressAsmOperand<"BDXAddr", "64", "12">; +def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">; +def BDLAddr64Disp12Len8 : AddressAsmOperand<"BDLAddr", "64", "12", "Len8">; // DAG patterns and operands for addressing modes. 
Each mode has -// the form <type><range><group> where: +// the form <type><range><group>[<len>] where: // // <type> is one of: // shift : base + displacement (32-bit) @@ -418,6 +438,7 @@ def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">; // bdxaddr : base + displacement + index // laaddr : like bdxaddr, but used for Load Address operations // dynalloc : base + displacement + index + ADJDYNALLOC +// bdladdr : base + displacement with a length field // // <range> is one of: // 12 : the displacement is an unsigned 12-bit value @@ -428,20 +449,26 @@ def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">; // range value (12 or 20) // only : used when there is no equivalent instruction with the opposite // range value -def shift12only : BDMode <"BDAddr", "32", "12", "Only">; -def shift20only : BDMode <"BDAddr", "32", "20", "Only">; -def bdaddr12only : BDMode <"BDAddr", "64", "12", "Only">; -def bdaddr12pair : BDMode <"BDAddr", "64", "12", "Pair">; -def bdaddr20only : BDMode <"BDAddr", "64", "20", "Only">; -def bdaddr20pair : BDMode <"BDAddr", "64", "20", "Pair">; -def bdxaddr12only : BDXMode<"BDXAddr", "64", "12", "Only">; -def bdxaddr12pair : BDXMode<"BDXAddr", "64", "12", "Pair">; -def bdxaddr20only : BDXMode<"BDXAddr", "64", "20", "Only">; -def bdxaddr20only128 : BDXMode<"BDXAddr", "64", "20", "Only128">; -def bdxaddr20pair : BDXMode<"BDXAddr", "64", "20", "Pair">; -def dynalloc12only : BDXMode<"DynAlloc", "64", "12", "Only">; -def laaddr12pair : BDXMode<"LAAddr", "64", "12", "Pair">; -def laaddr20pair : BDXMode<"LAAddr", "64", "20", "Pair">; +// +// <len> is one of: +// +// <empty> : there is no length field +// len8 : the length field is 8 bits, with a range of [1, 0x100]. +def shift12only : BDMode <"BDAddr", "32", "12", "Only">; +def shift20only : BDMode <"BDAddr", "32", "20", "Only">; +def bdaddr12only : BDMode <"BDAddr", "64", "12", "Only">; +def bdaddr12pair : BDMode <"BDAddr", "64", "12", "Pair">; +def bdaddr20only : BDMode <"BDAddr", "64", "20", "Only">; +def bdaddr20pair : BDMode <"BDAddr", "64", "20", "Pair">; +def bdxaddr12only : BDXMode<"BDXAddr", "64", "12", "Only">; +def bdxaddr12pair : BDXMode<"BDXAddr", "64", "12", "Pair">; +def bdxaddr20only : BDXMode<"BDXAddr", "64", "20", "Only">; +def bdxaddr20only128 : BDXMode<"BDXAddr", "64", "20", "Only128">; +def bdxaddr20pair : BDXMode<"BDXAddr", "64", "20", "Pair">; +def dynalloc12only : BDXMode<"DynAlloc", "64", "12", "Only">; +def laaddr12pair : BDXMode<"LAAddr", "64", "12", "Pair">; +def laaddr20pair : BDXMode<"LAAddr", "64", "20", "Pair">; +def bdladdr12onlylen8 : BDLMode<"BDLAddr", "64", "12", "Only", "8">; //===----------------------------------------------------------------------===// // Miscellaneous diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td index ab01b25..6a3af2b 100644 --- a/lib/Target/SystemZ/SystemZOperators.td +++ b/lib/Target/SystemZ/SystemZOperators.td @@ -15,13 +15,15 @@ def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i64>, SDTCisVT<1, i64>]>; def SDT_ZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; def SDT_ZCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>; -def SDT_ZBRCCMask : SDTypeProfile<0, 2, +def SDT_ZBRCCMask : SDTypeProfile<0, 3, [SDTCisVT<0, i8>, - SDTCisVT<1, OtherVT>]>; -def SDT_ZSelectCCMask : SDTypeProfile<1, 3, + SDTCisVT<1, i8>, + SDTCisVT<2, OtherVT>]>; +def SDT_ZSelectCCMask : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, - SDTCisVT<3, i8>]>; + SDTCisVT<3, i8>, + SDTCisVT<4, i8>]>; def SDT_ZWrapPtr : SDTypeProfile<1, 
1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>; @@ -52,6 +54,10 @@ def SDT_ZAtomicCmpSwapW : SDTypeProfile<1, 6, SDTCisVT<4, i32>, SDTCisVT<5, i32>, SDTCisVT<6, i32>]>; +def SDT_ZCopy : SDTypeProfile<0, 3, + [SDTCisPtrTy<0>, + SDTCisPtrTy<1>, + SDTCisVT<2, i32>]>; //===----------------------------------------------------------------------===// // Node definitions @@ -81,6 +87,7 @@ def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>; def z_extract_access : SDNode<"SystemZISD::EXTRACT_ACCESS", SDT_ZExtractAccess>; def z_umul_lohi64 : SDNode<"SystemZISD::UMUL_LOHI64", SDT_ZGR128Binary64>; +def z_sdivrem32 : SDNode<"SystemZISD::SDIVREM32", SDT_ZGR128Binary32>; def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>; def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>; def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>; @@ -102,6 +109,9 @@ def z_atomic_loadw_umin : AtomicWOp<"ATOMIC_LOADW_UMIN">; def z_atomic_loadw_umax : AtomicWOp<"ATOMIC_LOADW_UMAX">; def z_atomic_cmp_swapw : AtomicWOp<"ATOMIC_CMP_SWAPW", SDT_ZAtomicCmpSwapW>; +def z_mvc : SDNode<"SystemZISD::MVC", SDT_ZCopy, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + //===----------------------------------------------------------------------===// // Pattern fragments //===----------------------------------------------------------------------===// @@ -120,6 +130,20 @@ def zext32 : PatFrag<(ops node:$src), (zext (i32 node:$src))>; def loadf32 : PatFrag<(ops node:$src), (f32 (load node:$src))>; def loadf64 : PatFrag<(ops node:$src), (f64 (load node:$src))>; +// Extending loads in which the extension type doesn't matter. +def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD; +}]>; +def anyextloadi8 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def anyextloadi16 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + // Aligned loads. class AlignedLoad<SDPatternOperator load> : PatFrag<(ops node:$addr), (load node:$addr), [{ @@ -149,7 +173,10 @@ class NonvolatileLoad<SDPatternOperator load> LoadSDNode *Load = cast<LoadSDNode>(N); return !Load->isVolatile(); }]>; -def nonvolatile_load : NonvolatileLoad<load>; +def nonvolatile_load : NonvolatileLoad<load>; +def nonvolatile_anyextloadi8 : NonvolatileLoad<anyextloadi8>; +def nonvolatile_anyextloadi16 : NonvolatileLoad<anyextloadi16>; +def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>; // Non-volatile stores. class NonvolatileStore<SDPatternOperator store> @@ -157,7 +184,10 @@ class NonvolatileStore<SDPatternOperator store> StoreSDNode *Store = cast<StoreSDNode>(N); return !Store->isVolatile(); }]>; -def nonvolatile_store : NonvolatileStore<store>; +def nonvolatile_store : NonvolatileStore<store>; +def nonvolatile_truncstorei8 : NonvolatileStore<truncstorei8>; +def nonvolatile_truncstorei16 : NonvolatileStore<truncstorei16>; +def nonvolatile_truncstorei32 : NonvolatileStore<truncstorei32>; // Insertions. 
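// Editorial sketch, not part of the patch: the anyextload* fragments above
// match a load whose extension kind is irrelevant, so a byte
// read-modify-write such as
//
//   void setbit(unsigned char *p) { *p |= 1; }
//
// is matched whether the loaded byte reaches the OR as a zext, sext or
// anyext node; RMWIByte in SystemZPatterns.td then needs only the
// anyextloadi8 form instead of three separate variants.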
def inserti8 : PatFrag<(ops node:$src1, node:$src2), diff --git a/lib/Target/SystemZ/SystemZPatterns.td b/lib/Target/SystemZ/SystemZPatterns.td index 3689f74..c442ae0 100644 --- a/lib/Target/SystemZ/SystemZPatterns.td +++ b/lib/Target/SystemZ/SystemZPatterns.td @@ -50,12 +50,8 @@ class RMWI<SDPatternOperator load, SDPatternOperator operator, // memory location. IMM is the type of the second operand. multiclass RMWIByte<SDPatternOperator operator, AddressingMode mode, Instruction insn> { - def : RMWI<zextloadi8, operator, truncstorei8, mode, imm32, insn>; - def : RMWI<zextloadi8, operator, truncstorei8, mode, imm64, insn>; - def : RMWI<sextloadi8, operator, truncstorei8, mode, imm32, insn>; - def : RMWI<sextloadi8, operator, truncstorei8, mode, imm64, insn>; - def : RMWI<extloadi8, operator, truncstorei8, mode, imm32, insn>; - def : RMWI<extloadi8, operator, truncstorei8, mode, imm64, insn>; + def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm32, insn>; + def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm64, insn>; } // Record that INSN performs insertion TYPE into a register of class CLS. @@ -69,3 +65,23 @@ multiclass InsertMem<string type, Instruction insn, RegisterOperand cls, (load mode:$src2), cls:$src1), (insn cls:$src1, mode:$src2)>; } + +// Use MVC instruction INSN for a load of type LOAD followed by a store +// of type STORE. VT is the type of the intermediate register and LENGTH +// is the number of bytes to copy (which may be smaller than VT). +multiclass MVCLoadStore<SDPatternOperator load, SDPatternOperator store, + ValueType vt, Instruction insn, bits<5> length> { + def Pat : PatFrag<(ops node:$dest, node:$src), + (store (vt (load node:$src)), node:$dest), + [{ return storeLoadCanUseMVC(N); }]>; + + def : Pat<(!cast<SDPatternOperator>(NAME##"Pat") bdaddr12only:$dest, + bdaddr12only:$src), + (insn bdaddr12only:$dest, bdaddr12only:$src, length)>; +} + +// Record that INSN is a LOAD AND TEST that can be used to compare +// registers in CLS against zero. The instruction has separate R1 and R2 +// operands, but they must be the same when the instruction is used like this. +class CompareZeroFP<Instruction insn, RegisterOperand cls> + : Pat<(z_cmp cls:$reg, (fpimm0)), (insn cls:$reg, cls:$reg)>; diff --git a/lib/Target/SystemZ/SystemZProcessors.td b/lib/Target/SystemZ/SystemZProcessors.td new file mode 100644 index 0000000..7e14aa7 --- /dev/null +++ b/lib/Target/SystemZ/SystemZProcessors.td @@ -0,0 +1,38 @@ +//===-- SystemZ.td - SystemZ processors and features ---------*- tblgen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Processor and feature definitions. 
+// +//===----------------------------------------------------------------------===// + +class SystemZFeature<string extname, string intname, string desc> + : Predicate<"Subtarget.has"##intname##"()">, + AssemblerPredicate<"Feature"##intname, extname>, + SubtargetFeature<extname, "Has"##intname, "true", desc>; + +def FeatureDistinctOps : SystemZFeature< + "distinct-ops", "DistinctOps", + "Assume that the distinct-operands facility is installed" +>; + +def FeatureLoadStoreOnCond : SystemZFeature< + "load-store-on-cond", "LoadStoreOnCond", + "Assume that the load/store-on-condition facility is installed" +>; + +def FeatureHighWord : SystemZFeature< + "high-word", "HighWord", + "Assume that the high-word facility is installed" +>; + +def : Processor<"z10", NoItineraries, []>; +def : Processor<"z196", NoItineraries, + [FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord]>; +def : Processor<"zEC12", NoItineraries, + [FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord]>; diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp index c695bb3..8ce6d6a 100644 --- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp +++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp @@ -53,32 +53,6 @@ SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const { return Reserved; } -bool -SystemZRegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB, - MachineBasicBlock::iterator SaveMBBI, - MachineBasicBlock::iterator &UseMBBI, - const TargetRegisterClass *RC, - unsigned Reg) const { - MachineFunction &MF = *MBB.getParent(); - const SystemZInstrInfo &TII = - *static_cast<const SystemZInstrInfo*>(TM.getInstrInfo()); - const SystemZFrameLowering *TFI = - static_cast<const SystemZFrameLowering *>(TM.getFrameLowering()); - unsigned Base = getFrameRegister(MF); - uint64_t Offset = TFI->getEmergencySpillSlotOffset(MF); - DebugLoc DL; - - unsigned LoadOpcode, StoreOpcode; - TII.getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode); - - // The offset must always be in range of a 12-bit unsigned displacement. 
- BuildMI(MBB, SaveMBBI, DL, TII.get(StoreOpcode)) - .addReg(Reg, RegState::Kill).addReg(Base).addImm(Offset).addReg(0); - BuildMI(MBB, UseMBBI, DL, TII.get(LoadOpcode), Reg) - .addReg(Base).addImm(Offset).addReg(0); - return true; -} - void SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.h b/lib/Target/SystemZ/SystemZRegisterInfo.h index 047cb4a..c447e4d 100644 --- a/lib/Target/SystemZ/SystemZRegisterInfo.h +++ b/lib/Target/SystemZ/SystemZRegisterInfo.h @@ -52,11 +52,6 @@ public: const LLVM_OVERRIDE; virtual BitVector getReservedRegs(const MachineFunction &MF) const LLVM_OVERRIDE; - virtual bool saveScavengerRegister(MachineBasicBlock &MBB, - MachineBasicBlock::iterator SaveMBBI, - MachineBasicBlock::iterator &UseMBBI, - const TargetRegisterClass *RC, - unsigned Reg) const LLVM_OVERRIDE; virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS) const LLVM_OVERRIDE; diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.td b/lib/Target/SystemZ/SystemZRegisterInfo.td index d65553e..ffffe72 100644 --- a/lib/Target/SystemZ/SystemZRegisterInfo.td +++ b/lib/Target/SystemZ/SystemZRegisterInfo.td @@ -147,5 +147,7 @@ defm FP128 : SystemZRegClass<"FP128", f128, 128, (add F0Q, F1Q, F4Q, F5Q, // Other registers //===----------------------------------------------------------------------===// -// The 2-bit condition code field of the PSW. +// The 2-bit condition code field of the PSW. Every register named in an +// inline asm needs a class associated with it. def CC : SystemZReg<"cc">; +def CCRegs : RegisterClass<"SystemZ", [i32], 32, (add CC)>; diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp new file mode 100644 index 0000000..4ca9292 --- /dev/null +++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp @@ -0,0 +1,127 @@ +//===-- SystemZSelectionDAGInfo.cpp - SystemZ SelectionDAG Info -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the SystemZSelectionDAGInfo class. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "systemz-selectiondag-info" +#include "SystemZTargetMachine.h" +#include "llvm/CodeGen/SelectionDAG.h" + +using namespace llvm; + +SystemZSelectionDAGInfo:: +SystemZSelectionDAGInfo(const SystemZTargetMachine &TM) + : TargetSelectionDAGInfo(TM) { +} + +SystemZSelectionDAGInfo::~SystemZSelectionDAGInfo() { +} + +SDValue SystemZSelectionDAGInfo:: +EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain, + SDValue Dst, SDValue Src, SDValue Size, unsigned Align, + bool IsVolatile, bool AlwaysInline, + MachinePointerInfo DstPtrInfo, + MachinePointerInfo SrcPtrInfo) const { + if (IsVolatile) + return SDValue(); + + if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) { + uint64_t Bytes = CSize->getZExtValue(); + if (Bytes >= 1 && Bytes <= 0x100) { + // A single MVC. + return DAG.getNode(SystemZISD::MVC, DL, MVT::Other, + Chain, Dst, Src, Size); + } + } + return SDValue(); +} + +// Handle a memset of 1, 2, 4 or 8 bytes with the operands given by +// Chain, Dst, ByteVal and Size. These cases are expected to use +// MVI, MVHHI, MVHI and MVGHI respectively. 
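// Editorial note, not part of the patch: a worked instance of the helper
// below. memset(p, 0xab, 2) arrives with Size == 2, builds
// StoreVal = 0xabab and emits one i16 store, which the MVHHI pattern can
// then match; memset(p, 0, 8) likewise becomes a single i64 store of zero
// for MVGHI.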
+static SDValue memsetStore(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, uint64_t ByteVal, uint64_t Size,
+ unsigned Align,
+ MachinePointerInfo DstPtrInfo) {
+ uint64_t StoreVal = ByteVal;
+ for (unsigned I = 1; I < Size; ++I)
+ StoreVal |= ByteVal << (I * 8);
+ return DAG.getStore(Chain, DL,
+ DAG.getConstant(StoreVal, MVT::getIntegerVT(Size * 8)),
+ Dst, DstPtrInfo, false, false, Align);
+}
+
+SDValue SystemZSelectionDAGInfo::
+EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, SDValue Byte, SDValue Size,
+ unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const {
+ EVT DstVT = Dst.getValueType();
+
+ if (IsVolatile)
+ return SDValue();
+
+ if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ uint64_t Bytes = CSize->getZExtValue();
+ if (Bytes == 0)
+ return SDValue();
+ if (ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte)) {
+ // Handle cases that can be done using at most two of
+ // MVI, MVHHI, MVHI and MVGHI. The latter two can only be
+ // used if ByteVal is all zeros or all ones; in other cases,
+ // we can move at most 2 halfwords.
+ uint64_t ByteVal = CByte->getZExtValue();
+ if (ByteVal == 0 || ByteVal == 255 ?
+ Bytes <= 16 && CountPopulation_64(Bytes) <= 2 :
+ Bytes <= 4) {
+ unsigned Size1 = Bytes == 16 ? 8 : 1 << findLastSet(Bytes);
+ unsigned Size2 = Bytes - Size1;
+ SDValue Chain1 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size1,
+ Align, DstPtrInfo);
+ if (Size2 == 0)
+ return Chain1;
+ Dst = DAG.getNode(ISD::ADD, DL, DstVT, Dst,
+ DAG.getConstant(Size1, DstVT));
+ DstPtrInfo = DstPtrInfo.getWithOffset(Size1);
+ SDValue Chain2 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size2,
+ std::min(Align, Size1), DstPtrInfo);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
+ }
+ } else {
+ // Handle one and two bytes using STC.
+ if (Bytes <= 2) {
+ SDValue Chain1 = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
+ false, false, Align);
+ if (Bytes == 1)
+ return Chain1;
+ SDValue Dst2 = DAG.getNode(ISD::ADD, DL, DstVT, Dst,
+ DAG.getConstant(1, DstVT));
+ SDValue Chain2 = DAG.getStore(Chain, DL, Byte, Dst2,
+ DstPtrInfo.getWithOffset(1),
+ false, false, 1);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
+ }
+ }
+ assert(Bytes >= 2 && "Should have dealt with 0- and 1-byte cases already");
+ if (Bytes <= 0x101) {
+ // Copy the byte to the first location and then use MVC to copy
+ // it to the rest.
+ Chain = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
+ false, false, Align);
+ SDValue Dst2 = DAG.getNode(ISD::ADD, DL, DstVT, Dst,
+ DAG.getConstant(1, DstVT));
+ return DAG.getNode(SystemZISD::MVC, DL, MVT::Other, Chain, Dst2, Dst,
+ DAG.getConstant(Bytes - 1, MVT::i32));
+ }
+ }
+ return SDValue();
+}
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
new file mode 100644
index 0000000..9138a9c
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -0,0 +1,46 @@
+//===-- SystemZSelectionDAGInfo.h - SystemZ SelectionDAG Info ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SystemZ subclass for TargetSelectionDAGInfo.
+// +//===----------------------------------------------------------------------===// + +#ifndef SYSTEMZSELECTIONDAGINFO_H +#define SYSTEMZSELECTIONDAGINFO_H + +#include "llvm/Target/TargetSelectionDAGInfo.h" + +namespace llvm { + +class SystemZTargetMachine; + +class SystemZSelectionDAGInfo : public TargetSelectionDAGInfo { +public: + explicit SystemZSelectionDAGInfo(const SystemZTargetMachine &TM); + ~SystemZSelectionDAGInfo(); + + virtual + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain, + SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, + bool IsVolatile, bool AlwaysInline, + MachinePointerInfo DstPtrInfo, + MachinePointerInfo SrcPtrInfo) const + LLVM_OVERRIDE; + + virtual SDValue + EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, + SDValue Chain, SDValue Dst, SDValue Byte, + SDValue Size, unsigned Align, bool IsVolatile, + MachinePointerInfo DstPtrInfo) const; +}; + +} + +#endif diff --git a/lib/Target/SystemZ/SystemZSubtarget.cpp b/lib/Target/SystemZ/SystemZSubtarget.cpp index cfd3324..036ec05 100644 --- a/lib/Target/SystemZ/SystemZSubtarget.cpp +++ b/lib/Target/SystemZ/SystemZSubtarget.cpp @@ -9,6 +9,7 @@ #include "SystemZSubtarget.h" #include "llvm/IR/GlobalValue.h" +#include "MCTargetDesc/SystemZMCTargetDesc.h" #define GET_SUBTARGETINFO_TARGET_DESC #define GET_SUBTARGETINFO_CTOR @@ -19,7 +20,8 @@ using namespace llvm; SystemZSubtarget::SystemZSubtarget(const std::string &TT, const std::string &CPU, const std::string &FS) - : SystemZGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT) { + : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false), + HasLoadStoreOnCond(false), HasHighWord(false), TargetTriple(TT) { std::string CPUName = CPU; if (CPUName.empty()) CPUName = "z10"; diff --git a/lib/Target/SystemZ/SystemZSubtarget.h b/lib/Target/SystemZ/SystemZSubtarget.h index 8d4d450..4efb58d 100644 --- a/lib/Target/SystemZ/SystemZSubtarget.h +++ b/lib/Target/SystemZ/SystemZSubtarget.h @@ -26,6 +26,11 @@ class GlobalValue; class StringRef; class SystemZSubtarget : public SystemZGenSubtargetInfo { +protected: + bool HasDistinctOps; + bool HasLoadStoreOnCond; + bool HasHighWord; + private: Triple TargetTriple; @@ -36,6 +41,15 @@ public: // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef FS); + // Return true if the target has the distinct-operands facility. + bool hasDistinctOps() const { return HasDistinctOps; } + + // Return true if the target has the load/store-on-condition facility. + bool hasLoadStoreOnCond() const { return HasLoadStoreOnCond; } + + // Return true if the target has the high-word facility. + bool hasHighWord() const { return HasHighWord; } + // Return true if GV can be accessed using LARL for reloc model RM // and code model CM. 
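// Editorial note, not part of the patch: PC32DBL is a 32-bit signed offset
// counted in halfwords, i.e. the encoded immediate is doubled, so LARL can
// reach roughly +/-4GB but only targets on even byte boundaries; that is
// why the answer depends on the reloc and code models.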
bool isPC32DBLSymbol(const GlobalValue *GV, Reloc::Model RM, diff --git a/lib/Target/SystemZ/SystemZTargetMachine.cpp b/lib/Target/SystemZ/SystemZTargetMachine.cpp index 6e7540c..856183c 100644 --- a/lib/Target/SystemZ/SystemZTargetMachine.cpp +++ b/lib/Target/SystemZ/SystemZTargetMachine.cpp @@ -48,6 +48,7 @@ public: } virtual bool addInstSelector() LLVM_OVERRIDE; + virtual bool addPreSched2() LLVM_OVERRIDE; virtual bool addPreEmitPass() LLVM_OVERRIDE; }; } // end anonymous namespace @@ -57,7 +58,38 @@ bool SystemZPassConfig::addInstSelector() { return false; } +bool SystemZPassConfig::addPreSched2() { + if (getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond()) + addPass(&IfConverterID); + return true; +} + bool SystemZPassConfig::addPreEmitPass() { + // We eliminate comparisons here rather than earlier because some + // transformations can change the set of available CC values and we + // generally want those transformations to have priority. This is + // especially true in the commonest case where the result of the comparison + // is used by a single in-range branch instruction, since we will then + // be able to fuse the compare and the branch instead. + // + // For example, two-address NILF can sometimes be converted into + // three-address RISBLG. NILF produces a CC value that indicates whether + // the low word is zero, but RISBLG does not modify CC at all. On the + // other hand, 64-bit ANDs like NILL can sometimes be converted to RISBG. + // The CC value produced by NILL isn't useful for our purposes, but the + // value produced by RISBG can be used for any comparison with zero + // (not just equality). So there are some transformations that lose + // CC values (while still being worthwhile) and others that happen to make + // the CC result more useful than it was originally. + // + // Another reason is that we only want to use BRANCH ON COUNT in cases + // where we know that the count register is not going to be spilled. + // + // Doing it so late makes it more likely that a register will be reused + // between the comparison and the branch, but it isn't clear whether + // preventing that would be a win or not. 
+ if (getOptLevel() != CodeGenOpt::None) + addPass(createSystemZElimComparePass(getSystemZTargetMachine())); addPass(createSystemZLongBranchPass(getSystemZTargetMachine())); return true; } diff --git a/lib/Target/SystemZ/SystemZTargetMachine.h b/lib/Target/SystemZ/SystemZTargetMachine.h index 98614e7..a99a98e 100644 --- a/lib/Target/SystemZ/SystemZTargetMachine.h +++ b/lib/Target/SystemZ/SystemZTargetMachine.h @@ -20,10 +20,10 @@ #include "SystemZInstrInfo.h" #include "SystemZRegisterInfo.h" #include "SystemZSubtarget.h" +#include "SystemZSelectionDAGInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetSelectionDAGInfo.h" namespace llvm { @@ -32,7 +32,7 @@ class SystemZTargetMachine : public LLVMTargetMachine { const DataLayout DL; SystemZInstrInfo InstrInfo; SystemZTargetLowering TLInfo; - TargetSelectionDAGInfo TSInfo; + SystemZSelectionDAGInfo TSInfo; SystemZFrameLowering FrameLowering; public: diff --git a/lib/Target/TargetLibraryInfo.cpp b/lib/Target/TargetLibraryInfo.cpp index d2967d9..8696b57 100644 --- a/lib/Target/TargetLibraryInfo.cpp +++ b/lib/Target/TargetLibraryInfo.cpp @@ -27,7 +27,9 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "_IO_getc", "_IO_putc", "_ZdaPv", + "_ZdaPvRKSt9nothrow_t", "_ZdlPv", + "_ZdlPvRKSt9nothrow_t", "_Znaj", "_ZnajRKSt9nothrow_t", "_Znam", @@ -168,6 +170,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "getlogin_r", "getpwnam", "gets", + "gettimeofday", "htonl", "htons", "iprintf", @@ -490,6 +493,7 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T, TLI.setUnavailable(LibFunc::getitimer); TLI.setUnavailable(LibFunc::getlogin_r); TLI.setUnavailable(LibFunc::getpwnam); + TLI.setUnavailable(LibFunc::gettimeofday); TLI.setUnavailable(LibFunc::htonl); TLI.setUnavailable(LibFunc::htons); TLI.setUnavailable(LibFunc::lchown); diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp index f5121e3..cd810b6 100644 --- a/lib/Target/TargetLoweringObjectFile.cpp +++ b/lib/Target/TargetLoweringObjectFile.cpp @@ -317,3 +317,9 @@ getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding, } } } + +const MCExpr *TargetLoweringObjectFile::getDebugThreadLocalSymbol(const MCSymbol *Sym) const { + // FIXME: It's not clear what, if any, default this should have - perhaps a + // null return could mean 'no location' & we should just do that here. 
+ return MCSymbolRefExpr::Create(Sym, *Ctx); +} diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp index e728251..df4a03c 100644 --- a/lib/Target/TargetMachine.cpp +++ b/lib/Target/TargetMachine.cpp @@ -78,7 +78,6 @@ void TargetMachine::resetTargetOptions(const MachineFunction *MF) const { } while (0) RESET_OPTION(NoFramePointerElim, "no-frame-pointer-elim"); - RESET_OPTION(NoFramePointerElimNonLeaf, "no-frame-pointer-elim-non-leaf"); RESET_OPTION(LessPreciseFPMADOption, "less-precise-fpmad"); RESET_OPTION(UnsafeFPMath, "unsafe-fp-math"); RESET_OPTION(NoInfsFPMath, "no-infs-fp-math"); diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp index 01d12e8..7419122 100644 --- a/lib/Target/TargetMachineC.cpp +++ b/lib/Target/TargetMachineC.cpp @@ -200,7 +200,7 @@ static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M, LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, char* Filename, LLVMCodeGenFileType codegen, char** ErrorMessage) { std::string error; - raw_fd_ostream dest(Filename, error, raw_fd_ostream::F_Binary); + raw_fd_ostream dest(Filename, error, sys::fs::F_Binary); formatted_raw_ostream destf(dest); if (!error.empty()) { *ErrorMessage = strdup(error.c_str()); diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp index 263eb5e..ad83d97 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -9,6 +9,7 @@ #include "MCTargetDesc/X86BaseInfo.h" #include "llvm/ADT/APFloat.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" @@ -830,6 +831,18 @@ struct X86Operand : public MCParsedAsmOperand { return Kind == Memory && (!Mem.Size || Mem.Size == 64) && getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; } + bool isMemVZ32() const { + return Kind == Memory && (!Mem.Size || Mem.Size == 32) && + getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; + } + bool isMemVZ64() const { + return Kind == Memory && (!Mem.Size || Mem.Size == 64) && + getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; + } + + bool isMem512() const { + return Kind == Memory && (!Mem.Size || Mem.Size == 512); + } bool isAbsMem() const { return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && @@ -890,6 +903,16 @@ struct X86Operand : public MCParsedAsmOperand { addMemOperands(Inst, N); } + void addMemVZ32Operands(MCInst &Inst, unsigned N) const { + addMemOperands(Inst, N); + } + void addMemVZ64Operands(MCInst &Inst, unsigned N) const { + addMemOperands(Inst, N); + } + void addMem512Operands(MCInst &Inst, unsigned N) const { + addMemOperands(Inst, N); + } + void addMemOperands(MCInst &Inst, unsigned N) const { assert((N == 5) && "Invalid number of operands!"); Inst.addOperand(MCOperand::CreateReg(getMemBaseReg())); diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt index 7cb71f0..7e20151 100644 --- a/lib/Target/X86/CMakeLists.txt +++ b/lib/Target/X86/CMakeLists.txt @@ -53,7 +53,7 @@ endif() add_llvm_target(X86CodeGen ${sources}) -add_dependencies(LLVMX86CodeGen intrinsics_gen) +add_dependencies(LLVMX86CodeGen X86CommonTableGen intrinsics_gen) add_subdirectory(AsmParser) add_subdirectory(Disassembler) diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp index ca71c4f..82af6fa 100644 --- a/lib/Target/X86/Disassembler/X86Disassembler.cpp 
+++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -286,6 +286,9 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
   case TYPE_XMM256:
     mcInst.addOperand(MCOperand::CreateReg(X86::YMM0 + (immediate >> 4)));
     return;
+  case TYPE_XMM512:
+    mcInst.addOperand(MCOperand::CreateReg(X86::ZMM0 + (immediate >> 4)));
+    return;
   case TYPE_REL8:
     isBranch = true;
     pcrel = insn.startLocation + insn.immediateOffset + insn.immediateSize;
@@ -443,6 +446,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
       EA_BASES_64BIT
       REGS_XMM
       REGS_YMM
+      REGS_ZMM
 #undef ENTRY
     }
   } else {
@@ -565,6 +569,7 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
   case TYPE_XMM64:
   case TYPE_XMM128:
   case TYPE_XMM256:
+  case TYPE_XMM512:
   case TYPE_DEBUGREG:
   case TYPE_CONTROLREG:
     return translateRMRegister(mcInst, insn);
@@ -683,6 +688,15 @@ static bool translateInstruction(MCInst &mcInst,
   }
 
   mcInst.setOpcode(insn.instructionID);
+  // If, when reading the prefix bytes, we determined that the overlapping 0xf2
+  // or 0xf3 prefix bytes should be disassembled as xrelease or xacquire, then
+  // set the opcode to those instead of the rep and repne opcodes.
+  if (insn.xAcquireRelease) {
+    if (mcInst.getOpcode() == X86::REP_PREFIX)
+      mcInst.setOpcode(X86::XRELEASE_PREFIX);
+    else if (mcInst.getOpcode() == X86::REPNE_PREFIX)
+      mcInst.setOpcode(X86::XACQUIRE_PREFIX);
+  }
 
   int index;
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
index e40edba..bb195ee 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
@@ -328,6 +328,27 @@ static int readPrefixes(struct InternalInstruction* insn) {
       break;
     if (lookAtByte(insn, &nextByte))
       return -1;
+    /*
+     * If the byte is 0xf2 or 0xf3, and any of the following conditions are
+     * met:
+     * - it is followed by a LOCK (0xf0) prefix
+     * - it is followed by an xchg instruction
+     * then it should be disassembled as an xacquire/xrelease, not repne/rep.
+     */
+    if ((byte == 0xf2 || byte == 0xf3) &&
+        ((nextByte == 0xf0) ||
+         ((nextByte & 0xfe) == 0x86 || (nextByte & 0xf8) == 0x90)))
+      insn->xAcquireRelease = TRUE;
+    /*
+     * Also if the byte is 0xf3, and the following condition is met:
+     * - it is followed by a "mov mem, reg" (opcode 0x88/0x89) or
+     *   "mov mem, imm" (opcode 0xc6/0xc7) instruction,
+     * then it should be disassembled as an xrelease, not rep.
+ */ + if (byte == 0xf3 && + (nextByte == 0x88 || nextByte == 0x89 || + nextByte == 0xc6 || nextByte == 0xc7)) + insn->xAcquireRelease = TRUE; if (insn->mode == MODE_64BIT && (nextByte & 0xf0) == 0x40) { if (consumeByte(insn, &nextByte)) return -1; @@ -1234,6 +1255,8 @@ static int readModRM(struct InternalInstruction* insn) { return prefix##_EAX + index; \ case TYPE_R64: \ return prefix##_RAX + index; \ + case TYPE_XMM512: \ + return prefix##_ZMM0 + index; \ case TYPE_XMM256: \ return prefix##_YMM0 + index; \ case TYPE_XMM128: \ diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h index 407ead3..dcb6aad 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -219,7 +219,23 @@ extern "C" { ENTRY(XMM12) \ ENTRY(XMM13) \ ENTRY(XMM14) \ - ENTRY(XMM15) + ENTRY(XMM15) \ + ENTRY(XMM16) \ + ENTRY(XMM17) \ + ENTRY(XMM18) \ + ENTRY(XMM19) \ + ENTRY(XMM20) \ + ENTRY(XMM21) \ + ENTRY(XMM22) \ + ENTRY(XMM23) \ + ENTRY(XMM24) \ + ENTRY(XMM25) \ + ENTRY(XMM26) \ + ENTRY(XMM27) \ + ENTRY(XMM28) \ + ENTRY(XMM29) \ + ENTRY(XMM30) \ + ENTRY(XMM31) #define REGS_YMM \ ENTRY(YMM0) \ @@ -237,7 +253,57 @@ extern "C" { ENTRY(YMM12) \ ENTRY(YMM13) \ ENTRY(YMM14) \ - ENTRY(YMM15) + ENTRY(YMM15) \ + ENTRY(YMM16) \ + ENTRY(YMM17) \ + ENTRY(YMM18) \ + ENTRY(YMM19) \ + ENTRY(YMM20) \ + ENTRY(YMM21) \ + ENTRY(YMM22) \ + ENTRY(YMM23) \ + ENTRY(YMM24) \ + ENTRY(YMM25) \ + ENTRY(YMM26) \ + ENTRY(YMM27) \ + ENTRY(YMM28) \ + ENTRY(YMM29) \ + ENTRY(YMM30) \ + ENTRY(YMM31) + +#define REGS_ZMM \ + ENTRY(ZMM0) \ + ENTRY(ZMM1) \ + ENTRY(ZMM2) \ + ENTRY(ZMM3) \ + ENTRY(ZMM4) \ + ENTRY(ZMM5) \ + ENTRY(ZMM6) \ + ENTRY(ZMM7) \ + ENTRY(ZMM8) \ + ENTRY(ZMM9) \ + ENTRY(ZMM10) \ + ENTRY(ZMM11) \ + ENTRY(ZMM12) \ + ENTRY(ZMM13) \ + ENTRY(ZMM14) \ + ENTRY(ZMM15) \ + ENTRY(ZMM16) \ + ENTRY(ZMM17) \ + ENTRY(ZMM18) \ + ENTRY(ZMM19) \ + ENTRY(ZMM20) \ + ENTRY(ZMM21) \ + ENTRY(ZMM22) \ + ENTRY(ZMM23) \ + ENTRY(ZMM24) \ + ENTRY(ZMM25) \ + ENTRY(ZMM26) \ + ENTRY(ZMM27) \ + ENTRY(ZMM28) \ + ENTRY(ZMM29) \ + ENTRY(ZMM30) \ + ENTRY(ZMM31) #define REGS_SEGMENT \ ENTRY(ES) \ @@ -285,6 +351,7 @@ extern "C" { REGS_MMX \ REGS_XMM \ REGS_YMM \ + REGS_ZMM \ REGS_SEGMENT \ REGS_DEBUG \ REGS_CONTROL \ @@ -319,6 +386,7 @@ typedef enum { ALL_EA_BASES REGS_XMM REGS_YMM + REGS_ZMM #undef ENTRY SIB_INDEX_max } SIBIndex; @@ -457,6 +525,8 @@ struct InternalInstruction { uint64_t necessaryPrefixLocation; /* The segment override type */ SegmentOverride segmentOverride; + /* 1 if the prefix byte, 0xf2 or 0xf3 is xacquire or xrelease */ + BOOL xAcquireRelease; /* Sizes of various critical pieces of data, in bytes */ uint8_t registerSize; diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h index 23dfe4b..d291441 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h @@ -116,8 +116,106 @@ enum attributeBits { ENUM_ENTRY(IC_VEX_L_XS, 4, "requires VEX and the L and XS prefix")\ ENUM_ENTRY(IC_VEX_L_XD, 4, "requires VEX and the L and XD prefix")\ ENUM_ENTRY(IC_VEX_L_OPSIZE, 4, "requires VEX, L, and OpSize") \ - ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 5, "requires VEX, L, W and OpSize") - + ENUM_ENTRY(IC_VEX_L_W, 3, "requires VEX, L and W") \ + ENUM_ENTRY(IC_VEX_L_W_XS, 4, "requires VEX, L, W and XS prefix") \ + ENUM_ENTRY(IC_VEX_L_W_XD, 4, "requires VEX, L, W and XD prefix") \ + 
ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 4, "requires VEX, L, W and OpSize") \ + ENUM_ENTRY(IC_EVEX, 1, "requires an EVEX prefix") \ + ENUM_ENTRY(IC_EVEX_XS, 2, "requires EVEX and the XS prefix") \ + ENUM_ENTRY(IC_EVEX_XD, 2, "requires EVEX and the XD prefix") \ + ENUM_ENTRY(IC_EVEX_OPSIZE, 2, "requires EVEX and the OpSize prefix") \ + ENUM_ENTRY(IC_EVEX_W, 3, "requires EVEX and the W prefix") \ + ENUM_ENTRY(IC_EVEX_W_XS, 4, "requires EVEX, W, and XS prefix") \ + ENUM_ENTRY(IC_EVEX_W_XD, 4, "requires EVEX, W, and XD prefix") \ + ENUM_ENTRY(IC_EVEX_W_OPSIZE, 4, "requires EVEX, W, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L, 3, "requires EVEX and the L prefix") \ + ENUM_ENTRY(IC_EVEX_L_XS, 4, "requires EVEX and the L and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L_XD, 4, "requires EVEX and the L and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L_OPSIZE, 4, "requires EVEX, L, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L_W, 3, "requires EVEX, L and W") \ + ENUM_ENTRY(IC_EVEX_L_W_XS, 4, "requires EVEX, L, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_XD, 4, "requires EVEX, L, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_OPSIZE, 4, "requires EVEX, L, W and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2, 3, "requires EVEX and the L2 prefix") \ + ENUM_ENTRY(IC_EVEX_L2_XS, 4, "requires EVEX and the L2 and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L2_XD, 4, "requires EVEX and the L2 and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L2_OPSIZE, 4, "requires EVEX, L2, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2_W, 3, "requires EVEX, L2 and W") \ + ENUM_ENTRY(IC_EVEX_L2_W_XS, 4, "requires EVEX, L2, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_XD, 4, "requires EVEX, L2, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE, 4, "requires EVEX, L2, W and OpSize") \ + ENUM_ENTRY(IC_EVEX_K, 1, "requires an EVEX_K prefix") \ + ENUM_ENTRY(IC_EVEX_XS_K, 2, "requires EVEX_K and the XS prefix") \ + ENUM_ENTRY(IC_EVEX_XD_K, 2, "requires EVEX_K and the XD prefix") \ + ENUM_ENTRY(IC_EVEX_OPSIZE_K, 2, "requires EVEX_K and the OpSize prefix") \ + ENUM_ENTRY(IC_EVEX_W_K, 3, "requires EVEX_K and the W prefix") \ + ENUM_ENTRY(IC_EVEX_W_XS_K, 4, "requires EVEX_K, W, and XS prefix") \ + ENUM_ENTRY(IC_EVEX_W_XD_K, 4, "requires EVEX_K, W, and XD prefix") \ + ENUM_ENTRY(IC_EVEX_W_OPSIZE_K, 4, "requires EVEX_K, W, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L_K, 3, "requires EVEX_K and the L prefix") \ + ENUM_ENTRY(IC_EVEX_L_XS_K, 4, "requires EVEX_K and the L and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L_XD_K, 4, "requires EVEX_K and the L and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L_OPSIZE_K, 4, "requires EVEX_K, L, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L_W_K, 3, "requires EVEX_K, L and W") \ + ENUM_ENTRY(IC_EVEX_L_W_XS_K, 4, "requires EVEX_K, L, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_XD_K, 4, "requires EVEX_K, L, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K, 4, "requires EVEX_K, L, W and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2_K, 3, "requires EVEX_K and the L2 prefix") \ + ENUM_ENTRY(IC_EVEX_L2_XS_K, 4, "requires EVEX_K and the L2 and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L2_XD_K, 4, "requires EVEX_K and the L2 and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K, 4, "requires EVEX_K, L2, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2_W_K, 3, "requires EVEX_K, L2 and W") \ + ENUM_ENTRY(IC_EVEX_L2_W_XS_K, 4, "requires EVEX_K, L2, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_XD_K, 4, "requires EVEX_K, L2, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K, 4, "requires EVEX_K, L2, W and OpSize") \ + ENUM_ENTRY(IC_EVEX_B, 1, "requires an EVEX_B prefix") \ + ENUM_ENTRY(IC_EVEX_XS_B, 2, "requires EVEX_B and the XS prefix") \ + 
ENUM_ENTRY(IC_EVEX_XD_B, 2, "requires EVEX_B and the XD prefix") \ + ENUM_ENTRY(IC_EVEX_OPSIZE_B, 2, "requires EVEX_B and the OpSize prefix") \ + ENUM_ENTRY(IC_EVEX_W_B, 3, "requires EVEX_B and the W prefix") \ + ENUM_ENTRY(IC_EVEX_W_XS_B, 4, "requires EVEX_B, W, and XS prefix") \ + ENUM_ENTRY(IC_EVEX_W_XD_B, 4, "requires EVEX_B, W, and XD prefix") \ + ENUM_ENTRY(IC_EVEX_W_OPSIZE_B, 4, "requires EVEX_B, W, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L_B, 3, "requires EVEX_B and the L prefix") \ + ENUM_ENTRY(IC_EVEX_L_XS_B, 4, "requires EVEX_B and the L and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L_XD_B, 4, "requires EVEX_B and the L and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L_OPSIZE_B, 4, "requires EVEX_B, L, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L_W_B, 3, "requires EVEX_B, L and W") \ + ENUM_ENTRY(IC_EVEX_L_W_XS_B, 4, "requires EVEX_B, L, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_XD_B, 4, "requires EVEX_B, L, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_B, 4, "requires EVEX_B, L, W and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2_B, 3, "requires EVEX_B and the L2 prefix") \ + ENUM_ENTRY(IC_EVEX_L2_XS_B, 4, "requires EVEX_B and the L2 and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L2_XD_B, 4, "requires EVEX_B and the L2 and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L2_OPSIZE_B, 4, "requires EVEX_B, L2, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2_W_B, 3, "requires EVEX_B, L2 and W") \ + ENUM_ENTRY(IC_EVEX_L2_W_XS_B, 4, "requires EVEX_B, L2, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_XD_B, 4, "requires EVEX_B, L2, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4, "requires EVEX_B, L2, W and OpSize") \ + ENUM_ENTRY(IC_EVEX_K_B, 1, "requires EVEX_B and EVEX_K prefix") \ + ENUM_ENTRY(IC_EVEX_XS_K_B, 2, "requires EVEX_B, EVEX_K and the XS prefix") \ + ENUM_ENTRY(IC_EVEX_XD_K_B, 2, "requires EVEX_B, EVEX_K and the XD prefix") \ + ENUM_ENTRY(IC_EVEX_OPSIZE_K_B, 2, "requires EVEX_B, EVEX_K and the OpSize prefix") \ + ENUM_ENTRY(IC_EVEX_W_K_B, 3, "requires EVEX_B, EVEX_K and the W prefix") \ + ENUM_ENTRY(IC_EVEX_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, W, and XS prefix") \ + ENUM_ENTRY(IC_EVEX_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, W, and XD prefix") \ + ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, W, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L_K_B, 3, "requires EVEX_B, EVEX_K and the L prefix") \ + ENUM_ENTRY(IC_EVEX_L_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L_W_K_B, 3, "requires EVEX_B, EVEX_K, L and W") \ + ENUM_ENTRY(IC_EVEX_L_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, W and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2_K_B, 3, "requires EVEX_B, EVEX_K and the L2 prefix") \ + ENUM_ENTRY(IC_EVEX_L2_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XS prefix")\ + ENUM_ENTRY(IC_EVEX_L2_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XD prefix")\ + ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, and OpSize") \ + ENUM_ENTRY(IC_EVEX_L2_W_K_B, 3, "requires EVEX_B, EVEX_K, L2 and W") \ + ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XS prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XD prefix") \ + ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and 
OpSize") #define ENUM_ENTRY(n, r, d) n, typedef enum { @@ -224,6 +322,7 @@ struct ContextDecision { ENUM_ENTRY(ENCODING_REG, "Register operand in ModR/M byte.") \ ENUM_ENTRY(ENCODING_RM, "R/M operand in ModR/M byte.") \ ENUM_ENTRY(ENCODING_VVVV, "Register operand in VEX.vvvv byte.") \ + ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.") \ ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \ ENUM_ENTRY(ENCODING_CW, "2-byte") \ ENUM_ENTRY(ENCODING_CD, "4-byte") \ @@ -321,6 +420,9 @@ struct ContextDecision { ENUM_ENTRY(TYPE_XMM64, "8-byte") \ ENUM_ENTRY(TYPE_XMM128, "16-byte") \ ENUM_ENTRY(TYPE_XMM256, "32-byte") \ + ENUM_ENTRY(TYPE_XMM512, "64-byte") \ + ENUM_ENTRY(TYPE_VK8, "8-bit") \ + ENUM_ENTRY(TYPE_VK16, "16-bit") \ ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \ ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \ ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \ diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp index e357710..b9d0082 100644 --- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp +++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp @@ -50,7 +50,7 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, // Try to print any aliases first. if (!printAliasInstr(MI, OS)) printInstruction(MI, OS); - + // Next always print the annotation. printAnnotation(OS, Annot); @@ -139,8 +139,7 @@ void X86ATTInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr()); int64_t Address; if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) { - O << "0x"; - O.write_hex(Address); + O << formatHex((uint64_t)Address); } else { // Otherwise, just print the expression. @@ -159,10 +158,10 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, O << markup("<imm:") << '$' << formatImm((int64_t)Op.getImm()) << markup(">"); - + if (CommentStream && (Op.getImm() > 255 || Op.getImm() < -256)) *CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Op.getImm()); - + } else { assert(Op.isExpr() && "unknown operand kind in printOperand"); O << markup("<imm:") @@ -177,7 +176,7 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op, const MCOperand &IndexReg = MI->getOperand(Op+2); const MCOperand &DispSpec = MI->getOperand(Op+3); const MCOperand &SegReg = MI->getOperand(Op+4); - + O << markup("<mem:"); // If this has a segment register, print it. @@ -185,7 +184,7 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op, printOperand(MI, Op+4, O); O << ':'; } - + if (DispSpec.isImm()) { int64_t DispVal = DispSpec.getImm(); if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) @@ -194,21 +193,21 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op, assert(DispSpec.isExpr() && "non-immediate displacement for LEA?"); O << *DispSpec.getExpr(); } - + if (IndexReg.getReg() || BaseReg.getReg()) { O << '('; if (BaseReg.getReg()) printOperand(MI, Op, O); - + if (IndexReg.getReg()) { O << ','; printOperand(MI, Op+2, O); unsigned ScaleVal = MI->getOperand(Op+1).getImm(); if (ScaleVal != 1) { O << ',' - << markup("<imm:") + << markup("<imm:") << ScaleVal // never printed in hex. 
- << markup(">"); + << markup(">"); } } O << ')'; diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h index 8e09183..8d05256 100644 --- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h +++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h @@ -65,6 +65,9 @@ public: void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { printMemReference(MI, OpNo, O); } + void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { + printMemReference(MI, OpNo, O); + } void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { printMemReference(MI, OpNo, O); } @@ -80,6 +83,9 @@ public: void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { printMemReference(MI, OpNo, O); } + void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { + printMemReference(MI, OpNo, O); + } }; } diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp index 141f4a4..9dfc9a9 100644 --- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp +++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp @@ -119,7 +119,7 @@ void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isImm()) - O << Op.getImm(); + O << formatImm(Op.getImm()); else { assert(Op.isExpr() && "unknown pcrel immediate operand"); // If a symbolic branch target was added as a constant expression then print @@ -127,8 +127,7 @@ void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr()); int64_t Address; if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) { - O << "0x"; - O.write_hex(Address); + O << formatHex((uint64_t)Address); } else { // Otherwise, just print the expression. 
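Both printers gain one helper per access width, and in Intel syntax each helper simply prefixes the memory reference with a size keyword (the lower-cased forms appear in the Intel-printer hunk below). The whole dispatch amounts to this mapping; the function is an illustrative sketch, not part of the patch.

// Intel-syntax pointer-size keyword for a given access width in bits;
// 512 bits ("zmmword ptr") is the case added by this change.
static const char *intelPtrKeyword(unsigned Bits) {
  switch (Bits) {
  case 8:   return "byte ptr ";
  case 16:  return "word ptr ";
  case 32:  return "dword ptr ";
  case 64:  return "qword ptr ";
  case 80:  return "xword ptr ";
  case 128: return "xmmword ptr ";
  case 256: return "ymmword ptr ";
  case 512: return "zmmword ptr ";
  default:  return ""; // unknown width: the real printer has a separate opaque form
  }
}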
@@ -137,18 +136,13 @@ void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, } } -static void PrintRegName(raw_ostream &O, StringRef RegName) { - for (unsigned i = 0, e = RegName.size(); i != e; ++i) - O << (char)toupper(RegName[i]); -} - void X86IntelInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { - PrintRegName(O, getRegisterName(Op.getReg())); + printRegName(O, Op.getReg()); } else if (Op.isImm()) { - O << Op.getImm(); + O << formatImm((int64_t)Op.getImm()); } else { assert(Op.isExpr() && "unknown operand kind in printOperand"); O << *Op.getExpr(); @@ -200,7 +194,7 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op, DispVal = -DispVal; } } - O << DispVal; + O << formatImm(DispVal); } } diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h index bb769eb..45beeda 100644 --- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h +++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h @@ -41,52 +41,60 @@ public: void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "OPAQUE PTR "; + O << "opaque ptr "; printMemReference(MI, OpNo, O); } void printi8mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "BYTE PTR "; + O << "byte ptr "; printMemReference(MI, OpNo, O); } void printi16mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "WORD PTR "; + O << "word ptr "; printMemReference(MI, OpNo, O); } void printi32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "DWORD PTR "; + O << "dword ptr "; printMemReference(MI, OpNo, O); } void printi64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "QWORD PTR "; + O << "qword ptr "; printMemReference(MI, OpNo, O); } void printi128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "XMMWORD PTR "; + O << "xmmword ptr "; printMemReference(MI, OpNo, O); } void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "YMMWORD PTR "; + O << "ymmword ptr "; + printMemReference(MI, OpNo, O); + } + void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { + O << "zmmword ptr "; printMemReference(MI, OpNo, O); } void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "DWORD PTR "; + O << "dword ptr "; printMemReference(MI, OpNo, O); } void printf64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "QWORD PTR "; + O << "qword ptr "; printMemReference(MI, OpNo, O); } void printf80mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "XWORD PTR "; + O << "xword ptr "; printMemReference(MI, OpNo, O); } void printf128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "XMMWORD PTR "; + O << "xmmword ptr "; printMemReference(MI, OpNo, O); } void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - O << "YMMWORD PTR "; + O << "ymmword ptr "; + printMemReference(MI, OpNo, O); + } + void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { + O << "zmmword ptr "; printMemReference(MI, OpNo, O); } }; diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h index d8f7278..25d1af3 100644 --- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h +++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h @@ -462,20 +462,54 @@ namespace X86II { // prefix. Usually used for scalar instructions. Needed by disassembler. 
    VEX_LIG = 1U << 6,
 
+    // TODO: we should combine VEX_L and VEX_LIG together to form a 2-bit
+    // field with the following encoding:
+    // - 00 V128
+    // - 01 V256
+    // - 10 V512
+    // - 11 LIG (but, in the instruction encoding, leave VEX.L and EVEX.L
+    //      as zeros). This will save 1 tsflag bit.
+
+    // EVEX - Specifies that this instruction uses the EVEX form, which
+    // provides syntax support for up to 32 512-bit register operands and up
+    // to 7 16-bit mask operands, as well as source operand data
+    // swizzling/memory operand conversion, eviction hint, and rounding mode.
+    EVEX = 1U << 7,
+
+    // EVEX_K - Set if this instruction requires masking
+    EVEX_K = 1U << 8,
+
+    // EVEX_Z - Set if this instruction has EVEX.Z field set.
+    EVEX_Z = 1U << 9,
+
+    // EVEX_L2 - Set if this instruction has EVEX.L' field set.
+    EVEX_L2 = 1U << 10,
+
+    // EVEX_B - Set if this instruction has EVEX.B field set.
+    EVEX_B = 1U << 11,
+
+    // EVEX_CD8E - compressed disp8 form, element-size
+    EVEX_CD8EShift = VEXShift + 12,
+    EVEX_CD8EMask = 3,
+
+    // EVEX_CD8V - compressed disp8 form, vector-width
+    EVEX_CD8VShift = EVEX_CD8EShift + 2,
+    EVEX_CD8VMask = 7,
+
     /// Has3DNow0F0FOpcode - This flag indicates that the instruction uses the
     /// wacky 0x0F 0x0F prefix for 3DNow! instructions.  The manual documents
     /// this as having a 0x0F prefix with a 0x0F opcode, and each instruction
     /// storing a classifier in the imm8 field.  To simplify our implementation,
     /// we handle this by storing the classifier in the opcode field and using
     /// this flag to indicate that the encoder should do the wacky 3DNow! thing.
-    Has3DNow0F0FOpcode = 1U << 7,
+    Has3DNow0F0FOpcode = 1U << 17,
 
     /// MemOp4 - Used to indicate swapping of operand 3 and 4 to be encoded in
     /// ModRM or I8IMM. This is used for FMA4 and XOP instructions.
-    MemOp4 = 1U << 8,
+    MemOp4 = 1U << 18,
 
     /// XOP - Opcode prefix used by XOP instructions.
-    XOP = 1U << 9
+    XOP = 1U << 19
 
   };
 
@@ -533,12 +567,19 @@ namespace X86II {
     unsigned CurOp = 0;
     if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
       ++CurOp;
-    else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) {
-      assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
+    else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
+             Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1)
+      // Special case for AVX-512 GATHER with 2 TIED_TO operands
+      // Skip the first 2 operands: dst, mask_wb
+      CurOp += 2;
+    else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
+             Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1)
       // Special case for GATHER with 2 TIED_TO operands
       // Skip the first 2 operands: dst, mask_wb
       CurOp += 2;
-    }
+    else if (NumOps > 2 && Desc.getOperandConstraint(NumOps - 2, MCOI::TIED_TO) == 0)
+      // SCATTER
+      ++CurOp;
     return CurOp;
   }
 
@@ -569,12 +610,15 @@ namespace X86II {
   case X86II::MRMSrcMem: {
     bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
     bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
+    bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
+    bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
     unsigned FirstMemOp = 1;
     if (HasVEX_4V)
       ++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
     if (HasMemOp4)
       ++FirstMemOp;// Skip the register source (which is encoded in I8IMM).
-
+    if (HasEVEX_K)
+      ++FirstMemOp;// Skip the mask register
     // FIXME: Maybe lea should have its own form?  This is a horrible hack.
 //if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
 //    Opcode == X86::LEA16r || Opcode == X86::LEA32r)
@@ -611,6 +655,14 @@ namespace X86II {
   /// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended (r8 or
   /// higher) register?  e.g. r8, xmm8, xmm13, etc.
   inline bool isX86_64ExtendedReg(unsigned RegNo) {
+    if ((RegNo > X86::XMM7 && RegNo <= X86::XMM15) ||
+        (RegNo > X86::XMM23 && RegNo <= X86::XMM31) ||
+        (RegNo > X86::YMM7 && RegNo <= X86::YMM15) ||
+        (RegNo > X86::YMM23 && RegNo <= X86::YMM31) ||
+        (RegNo > X86::ZMM7 && RegNo <= X86::ZMM15) ||
+        (RegNo > X86::ZMM23 && RegNo <= X86::ZMM31))
+      return true;
+
     switch (RegNo) {
     default: break;
     case X86::R8:    case X86::R9:    case X86::R10:   case X86::R11:
@@ -621,16 +673,21 @@ namespace X86II {
     case X86::R12W:  case X86::R13W:  case X86::R14W:  case X86::R15W:
     case X86::R8B:   case X86::R9B:   case X86::R10B:  case X86::R11B:
     case X86::R12B:  case X86::R13B:  case X86::R14B:  case X86::R15B:
-    case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
-    case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
-    case X86::YMM8:  case X86::YMM9:  case X86::YMM10: case X86::YMM11:
-    case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
     case X86::CR8:   case X86::CR9:   case X86::CR10:  case X86::CR11:
     case X86::CR12:  case X86::CR13:  case X86::CR14:  case X86::CR15:
       return true;
     }
     return false;
   }
+
+  /// is32ExtendedReg - Is the register one of the 32-register extended
+  /// AVX-512 set (xmm16, ymm16, zmm16 or higher)?  e.g. zmm21.
+  static inline bool is32ExtendedReg(unsigned RegNo) {
+    return ((RegNo > X86::XMM15 && RegNo <= X86::XMM31) ||
+            (RegNo > X86::YMM15 && RegNo <= X86::YMM31) ||
+            (RegNo > X86::ZMM15 && RegNo <= X86::ZMM31));
+  }
+
   inline bool isX86_64NonExtLowByteReg(unsigned reg) {
     return (reg == X86::SPL || reg == X86::BPL ||
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index de80dd8..b400b87 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -101,7 +101,18 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
   } else {
     switch ((unsigned)Fixup.getKind()) {
     default: llvm_unreachable("invalid fixup kind!");
-    case FK_Data_8: Type = ELF::R_X86_64_64; break;
+    case FK_Data_8:
+      switch (Modifier) {
+      default:
+        llvm_unreachable("Unimplemented");
+      case MCSymbolRefExpr::VK_None:
+        Type = ELF::R_X86_64_64;
+        break;
+      case MCSymbolRefExpr::VK_DTPOFF:
+        Type = ELF::R_X86_64_DTPOFF64;
+        break;
+      }
+      break;
     case X86::reloc_signed_4byte:
       switch (Modifier) {
       default:
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 016af71..8515879 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -53,7 +53,7 @@ public:
   }
 
   unsigned GetX86RegNum(const MCOperand &MO) const {
-    return Ctx.getRegisterInfo().getEncodingValue(MO.getReg()) & 0x7;
+    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
   }
 
   // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
@@ -77,6 +77,14 @@ public:
     return (~SrcRegNum) & 0xf;
   }
 
+  unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
+                                             unsigned OpNum) const {
+    assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
+           "Invalid mask register as write-mask!");
+    unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
+    return MaskRegNum;
+  }
+
   void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS)
const {
    OS << (char)C;
    ++CurByte;
  }
@@ -152,6 +160,52 @@ static bool isDisp8(int Value) {
   return Value == (signed char)Value;
 }
 
+/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
+/// compressed displacement field.
+static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
+  assert(((TSFlags >> X86II::VEXShift) & X86II::EVEX) &&
+         "Compressed 8-bit displacement is only valid for EVEX inst.");
+
+  unsigned CD8E = (TSFlags >> X86II::EVEX_CD8EShift) & X86II::EVEX_CD8EMask;
+  unsigned CD8V = (TSFlags >> X86II::EVEX_CD8VShift) & X86II::EVEX_CD8VMask;
+
+  if (CD8V == 0 && CD8E == 0) {
+    CValue = Value;
+    return isDisp8(Value);
+  }
+
+  unsigned MemObjSize = 1U << CD8E;
+  if (CD8V & 4) {
+    // Fixed vector length
+    MemObjSize *= 1U << (CD8V & 0x3);
+  } else {
+    // Modified vector length
+    bool EVEX_b = (TSFlags >> X86II::VEXShift) & X86II::EVEX_B;
+    if (!EVEX_b) {
+      unsigned EVEX_LL = ((TSFlags >> X86II::VEXShift) & X86II::VEX_L) ? 1 : 0;
+      EVEX_LL += ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2) ? 2 : 0;
+      assert(EVEX_LL < 3 && "Invalid vector length for compressed disp8");
+
+      unsigned NumElems = (1U << (EVEX_LL + 4)) / MemObjSize;
+      NumElems /= 1U << (CD8V & 0x3);
+
+      MemObjSize *= NumElems;
+    }
+  }
+
+  unsigned MemObjMask = MemObjSize - 1;
+  assert((MemObjSize & MemObjMask) == 0 && "Invalid memory object size.");
+
+  if (Value & MemObjMask) // Unaligned offset
+    return false;
+  Value /= MemObjSize;
+  bool Ret = (Value == (signed char)Value);
+
+  if (Ret)
+    CValue = Value;
+  return Ret;
+}
+
 /// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
 /// in an instruction with the specified TSFlags.
 static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
@@ -318,6 +372,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
   const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
   const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
   unsigned BaseReg = Base.getReg();
+  bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
 
   // Handle %rip relative addressing.
   if (BaseReg == X86::RIP) {    // [disp32+RIP] in X86-64 mode
@@ -378,10 +433,21 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
     }
 
     // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
-    if (Disp.isImm() && isDisp8(Disp.getImm())) {
-      EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
-      EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
-      return;
+    if (Disp.isImm()) {
+      if (!HasEVEX && isDisp8(Disp.getImm())) {
+        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
+        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
+        return;
+      }
+      // Try EVEX compressed 8-bit displacement first; if failed, fall back to
+      // 32-bit displacement.
+      int CDisp8 = 0;
+      if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
+        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
+        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
+                      CDisp8 - Disp.getImm());
+        return;
+      }
     }
 
     // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
@@ -397,6 +463,8 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
 
   bool ForceDisp32 = false;
   bool ForceDisp8  = false;
+  int CDisp8 = 0;
+  int ImmOffset = 0;
   if (BaseReg == 0) {
     // If there is no base register, we emit the special case SIB byte with
     // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
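isCDisp8 above is the core of the EVEX disp8*N scheme: a displacement that is a multiple of the memory-object size is stored divided by that size, so a single signed byte reaches N times further. Here is a simplified standalone sketch, with N passed in directly rather than decoded from TSFlags; it is illustrative, not the patch's code.

#include <cassert>

// Compress a displacement by the memory-object size N (a power of two).
// Returns true and sets CDisp when Disp is N-aligned and Disp/N fits in a
// signed byte.
static bool isCompressedDisp8(int Disp, int MemObjSize, int &CDisp) {
  assert(MemObjSize > 0 && (MemObjSize & (MemObjSize - 1)) == 0 &&
         "memory object size must be a power of two");
  if (Disp & (MemObjSize - 1))       // unaligned: no compression possible
    return false;
  int Scaled = Disp / MemObjSize;
  if (Scaled != (signed char)Scaled) // must still fit in one byte
    return false;
  CDisp = Scaled;
  return true;
}

With 64-byte objects, for example, a displacement of 0x1000 compresses to the single byte 0x40; the emitter accounts for the scaling by passing the offset CDisp8 - Disp into the immediate fixup, as the hunk above does.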
@@ -412,10 +480,15 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op, BaseRegNo != N86::EBP) { // Emit no displacement ModR/M byte EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS); - } else if (isDisp8(Disp.getImm())) { + } else if (!HasEVEX && isDisp8(Disp.getImm())) { // Emit the disp8 encoding. EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS); ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP + } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) { + // Emit the disp8 encoding. + EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS); + ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP + ImmOffset = CDisp8 - Disp.getImm(); } else { // Emit the normal disp32 encoding. EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS); @@ -445,7 +518,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op, // Do we need to output a displacement? if (ForceDisp8) - EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups); + EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset); else if (ForceDisp32 || Disp.getImm() != 0) EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS, Fixups); @@ -457,6 +530,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand, const MCInst &MI, const MCInstrDesc &Desc, raw_ostream &OS) const { + bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX; + bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3; bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4; @@ -468,6 +543,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // 0: Same as REX_R=1 (64 bit mode only) // unsigned char VEX_R = 0x1; + unsigned char EVEX_R2 = 0x1; // VEX_X: equivalent to REX.X, only used when a // register is used for index in SIB Byte. @@ -504,6 +580,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // VEX_4V (VEX vvvv field): a register specifier // (in 1's complement form) or 1111 if unused. unsigned char VEX_4V = 0xf; + unsigned char EVEX_V2 = 0x1; // VEX_L (Vector Length): // @@ -511,6 +588,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // 1: 256-bit vector // unsigned char VEX_L = 0; + unsigned char EVEX_L2 = 0; // VEX_PP: opcode extension providing equivalent // functionality of a SIMD prefix @@ -522,6 +600,18 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // unsigned char VEX_PP = 0; + // EVEX_U + unsigned char EVEX_U = 1; // Always '1' so far + + // EVEX_z + unsigned char EVEX_z = 0; + + // EVEX_b + unsigned char EVEX_b = 0; + + // EVEX_aaa + unsigned char EVEX_aaa = 0; + // Encode the operand size opcode prefix as needed. 
if (TSFlags & X86II::OpSize) VEX_PP = 0x01; @@ -534,6 +624,14 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L) VEX_L = 1; + if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2)) + EVEX_L2 = 1; + + if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z)) + EVEX_z = 1; + + if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B)) + EVEX_b = 1; switch (TSFlags & X86II::Op0Mask) { default: llvm_unreachable("Invalid prefix!"); @@ -580,12 +678,19 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, unsigned CurOp = 0; if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0) ++CurOp; - else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) { - assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1); + else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 && + Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1) + // Special case for AVX-512 GATHER with 2 TIED_TO operands + // Skip the first 2 operands: dst, mask_wb + CurOp += 2; + else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 && + Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1) // Special case for GATHER with 2 TIED_TO operands // Skip the first 2 operands: dst, mask_wb CurOp += 2; - } + else if (NumOps > 2 && Desc.getOperandConstraint(NumOps - 2, MCOI::TIED_TO) == 0) + // SCATTER + ++CurOp; switch (TSFlags & X86II::FormMask) { case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!"); @@ -595,18 +700,35 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // MemAddr, src1(VEX_4V), src2(ModR/M) // MemAddr, src1(ModR/M), imm8 // - if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrBaseReg).getReg())) + if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand + + X86::AddrBaseReg).getReg())) VEX_B = 0x0; - if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrIndexReg).getReg())) + if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand + + X86::AddrIndexReg).getReg())) VEX_X = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand + + X86::AddrIndexReg).getReg())) + EVEX_V2 = 0x0; + + CurOp += X86::AddrNumOperands; - CurOp = X86::AddrNumOperands; - if (HasVEX_4V) - VEX_4V = getVEXRegisterEncoding(MI, CurOp++); + if (HasEVEX_K) + EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++); + + if (HasVEX_4V) { + VEX_4V = getVEXRegisterEncoding(MI, CurOp); + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_V2 = 0x0; + CurOp++; + } const MCOperand &MO = MI.getOperand(CurOp); - if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg())) - VEX_R = 0x0; + if (MO.isReg()) { + if (X86II::isX86_64ExtendedReg(MO.getReg())) + VEX_R = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MO.getReg())) + EVEX_R2 = 0x0; + } break; } case X86II::MRMSrcMem: @@ -619,11 +741,21 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // FMA4: // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM) // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M), - if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp++).getReg())) + if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_R = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_R2 = 0x0; + CurOp++; + + if (HasEVEX_K) + EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++); - if (HasVEX_4V) + if (HasVEX_4V) { VEX_4V = getVEXRegisterEncoding(MI, 
CurOp); + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_V2 = 0x0; + CurOp++; + } if (X86II::isX86_64ExtendedReg( MI.getOperand(MemOperand+X86::AddrBaseReg).getReg())) @@ -631,6 +763,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (X86II::isX86_64ExtendedReg( MI.getOperand(MemOperand+X86::AddrIndexReg).getReg())) VEX_X = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand + + X86::AddrIndexReg).getReg())) + EVEX_V2 = 0x0; if (HasVEX_4VOp3) // Instruction format for 4VOp3: @@ -647,8 +782,15 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // MRM[0-9]m instructions forms: // MemAddr // src1(VEX_4V), MemAddr - if (HasVEX_4V) - VEX_4V = getVEXRegisterEncoding(MI, 0); + if (HasVEX_4V) { + VEX_4V = getVEXRegisterEncoding(MI, CurOp); + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_V2 = 0x0; + } + CurOp++; + + if (HasEVEX_K) + EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++); if (X86II::isX86_64ExtendedReg( MI.getOperand(MemOperand+X86::AddrBaseReg).getReg())) @@ -669,16 +811,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M), if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_R = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_R2 = 0x0; CurOp++; - if (HasVEX_4V) - VEX_4V = getVEXRegisterEncoding(MI, CurOp++); + if (HasEVEX_K) + EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++); + + if (HasVEX_4V) { + VEX_4V = getVEXRegisterEncoding(MI, CurOp); + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_V2 = 0x0; + CurOp++; + } if (HasMemOp4) // Skip second register source (encoded in I8IMM) CurOp++; if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_B = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + VEX_X = 0x0; CurOp++; if (HasVEX_4VOp3) VEX_4V = getVEXRegisterEncoding(MI, CurOp); @@ -690,13 +843,24 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // dst(ModR/M), src1(VEX_4V), src2(ModR/M) if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_B = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + VEX_X = 0x0; CurOp++; - if (HasVEX_4V) - VEX_4V = getVEXRegisterEncoding(MI, CurOp++); + if (HasEVEX_K) + EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++); + + if (HasVEX_4V) { + VEX_4V = getVEXRegisterEncoding(MI, CurOp); + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_V2 = 0x0; + CurOp++; + } if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_R = 0x0; + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_R2 = 0x0; break; case X86II::MRM0r: case X86II::MRM1r: case X86II::MRM2r: case X86II::MRM3r: @@ -704,9 +868,18 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, case X86II::MRM6r: case X86II::MRM7r: // MRM0r-MRM7r instructions forms: // dst(VEX_4V), src(ModR/M), imm8 - VEX_4V = getVEXRegisterEncoding(MI, 0); - if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg())) + VEX_4V = getVEXRegisterEncoding(MI, CurOp); + if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + EVEX_V2 = 0x0; + CurOp++; + + if (HasEVEX_K) + EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++); + + if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) 
      VEX_B = 0x0;
+    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+      VEX_X = 0x0;
     break;
   default: // RawFrm
     break;
   }
@@ -715,29 +888,58 @@
   // Emit segment override opcode prefix as needed.
   EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
 
-  // VEX opcode prefix can have 2 or 3 bytes
-  //
-  //  3 bytes:
-  //    +-----+ +--------------+ +-------------------+
-  //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
-  //    +-----+ +--------------+ +-------------------+
-  //  2 bytes:
-  //    +-----+ +-------------------+
-  //    | C5h | | R | vvvv | L | pp |
-  //    +-----+ +-------------------+
-  //
-  unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+  if (!HasEVEX) {
+    // VEX opcode prefix can have 2 or 3 bytes
+    //
+    //  3 bytes:
+    //    +-----+ +--------------+ +-------------------+
+    //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
+    //    +-----+ +--------------+ +-------------------+
+    //  2 bytes:
+    //    +-----+ +-------------------+
+    //    | C5h | | R | vvvv | L | pp |
+    //    +-----+ +-------------------+
+    //
+    unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
 
-  if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
-    EmitByte(0xC5, CurByte, OS);
-    EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
-    return;
-  }
+    if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
+      EmitByte(0xC5, CurByte, OS);
+      EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
+      return;
+    }
 
-  // 3 byte VEX prefix
-  EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
-  EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
-  EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
+    // 3 byte VEX prefix
+    EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
+    EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
+    EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
+  } else {
+    // The EVEX opcode prefix is always 4 bytes
+    //
+    //  +-----+ +--------------+ +-------------------+ +------------------------+
+    //  | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
+    //  +-----+ +--------------+ +-------------------+ +------------------------+
+    assert((VEX_5M & 0x3) == VEX_5M
+           && "More than 2 significant bits in VEX.m-mmmm field for EVEX!");
+
+    VEX_5M &= 0x3;
+
+    EmitByte(0x62, CurByte, OS);
+    EmitByte((VEX_R   << 7) |
+             (VEX_X   << 6) |
+             (VEX_B   << 5) |
+             (EVEX_R2 << 4) |
+             VEX_5M, CurByte, OS);
+    EmitByte((VEX_W   << 7) |
+             (VEX_4V  << 3) |
+             (EVEX_U  << 2) |
+             VEX_PP, CurByte, OS);
+    EmitByte((EVEX_z  << 7) |
+             (EVEX_L2 << 6) |
+             (VEX_L   << 5) |
+             (EVEX_b  << 4) |
+             (EVEX_V2 << 3) |
+             EVEX_aaa, CurByte, OS);
+  }
 }
 
 /// DetermineREXPrefix - Determine if the MCInst has to be encoded with an X86-64
@@ -1007,6 +1209,10 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
   bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
   const unsigned MemOp4_I8IMMOperand = 2;
 
+  // Does this instruction use the EVEX.aaa (write-mask) field?
+  bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
+  bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
+
   // Determine where the memory operand starts, if present.
int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
   if (MemoryOperand != -1) MemoryOperand += CurOp;
@@ -1057,6 +1263,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
     EmitByte(BaseOpcode, CurByte, OS);
     SrcRegNum = CurOp + 1;
 
+    if (HasEVEX_K) // Skip writemask
+      SrcRegNum++;
+
     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
       ++SrcRegNum;
 
@@ -1069,6 +1278,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
     EmitByte(BaseOpcode, CurByte, OS);
     SrcRegNum = CurOp + X86::AddrNumOperands;
 
+    if (HasEVEX_K) // Skip writemask
+      SrcRegNum++;
+
     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
       ++SrcRegNum;
 
@@ -1082,6 +1294,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
     EmitByte(BaseOpcode, CurByte, OS);
     SrcRegNum = CurOp + 1;
 
+    if (HasEVEX_K) // Skip writemask
+      SrcRegNum++;
+
     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
       ++SrcRegNum;
 
@@ -1100,6 +1315,12 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
   case X86II::MRMSrcMem: {
     int AddrOperands = X86::AddrNumOperands;
     unsigned FirstMemOp = CurOp+1;
+
+    if (HasEVEX_K) { // Skip writemask
+      ++AddrOperands;
+      ++FirstMemOp;
+    }
+
     if (HasVEX_4V) {
       ++AddrOperands;
       ++FirstMemOp;  // Skip the register source (which is encoded in VEX_VVVV).
diff --git a/lib/Target/X86/README-SSE.txt b/lib/Target/X86/README-SSE.txt
index 496b704..adfa7fa 100644
--- a/lib/Target/X86/README-SSE.txt
+++ b/lib/Target/X86/README-SSE.txt
@@ -517,37 +517,6 @@ to <2 x i64> ops being so bad.
 
 //===---------------------------------------------------------------------===//
 
-'select' on vectors and scalars could be a whole lot better.  We currently
-lower them to conditional branches.  On x86-64 for example, we compile this:
-
-double test(double a, double b, double c, double d) { return a<b ? c : d; }
-
-to:
-
-_test:
-	ucomisd	%xmm0, %xmm1
-	ja	LBB1_2	# entry
-LBB1_1:	# entry
-	movapd	%xmm3, %xmm2
-LBB1_2:	# entry
-	movapd	%xmm2, %xmm0
-	ret
-
-instead of:
-
-_test:
-	cmpltsd	%xmm1, %xmm0
-	andpd	%xmm0, %xmm2
-	andnpd	%xmm3, %xmm0
-	orpd	%xmm2, %xmm0
-	ret
-
-For unpredictable branches, the later is much more efficient.  This should
-just be a matter of having scalar sse map to SELECT_CC and custom expanding
-or iseling it.
-
-//===---------------------------------------------------------------------===//
-
 LLVM currently generates stack realignment code, when it is not necessarily
 needed. The problem is that we need to know about stack alignment too early,
 before RA runs.
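The writemask changes to EncodeInstruction above are pure operand-index bookkeeping: the k-register sits between the destination and the remaining sources, so each instruction form skips one extra operand slot when a writemask is present. A sketch of the adjustment, assuming the operand order dst, mask, VEX.vvvv source, ModRM source (the helper itself is illustrative, not part of the patch):

// First ModRM-encoded source operand for an MRMSrcReg-style form, given
// which implicit slots precede it; mirrors the SrcRegNum adjustments above.
static unsigned firstModRMSource(unsigned CurOp, bool HasEVEX_K, bool HasVEX_4V) {
  unsigned SrcRegNum = CurOp + 1; // slot after the destination register
  if (HasEVEX_K)                  // skip the writemask (EVEX.aaa) register
    ++SrcRegNum;
  if (HasVEX_4V)                  // skip the source encoded in VEX.vvvv
    ++SrcRegNum;
  return SrcRegNum;
}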
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index c865500..461ea9b 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -86,6 +86,19 @@ def FeatureAVX : SubtargetFeature<"avx", "X86SSELevel", "AVX", def FeatureAVX2 : SubtargetFeature<"avx2", "X86SSELevel", "AVX2", "Enable AVX2 instructions", [FeatureAVX]>; +def FeatureAVX512 : SubtargetFeature<"avx-512", "X86SSELevel", "AVX512", + "Enable AVX-512 instructions", + [FeatureAVX2]>; +def FeatureERI : SubtargetFeature<"avx-512-eri", "HasERI", "true", + "Enable AVX-512 Exponential and Reciprocal Instructions", + [FeatureAVX512]>; +def FeatureCDI : SubtargetFeature<"avx-512-cdi", "HasCDI", "true", + "Enable AVX-512 Conflict Detection Instructions", + [FeatureAVX512]>; +def FeaturePFI : SubtargetFeature<"avx-512-pfi", "HasPFI", "true", + "Enable AVX-512 PreFetch Instructions", + [FeatureAVX512]>; + def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true", "Enable packed carry-less multiplication instructions", [FeatureSSE2]>; @@ -227,6 +240,15 @@ def : ProcessorModel<"core-avx2", HaswellModel, FeatureBMI, FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE]>; +// KNL +// FIXME: define KNL model +def : ProcessorModel<"knl", HaswellModel, + [FeatureAVX512, FeatureERI, FeatureCDI, FeaturePFI, + FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT, + FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C, + FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI, + FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE]>; + def : Proc<"k6", [FeatureMMX]>; def : Proc<"k6-2", [Feature3DNow]>; def : Proc<"k6-3", [Feature3DNow]>; diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index 6b228b0..9e0ab82 100644 --- a/lib/Target/X86/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -702,48 +702,6 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) { } } -MachineLocation -X86AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const { - MachineLocation Location; - assert (MI->getNumOperands() == 7 && "Invalid no. of machine operands!"); - // Frame address. Currently handles register +- offset only. - - if (MI->getOperand(0).isReg() && MI->getOperand(3).isImm()) - Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm()); - else { - DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n"); - } - return Location; -} - -void X86AsmPrinter::PrintDebugValueComment(const MachineInstr *MI, - raw_ostream &O) { - // Only the target-dependent form of DBG_VALUE should get here. - // Referencing the offset and metadata as NOps-2 and NOps-1 is - // probably portable to other targets; frame pointer location is not. - unsigned NOps = MI->getNumOperands(); - assert(NOps==7); - O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: "; - // cast away const; DIetc do not take const operands for some reason. - DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata())); - if (V.getContext().isSubprogram()) - O << DISubprogram(V.getContext()).getDisplayName() << ":"; - O << V.getName(); - O << " <- "; - // Frame address. Currently handles register +- offset only. 
- O << '['; - if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg()) - printOperand(MI, 0, O); - else - O << "undef"; - O << '+'; printOperand(MI, 3, O); - O << ']'; - O << "+"; - printOperand(MI, NOps-2, O); -} - - - //===----------------------------------------------------------------------===// // Target Registry Stuff //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h index bc7496b..6eed5ce 100644 --- a/lib/Target/X86/X86AsmPrinter.h +++ b/lib/Target/X86/X86AsmPrinter.h @@ -67,11 +67,6 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter { unsigned AsmVariant = 1); virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE; - - void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); - - virtual MachineLocation - getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE; }; } // end namespace llvm diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td index 9eafbd5..38e2591 100644 --- a/lib/Target/X86/X86CallingConv.td +++ b/lib/Target/X86/X86CallingConv.td @@ -49,6 +49,12 @@ def RetCC_X86Common : CallingConv<[ CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>, + // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3 + // can only be used by ABI non-compliant code. This vector type is only + // supported while using the AVX-512 target feature. + CCIfType<[v16i32, v8i64, v16f32, v8f64], + CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>, + // MMX vector types are always returned in MM0. If the target doesn't have // MM0, it doesn't support these vector types. CCIfType<[x86mmx], CCAssignToReg<[MM0]>>, @@ -99,6 +105,10 @@ def RetCC_Intel_OCL_BI : CallingConv<[ CCIfType<[v8f32, v4f64, v8i32, v4i64], CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>, + // 512-bit FP vectors + CCIfType<[v16f32, v8f64, v16i32, v8i64], + CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>, + // i32, i64 in the standard way CCDelegateTo<RetCC_X86Common> ]>; @@ -156,6 +166,11 @@ def RetCC_X86_32 : CallingConv<[ def RetCC_X86_64 : CallingConv<[ // HiPE uses RetCC_X86_64_HiPE CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>, + + // Handle explicit CC selection + CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>, + CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>, + // Mingw64 and native Win64 use Win64 CC CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>, @@ -208,10 +223,15 @@ def CC_X86_64_C : CallingConv<[ // fixed arguments to vararg functions are supposed to be passed in // registers. Actually modeling that would be a lot of work, though. CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], - CCIfSubtarget<"hasAVX()", + CCIfSubtarget<"hasFp256()", CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7]>>>>, + // The first 8 512-bit vector arguments are passed in ZMM registers. + CCIfNotVarArg<CCIfType<[v16i32, v8i64, v16f32, v8f64], + CCIfSubtarget<"hasAVX512()", + CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>, + // Integer/FP values get stored in stack slots that are 8 bytes in size and // 8-byte aligned if there are no more registers to hold them. CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>, @@ -225,7 +245,11 @@ def CC_X86_64_C : CallingConv<[ // 256-bit vectors get 32-byte stack slots that are 32-byte aligned. 
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], - CCAssignToStack<32, 32>> + CCAssignToStack<32, 32>>, + + // 512-bit vectors get 64-byte stack slots that are 64-byte aligned. + CCIfType<[v16i32, v8i64, v16f32, v8f64], + CCAssignToStack<64, 64>> ]>; // Calling convention used on Win64 @@ -246,6 +270,9 @@ def CC_X86_Win64_C : CallingConv<[ // 256 bit vectors are passed by pointer CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>, + // 512 bit vectors are passed by pointer + CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>, + // The first 4 MMX vector arguments are passed in GPRs. CCIfType<[x86mmx], CCBitConvertToType<i64>>, @@ -340,7 +367,7 @@ def CC_X86_32_Common : CallingConv<[ // The first 4 AVX 256-bit vector arguments are passed in YMM registers. CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], - CCIfSubtarget<"hasAVX()", + CCIfSubtarget<"hasFp256()", CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>, // Other SSE vectors get 16-byte stack slots that are 16-byte aligned. @@ -464,6 +491,10 @@ def CC_Intel_OCL_BI : CallingConv<[ CCIfType<[v8f32, v4f64, v8i32, v4i64], CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>, + // The 512-bit vector arguments are passed in ZMM registers. + CCIfType<[v16f32, v8f64, v16i32, v8i64], + CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>, + CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>, CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64_C>>, CCDelegateTo<CC_X86_32_C> @@ -489,6 +520,8 @@ def CC_X86_32 : CallingConv<[ def CC_X86_64 : CallingConv<[ CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>, CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>, + CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>, + CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>, // Mingw64 and native Win64 use Win64 CC CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>, @@ -528,6 +561,10 @@ def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15, (sequence "YMM%u", 6, 15))>; +def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, + R12, R13, R14, R15, + (sequence "ZMM%u", 6, 21), + K4, K5, K6, K7)>; //Standard C + XMM 8-15 def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64, (sequence "XMM%u", 8, 15))>; @@ -535,3 +572,7 @@ def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64, //Standard C + YMM 8-15 def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64, (sequence "YMM%u", 8, 15))>; + +def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add CSR_64, + (sequence "ZMM%u", 16, 31), + K4, K5, K6, K7)>; diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 295a577..5bc3420 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -79,8 +79,10 @@ private: bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR); - bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM); - bool X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM); + bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM, + bool Aligned = false); + bool X86FastEmitStore(EVT VT, unsigned ValReg, const X86AddressMode &AM, + bool Aligned = false); bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, unsigned &ResultReg); @@ -233,7 +235,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM, /// and a displacement offset, or a GlobalAddress, /// i.e. V. Return true if it is possible. 
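The X86FastEmitStore declarations above gain an Aligned flag, and the hunks that follow use it to fall back to unaligned vector moves. A condensed sketch of the selection rule for one of the cases, assuming LLVM's X86 opcode enums:

// Mirrors the v4f32 case below: aligned stores keep MOVAPS/VMOVAPS,
// unaligned ones switch to MOVUPS/VMOVUPS.
static unsigned pickV4F32StoreOpc(bool Aligned, bool HasAVX) {
  if (Aligned)
    return HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
  return HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
}

A store counts as aligned when its IR alignment is zero (the ABI default) or at least the ABI alignment of the stored type, per the X86SelectStore change below.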
bool -X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) { +X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, + const X86AddressMode &AM, bool Aligned) { // Get opcode and regclass of the output for the given store instruction. unsigned Opc = 0; switch (VT.getSimpleVT().SimpleTy) { @@ -243,8 +246,8 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) { // Mask out all but lowest bit. unsigned AndResult = createResultReg(&X86::GR8RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1); - Val = AndResult; + TII.get(X86::AND8ri), AndResult).addReg(ValReg).addImm(1); + ValReg = AndResult; } // FALLTHROUGH, handling i1 as i8. case MVT::i8: Opc = X86::MOV8mr; break; @@ -260,26 +263,35 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) { (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m; break; case MVT::v4f32: - Opc = X86::MOVAPSmr; + if (Aligned) + Opc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; + else + Opc = Subtarget->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr; break; case MVT::v2f64: - Opc = X86::MOVAPDmr; + if (Aligned) + Opc = Subtarget->hasAVX() ? X86::VMOVAPDmr : X86::MOVAPDmr; + else + Opc = Subtarget->hasAVX() ? X86::VMOVUPDmr : X86::MOVUPDmr; break; case MVT::v4i32: case MVT::v2i64: case MVT::v8i16: case MVT::v16i8: - Opc = X86::MOVDQAmr; + if (Aligned) + Opc = Subtarget->hasAVX() ? X86::VMOVDQAmr : X86::MOVDQAmr; + else + Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr; break; } addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, - DL, TII.get(Opc)), AM).addReg(Val); + DL, TII.get(Opc)), AM).addReg(ValReg); return true; } bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, - const X86AddressMode &AM) { + const X86AddressMode &AM, bool Aligned) { // Handle 'null' like i32/i64 0. if (isa<ConstantPointerNull>(Val)) Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext())); @@ -314,7 +326,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, if (ValReg == 0) return false; - return X86FastEmitStore(VT, ValReg, AM); + return X86FastEmitStore(VT, ValReg, AM, Aligned); } /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of @@ -688,6 +700,10 @@ bool X86FastISel::X86SelectStore(const Instruction *I) { if (S->isAtomic()) return false; + unsigned SABIAlignment = + TD.getABITypeAlignment(S->getValueOperand()->getType()); + bool Aligned = S->getAlignment() == 0 || S->getAlignment() >= SABIAlignment; + MVT VT; if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true)) return false; @@ -696,7 +712,7 @@ bool X86FastISel::X86SelectStore(const Instruction *I) { if (!X86SelectAddress(I->getOperand(1), AM)) return false; - return X86FastEmitStore(VT, I->getOperand(0), AM); + return X86FastEmitStore(VT, I->getOperand(0), AM, Aligned); } /// X86SelectRet - Select and emit code to implement ret instructions. @@ -712,10 +728,11 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { CallingConv::ID CC = F.getCallingConv(); if (CC != CallingConv::C && CC != CallingConv::Fast && - CC != CallingConv::X86_FastCall) + CC != CallingConv::X86_FastCall && + CC != CallingConv::X86_64_SysV) return false; - if (Subtarget->isTargetWin64()) + if (Subtarget->isCallingConvWin64(CC)) return false; // Don't handle popping bytes on return for now. @@ -1376,10 +1393,37 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { // Generate the DIV/IDIV instruction. 
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpEntry.OpDivRem)).addReg(Op1Reg); - // Copy output register into result register. - unsigned ResultReg = createResultReg(TypeEntry.RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(Copy), ResultReg).addReg(OpEntry.DivRemResultReg); + // For i8 remainder, we can't reference AH directly, as we'll end + // up with bogus copies like %R9B = COPY %AH. Reference AX + // instead to prevent AH references in a REX instruction. + // + // The current assumption of the fast register allocator is that isel + // won't generate explicit references to the GPR8_NOREX registers. If + // the allocator and/or the backend get enhanced to be more robust in + // that regard, this can be, and should be, removed. + unsigned ResultReg = 0; + if ((I->getOpcode() == Instruction::SRem || + I->getOpcode() == Instruction::URem) && + OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) { + unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass); + unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Copy), SourceSuperReg).addReg(X86::AX); + + // Shift AX right by 8 bits instead of using AH. + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SHR16ri), + ResultSuperReg).addReg(SourceSuperReg).addImm(8); + + // Now reference the 8-bit subreg of the result. + ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg, + /*Kill=*/true, X86::sub_8bit); + } + // Copy the result out of the physreg if we haven't already. + if (!ResultReg) { + ResultReg = createResultReg(TypeEntry.RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Copy), ResultReg) + .addReg(OpEntry.DivRemResultReg); + } UpdateValueMap(I, ResultReg); return true; @@ -1678,9 +1722,6 @@ bool X86FastISel::FastLowerArguments() { if (!FuncInfo.CanLowerReturn) return false; - if (Subtarget->isTargetWin64()) - return false; - const Function *F = FuncInfo.Fn; if (F->isVarArg()) return false; @@ -1688,7 +1729,10 @@ bool X86FastISel::FastLowerArguments() { CallingConv::ID CC = F->getCallingConv(); if (CC != CallingConv::C) return false; - + + if (Subtarget->isCallingConvWin64(CC)) + return false; + if (!Subtarget->is64Bit()) return false; @@ -1732,8 +1776,6 @@ bool X86FastISel::FastLowerArguments() { const TargetRegisterClass *RC64 = TLI.getRegClassFor(MVT::i64); for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I, ++Idx) { - if (I->use_empty()) - continue; bool is32Bit = TLI.getValueType(I->getType()) == MVT::i32; const TargetRegisterClass *RC = is32Bit ? RC32 : RC64; unsigned SrcReg = is32Bit ? GPR32ArgRegs[Idx] : GPR64ArgRegs[Idx]; @@ -1792,8 +1834,10 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { // Handle only C and fastcc calling conventions for now. ImmutableCallSite CS(CI); CallingConv::ID CC = CS.getCallingConv(); + bool isWin64 = Subtarget->isCallingConvWin64(CC); if (CC != CallingConv::C && CC != CallingConv::Fast && - CC != CallingConv::X86_FastCall) + CC != CallingConv::X86_FastCall && CC != CallingConv::X86_64_Win64 && + CC != CallingConv::X86_64_SysV) return false; // fastcc with -tailcallopt is intended to provide a guaranteed @@ -1807,7 +1851,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { // Don't know how to handle Win64 varargs yet. Nothing special needed for // x86-32. Special handling for x86-64 is implemented. 
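The FastISel change above sidesteps AH for i8 remainders in 64-bit mode by copying AX, shifting right by 8, and extracting the low byte via sub_8bit. The same arithmetic on a plain integer (a hypothetical helper, for illustration only):

#include <cstdint>

static uint8_t remainderFromAX(uint16_t AX) {
  uint16_t Shifted = AX >> 8;           // SHR16ri by 8: AH lands in the low byte
  return static_cast<uint8_t>(Shifted); // the sub_8bit extraction
}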
- if (isVarArg && Subtarget->isTargetWin64()) + if (isVarArg && isWin64) return false; // Fast-isel doesn't know about callee-pop yet. @@ -1937,7 +1981,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { I->getParent()->getContext()); // Allocate shadow area for Win64 - if (Subtarget->isTargetWin64()) + if (isWin64) CCInfo.AllocateStack(32, 8); CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86); @@ -2053,7 +2097,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { X86::EBX).addReg(Base); } - if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64()) { + if (Subtarget->is64Bit() && isVarArg && !isWin64) { // Count the number of XMM registers allocated. static const uint16_t XMMArgRegs[] = { X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, @@ -2122,7 +2166,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { if (Subtarget->isPICStyleGOT()) MIB.addReg(X86::EBX, RegState::Implicit); - if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64()) + if (Subtarget->is64Bit() && isVarArg && !isWin64) MIB.addReg(X86::AL, RegState::Implicit); // Add implicit physical register uses to the call. diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 8522c8c..48470da 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -115,9 +115,10 @@ namespace { unsigned Mask = 0; for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(), E = MBB->livein_end(); I != E; ++I) { - unsigned Reg = *I - X86::FP0; - if (Reg < 8) - Mask |= 1 << Reg; + unsigned Reg = *I; + if (Reg < X86::FP0 || Reg > X86::FP6) + continue; + Mask |= 1 << (Reg - X86::FP0); } return Mask; } @@ -1661,6 +1662,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::CALLpcrel32)) .addExternalSymbol("_ftol2") .addReg(X86::ST0, RegState::ImplicitKill) + .addReg(X86::ECX, RegState::ImplicitDefine) .addReg(X86::EAX, RegState::Define | RegState::Implicit) .addReg(X86::EDX, RegState::Define | RegState::Implicit) .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 3061117..b994e67 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -307,7 +307,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, unsigned FramePtr) const { MachineFrameInfo *MFI = MF.getFrameInfo(); MachineModuleInfo &MMI = MF.getMMI(); - const MCRegisterInfo &MRI = MMI.getContext().getRegisterInfo(); + const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); // Add callee saved registers to move list. const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); @@ -360,7 +360,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, if (HasFP && FramePtr == Reg) continue; - unsigned DwarfReg = MRI.getDwarfRegNum(Reg, true); + unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); MMI.addFrameInst(MCCFIInstruction::createOffset(Label, DwarfReg, Offset)); } } @@ -914,11 +914,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit) .setMIFlag(MachineInstr::FrameSetup); - // MSVC x64's __chkstk needs to adjust %rsp. - // FIXME: %rax preserves the offset and should be available. 
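The replacement hunk continuing below relies on MSVC x64's __chkstk contract as the new comment states it: the callee probes the new stack pages but moves neither %rsp nor %rax, so the prologue can subtract the size still sitting in %rax. The net stack-pointer arithmetic, as a trivial sketch:

#include <cstdint>

// RAX is loaded with the allocation size before the __chkstk call;
// __chkstk leaves both RAX and RSP untouched, so the explicit SUB64rr
// added below performs the whole adjustment.
static uint64_t rspAfterChkstk(uint64_t RSP, uint64_t NumBytes) {
  uint64_t RAX = NumBytes;
  return RSP - RAX;
}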
- if (isSPUpdateNeeded) - emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64, - UseLEA, TII, *RegInfo); + // MSVC x64's __chkstk does not adjust %rsp itself. + // It also does not clobber %rax so we can reuse it when adjusting %rsp. + if (isSPUpdateNeeded) { + BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr) + .addReg(StackPtr) + .addReg(X86::RAX) + .setMIFlag(MachineInstr::FrameSetup); + } if (isEAXAlive) { // Restore EAX @@ -1320,7 +1323,7 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, unsigned SlotSize = RegInfo->getSlotSize(); X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); - int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); + int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); if (TailCallReturnAddrDelta < 0) { // create RETURNADDR area @@ -1333,7 +1336,7 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // } // [EBP] MFI->CreateFixedObject(-TailCallReturnAddrDelta, - (-1U*SlotSize)+TailCallReturnAddrDelta, true); + TailCallReturnAddrDelta - SlotSize, true); } if (hasFP(MF)) { diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 4ffffa1..9465420 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -141,10 +141,6 @@ namespace { /// SelectionDAG operations. /// class X86DAGToDAGISel : public SelectionDAGISel { - /// X86Lowering - This object fully describes how to lower LLVM code to an - /// X86-specific SelectionDAG. - const X86TargetLowering &X86Lowering; - /// Subtarget - Keep a pointer to the X86Subtarget around so that we can /// make the right decision when generating code for different targets. const X86Subtarget *Subtarget; @@ -156,7 +152,6 @@ namespace { public: explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel) : SelectionDAGISel(tm, OptLevel), - X86Lowering(*tm.getTargetLowering()), Subtarget(&tm.getSubtarget<X86Subtarget>()), OptForSize(false) {} @@ -233,7 +228,8 @@ namespace { SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment) { Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? - CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI->getPointerTy()) : + CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, + getTargetLowering()->getPointerTy()) : AM.Base_Reg; Scale = getI8Imm(AM.Scale); Index = AM.IndexReg; @@ -504,8 +500,10 @@ void X86DAGToDAGISel::PreprocessISelDAG() { // If the source and destination are SSE registers, then this is a legal // conversion that should not be lowered. - bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT); - bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT); + const X86TargetLowering *X86Lowering = + static_cast<const X86TargetLowering *>(getTargetLowering()); + bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT); + bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT); if (SrcIsSSE && DstIsSSE) continue; @@ -1556,7 +1554,8 @@ bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N, /// SDNode *X86DAGToDAGISel::getGlobalBaseReg() { unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); - return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode(); + return CurDAG->getRegister(GlobalBaseReg, + getTargetLowering()->getPointerTy()).getNode(); } SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { @@ -2532,6 +2531,11 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // Prevent use of AH in a REX instruction by referencing AX instead. 
// Shift it down 8 bits. + // + // The current assumption of the register allocator is that isel + // won't generate explicit references to the GPR8_NOREX registers. If + // the allocator and/or the backend get enhanced to be more robust in + // that regard, this can be, and should be, removed. if (HiReg == X86::AH && Subtarget->is64Bit() && !SDValue(Node, 1).use_empty()) { SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 346dfbb..9ffe29f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -58,17 +58,14 @@ STATISTIC(NumTailCalls, "Number of tail calls"); static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2); -/// Generate a DAG to grab 128-bits from a vector > 128 bits. This -/// sets things up to match to an AVX VEXTRACTF128 instruction or a -/// simple subregister reference. Idx is an index in the 128 bits we -/// want. It need not be aligned to a 128-bit bounday. That makes -/// lowering EXTRACT_VECTOR_ELT operations easier. -static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, - SelectionDAG &DAG, SDLoc dl) { +static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal, + SelectionDAG &DAG, SDLoc dl, + unsigned vectorWidth) { + assert((vectorWidth == 128 || vectorWidth == 256) && + "Unsupported vector width"); EVT VT = Vec.getValueType(); - assert(VT.is256BitVector() && "Unexpected vector size!"); EVT ElVT = VT.getVectorElementType(); - unsigned Factor = VT.getSizeInBits()/128; + unsigned Factor = VT.getSizeInBits()/vectorWidth; EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT, VT.getVectorNumElements()/Factor); @@ -76,13 +73,12 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, if (Vec.getOpcode() == ISD::UNDEF) return DAG.getUNDEF(ResultVT); - // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR - // we can match to VEXTRACTF128. - unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits(); + // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR + unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits(); - // This is the index of the first element of the 128-bit chunk + // This is the index of the first element of the vectorWidth-bit chunk // we want. - unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128) + unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth) * ElemsPerChunk); // If the input is a buildvector just emit a smaller one. @@ -95,38 +91,71 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, VecIdx); return Result; + +} +/// Generate a DAG to grab 128-bits from a vector > 128 bits. This +/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128 +/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4 +/// instructions or a simple subregister reference. Idx is an index in the +/// 128 bits we want. It need not be aligned to a 128-bit bounday. That makes +/// lowering EXTRACT_VECTOR_ELT operations easier. +static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, + SelectionDAG &DAG, SDLoc dl) { + assert((Vec.getValueType().is256BitVector() || + Vec.getValueType().is512BitVector()) && "Unexpected vector size!"); + return ExtractSubVector(Vec, IdxVal, DAG, dl, 128); } -/// Generate a DAG to put 128-bits into a vector > 128 bits. This -/// sets things up to match to an AVX VINSERTF128 instruction or a -/// simple superregister reference. 
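ExtractSubVector above rounds an arbitrary element index down to the base of its 128- or 256-bit chunk so the extract can start on a chunk boundary. The index arithmetic, pulled out with a worked case (the helper name is illustrative):

static unsigned chunkBaseIndex(unsigned IdxVal, unsigned EltBits,
                               unsigned VecWidthBits) {
  unsigned ElemsPerChunk = VecWidthBits / EltBits;
  return ((IdxVal * EltBits) / VecWidthBits) * ElemsPerChunk;
}
// chunkBaseIndex(9, 32, 128) == 8: element 9 of a v16f32 lives in the
// 128-bit chunk that starts at element 8.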
Idx is an index in the 128 bits -/// we want. It need not be aligned to a 128-bit bounday. That makes -/// lowering INSERT_VECTOR_ELT operations easier. -static SDValue Insert128BitVector(SDValue Result, SDValue Vec, - unsigned IdxVal, SelectionDAG &DAG, - SDLoc dl) { +/// Generate a DAG to grab 256-bits from a 512-bit vector. +static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal, + SelectionDAG &DAG, SDLoc dl) { + assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!"); + return ExtractSubVector(Vec, IdxVal, DAG, dl, 256); +} + +static SDValue InsertSubVector(SDValue Result, SDValue Vec, + unsigned IdxVal, SelectionDAG &DAG, + SDLoc dl, unsigned vectorWidth) { + assert((vectorWidth == 128 || vectorWidth == 256) && + "Unsupported vector width"); // Inserting UNDEF is Result if (Vec.getOpcode() == ISD::UNDEF) return Result; - EVT VT = Vec.getValueType(); - assert(VT.is128BitVector() && "Unexpected vector size!"); - EVT ElVT = VT.getVectorElementType(); EVT ResultVT = Result.getValueType(); - // Insert the relevant 128 bits. - unsigned ElemsPerChunk = 128/ElVT.getSizeInBits(); + // Insert the relevant vectorWidth bits. + unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits(); - // This is the index of the first element of the 128-bit chunk + // This is the index of the first element of the vectorWidth-bit chunk // we want. - unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128) + unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth) * ElemsPerChunk); SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx); } +/// Generate a DAG to put 128-bits into a vector > 128 bits. This +/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or +/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a +/// simple superregister reference. Idx is an index in the 128 bits +/// we want. It need not be aligned to a 128-bit bounday. That makes +/// lowering INSERT_VECTOR_ELT operations easier. +static SDValue Insert128BitVector(SDValue Result, SDValue Vec, + unsigned IdxVal, SelectionDAG &DAG, + SDLoc dl) { + assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!"); + return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128); +} + +static SDValue Insert256BitVector(SDValue Result, SDValue Vec, + unsigned IdxVal, SelectionDAG &DAG, + SDLoc dl) { + assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!"); + return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256); +} /// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128 /// instructions. 
This is used because creating CONCAT_VECTOR nodes of @@ -139,6 +168,13 @@ static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, return Insert128BitVector(V, V2, NumElems/2, DAG, dl); } +static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT, + unsigned NumElems, SelectionDAG &DAG, + SDLoc dl) { + SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); + return Insert256BitVector(V, V2, NumElems/2, DAG, dl); +} + static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>(); bool is64Bit = Subtarget->is64Bit(); @@ -563,10 +599,6 @@ void X86TargetLowering::resetOperationActions() { setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); } - setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); - setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); - setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); if (Subtarget->is64Bit()) { setExceptionPointerRegister(X86::RAX); setExceptionSelectorRegister(X86::RDX); @@ -586,10 +618,12 @@ void X86TargetLowering::resetOperationActions() { // VASTART needs to be custom lowered to use the VarArgsFrameIndex setOperationAction(ISD::VASTART , MVT::Other, Custom); setOperationAction(ISD::VAEND , MVT::Other, Expand); - if (Subtarget->is64Bit()) { + if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) { + // TargetInfo::X86_64ABIBuiltinVaList setOperationAction(ISD::VAARG , MVT::Other, Custom); setOperationAction(ISD::VACOPY , MVT::Other, Custom); } else { + // TargetInfo::CharPtrBuiltinVaList setOperationAction(ISD::VAARG , MVT::Other, Expand); setOperationAction(ISD::VACOPY , MVT::Other, Expand); } @@ -1000,7 +1034,7 @@ void X86TargetLowering::resetOperationActions() { setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal); } - if (Subtarget->hasSSE41()) { + if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) { setOperationAction(ISD::FFLOOR, MVT::f32, Legal); setOperationAction(ISD::FCEIL, MVT::f32, Legal); setOperationAction(ISD::FTRUNC, MVT::f32, Legal); @@ -1263,6 +1297,150 @@ void X86TargetLowering::resetOperationActions() { } } + if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) { + addRegisterClass(MVT::v16i32, &X86::VR512RegClass); + addRegisterClass(MVT::v16f32, &X86::VR512RegClass); + addRegisterClass(MVT::v8i64, &X86::VR512RegClass); + addRegisterClass(MVT::v8f64, &X86::VR512RegClass); + + addRegisterClass(MVT::v8i1, &X86::VK8RegClass); + addRegisterClass(MVT::v16i1, &X86::VK16RegClass); + + setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, Legal); + setOperationAction(ISD::LOAD, MVT::v16f32, Legal); + setOperationAction(ISD::LOAD, MVT::v8f64, Legal); + setOperationAction(ISD::LOAD, MVT::v8i64, Legal); + setOperationAction(ISD::LOAD, MVT::v16i32, Legal); + setOperationAction(ISD::LOAD, MVT::v16i1, Legal); + + setOperationAction(ISD::FADD, MVT::v16f32, Legal); + setOperationAction(ISD::FSUB, MVT::v16f32, Legal); + setOperationAction(ISD::FMUL, MVT::v16f32, Legal); + setOperationAction(ISD::FDIV, MVT::v16f32, Legal); + setOperationAction(ISD::FSQRT, MVT::v16f32, Legal); + setOperationAction(ISD::FNEG, MVT::v16f32, Custom); + + setOperationAction(ISD::FADD, MVT::v8f64, Legal); + setOperationAction(ISD::FSUB, MVT::v8f64, Legal); + setOperationAction(ISD::FMUL, MVT::v8f64, Legal); + setOperationAction(ISD::FDIV, MVT::v8f64, Legal); + setOperationAction(ISD::FSQRT, MVT::v8f64, Legal); + setOperationAction(ISD::FNEG, MVT::v8f64, Custom); + setOperationAction(ISD::FMA, MVT::v8f64, 
Legal); + setOperationAction(ISD::FMA, MVT::v16f32, Legal); + setOperationAction(ISD::SDIV, MVT::v16i32, Custom); + + + setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal); + setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal); + setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal); + + setOperationAction(ISD::TRUNCATE, MVT::i1, Legal); + setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom); + setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom); + setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom); + setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom); + setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); + setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom); + + setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom); + setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom); + setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom); + setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom); + setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom); + + setOperationAction(ISD::SETCC, MVT::v16i1, Custom); + setOperationAction(ISD::SETCC, MVT::v8i1, Custom); + + setOperationAction(ISD::MUL, MVT::v8i64, Custom); + + setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom); + setOperationAction(ISD::SELECT, MVT::v8f64, Custom); + setOperationAction(ISD::SELECT, MVT::v8i64, Custom); + setOperationAction(ISD::SELECT, MVT::v16f32, Custom); + + setOperationAction(ISD::ADD, MVT::v8i64, Legal); + setOperationAction(ISD::ADD, MVT::v16i32, Legal); + + setOperationAction(ISD::SUB, MVT::v8i64, Legal); + setOperationAction(ISD::SUB, MVT::v16i32, Legal); + + setOperationAction(ISD::MUL, MVT::v16i32, Legal); + + setOperationAction(ISD::SRL, MVT::v8i64, Custom); + setOperationAction(ISD::SRL, MVT::v16i32, Custom); + + setOperationAction(ISD::SHL, MVT::v8i64, Custom); + setOperationAction(ISD::SHL, MVT::v16i32, Custom); + + setOperationAction(ISD::SRA, MVT::v8i64, Custom); + setOperationAction(ISD::SRA, MVT::v16i32, Custom); + + setOperationAction(ISD::AND, MVT::v8i64, Legal); + setOperationAction(ISD::OR, MVT::v8i64, Legal); + setOperationAction(ISD::XOR, MVT::v8i64, Legal); + + // Custom lower several nodes. + for (int i = MVT::FIRST_VECTOR_VALUETYPE; + i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { + MVT VT = (MVT::SimpleValueType)i; + + unsigned EltSize = VT.getVectorElementType().getSizeInBits(); + // Extract subvector is special because the value type + // (result) is 256/128-bit but the source is 512-bit wide. 
+ if (VT.is128BitVector() || VT.is256BitVector()) + setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); + + if (VT.getVectorElementType() == MVT::i1) + setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); + + // Do not attempt to custom lower other non-512-bit vectors + if (!VT.is512BitVector()) + continue; + + if (VT != MVT::v8i64) { + setOperationAction(ISD::XOR, VT, Promote); + AddPromotedToType (ISD::XOR, VT, MVT::v8i64); + setOperationAction(ISD::OR, VT, Promote); + AddPromotedToType (ISD::OR, VT, MVT::v8i64); + setOperationAction(ISD::AND, VT, Promote); + AddPromotedToType (ISD::AND, VT, MVT::v8i64); + } + if ( EltSize >= 32) { + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::BUILD_VECTOR, VT, Custom); + setOperationAction(ISD::VSELECT, VT, Legal); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); + } + } + for (int i = MVT::v32i8; i != MVT::v8i64; ++i) { + MVT VT = (MVT::SimpleValueType)i; + + // Do not attempt to promote non-512-bit vectors + if (!VT.is512BitVector()) + continue; + + setOperationAction(ISD::LOAD, VT, Promote); + AddPromotedToType (ISD::LOAD, VT, MVT::v8i64); + setOperationAction(ISD::SELECT, VT, Promote); + AddPromotedToType (ISD::SELECT, VT, MVT::v8i64); + } + }// has AVX-512 + // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion // of this type with custom code. for (int VT = MVT::FIRST_VECTOR_VALUETYPE; @@ -1884,13 +2062,19 @@ static bool IsTailCallConvention(CallingConv::ID CC) { CC == CallingConv::HiPE); } +/// \brief Return true if the calling convention is a C calling convention.
+static bool IsCCallConvention(CallingConv::ID CC) { + return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 || + CC == CallingConv::X86_64_SysV); +} + bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) return false; CallSite CS(CI); CallingConv::ID CalleeCC = CS.getCallingConv(); - if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) + if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC)) return false; return true; @@ -1965,7 +2149,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, MachineFrameInfo *MFI = MF.getFrameInfo(); bool Is64Bit = Subtarget->is64Bit(); bool IsWindows = Subtarget->isTargetWindows(); - bool IsWin64 = Subtarget->isTargetWin64(); + bool IsWin64 = Subtarget->isCallingConvWin64(CallConv); assert(!(isVarArg && IsTailCallConvention(CallConv)) && "Var args not supported with calling convention fastcc, ghc or hipe"); @@ -1976,9 +2160,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, ArgLocs, *DAG.getContext()); // Allocate shadow area for Win64 - if (IsWin64) { + if (IsWin64) CCInfo.AllocateStack(32, 8); - } CCInfo.AnalyzeFormalArguments(Ins, CC_X86); @@ -2004,12 +2187,18 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, RC = &X86::FR32RegClass; else if (RegVT == MVT::f64) RC = &X86::FR64RegClass; + else if (RegVT.is512BitVector()) + RC = &X86::VR512RegClass; else if (RegVT.is256BitVector()) RC = &X86::VR256RegClass; else if (RegVT.is128BitVector()) RC = &X86::VR128RegClass; else if (RegVT == MVT::x86mmx) RC = &X86::VR64RegClass; + else if (RegVT == MVT::v8i1) + RC = &X86::VK8RegClass; + else if (RegVT == MVT::v16i1) + RC = &X86::VK16RegClass; else llvm_unreachable("Unknown argument type!"); @@ -2267,7 +2456,8 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, if (!FPDiff) return Chain; // Calculate the new stack slot for the return address. 
int NewReturnAddrFI = - MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); + MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize, + false); SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, MachinePointerInfo::getFixedStack(NewReturnAddrFI), @@ -2279,10 +2469,10 @@ SDValue X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDLoc &dl = CLI.DL; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; CallingConv::ID CallConv = CLI.CallConv; @@ -2291,7 +2481,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, MachineFunction &MF = DAG.getMachineFunction(); bool Is64Bit = Subtarget->is64Bit(); - bool IsWin64 = Subtarget->isTargetWin64(); + bool IsWin64 = Subtarget->isCallingConvWin64(CallConv); bool IsWindows = Subtarget->isTargetWindows(); StructReturnType SR = callIsStructReturn(Outs); bool IsSibcall = false; @@ -2324,9 +2514,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, ArgLocs, *DAG.getContext()); // Allocate shadow area for Win64 - if (IsWin64) { + if (IsWin64) CCInfo.AllocateStack(32, 8); - } CCInfo.AnalyzeCallOperands(Outs, CC_X86); @@ -2837,13 +3026,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { - if (!IsTailCallConvention(CalleeCC) && - CalleeCC != CallingConv::C) + if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC)) return false; // If -tailcallopt is specified, make fastcc functions tail-callable. const MachineFunction &MF = DAG.getMachineFunction(); - const Function *CallerF = DAG.getMachineFunction().getFunction(); + const Function *CallerF = MF.getFunction(); // If the function return type is x86_fp80 and the callee return type is not, // then the FP_EXTEND of the call result is not a nop. It's not safe to @@ -2853,6 +3041,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CallerCC = CallerF->getCallingConv(); bool CCMatch = CallerCC == CalleeCC; + bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC); + bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC); if (getTargetMachine().Options.GuaranteedTailCallOpt) { if (IsTailCallConvention(CalleeCC) && CCMatch) @@ -2886,7 +3076,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // Optimizing for varargs on Win64 is unlikely to be safe without // additional testing. 
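With the Win64 and SysV conventions now selectable explicitly, the hunk below keys this check off the conventions of both sides of the call rather than the target triple. A condensed sketch of the new predicate, assuming the surrounding varargs guard and the IsCalleeWin64/IsCallerWin64 flags computed earlier:

static bool varArgTailCallBlocked(bool IsVarArg, bool IsCalleeWin64,
                                  bool IsCallerWin64) {
  // Reject varargs tail calls when either convention is Win64.
  return IsVarArg && (IsCalleeWin64 || IsCallerWin64);
}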
- if (Subtarget->isTargetWin64()) + if (IsCalleeWin64 || IsCallerWin64) return false; SmallVector<CCValAssign, 16> ArgLocs; @@ -2961,9 +3151,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, getTargetMachine(), ArgLocs, *DAG.getContext()); // Allocate shadow area for Win64 - if (Subtarget->isTargetWin64()) { + if (IsCalleeWin64) CCInfo.AllocateStack(32, 8); - } CCInfo.AnalyzeCallOperands(Outs, CC_X86); if (CCInfo.getNextStackOffset()) { @@ -3135,7 +3324,8 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { if (ReturnAddrIndex == 0) { // Set up a frame object for the return address. unsigned SlotSize = RegInfo->getSlotSize(); - ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, + ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, + -(int64_t)SlotSize, false); FuncInfo->setRAIndex(ReturnAddrIndex); } @@ -3696,12 +3886,10 @@ static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, unsigned NumLanes = VT.getSizeInBits()/128; unsigned NumLaneElts = NumElts/NumLanes; - for (unsigned l = 0; l != NumLanes; ++l) { - for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; - i != (l+1)*NumLaneElts; - i += 2, ++j) { - int BitI = Mask[i]; - int BitI1 = Mask[i+1]; + for (unsigned l = 0; l != NumElts; l += NumLaneElts) { + for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) { + int BitI = Mask[l+i]; + int BitI1 = Mask[l+i+1]; if (!isUndefOrEqual(BitI, j)) return false; if (V2IsSplat) { @@ -3735,11 +3923,10 @@ static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, unsigned NumLanes = VT.getSizeInBits()/128; unsigned NumLaneElts = NumElts/NumLanes; - for (unsigned l = 0; l != NumLanes; ++l) { - for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; - i != (l+1)*NumLaneElts; i += 2, ++j) { - int BitI = Mask[i]; - int BitI1 = Mask[i+1]; + for (unsigned l = 0; l != NumElts; l += NumLaneElts) { + for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) { + int BitI = Mask[l+i]; + int BitI1 = Mask[l+i+1]; if (!isUndefOrEqual(BitI, j)) return false; if (V2IsSplat) { @@ -3780,12 +3967,10 @@ static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { unsigned NumLanes = VT.getSizeInBits()/128; unsigned NumLaneElts = NumElts/NumLanes; - for (unsigned l = 0; l != NumLanes; ++l) { - for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; - i != (l+1)*NumLaneElts; - i += 2, ++j) { - int BitI = Mask[i]; - int BitI1 = Mask[i+1]; + for (unsigned l = 0; l != NumElts; l += NumLaneElts) { + for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) { + int BitI = Mask[l+i]; + int BitI1 = Mask[l+i+1]; if (!isUndefOrEqual(BitI, j)) return false; @@ -3815,11 +4000,10 @@ static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { unsigned NumLanes = VT.getSizeInBits()/128; unsigned NumLaneElts = NumElts/NumLanes; - for (unsigned l = 0; l != NumLanes; ++l) { - for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; - i != (l+1)*NumLaneElts; i += 2, ++j) { - int BitI = Mask[i]; - int BitI1 = Mask[i+1]; + for (unsigned l = 0; l != NumElts; l += NumLaneElts) { + for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) { + int BitI = Mask[l+i]; + int BitI1 = Mask[l+i+1]; if (!isUndefOrEqual(BitI, j)) return false; if (!isUndefOrEqual(BitI1, j)) @@ -4051,42 +4235,59 @@ static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) { return true; } -/// isVEXTRACTF128Index - Return true if the specified +/// isVEXTRACTIndex - Return true if the specified /// 
EXTRACT_SUBVECTOR operand specifies a vector extract that is -/// suitable for input to VEXTRACTF128. -bool X86::isVEXTRACTF128Index(SDNode *N) { +/// suitable for instruction that extract 128 or 256 bit vectors +static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) { + assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width"); if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) return false; - // The index should be aligned on a 128-bit boundary. + // The index should be aligned on a vecWidth-bit boundary. uint64_t Index = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); MVT VT = N->getValueType(0).getSimpleVT(); unsigned ElSize = VT.getVectorElementType().getSizeInBits(); - bool Result = (Index * ElSize) % 128 == 0; + bool Result = (Index * ElSize) % vecWidth == 0; return Result; } -/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR +/// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR /// operand specifies a subvector insert that is suitable for input to -/// VINSERTF128. -bool X86::isVINSERTF128Index(SDNode *N) { +/// insertion of 128 or 256-bit subvectors +static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) { + assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width"); if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) return false; - - // The index should be aligned on a 128-bit boundary. + // The index should be aligned on a vecWidth-bit boundary. uint64_t Index = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); MVT VT = N->getValueType(0).getSimpleVT(); unsigned ElSize = VT.getVectorElementType().getSizeInBits(); - bool Result = (Index * ElSize) % 128 == 0; + bool Result = (Index * ElSize) % vecWidth == 0; return Result; } +bool X86::isVINSERT128Index(SDNode *N) { + return isVINSERTIndex(N, 128); +} + +bool X86::isVINSERT256Index(SDNode *N) { + return isVINSERTIndex(N, 256); +} + +bool X86::isVEXTRACT128Index(SDNode *N) { + return isVEXTRACTIndex(N, 128); +} + +bool X86::isVEXTRACT256Index(SDNode *N) { + return isVEXTRACTIndex(N, 256); +} + /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. /// Handles 128-bit and 256-bit. @@ -4190,12 +4391,10 @@ static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { return (Val - i) * EltSize; } -/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate -/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 -/// instructions. -unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { +static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) { + assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width"); if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) - llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); + llvm_unreachable("Illegal extract subvector for VEXTRACT"); uint64_t Index = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); @@ -4203,16 +4402,14 @@ unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { MVT VecVT = N->getOperand(0).getValueType().getSimpleVT(); MVT ElVT = VecVT.getVectorElementType(); - unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); + unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits(); return Index / NumElemsPerChunk; } -/// getInsertVINSERTF128Immediate - Return the appropriate immediate -/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 -/// instructions. 
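getExtractVEXTRACTImmediate above reduces to a chunk number, Index / NumElemsPerChunk, now parameterized over the subvector width. A worked instance (the helper name is illustrative):

static unsigned subvectorImmediate(unsigned Index, unsigned EltBits,
                                   unsigned VecWidthBits) {
  unsigned NumElemsPerChunk = VecWidthBits / EltBits;
  return Index / NumElemsPerChunk;
}
// subvectorImmediate(4, 32, 128) == 1: elements 4..7 of a v8f32 form its
// upper half, so VEXTRACTF128 takes immediate 1.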
-unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { +static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) { + assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width"); if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) - llvm_unreachable("Illegal insert subvector for VINSERTF128"); + llvm_unreachable("Illegal insert subvector for VINSERT"); uint64_t Index = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); @@ -4220,10 +4417,38 @@ unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { MVT VecVT = N->getValueType(0).getSimpleVT(); MVT ElVT = VecVT.getVectorElementType(); - unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); + unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits(); return Index / NumElemsPerChunk; } +/// getExtractVEXTRACT128Immediate - Return the appropriate immediate +/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 +/// and VEXTRACTI128 instructions. +unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) { + return getExtractVEXTRACTImmediate(N, 128); +} + +/// getExtractVEXTRACT256Immediate - Return the appropriate immediate +/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4 +/// and VEXTRACTI64x4 instructions. +unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) { + return getExtractVEXTRACTImmediate(N, 256); +} + +/// getInsertVINSERT128Immediate - Return the appropriate immediate +/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 +/// and VINSERTI128 instructions. +unsigned X86::getInsertVINSERT128Immediate(SDNode *N) { + return getInsertVINSERTImmediate(N, 128); +} + +/// getInsertVINSERT256Immediate - Return the appropriate immediate +/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4 +/// and VINSERTI64x4 instructions. +unsigned X86::getInsertVINSERT256Immediate(SDNode *N) { + return getInsertVINSERTImmediate(N, 256); +} + /// getShuffleCLImmediate - Return the appropriate immediate to shuffle /// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. /// Handles 256-bit. @@ -5063,10 +5288,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl, LD->getPointerInfo().getWithOffset(StartOffset), false, false, false, 0); - SmallVector<int, 8> Mask; - for (unsigned i = 0; i != NumElems; ++i) - Mask.push_back(EltNo); - + SmallVector<int, 8> Mask(NumElems, EltNo); return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); } @@ -5184,7 +5406,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { MVT VT = Op.getValueType().getSimpleVT(); SDLoc dl(Op); - assert((VT.is128BitVector() || VT.is256BitVector()) && + assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) && "Unsupported vector type for broadcast."); SDValue Ld; @@ -5240,13 +5462,18 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { // The scalar_to_vector node and the suspected // load node must have exactly one user. // Constants may have multiple users.
- if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse())) + + // AVX-512 has register version of the broadcast + bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() && + Ld.getValueType().getSizeInBits() >= 32; + if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) && + !hasRegVer)) return SDValue(); break; } } - bool Is256 = VT.is256BitVector(); + bool IsGE256 = (VT.getSizeInBits() >= 256); // Handle the broadcasting a single constant scalar from the constant pool // into a vector. On Sandybridge it is still better to load a constant vector @@ -5256,7 +5483,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { assert(!CVT.isVector() && "Must not broadcast a vector type"); unsigned ScalarSize = CVT.getSizeInBits(); - if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) { + if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)) { const Constant *C = 0; if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) C = CI->getConstantIntValue(); @@ -5280,14 +5507,14 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { // Handle AVX2 in-register broadcasts. if (!IsLoad && Subtarget->hasInt256() && - (ScalarSize == 32 || (Is256 && ScalarSize == 64))) + (ScalarSize == 32 || (IsGE256 && ScalarSize == 64))) return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); // The scalar source must be a normal load. if (!IsLoad) return SDValue(); - if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) + if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)) return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); // The integer check is needed for the 64-bit into 128-bit so it doesn't match @@ -5375,6 +5602,108 @@ X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { return NV; } +// Lower BUILD_VECTOR operation for v8i1 and v16i1 types. 
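LowerBUILD_VECTORvXi1, which follows, folds an all-constant i1 build vector into a single immediate, one bit per lane, before bitcasting it to a mask register. The accumulation it performs, as a host-side sketch:

#include <cstdint>

static uint16_t maskImmediate(const bool Lanes[], unsigned NumLanes) {
  uint16_t Imm = 0;
  for (unsigned i = 0; i != NumLanes; ++i)
    if (Lanes[i])
      Imm |= uint16_t(1) << i; // lane i sets bit i, as in the loop below
  return Imm;
}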
+SDValue +X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const { + + EVT VT = Op.getValueType(); + assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) && + "Unexpected type in LowerBUILD_VECTORvXi1!"); + + SDLoc dl(Op); + if (ISD::isBuildVectorAllZeros(Op.getNode())) { + SDValue Cst = DAG.getTargetConstant(0, MVT::i1); + SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, + Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, + Ops, VT.getVectorNumElements()); + } + + if (ISD::isBuildVectorAllOnes(Op.getNode())) { + SDValue Cst = DAG.getTargetConstant(1, MVT::i1); + SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, + Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, + Ops, VT.getVectorNumElements()); + } + + bool AllContants = true; + uint64_t Immediate = 0; + for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) { + SDValue In = Op.getOperand(idx); + if (In.getOpcode() == ISD::UNDEF) + continue; + if (!isa<ConstantSDNode>(In)) { + AllContants = false; + break; + } + if (cast<ConstantSDNode>(In)->getZExtValue()) + Immediate |= (1ULL << idx); + } + + if (AllContants) { + SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, + DAG.getConstant(Immediate, MVT::i16)); + return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask, + DAG.getIntPtrConstant(0)); + } + + if (!isSplatVector(Op.getNode())) + llvm_unreachable("Unsupported predicate operation"); + + SDValue In = Op.getOperand(0); + SDValue EFLAGS, X86CC; + if (In.getOpcode() == ISD::SETCC) { + SDValue Op0 = In.getOperand(0); + SDValue Op1 = In.getOperand(1); + ISD::CondCode CC = cast<CondCodeSDNode>(In.getOperand(2))->get(); + bool isFP = Op1.getValueType().isFloatingPoint(); + unsigned X86CCVal = TranslateX86CC(CC, isFP, Op0, Op1, DAG); + + assert(X86CCVal != X86::COND_INVALID && "Unsupported predicate operation"); + + X86CC = DAG.getConstant(X86CCVal, MVT::i8); + EFLAGS = EmitCmp(Op0, Op1, X86CCVal, DAG); + EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); + } else if (In.getOpcode() == X86ISD::SETCC) { + X86CC = In.getOperand(0); + EFLAGS = In.getOperand(1); + } else { + // The algorithm: + // Bit1 = In & 0x1 + // if (Bit1 != 0) + // ZF = 0 + // else + // ZF = 1 + // if (ZF == 0) + // res = allOnes ### CMOVNE -1, %res + // else + // res = allZero + MVT InVT = In.getValueType().getSimpleVT(); + SDValue Bit1 = DAG.getNode(ISD::AND, dl, InVT, In, DAG.getConstant(1, InVT)); + EFLAGS = EmitTest(Bit1, X86::COND_NE, DAG); + X86CC = DAG.getConstant(X86::COND_NE, MVT::i8); + } + + if (VT == MVT::v16i1) { + SDValue Cst1 = DAG.getConstant(-1, MVT::i16); + SDValue Cst0 = DAG.getConstant(0, MVT::i16); + SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i16, + Cst0, Cst1, X86CC, EFLAGS); + return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp); + } + + if (VT == MVT::v8i1) { + SDValue Cst1 = DAG.getConstant(-1, MVT::i32); + SDValue Cst0 = DAG.getConstant(0, MVT::i32); + SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i32, + Cst0, Cst1, X86CC, EFLAGS); + CmovOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CmovOp); + return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp); + } + llvm_unreachable("Unsupported predicate operation"); +} + SDValue X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); @@ -5383,6 +5712,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { MVT ExtVT = VT.getVectorElementType(); unsigned NumElems = Op.getNumOperands(); + 
// Generate vectors for predicate vectors. + if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512()) + return LowerBUILD_VECTORvXi1(Op, DAG); + // Vectors containing all zeros can be matched by pxor and xorps later if (ISD::isBuildVectorAllZeros(Op.getNode())) { // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd @@ -5713,19 +6046,22 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { SDLoc dl(Op); MVT ResVT = Op.getValueType().getSimpleVT(); - assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); + assert((ResVT.is256BitVector() || + ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide"); SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); unsigned NumElems = ResVT.getVectorNumElements(); + if(ResVT.is256BitVector()) + return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); - return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); + return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl); } static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { assert(Op.getNumOperands() == 2); - // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors + // AVX/AVX-512 can use the vinsertf128 instruction to create 256-bit vectors // from two other 128-bit ones. return LowerAVXCONCAT_VECTORS(Op, DAG); } @@ -7195,6 +7531,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { SDValue X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { + SDLoc dl(Op); if (!isa<ConstantSDNode>(Op.getOperand(1))) return SDValue(); @@ -7203,17 +7540,19 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, // If this is a 256-bit vector result, first extract the 128-bit vector and // then extract the element from the 128-bit vector. - if (VecVT.is256BitVector()) { - SDLoc dl(Op.getNode()); - unsigned NumElems = VecVT.getVectorNumElements(); + if (VecVT.is256BitVector() || VecVT.is512BitVector()) { SDValue Idx = Op.getOperand(1); unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); // Get the 128-bit vector. Vec = Extract128BitVector(Vec, IdxVal, DAG, dl); + EVT EltVT = VecVT.getVectorElementType(); + + unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits(); - if (IdxVal >= NumElems/2) - IdxVal -= NumElems/2; + //if (IdxVal >= NumElems/2) + // IdxVal -= NumElems/2; + IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk; return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, DAG.getConstant(IdxVal, MVT::i32)); } @@ -7227,7 +7566,6 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, } MVT VT = Op.getValueType().getSimpleVT(); - SDLoc dl(Op); // TODO: handle v16i8. if (VT.getSizeInBits() == 16) { SDValue Vec = Op.getOperand(0); @@ -7348,19 +7686,20 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { // If this is a 256-bit vector result, first extract the 128-bit vector, // insert the element into the extracted half and then place it back. - if (VT.is256BitVector()) { + if (VT.is256BitVector() || VT.is512BitVector()) { if (!isa<ConstantSDNode>(N2)) return SDValue(); // Get the desired 128-bit vector half. - unsigned NumElems = VT.getVectorNumElements(); unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl); // Insert the element into the desired half. 
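Both the EXTRACT_VECTOR_ELT hunk above and the INSERT_VECTOR_ELT hunk continuing below normalize the element index into its 128-bit chunk; IdxVal - (IdxVal / N) * N is simply IdxVal % N, written without the modulo operator. A worked sketch:

static unsigned laneWithin128BitChunk(unsigned IdxVal, unsigned EltBits) {
  unsigned NumEltsIn128 = 128 / EltBits;
  return IdxVal % NumEltsIn128; // == IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128
}
// laneWithin128BitChunk(13, 32) == 1: element 13 of a v16i32 is lane 1
// of its 128-bit chunk.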
- bool Upper = IdxVal >= NumElems/2; + unsigned NumEltsIn128 = 128/EltVT.getSizeInBits(); + unsigned IdxIn128 = IdxVal - (IdxVal/NumEltsIn128) * NumEltsIn128; + V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1, - DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal, MVT::i32)); + DAG.getConstant(IdxIn128, MVT::i32)); // Insert the changed part back to the 256-bit vector return Insert128BitVector(N0, V, IdxVal, DAG, dl); @@ -7393,9 +7732,10 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { // vector and then insert into the 256-bit vector. if (!OpVT.is128BitVector()) { // Insert into a 128-bit vector. + unsigned SizeFactor = OpVT.getSizeInBits()/128; EVT VT128 = EVT::getVectorVT(*Context, OpVT.getVectorElementType(), - OpVT.getVectorNumElements() / 2); + OpVT.getVectorNumElements() / SizeFactor); Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); @@ -7418,16 +7758,22 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { // upper bits of a vector. static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { - if (Subtarget->hasFp256()) { - SDLoc dl(Op.getNode()); - SDValue Vec = Op.getNode()->getOperand(0); - SDValue Idx = Op.getNode()->getOperand(1); + SDLoc dl(Op); + SDValue In = Op.getOperand(0); + SDValue Idx = Op.getOperand(1); + unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); + EVT ResVT = Op.getValueType(); + EVT InVT = In.getValueType(); - if (Op.getNode()->getValueType(0).is128BitVector() && - Vec.getNode()->getValueType(0).is256BitVector() && + if (Subtarget->hasFp256()) { + if (ResVT.is128BitVector() && + (InVT.is256BitVector() || InVT.is512BitVector()) && isa<ConstantSDNode>(Idx)) { - unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); - return Extract128BitVector(Vec, IdxVal, DAG, dl); + return Extract128BitVector(In, IdxVal, DAG, dl); + } + if (ResVT.is256BitVector() && InVT.is512BitVector() && + isa<ConstantSDNode>(Idx)) { + return Extract256BitVector(In, IdxVal, DAG, dl); } } return SDValue(); @@ -7444,12 +7790,20 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, SDValue SubVec = Op.getNode()->getOperand(1); SDValue Idx = Op.getNode()->getOperand(2); - if (Op.getNode()->getValueType(0).is256BitVector() && + if ((Op.getNode()->getValueType(0).is256BitVector() || + Op.getNode()->getValueType(0).is512BitVector()) && SubVec.getNode()->getValueType(0).is128BitVector() && isa<ConstantSDNode>(Idx)) { unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl); } + + if (Op.getNode()->getValueType(0).is512BitVector() && + SubVec.getNode()->getValueType(0).is256BitVector() && + isa<ConstantSDNode>(Idx)) { + unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); + return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl); + } } return SDValue(); } @@ -8100,7 +8454,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, LLVMContext *Context = DAG.getContext(); // Build some magic constants. 
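The magic words below are the high halves of the doubles 2^52 (0x43300000'00000000) and 2^84 (0x45300000'00000000): ORing the low and high 32-bit halves of the u64 into those mantissas and then cancelling the two biases reconstructs the value. A standalone demonstration of the trick in plain C++ (not the DAG code; the scalar subtract-and-add here stands in for the vector arithmetic the lowering emits, whose remainder is outside this hunk):

#include <cassert>
#include <cstdint>
#include <cstring>

// Classic u64 -> f64 conversion via exponent biasing: glue each 32-bit
// half of X under a fixed power-of-two exponent, then cancel the biases.
static double u64ToDouble(uint64_t X) {
  uint64_t LoBits = (uint64_t(0x43300000) << 32) | uint32_t(X);       // 2^52 + lo
  uint64_t HiBits = (uint64_t(0x45300000) << 32) | uint32_t(X >> 32); // 2^84 + hi*2^32
  double LoD, HiD;
  std::memcpy(&LoD, &LoBits, sizeof(LoD));
  std::memcpy(&HiD, &HiBits, sizeof(HiD));
  return (HiD - 0x1.0p84) + (LoD - 0x1.0p52); // biases cancel exactly
}

int main() {
  assert(u64ToDouble(0) == 0.0);
  assert(u64ToDouble(1ULL << 33) == 0x1.0p33);
  assert(u64ToDouble(12345678901234ULL) == 12345678901234.0);
  return 0;
}
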
- const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 }; + static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 }; Constant *C0 = ConstantDataVector::get(*Context, CV0); SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); @@ -8837,7 +9191,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, Opnds.push_back(N->getOperand(1)); for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) { - SmallVector<SDValue, 8>::const_iterator I = Opnds.begin() + Slot; + SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot; // BFS traverse all OR'd operands. if (I->getOpcode() == ISD::OR) { Opnds.push_back(I->getOperand(0)); @@ -9236,6 +9590,51 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, return SDValue(); } +/// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point +/// mask CMPs. +static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0, + SDValue &Op1) { + unsigned SSECC; + bool Swap = false; + + // SSE Condition code mapping: + // 0 - EQ + // 1 - LT + // 2 - LE + // 3 - UNORD + // 4 - NEQ + // 5 - NLT + // 6 - NLE + // 7 - ORD + switch (SetCCOpcode) { + default: llvm_unreachable("Unexpected SETCC condition"); + case ISD::SETOEQ: + case ISD::SETEQ: SSECC = 0; break; + case ISD::SETOGT: + case ISD::SETGT: Swap = true; // Fallthrough + case ISD::SETLT: + case ISD::SETOLT: SSECC = 1; break; + case ISD::SETOGE: + case ISD::SETGE: Swap = true; // Fallthrough + case ISD::SETLE: + case ISD::SETOLE: SSECC = 2; break; + case ISD::SETUO: SSECC = 3; break; + case ISD::SETUNE: + case ISD::SETNE: SSECC = 4; break; + case ISD::SETULE: Swap = true; // Fallthrough + case ISD::SETUGE: SSECC = 5; break; + case ISD::SETULT: Swap = true; // Fallthrough + case ISD::SETUGT: SSECC = 6; break; + case ISD::SETO: SSECC = 7; break; + case ISD::SETUEQ: + case ISD::SETONE: SSECC = 8; break; + } + if (Swap) + std::swap(Op0, Op1); + + return SSECC; +} + // Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128 // ones, and then concatenate the result back. static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { @@ -9283,43 +9682,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, assert(EltVT == MVT::f32 || EltVT == MVT::f64); #endif - unsigned SSECC; - bool Swap = false; - - // SSE Condition code mapping: - // 0 - EQ - // 1 - LT - // 2 - LE - // 3 - UNORD - // 4 - NEQ - // 5 - NLT - // 6 - NLE - // 7 - ORD - switch (SetCCOpcode) { - default: llvm_unreachable("Unexpected SETCC condition"); - case ISD::SETOEQ: - case ISD::SETEQ: SSECC = 0; break; - case ISD::SETOGT: - case ISD::SETGT: Swap = true; // Fallthrough - case ISD::SETLT: - case ISD::SETOLT: SSECC = 1; break; - case ISD::SETOGE: - case ISD::SETGE: Swap = true; // Fallthrough - case ISD::SETLE: - case ISD::SETOLE: SSECC = 2; break; - case ISD::SETUO: SSECC = 3; break; - case ISD::SETUNE: - case ISD::SETNE: SSECC = 4; break; - case ISD::SETULE: Swap = true; // Fallthrough - case ISD::SETUGE: SSECC = 5; break; - case ISD::SETULT: Swap = true; // Fallthrough - case ISD::SETUGT: SSECC = 6; break; - case ISD::SETO: SSECC = 7; break; - case ISD::SETUEQ: - case ISD::SETONE: SSECC = 8; break; - } - if (Swap) - std::swap(Op0, Op1); + unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1); // In the two special cases we can't handle, emit two comparisons. 
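The two predicates that fall into this SSECC == 8 bucket are SETUEQ (unordered or equal) and SETONE (ordered and not equal); no single SSE compare predicate implements them, so the lowering pairs two compares and combines the masks. A scalar model of the semantics being synthesized, in plain C++ (the names in the comments refer to the SSE compare predicates):

#include <cassert>
#include <cmath>

static bool cmpUEQ(double A, double B) {
  // CMPUNORD || CMPEQ
  return (std::isnan(A) || std::isnan(B)) || A == B;
}

static bool cmpONE(double A, double B) {
  // CMPORD && CMPNEQ
  return !(std::isnan(A) || std::isnan(B)) && A != B;
}

int main() {
  double QNaN = std::nan("");
  assert(cmpUEQ(1.0, 1.0) && cmpUEQ(QNaN, 2.0) && !cmpUEQ(1.0, 2.0));
  assert(cmpONE(1.0, 2.0) && !cmpONE(QNaN, 2.0) && !cmpONE(3.0, 3.0));
  return 0;
}
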
if (SSECC == 8) { @@ -9351,8 +9714,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, // GT and EQ comparisons for integer, swapping operands and multiple // operations may be required for some comparisons. unsigned Opc; - bool Swap = false, Invert = false, FlipSigns = false; - + bool Swap = false, Invert = false, FlipSigns = false, MinMax = false; + switch (SetCCOpcode) { default: llvm_unreachable("Unexpected SETCC condition"); case ISD::SETNE: Invert = true; @@ -9366,6 +9729,23 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, case ISD::SETUGE: Swap = true; case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break; } + + // Special case: Use min/max operations for SETULE/SETUGE + MVT VET = VT.getVectorElementType(); + bool hasMinMax = + (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32)) + || (Subtarget->hasSSE2() && (VET == MVT::i8)); + + if (hasMinMax) { + switch (SetCCOpcode) { + default: break; + case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break; + case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break; + } + + if (MinMax) { Swap = false; Invert = false; FlipSigns = false; } + } + if (Swap) std::swap(Op0, Op1); @@ -9399,8 +9779,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1); // Create masks for only the low parts/high parts of the 64 bit integers. - const int MaskHi[] = { 1, 1, 3, 3 }; - const int MaskLo[] = { 0, 0, 2, 2 }; + static const int MaskHi[] = { 1, 1, 3, 3 }; + static const int MaskLo[] = { 0, 0, 2, 2 }; SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi); SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo); SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi); @@ -9427,7 +9807,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1); // Make sure the lower and upper halves are both all-ones. - const int Mask[] = { 1, 0, 3, 2 }; + static const int Mask[] = { 1, 0, 3, 2 }; SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask); Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf); @@ -9452,6 +9832,9 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, // If the logical-not of the result is required, perform that now. if (Invert) Result = DAG.getNOT(dl, Result, VT); + + if (MinMax) + Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result); return Result; } @@ -9560,8 +9943,30 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Op1 = Op.getOperand(1); SDValue Op2 = Op.getOperand(2); SDLoc DL(Op); + EVT VT = Op1.getValueType(); SDValue CC; + // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops + // are available. Otherwise fp cmovs get lowered into a less efficient branch + // sequence later on. + if (Cond.getOpcode() == ISD::SETCC && + ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) || + (Subtarget->hasSSE1() && VT == MVT::f32)) && + VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) { + SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1); + int SSECC = translateX86FSETCC( + cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1); + + if (SSECC != 8) { + unsigned Opcode = VT == MVT::f32 ? 
X86ISD::FSETCCss : X86ISD::FSETCCsd; + SDValue Cmp = DAG.getNode(Opcode, DL, VT, CondOp0, CondOp1, + DAG.getConstant(SSECC, MVT::i8)); + SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2); + SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1); + return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And); + } + } + if (Cond.getOpcode() == ISD::SETCC) { SDValue NewCond = LowerSETCC(Cond, DAG); if (NewCond.getNode()) @@ -10889,8 +11294,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { X86CC = X86::COND_E; break; } - SmallVector<SDValue, 5> NewOps; - NewOps.append(Op->op_begin()+1, Op->op_end()); + SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end()); SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, @@ -10907,8 +11311,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { else Opcode = X86ISD::PCMPESTRI; - SmallVector<SDValue, 5> NewOps; - NewOps.append(Op->op_begin()+1, Op->op_end()); + SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end()); SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); } @@ -11492,7 +11895,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, "Should not custom lower when pmuldq is available!"); // Extract the odd parts. - const int UnpackMask[] = { 1, -1, 3, -1 }; + static const int UnpackMask[] = { 1, -1, 3, -1 }; SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask); SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask); @@ -11506,7 +11909,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, // Merge the two vectors back together with a shuffle. This expands into 2 // shuffles. 
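A scalar model of the pmuludq-based v4i32 multiply used here may help: pmuludq widens only the even lanes to 64 bits, so the odd lanes are first moved into even positions (the <1,-1,3,-1> unpack mask above), both sets of products are truncated back to 32 bits, and the <0,4,2,6> shuffle below interleaves the results again. Plain C++ with illustrative names, not the DAG code:

#include <cassert>
#include <cstdint>

// Emulate the even/odd decomposition of a v4i32 multiply when only
// pmuludq (u32 x u32 -> u64 on even lanes) is available.
static void mulV4I32(const uint32_t A[4], const uint32_t B[4],
                     uint32_t Out[4]) {
  uint32_t Evens[2], Odds[2];
  for (int i = 0; i != 2; ++i) {
    Evens[i] = uint32_t(uint64_t(A[2 * i]) * B[2 * i]);         // lanes 0, 2
    Odds[i]  = uint32_t(uint64_t(A[2 * i + 1]) * B[2 * i + 1]); // lanes 1, 3
  }
  // Interleave the halves again, as the <0,4,2,6> shuffle does.
  Out[0] = Evens[0]; Out[1] = Odds[0];
  Out[2] = Evens[1]; Out[3] = Odds[1];
}

int main() {
  uint32_t A[4] = {2, 3, 0x10000, 7}, B[4] = {5, 11, 0x10000, 9}, R[4];
  mulV4I32(A, B, R);
  assert(R[0] == 10 && R[1] == 33 && R[2] == 0 && R[3] == 63); // lane 2 wraps mod 2^32
  return 0;
}
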
- const int ShufMask[] = { 0, 4, 2, 6 }; + static const int ShufMask[] = { 0, 4, 2, 6 }; return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask); } @@ -11560,9 +11963,11 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { return SDValue(); APInt SplatValue, SplatUndef; - unsigned MinSplatBits; + unsigned SplatBitSize; bool HasAnyUndefs; - if (!C->isConstantSplat(SplatValue, SplatUndef, MinSplatBits, HasAnyUndefs)) + if (!C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, + HasAnyUndefs) || + EltTy.getSizeInBits() < SplatBitSize) return SDValue(); if ((SplatValue != 0) && @@ -12706,6 +13111,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::SHLD: return "X86ISD::SHLD"; case X86ISD::SHRD: return "X86ISD::SHRD"; case X86ISD::FAND: return "X86ISD::FAND"; + case X86ISD::FANDN: return "X86ISD::FANDN"; case X86ISD::FOR: return "X86ISD::FOR"; case X86ISD::FXOR: return "X86ISD::FXOR"; case X86ISD::FSRL: return "X86ISD::FSRL"; @@ -12829,6 +13235,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; + case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM"; case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; case X86ISD::VPERMV: return "X86ISD::VPERMV"; @@ -12917,6 +13324,20 @@ bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { return NumBits1 > NumBits2; } +bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { + if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) + return false; + + if (!isTypeLegal(EVT::getEVT(Ty1))) + return false; + + assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); + + // Assuming the caller doesn't have a zeroext or signext return parameter, + // truncation all the way down to i1 is valid. + return true; +} + bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { return isInt<32>(Imm); } @@ -12968,6 +13389,27 @@ bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { return false; } +bool +X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { + if (!(Subtarget->hasFMA() || Subtarget->hasFMA4())) + return false; + + VT = VT.getScalarType(); + + if (!VT.isSimple()) + return false; + + switch (VT.getSimpleVT().SimpleTy) { + case MVT::f32: + case MVT::f64: + return true; + default: + break; + } + + return false; +} + bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { // i16 instructions are longer (0x66 prefix) and potentially slower. return !(VT1 == MVT::i32 && VT2 == MVT::i16); @@ -14434,12 +14876,11 @@ X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, } else { // __chkstk(MSVCRT): does not update stack pointer. // Clobbers R10, R11 and EFLAGS. - // FIXME: RAX(allocated size) might be reused and not killed. BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) .addExternalSymbol("__chkstk") .addReg(X86::RAX, RegState::Implicit) .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); - // RAX has the offset to subtracted from RSP. + // RAX has the offset to be subtracted from RSP. 
 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
       .addReg(X86::RSP)
       .addReg(X86::RAX);
@@ -16299,6 +16740,38 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }

+/// \brief Returns a vector of 0s if the input node is a vector logical
+/// shift by a constant amount which is known to be bigger than or equal
+/// to the vector element size in bits.
+static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
+                                      const X86Subtarget *Subtarget) {
+  EVT VT = N->getValueType(0);
+
+  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
+      (!Subtarget->hasInt256() ||
+       (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
+    return SDValue();
+
+  SDValue Amt = N->getOperand(1);
+  SDLoc DL(N);
+  if (isSplatVector(Amt.getNode())) {
+    SDValue SclrAmt = Amt->getOperand(0);
+    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) {
+      APInt ShiftAmt = C->getAPIntValue();
+      unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
+
+      // SSE2/AVX2 logical shifts always return a vector of 0s
+      // if the shift amount is bigger than or equal to
+      // the element size. The constant shift amount will be
+      // encoded as an 8-bit immediate.
+      if (ShiftAmt.trunc(8).uge(MaxAmount))
+        return getZeroVector(VT, Subtarget, DAG, DL);
+    }
+  }
+
+  return SDValue();
+}
+
 /// PerformShiftCombine - Combine shifts.
 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
@@ -16308,6 +16781,12 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
     if (V.getNode()) return V;
   }

+  if (N->getOpcode() != ISD::SRA) {
+    // Try to fold this logical shift into a zero vector.
+    SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
+    if (V.getNode()) return V;
+  }
+
   return SDValue();
 }

@@ -17253,7 +17732,7 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
       RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
     return false;

-  EVT VT = LHS.getValueType();
+  MVT VT = LHS.getValueType().getSimpleVT();
   assert((VT.is128BitVector() || VT.is256BitVector()) &&
          "Unsupported vector type for horizontal add/sub");

@@ -17324,23 +17803,24 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
   // LHS = VECTOR_SHUFFLE A, B, LMask
   // RHS = VECTOR_SHUFFLE A, B, RMask
   // Check that the masks correspond to performing a horizontal operation.
-  for (unsigned i = 0; i != NumElts; ++i) {
-    int LIdx = LMask[i], RIdx = RMask[i];
-
-    // Ignore any UNDEF components.
-    if (LIdx < 0 || RIdx < 0 ||
-        (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
-        (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
-      continue;
+  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+    for (unsigned i = 0; i != NumLaneElts; ++i) {
+      int LIdx = LMask[i+l], RIdx = RMask[i+l];
+
+      // Ignore any UNDEF components.
+      if (LIdx < 0 || RIdx < 0 ||
+          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
+          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
+        continue;

-      // Check that successive elements are being operated on.
If not, this is + // not a horizontal operation. + unsigned Src = (i/HalfLaneElts); // each lane is split between srcs + int Index = 2*(i%HalfLaneElts) + NumElts*Src + l; + if (!(LIdx == Index && RIdx == Index + 1) && + !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) + return false; + } } LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. @@ -17428,6 +17908,19 @@ static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { return SDValue(); } +/// PerformFANDNCombine - Do target-specific dag combines on X86ISD::FANDN nodes +static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) { + // FANDN(x, 0.0) -> 0.0 + // FANDN(0.0, x) -> x + if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) + if (C->getValueAPF().isPosZero()) + return N->getOperand(1); + if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) + if (C->getValueAPF().isPosZero()) + return N->getOperand(1); + return SDValue(); +} + static SDValue PerformBTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) { @@ -17882,6 +18375,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case X86ISD::FMIN: case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG); case X86ISD::FAND: return PerformFANDCombine(N, DAG); + case X86ISD::FANDN: return PerformFANDNCombine(N, DAG); case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); case ISD::ANY_EXTEND: @@ -18423,7 +18917,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, std::pair<unsigned, const TargetRegisterClass*> X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { // First, see if this is a constraint that directly corresponds to an LLVM // register class. if (Constraint.size() == 1) { @@ -18490,7 +18984,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed if (!Subtarget->hasSSE1()) break; - switch (VT.getSimpleVT().SimpleTy) { + switch (VT.SimpleTy) { default: break; // Scalar SSE types. case MVT::f32: @@ -18515,6 +19009,11 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, case MVT::v8f32: case MVT::v4f64: return std::make_pair(0U, &X86::VR256RegClass); + case MVT::v8f64: + case MVT::v16f32: + case MVT::v16i32: + case MVT::v8i64: + return std::make_pair(0U, &X86::VR512RegClass); } break; } @@ -18625,7 +19124,13 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, } } else if (Res.second == &X86::FR32RegClass || Res.second == &X86::FR64RegClass || - Res.second == &X86::VR128RegClass) { + Res.second == &X86::VR128RegClass || + Res.second == &X86::VR256RegClass || + Res.second == &X86::FR32XRegClass || + Res.second == &X86::FR64XRegClass || + Res.second == &X86::VR128XRegClass || + Res.second == &X86::VR256XRegClass || + Res.second == &X86::VR512RegClass) { // Handle references to XMM physical registers that got mapped into the // wrong class. 
This can happen with constraints like {xmm0} where the
+ // target independent register mapper will just pick the first match it can
@@ -18639,6 +19144,8 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
       Res.second = &X86::VR128RegClass;
     else if (X86::VR256RegClass.hasType(VT))
       Res.second = &X86::VR256RegClass;
+    else if (X86::VR512RegClass.hasType(VT))
+      Res.second = &X86::VR512RegClass;
   }

   return Res;
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index c0e1015..2703274 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -53,6 +53,10 @@ namespace llvm {
       /// to X86::XORPS or X86::XORPD.
       FXOR,

+      /// FANDN - Bitwise logical ANDNOT of floating point values. This
+      /// corresponds to X86::ANDNPS or X86::ANDNPD.
+      FANDN,
+
       /// FSRL - Bitwise logical right shift of floating point values. These
       /// corresponds to X86::PSRLDQ.
       FSRL,
@@ -290,6 +294,10 @@ namespace llvm {
      // TESTP - Vector packed fp sign bitwise comparisons
      TESTP,

+      // OR/AND test for masks
+      KORTEST,
+      KTEST,
+
      // Several flavors of instructions with vector shuffle behaviors.
      PALIGNR,
      PSHUFD,
@@ -313,6 +321,8 @@ namespace llvm {
      VPERMI,
      VPERM2X128,
      VBROADCAST,
+      // masked broadcast
+      VBROADCASTM,

      // PMULUDQ - Vector multiply packed unsigned doubleword integers
      PMULUDQ,
@@ -434,25 +444,45 @@ namespace llvm {
   /// Define some predicates that are used for node matching.
   namespace X86 {
-    /// isVEXTRACTF128Index - Return true if the specified
+    /// isVEXTRACT128Index - Return true if the specified
+    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
+    /// suitable for input to VEXTRACTF128, VEXTRACTI128 instructions.
+    bool isVEXTRACT128Index(SDNode *N);
+
+    /// isVINSERT128Index - Return true if the specified
+    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
+    /// suitable for input to VINSERTF128, VINSERTI128 instructions.
+    bool isVINSERT128Index(SDNode *N);
+
+    /// isVEXTRACT256Index - Return true if the specified
     /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
-    /// suitable for input to VEXTRACTF128.
-    bool isVEXTRACTF128Index(SDNode *N);
+    /// suitable for input to VEXTRACTF64X4, VEXTRACTI64X4 instructions.
+    bool isVEXTRACT256Index(SDNode *N);

-    /// isVINSERTF128Index - Return true if the specified
+    /// isVINSERT256Index - Return true if the specified
     /// INSERT_SUBVECTOR operand specifies a subvector insert that is
-    /// suitable for input to VINSERTF128.
-    bool isVINSERTF128Index(SDNode *N);
+    /// suitable for input to VINSERTF64X4, VINSERTI64X4 instructions.
+    bool isVINSERT256Index(SDNode *N);

-    /// getExtractVEXTRACTF128Immediate - Return the appropriate
+    /// getExtractVEXTRACT128Immediate - Return the appropriate
     /// immediate to extract the specified EXTRACT_SUBVECTOR index
-    /// with VEXTRACTF128 instructions.
-    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);
+    /// with VEXTRACTF128, VEXTRACTI128 instructions.
+    unsigned getExtractVEXTRACT128Immediate(SDNode *N);

-    /// getInsertVINSERTF128Immediate - Return the appropriate
+    /// getInsertVINSERT128Immediate - Return the appropriate
     /// immediate to insert at the specified INSERT_SUBVECTOR index
-    /// with VINSERTF128 instructions.
-    unsigned getInsertVINSERTF128Immediate(SDNode *N);
+    /// with VINSERTF128, VINSERTI128 instructions.
+ unsigned getInsertVINSERT128Immediate(SDNode *N); + + /// getExtractVEXTRACT256Immediate - Return the appropriate + /// immediate to extract the specified EXTRACT_SUBVECTOR index + /// with VEXTRACTF64X4, VEXTRACTI64x4 instructions. + unsigned getExtractVEXTRACT256Immediate(SDNode *N); + + /// getInsertVINSERT256Immediate - Return the appropriate + /// immediate to insert at the specified INSERT_SUBVECTOR index + /// with VINSERTF64x4, VINSERTI64x4 instructions. + unsigned getInsertVINSERT256Immediate(SDNode *N); /// isZeroNode - Returns true if Elt is a constant zero or a floating point /// constant +0.0. @@ -610,7 +640,7 @@ namespace llvm { /// error, this returns a register number of 0. std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; + MVT VT) const; /// isLegalAddressingMode - Return true if the addressing mode represented /// by AM is legal for this target, for a load/store of the specified type. @@ -634,6 +664,8 @@ namespace llvm { virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const; virtual bool isTruncateFree(EVT VT1, EVT VT2) const; + virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const; + /// isZExtFree - Return true if any actual instruction that defines a /// value of type Ty1 implicit zero-extends the value to Ty2 in the result /// register. This does not necessarily include registers defined in @@ -646,11 +678,11 @@ namespace llvm { virtual bool isZExtFree(EVT VT1, EVT VT2) const; virtual bool isZExtFree(SDValue Val, EVT VT2) const; - /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than - /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to - /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd - /// is expanded to mul + add. - virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; } + /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster + /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be + /// expanded to FMAs when this method returns true, otherwise fmuladd is + /// expanded to fmul + fadd. + virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const; /// isNarrowingProfitable - Return true if it's profitable to narrow /// operations of type VT1 to VT2. e.g. 
on x86, it's profitable to narrow @@ -802,6 +834,7 @@ namespace llvm { SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl, SelectionDAG &DAG) const; SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; @@ -821,7 +854,9 @@ namespace llvm { SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const; SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerZERO_EXTEND_AVX512(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, SelectionDAG &DAG) const; SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td new file mode 100644 index 0000000..8abae14 --- /dev/null +++ b/lib/Target/X86/X86InstrAVX512.td @@ -0,0 +1,715 @@ +// Bitcasts between 512-bit vector types. Return the original type since +// no instruction is needed for the conversion +let Predicates = [HasAVX512] in { + def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>; + def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>; + def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>; + def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>; + def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>; + def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>; + def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>; + def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>; + def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>; + def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>; + def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>; + def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>; + def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>; + + def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>; + def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>; + def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>; + def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>; + def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>; + def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>; + def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>; + def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>; + def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>; + def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>; + def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>; + def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>; + def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>; + def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>; + def : Pat<(v8i16 (bitconvert 
(v4f32 VR128X:$src))), (v8i16 VR128X:$src)>; + def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>; + def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>; + def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>; + def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>; + def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>; + def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>; + def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>; + def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>; + def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>; + def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>; + def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>; + def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>; + def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>; + def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>; + def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>; + +// Bitcasts between 256-bit vector types. Return the original type since +// no instruction is needed for the conversion + def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>; + def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>; + def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>; + def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>; + def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>; + def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>; + def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>; + def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>; + def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>; + def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>; + def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>; + def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>; + def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>; + def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>; + def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>; + def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>; + def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>; + def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>; + def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>; + def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>; + def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>; + def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>; + def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>; + def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>; + def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>; + def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>; + def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>; + def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>; + def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>; + def : Pat<(v16i16 
(bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>; +} + +//===----------------------------------------------------------------------===// +// AVX-512 - VECTOR INSERT +// +// -- 32x8 form -- +let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in { +def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst), + (ins VR512:$src1, VR128X:$src2, i8imm:$src3), + "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512; +let mayLoad = 1 in +def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst), + (ins VR512:$src1, f128mem:$src2, i8imm:$src3), + "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>; +} + +// -- 64x4 fp form -- +let neverHasSideEffects = 1, ExeDomain = SSEPackedDouble in { +def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst), + (ins VR512:$src1, VR256X:$src2, i8imm:$src3), + "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512, VEX_W; +let mayLoad = 1 in +def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst), + (ins VR512:$src1, i256mem:$src2, i8imm:$src3), + "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>; +} +// -- 32x4 integer form -- +let neverHasSideEffects = 1 in { +def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst), + (ins VR512:$src1, VR128X:$src2, i8imm:$src3), + "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512; +let mayLoad = 1 in +def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst), + (ins VR512:$src1, i128mem:$src2, i8imm:$src3), + "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>; + +} + +let neverHasSideEffects = 1 in { +// -- 64x4 form -- +def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst), + (ins VR512:$src1, VR256X:$src2, i8imm:$src3), + "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512, VEX_W; +let mayLoad = 1 in +def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst), + (ins VR512:$src1, i256mem:$src2, i8imm:$src3), + "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>; +} + +def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (v4f32 VR128X:$src2), + (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2, + (INSERT_get_vinsert128_imm VR512:$ins))>; +def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (v2f64 VR128X:$src2), + (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2, + (INSERT_get_vinsert128_imm VR512:$ins))>; +def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (v2i64 VR128X:$src2), + (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2, + (INSERT_get_vinsert128_imm VR512:$ins))>; +def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2), + (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2, + (INSERT_get_vinsert128_imm VR512:$ins))>; + +def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2), + (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2, + (INSERT_get_vinsert128_imm VR512:$ins))>; +def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), + (bc_v4i32 (loadv2i64 addr:$src2)), + (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2, + (INSERT_get_vinsert128_imm VR512:$ins))>; +def : Pat<(vinsert128_insert:$ins 
(v8f64 VR512:$src1), (loadv2f64 addr:$src2),
+                  (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
+                  (INSERT_get_vinsert128_imm VR512:$ins))>;
+def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (loadv2i64 addr:$src2),
+                  (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
+                  (INSERT_get_vinsert128_imm VR512:$ins))>;
+
+def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (v8f32 VR256X:$src2),
+                  (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (v4f64 VR256X:$src2),
+                  (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (v4i64 VR256X:$src2),
+                  (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
+                  (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+
+def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (loadv8f32 addr:$src2),
+                  (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (loadv4f64 addr:$src2),
+                  (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (loadv4i64 addr:$src2),
+                  (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
+                  (bc_v8i32 (loadv4i64 addr:$src2)),
+                  (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
+                  (INSERT_get_vinsert256_imm VR512:$ins))>;
+
+// vinsertps - insert f32 to XMM
+def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
+      (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
+      "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+      [(set VR128X:$dst, (X86insrtps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
+      EVEX_4V;
+def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
+      (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
+      "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+      [(set VR128X:$dst, (X86insrtps VR128X:$src1,
+                           (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
+                           imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 VECTOR EXTRACT
+//---
+let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
+// -- 32x4 form --
+def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
+      (ins VR512:$src1, i8imm:$src2),
+      "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+      []>, EVEX, EVEX_V512;
+def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
+      (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
+      "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+      []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
+
+// -- 64x4 form --
+def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
+      (ins VR512:$src1, i8imm:$src2),
+      "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+      []>, EVEX, EVEX_V512, VEX_W;
+let mayStore = 1 in
+def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
+      (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
+      "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+      []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
+}
+
+let neverHasSideEffects = 1 in {
+// --
32x4 form -- +def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst), + (ins VR512:$src1, i8imm:$src2), + "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}", + []>, EVEX, EVEX_V512; +def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs), + (ins i128mem:$dst, VR512:$src1, i8imm:$src2), + "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}", + []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>; + +// -- 64x4 form -- +def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst), + (ins VR512:$src1, i8imm:$src2), + "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}", + []>, EVEX, EVEX_V512, VEX_W; +let mayStore = 1 in +def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs), + (ins i256mem:$dst, VR512:$src1, i8imm:$src2), + "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}", + []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>; +} + +def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)), + (v4f32 (VEXTRACTF32x4rr VR512:$src1, + (EXTRACT_get_vextract128_imm VR128X:$ext)))>; + +def : Pat<(vextract128_extract:$ext VR512:$src1, (iPTR imm)), + (v4i32 (VEXTRACTF32x4rr VR512:$src1, + (EXTRACT_get_vextract128_imm VR128X:$ext)))>; + +def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)), + (v2f64 (VEXTRACTF32x4rr VR512:$src1, + (EXTRACT_get_vextract128_imm VR128X:$ext)))>; + +def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)), + (v2i64 (VEXTRACTI32x4rr VR512:$src1, + (EXTRACT_get_vextract128_imm VR128X:$ext)))>; + + +def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)), + (v8f32 (VEXTRACTF64x4rr VR512:$src1, + (EXTRACT_get_vextract256_imm VR256X:$ext)))>; + +def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)), + (v8i32 (VEXTRACTI64x4rr VR512:$src1, + (EXTRACT_get_vextract256_imm VR256X:$ext)))>; + +def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)), + (v4f64 (VEXTRACTF64x4rr VR512:$src1, + (EXTRACT_get_vextract256_imm VR256X:$ext)))>; + +def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)), + (v4i64 (VEXTRACTI64x4rr VR512:$src1, + (EXTRACT_get_vextract256_imm VR256X:$ext)))>; + +// A 256-bit subvector extract from the first 512-bit vector position +// is a subregister copy that needs no instruction. +def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))), + (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>; +def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))), + (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>; +def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))), + (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>; +def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))), + (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>; + +// zmm -> xmm +def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))), + (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>; +def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))), + (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>; +def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))), + (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>; +def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))), + (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>; + + +// A 128-bit subvector insert to the first 512-bit vector position +// is a subregister copy that needs no instruction. 
+def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)), + (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), + (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm), + sub_ymm)>; +def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)), + (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), + (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm), + sub_ymm)>; +def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)), + (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), + (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm), + sub_ymm)>; +def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)), + (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), + (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm), + sub_ymm)>; + +def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)), + (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>; +def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)), + (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>; +def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)), + (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>; +def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)), + (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>; + +// vextractps - extract 32 bits from XMM +def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst), + (ins VR128X:$src1, u32u8imm:$src2), + "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}", + [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>, + EVEX; + +def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs), + (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2), + "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}", + [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2), + addr:$dst)]>, EVEX; + +//===---------------------------------------------------------------------===// +// AVX-512 BROADCAST +//--- +multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr, + RegisterClass DestRC, + RegisterClass SrcRC, X86MemOperand x86memop> { + def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + []>, EVEX; + def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),[]>, EVEX; +} +let ExeDomain = SSEPackedSingle in { + defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss{z}", VR512, + VR128X, f32mem>, + EVEX_V512, EVEX_CD8<32, CD8VT1>; +} + +let ExeDomain = SSEPackedDouble in { + defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd{z}", VR512, + VR128X, f64mem>, + EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>; +} + +def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))), + (VBROADCASTSSZrm addr:$src)>; +def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))), + (VBROADCASTSDZrm addr:$src)>; + +multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr, + RegisterClass SrcRC, RegisterClass KRC> { + def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + []>, EVEX, EVEX_V512; + def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), + (ins KRC:$mask, SrcRC:$src), + !strconcat(OpcodeStr, + "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"), + []>, EVEX, EVEX_V512, EVEX_KZ; +} + +defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>; +defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>, + VEX_W; + 
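The two patterns directly below select zero extension of a k-mask into an integer vector as a masked zeroing broadcast of the constant 1 (the MOV32ri/MOV64ri of 0x1): every lane whose mask bit is set receives 1 and every other lane is zeroed, which is exactly vzext of the i1 vector. A standalone bit-level model in plain C++ (illustrative names):

#include <cassert>
#include <cstdint>

// Model X86vzext of a 16-bit k-mask to v16i32 as the {k}{z} broadcast
// of the constant 1 that the patterns below select.
static void vzextMask(uint16_t Mask, uint32_t Out[16]) {
  for (unsigned i = 0; i != 16; ++i)
    Out[i] = (Mask >> i) & 1 ? 1u : 0u;
}

int main() {
  uint32_t V[16];
  vzextMask(0x8005, V); // bits 0, 2 and 15 set
  assert(V[0] == 1 && V[1] == 0 && V[2] == 1 && V[7] == 0 && V[15] == 1);
  return 0;
}
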
+def : Pat <(v16i32 (X86vzext VK16WM:$mask)), + (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>; + +def : Pat <(v8i64 (X86vzext VK8WM:$mask)), + (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>; + +def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))), + (VPBROADCASTDrZrr GR32:$src)>; +def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))), + (VPBROADCASTQrZrr GR64:$src)>; + +multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr, + X86MemOperand x86memop, PatFrag ld_frag, + RegisterClass DstRC, ValueType OpVT, ValueType SrcVT, + RegisterClass KRC> { + def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + [(set DstRC:$dst, + (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX; + def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask, + VR128X:$src), + !strconcat(OpcodeStr, + "\t{$src, ${dst}{${mask}}{z}|${dst}{${mask}}{z}, $src}"), + [(set DstRC:$dst, + (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>, + EVEX, EVEX_KZ; + def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + [(set DstRC:$dst, + (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX; + def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask, + x86memop:$src), + !strconcat(OpcodeStr, + "\t{$src, ${dst}{${mask}}{z}|${dst}{${mask}}{z}, $src}"), + [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask, + (ld_frag addr:$src))))]>, EVEX, EVEX_KZ; +} + +defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem, + loadi32, VR512, v16i32, v4i32, VK16WM>, + EVEX_V512, EVEX_CD8<32, CD8VT1>; +defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem, + loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W, + EVEX_CD8<64, CD8VT1>; + +def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))), + (VBROADCASTSSZrr VR128X:$src)>; +def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))), + (VBROADCASTSDZrr VR128X:$src)>; + +// Provide fallback in case the load node that is used in the patterns above +// is used by additional users, which prevents the pattern selection. 
+def : Pat<(v16f32 (X86VBroadcast FR32X:$src)), + (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>; +def : Pat<(v8f64 (X86VBroadcast FR64X:$src)), + (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>; + + +let Predicates = [HasAVX512] in { +def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))), + (EXTRACT_SUBREG + (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM), + addr:$src)), sub_ymm)>; +} +//===----------------------------------------------------------------------===// +// AVX-512 BROADCAST MASK TO VECTOR REGISTER +//--- + +multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr, + RegisterClass DstRC, RegisterClass KRC, + ValueType OpVT, ValueType SrcVT> { +def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + []>, EVEX; +} + +defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512, + VK16, v16i32, v16i1>, EVEX_V512; +defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512, + VK8, v8i64, v8i1>, EVEX_V512, VEX_W; + +// Mask register copy, including +// - copy between mask registers +// - load/store mask registers +// - copy from GPR to mask register and vice versa +// +multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk, + string OpcodeStr, RegisterClass KRC, + ValueType vt, X86MemOperand x86memop> { + let neverHasSideEffects = 1 in { + def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; + let mayLoad = 1 in + def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + [(set KRC:$dst, (vt (load addr:$src)))]>; + let mayStore = 1 in + def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; + } +} + +multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk, + string OpcodeStr, + RegisterClass KRC, RegisterClass GRC> { + let neverHasSideEffects = 1 in { + def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; + def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; + } +} + +let Predicates = [HasAVX512] in { + defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>, + VEX, TB; + defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>, + VEX, TB; +} + +let Predicates = [HasAVX512] in { + // GR16 from/to 16-bit mask + def : Pat<(v16i1 (bitconvert (i16 GR16:$src))), + (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>; + def : Pat<(i16 (bitconvert (v16i1 VK16:$src))), + (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>; + + // Store kreg in memory + def : Pat<(store (v16i1 VK16:$src), addr:$dst), + (KMOVWmk addr:$dst, VK16:$src)>; + + def : Pat<(store (v8i1 VK8:$src), addr:$dst), + (KMOVWmk addr:$dst, (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16)))>; +} +// With AVX-512 only, 8-bit mask is promoted to 16-bit mask. 
+let Predicates = [HasAVX512] in { + // GR from/to 8-bit mask without native support + def : Pat<(v8i1 (bitconvert (i8 GR8:$src))), + (COPY_TO_REGCLASS + (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)), + VK8)>; + def : Pat<(i8 (bitconvert (v8i1 VK8:$src))), + (EXTRACT_SUBREG + (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)), + sub_8bit)>; +} + +// Mask unary operation +// - KNOT +multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr, + RegisterClass KRC, SDPatternOperator OpNode> { + let Predicates = [HasAVX512] in + def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + [(set KRC:$dst, (OpNode KRC:$src))]>; +} + +multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr, + SDPatternOperator OpNode> { + defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>, + VEX, TB; +} + +defm KNOT : avx512_mask_unop_w<0x44, "knot", not>; + +def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>; +def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)), + (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>; + +// With AVX-512, 8-bit mask is promoted to 16-bit mask. +def : Pat<(not VK8:$src), + (COPY_TO_REGCLASS + (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>; + +// Mask binary operation +// - KADD, KAND, KANDN, KOR, KXNOR, KXOR +multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr, + RegisterClass KRC, SDPatternOperator OpNode> { + let Predicates = [HasAVX512] in + def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2), + !strconcat(OpcodeStr, + "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>; +} + +multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr, + SDPatternOperator OpNode> { + defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>, + VEX_4V, VEX_L, TB; +} + +def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>; +def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>; + +let isCommutable = 1 in { + defm KADD : avx512_mask_binop_w<0x4a, "kadd", add>; + defm KAND : avx512_mask_binop_w<0x41, "kand", and>; + let isCommutable = 0 in + defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>; + defm KOR : avx512_mask_binop_w<0x45, "kor", or>; + defm KXNOR : avx512_mask_binop_w<0x46, "kxnor", xnor>; + defm KXOR : avx512_mask_binop_w<0x47, "kxor", xor>; +} + +multiclass avx512_mask_binop_int<string IntName, string InstName> { + let Predicates = [HasAVX512] in + def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1") + VK16:$src1, VK16:$src2), + (!cast<Instruction>(InstName##"Wrr") VK16:$src1, VK16:$src2)>; +} + +defm : avx512_mask_binop_int<"kadd", "KADD">; +defm : avx512_mask_binop_int<"kand", "KAND">; +defm : avx512_mask_binop_int<"kandn", "KANDN">; +defm : avx512_mask_binop_int<"kor", "KOR">; +defm : avx512_mask_binop_int<"kxnor", "KXNOR">; +defm : avx512_mask_binop_int<"kxor", "KXOR">; +// With AVX-512, 8-bit mask is promoted to 16-bit mask. 
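Only 16-bit mask instructions exist at this level of AVX-512, so an operation on v8i1 is carried out in the containing 16-bit mask register and the low 8 bits of the result are kept; the upper half is a don't-care. A standalone model of the promotion the multiclass below performs with COPY_TO_REGCLASS, in plain C++:

#include <cassert>
#include <cstdint>

// Widen two 8-bit masks, apply the 16-bit k-instruction, and keep only
// the low 8 bits of the result (VK8 -> VK16 -> VK8 round trip).
static uint8_t kand8(uint8_t A, uint8_t B) {
  uint16_t WideA = A, WideB = B;  // COPY_TO_REGCLASS VK8 -> VK16
  uint16_t WideR = WideA & WideB; // KANDWrr
  return uint8_t(WideR);          // COPY_TO_REGCLASS VK16 -> VK8
}

int main() {
  assert(kand8(0xB4, 0xD6) == 0x94); // 0b10110100 & 0b11010110
  return 0;
}

The same round trip appears above for KNOT and for the v8i1 store and bitconvert patterns.
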
+multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> { + let Predicates = [HasAVX512] in + def : Pat<(OpNode VK8:$src1, VK8:$src2), + (COPY_TO_REGCLASS + (Inst (COPY_TO_REGCLASS VK8:$src1, VK16), + (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>; +} + +defm : avx512_binop_pat<and, KANDWrr>; +defm : avx512_binop_pat<andn, KANDNWrr>; +defm : avx512_binop_pat<or, KORWrr>; +defm : avx512_binop_pat<xnor, KXNORWrr>; +defm : avx512_binop_pat<xor, KXORWrr>; + +// Mask unpacking +multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr, + RegisterClass KRC1, RegisterClass KRC2> { + let Predicates = [HasAVX512] in + def rr : I<opc, MRMSrcReg, (outs KRC1:$dst), (ins KRC2:$src1, KRC2:$src2), + !strconcat(OpcodeStr, + "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; +} + +multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> { + defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16, VK8>, + VEX_4V, VEX_L, OpSize, TB; +} + +defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">; + +multiclass avx512_mask_unpck_int<string IntName, string InstName> { + let Predicates = [HasAVX512] in + def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1") + VK8:$src1, VK8:$src2), + (!cast<Instruction>(InstName##"BWrr") VK8:$src1, VK8:$src2)>; +} + +defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">; +// Mask bit testing +multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC, + SDNode OpNode> { + let Predicates = [HasAVX512], Defs = [EFLAGS] in + def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"), + [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>; +} + +multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> { + defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>, + VEX, TB; +} + +defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>; +defm KTEST : avx512_mask_testop_w<0x99, "ktest", X86ktest>; + +// Mask shift +multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC, + SDNode OpNode> { + let Predicates = [HasAVX512] in + def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm), + !strconcat(OpcodeStr, + "\t{$imm, $src, $dst|$dst, $src, $imm}"), + [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>; +} + +multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr, + SDNode OpNode> { + defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>, + VEX, OpSize, TA, VEX_W; +} + +defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", shl>; +defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", srl>; + +// Mask setting all 0s or 1s +multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> { + let Predicates = [HasAVX512] in + let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in + def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "", + [(set KRC:$dst, (VT Val))]>; +} + +multiclass avx512_mask_setop_w<PatFrag Val> { + defm B : avx512_mask_setop<VK8, v8i1, Val>; + defm W : avx512_mask_setop<VK16, v16i1, Val>; +} + +defm KSET0 : avx512_mask_setop_w<immAllZerosV>; +defm KSET1 : avx512_mask_setop_w<immAllOnesV>; + +// With AVX-512 only, 8-bit mask is promoted to 16-bit mask. 
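KORTESTW above ORs two masks purely for the flag effects. A small C++ model of the documented behavior (ZF set when the union is all zeros, CF when it is all ones); the common use is a cheap "any lane still active?" loop test:

    #include <cstdint>

    struct Flags { bool ZF, CF; };

    Flags kortestw(uint16_t k1, uint16_t k2) {
      uint16_t r = k1 | k2;
      return { r == 0x0000,     // ZF: no bit set in either mask
               r == 0xFFFF };   // CF: every bit set in the union
    }
    // e.g.: if (kortestw(active, active).ZF) break;  // all lanes done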
+let Predicates = [HasAVX512] in { + def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>; + def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>; +} +def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))), + (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>; + +def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))), + (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>; + +def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))), + (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>; diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td index fa2b2d8..9ce02ba 100644 --- a/lib/Target/X86/X86InstrArithmetic.td +++ b/lib/Target/X86/X86InstrArithmetic.td @@ -1041,13 +1041,13 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, } // Defs = [EFLAGS] def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL, - "{$src, %al|AL, $src}">; + "{$src, %al|al, $src}">; def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX, - "{$src, %ax|AX, $src}">; + "{$src, %ax|ax, $src}">; def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX, - "{$src, %eax|EAX, $src}">; + "{$src, %eax|eax, $src}">; def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX, - "{$src, %rax|RAX, $src}">; + "{$src, %rax|rax, $src}">; } /// ArithBinOp_RFF - This is an arithmetic binary operator where the pattern is @@ -1112,13 +1112,13 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, } // Uses = [EFLAGS], Defs = [EFLAGS] def NAME#8i8 : BinOpAI_FF<BaseOpc4, mnemonic, Xi8 , AL, - "{$src, %al|AL, $src}">; + "{$src, %al|al, $src}">; def NAME#16i16 : BinOpAI_FF<BaseOpc4, mnemonic, Xi16, AX, - "{$src, %ax|AX, $src}">; + "{$src, %ax|ax, $src}">; def NAME#32i32 : BinOpAI_FF<BaseOpc4, mnemonic, Xi32, EAX, - "{$src, %eax|EAX, $src}">; + "{$src, %eax|eax, $src}">; def NAME#64i32 : BinOpAI_FF<BaseOpc4, mnemonic, Xi64, RAX, - "{$src, %rax|RAX, $src}">; + "{$src, %rax|rax, $src}">; } /// ArithBinOp_F - This is an arithmetic binary operator where the pattern is @@ -1179,13 +1179,13 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, } // Defs = [EFLAGS] def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL, - "{$src, %al|AL, $src}">; + "{$src, %al|al, $src}">; def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX, - "{$src, %ax|AX, $src}">; + "{$src, %ax|ax, $src}">; def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX, - "{$src, %eax|EAX, $src}">; + "{$src, %eax|eax, $src}">; def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX, - "{$src, %rax|RAX, $src}">; + "{$src, %rax|rax, $src}">; } @@ -1253,13 +1253,13 @@ let isCompare = 1 in { } // Defs = [EFLAGS] def TEST8i8 : BinOpAI<0xA8, "test", Xi8 , AL, - "{$src, %al|AL, $src}">; + "{$src, %al|al, $src}">; def TEST16i16 : BinOpAI<0xA8, "test", Xi16, AX, - "{$src, %ax|AX, $src}">; + "{$src, %ax|ax, $src}">; def TEST32i32 : BinOpAI<0xA8, "test", Xi32, EAX, - "{$src, %eax|EAX, $src}">; + "{$src, %eax|eax, $src}">; def TEST64i32 : BinOpAI<0xA8, "test", Xi64, RAX, - "{$src, %rax|RAX, $src}">; + "{$src, %rax|rax, $src}">; } // isCompare //===----------------------------------------------------------------------===// @@ -1302,12 +1302,12 @@ let neverHasSideEffects = 1 in { let isCommutable = 1 in def rr : I<0xF6, MRMSrcReg, (outs RC:$dst1, RC:$dst2), (ins RC:$src), !strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"), - [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMul]>; + [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMul, WriteIMulH]>; let 
mayLoad = 1 in def rm : I<0xF6, MRMSrcMem, (outs RC:$dst1, RC:$dst2), (ins x86memop:$src), !strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"), - [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMulLd]>; + [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMulLd, WriteIMulH]>; } } @@ -1336,7 +1336,7 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in { def ADCX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "adcx{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8, OpSize; - + def ADCX64rm : I<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "adcx{q}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8, OpSize, REX_W, Requires<[In64BitMode]>; @@ -1361,7 +1361,7 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in { def ADOX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "adox{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8XS; - + def ADOX64rm : I<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "adox{q}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8XS, REX_W, Requires<[In64BitMode]>; diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td index 8a7ee7d..8969946 100644 --- a/lib/Target/X86/X86InstrCompiler.td +++ b/lib/Target/X86/X86InstrCompiler.td @@ -129,12 +129,13 @@ def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size), // The MSVC runtime contains an _ftol2 routine for converting floating-point // to integer values. It has a strange calling convention: the input is -// popped from the x87 stack, and the return value is given in EDX:EAX. No -// other registers (aside from flags) are touched. +// popped from the x87 stack, and the return value is given in EDX:EAX. ECX is +// used as a temporary register. No other registers (aside from flags) are +// touched. // Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80 // variant is unnecessary. -let Defs = [EAX, EDX, EFLAGS], FPForm = SpecialFP in { +let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in { def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src), "# win32 fptoui", [(X86WinFTOL RFP32:$src)]>, diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index 2224a08..7c37888 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -229,22 +229,22 @@ class FPrST0PInst<bits<8> o, string asm> // of some of the 'reverse' forms of the fsub and fdiv instructions. As such, // we have to put some 'r's in and take them out of weird places. 
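The braced strings in these definitions, such as "fsub{|r}p" and "\t{$src, %al|al, $src}", use the asm variant syntax: inside "{...|...}" the text before '|' belongs to variant 0 (AT&T) and the text after to variant 1 (Intel). A rough C++ sketch of the selection rule (simplified, no nesting, and not the actual AsmPrinter code):

    #include <string>

    // Keep the requested variant's text from each {att|intel} group.
    std::string selectVariant(const std::string &tmpl, int variant) {
      std::string out;
      int group = -1;  // -1: outside braces, else current alternative index
      for (char c : tmpl) {
        if (c == '{')       group = 0;
        else if (c == '|')  ++group;
        else if (c == '}')  group = -1;
        else if (group == -1 || group == variant)
          out += c;
      }
      return out;
    }
    // selectVariant("fsub{|r}p", 0) == "fsubp"   (AT&T)
    // selectVariant("fsub{|r}p", 1) == "fsubrp"  (Intel)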
def ADD_FST0r : FPST0rInst <0xC0, "fadd\t$op">; -def ADD_FrST0 : FPrST0Inst <0xC0, "fadd\t{%st(0), $op|$op, ST(0)}">; +def ADD_FrST0 : FPrST0Inst <0xC0, "fadd\t{%st(0), $op|$op, st(0)}">; def ADD_FPrST0 : FPrST0PInst<0xC0, "faddp\t$op">; def SUBR_FST0r : FPST0rInst <0xE8, "fsubr\t$op">; -def SUB_FrST0 : FPrST0Inst <0xE8, "fsub{r}\t{%st(0), $op|$op, ST(0)}">; +def SUB_FrST0 : FPrST0Inst <0xE8, "fsub{r}\t{%st(0), $op|$op, st(0)}">; def SUB_FPrST0 : FPrST0PInst<0xE8, "fsub{r}p\t$op">; def SUB_FST0r : FPST0rInst <0xE0, "fsub\t$op">; -def SUBR_FrST0 : FPrST0Inst <0xE0, "fsub{|r}\t{%st(0), $op|$op, ST(0)}">; +def SUBR_FrST0 : FPrST0Inst <0xE0, "fsub{|r}\t{%st(0), $op|$op, st(0)}">; def SUBR_FPrST0 : FPrST0PInst<0xE0, "fsub{|r}p\t$op">; def MUL_FST0r : FPST0rInst <0xC8, "fmul\t$op">; -def MUL_FrST0 : FPrST0Inst <0xC8, "fmul\t{%st(0), $op|$op, ST(0)}">; +def MUL_FrST0 : FPrST0Inst <0xC8, "fmul\t{%st(0), $op|$op, st(0)}">; def MUL_FPrST0 : FPrST0PInst<0xC8, "fmulp\t$op">; def DIVR_FST0r : FPST0rInst <0xF8, "fdivr\t$op">; -def DIV_FrST0 : FPrST0Inst <0xF8, "fdiv{r}\t{%st(0), $op|$op, ST(0)}">; +def DIV_FrST0 : FPrST0Inst <0xF8, "fdiv{r}\t{%st(0), $op|$op, st(0)}">; def DIV_FPrST0 : FPrST0PInst<0xF8, "fdiv{r}p\t$op">; def DIV_FST0r : FPST0rInst <0xF0, "fdiv\t$op">; -def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, ST(0)}">; +def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, st(0)}">; def DIVR_FPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p\t$op">; def COM_FST0r : FPST0rInst <0xD0, "fcom\t$op">; @@ -337,21 +337,21 @@ defm CMOVNP : FPCMov<X86_COND_NP>; let Predicates = [HasCMov] in { // These are not factored because there's no clean way to pass DA/DB. def CMOVB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins), - "fcmovb\t{$op, %st(0)|ST(0), $op}">, DA; + "fcmovb\t{$op, %st(0)|st(0), $op}">, DA; def CMOVBE_F : FPI<0xD0, AddRegFrm, (outs RST:$op), (ins), - "fcmovbe\t{$op, %st(0)|ST(0), $op}">, DA; + "fcmovbe\t{$op, %st(0)|st(0), $op}">, DA; def CMOVE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins), - "fcmove\t{$op, %st(0)|ST(0), $op}">, DA; + "fcmove\t{$op, %st(0)|st(0), $op}">, DA; def CMOVP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins), - "fcmovu\t {$op, %st(0)|ST(0), $op}">, DA; + "fcmovu\t{$op, %st(0)|st(0), $op}">, DA; def CMOVNB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins), - "fcmovnb\t{$op, %st(0)|ST(0), $op}">, DB; + "fcmovnb\t{$op, %st(0)|st(0), $op}">, DB; def CMOVNBE_F: FPI<0xD0, AddRegFrm, (outs RST:$op), (ins), - "fcmovnbe\t{$op, %st(0)|ST(0), $op}">, DB; + "fcmovnbe\t{$op, %st(0)|st(0), $op}">, DB; def CMOVNE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins), - "fcmovne\t{$op, %st(0)|ST(0), $op}">, DB; + "fcmovne\t{$op, %st(0)|st(0), $op}">, DB; def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins), - "fcmovnu\t{$op, %st(0)|ST(0), $op}">, DB; + "fcmovnu\t{$op, %st(0)|st(0), $op}">, DB; } // Predicates = [HasCMov] // Floating point loads & stores. 
@@ -578,7 +578,7 @@ def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg), let SchedRW = [WriteALU] in { let Defs = [AX], Uses = [FPSW] in def FNSTSW16r : I<0xE0, RawFrm, // AX = fp flags - (outs), (ins), "fnstsw %ax", + (outs), (ins), "fnstsw\t{%ax|ax}", [(set AX, (X86fp_stsw FPSW))], IIC_FNSTSW>, DF; def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control world diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index 1432414..64018b3 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -96,6 +96,20 @@ def SSEPackedSingle : Domain<1>; def SSEPackedDouble : Domain<2>; def SSEPackedInt : Domain<3>; +// Class specifying the vector form of the decompressed +// displacement of 8-bit. +class CD8VForm<bits<3> val> { + bits<3> Value = val; +} +def CD8VF : CD8VForm<0>; // v := VL +def CD8VH : CD8VForm<1>; // v := VL/2 +def CD8VQ : CD8VForm<2>; // v := VL/4 +def CD8VO : CD8VForm<3>; // v := VL/8 +def CD8VT1 : CD8VForm<4>; // v := 1 +def CD8VT2 : CD8VForm<5>; // v := 2 +def CD8VT4 : CD8VForm<6>; // v := 4 +def CD8VT8 : CD8VForm<7>; // v := 8 + // Prefix byte classes which are used to indicate to the ad-hoc machine code // emitter that various prefix bytes are required. class OpSize { bit hasOpSizePrefix = 1; } @@ -132,6 +146,19 @@ class VEX_4VOp3 : VEX { bit hasVEX_4VOp3Prefix = 1; } class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; } class VEX_L { bit hasVEX_L = 1; } class VEX_LIG { bit ignoresVEX_L = 1; } +class EVEX : VEX { bit hasEVEXPrefix = 1; } +class EVEX_4V : VEX_4V { bit hasEVEXPrefix = 1; } +class EVEX_K { bit hasEVEX_K = 1; } +class EVEX_KZ : EVEX_K { bit hasEVEX_Z = 1; } +class EVEX_B { bit hasEVEX_B = 1; } +class EVEX_V512 { bit hasEVEX_L2 = 1; bit hasVEX_L = 0; } +class EVEX_CD8<int esize, CD8VForm form> { + bits<2> EVEX_CD8E = !if(!eq(esize, 8), 0b00, + !if(!eq(esize, 16), 0b01, + !if(!eq(esize, 32), 0b10, + !if(!eq(esize, 64), 0b11, ?)))); + bits<3> EVEX_CD8V = form.Value; +} class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; } class MemOp4 { bit hasMemOp4Prefix = 1; } class XOP { bit hasXOP_Prefix = 1; } @@ -177,6 +204,13 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins, // to be encoded in a immediate field? bit hasVEX_L = 0; // Does this inst use large (256-bit) registers? bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit + bit hasEVEXPrefix = 0; // Does this inst require EVEX form? + bit hasEVEX_K = 0; // Does this inst require masking? + bit hasEVEX_Z = 0; // Does this inst set the EVEX_Z field? + bit hasEVEX_L2 = 0; // Does this inst set the EVEX_L2 field? + bit hasEVEX_B = 0; // Does this inst set the EVEX_B field? + bits<2> EVEX_CD8E = 0; // Compressed disp8 form - element-size. + bits<3> EVEX_CD8V = 0; // Compressed disp8 form - vector-width. bit has3DNow0F0FOpcode =0;// Wacky 3dNow! encoding? bit hasMemOp4Prefix = 0; // Same bit as VEX_W, but used for swapping operands bit hasXOP_Prefix = 0; // Does this inst require an XOP prefix? 
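The CD8VForm values in the hunk above feed EVEX's compressed 8-bit displacement: a disp8 is implicitly scaled by N bytes, where N is the element size times the number of elements the form covers (the "v := ..." comments). A C++ sketch of that computation; the names are illustrative, not the emitter's:

    #include <cstdint>

    enum CD8VForm { CD8VF, CD8VH, CD8VQ, CD8VO,
                    CD8VT1, CD8VT2, CD8VT4, CD8VT8 };

    // N = (elements covered) * (element size in bytes).
    unsigned disp8Scale(unsigned vectorBits, unsigned elemBits, CD8VForm f) {
      unsigned vl = vectorBits / elemBits;  // VL in elements
      unsigned v = 0;
      switch (f) {
      case CD8VF:  v = vl;     break;
      case CD8VH:  v = vl / 2; break;
      case CD8VQ:  v = vl / 4; break;
      case CD8VO:  v = vl / 8; break;
      case CD8VT1: v = 1;      break;
      case CD8VT2: v = 2;      break;
      case CD8VT4: v = 4;      break;
      case CD8VT8: v = 8;      break;
      }
      return v * (elemBits / 8);
    }
    // A displacement D encodes as disp8 iff D % N == 0 and D / N fits in a
    // signed byte, e.g. disp8Scale(512, 32, CD8VF) == 64 for a full zmm load.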
@@ -200,9 +234,16 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins, let TSFlags{37} = hasVEX_i8ImmReg; let TSFlags{38} = hasVEX_L; let TSFlags{39} = ignoresVEX_L; - let TSFlags{40} = has3DNow0F0FOpcode; - let TSFlags{41} = hasMemOp4Prefix; - let TSFlags{42} = hasXOP_Prefix; + let TSFlags{40} = hasEVEXPrefix; + let TSFlags{41} = hasEVEX_K; + let TSFlags{42} = hasEVEX_Z; + let TSFlags{43} = hasEVEX_L2; + let TSFlags{44} = hasEVEX_B; + let TSFlags{46-45} = EVEX_CD8E; + let TSFlags{49-47} = EVEX_CD8V; + let TSFlags{50} = has3DNow0F0FOpcode; + let TSFlags{51} = hasMemOp4Prefix; + let TSFlags{52} = hasXOP_Prefix; } class PseudoI<dag oops, dag iops, list<dag> pattern> @@ -553,6 +594,74 @@ class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm, : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize, Requires<[HasAVX2]>; + +// AVX-512 Instruction Templates: +// Instructions introduced in AVX-512 (no SSE equivalent forms) +// +// AVX5128I - AVX-512 instructions with T8 and OpSize prefix. +// AVX512AIi8 - AVX-512 instructions with TA, OpSize prefix and ImmT = Imm8. +// AVX512PDI - AVX-512 instructions with TB, OpSize, double packed. +// AVX512PSI - AVX-512 instructions with TB, single packed. +// AVX512XS8I - AVX-512 instructions with T8 and XS prefixes. +// AVX512XSI - AVX-512 instructions with XS prefix, generic domain. +// AVX512BI - AVX-512 instructions with TB, OpSize, int packed domain. +// AVX512SI - AVX-512 scalar instructions with TB and OpSize prefixes. + +class AVX5128I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize, + Requires<[HasAVX512]>; +class AVX512XS8I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8XS, + Requires<[HasAVX512]>; +class AVX512XSI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin>, XS, + Requires<[HasAVX512]>; +class AVX512XDI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, XD, + Requires<[HasAVX512]>; +class AVX512BI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize, + Requires<[HasAVX512]>; +class AVX512BIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize, + Requires<[HasAVX512]>; +class AVX512SI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize, + Requires<[HasAVX512]>; +class AVX512AIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize, + Requires<[HasAVX512]>; +class AVX512Ii8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, + Requires<[HasAVX512]>; +class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, 
InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, + OpSize, Requires<[HasAVX512]>; +class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB, + Requires<[HasAVX512]>; +class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary> + : Ii8<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>; +class AVX512PI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>; +class AVX512FMA3<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag>pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin>, T8, + OpSize, EVEX_4V, Requires<[HasAVX512]>; + // AES Instruction Templates: // // AES8I @@ -628,6 +737,13 @@ class RIi64<bits<8> o, Format f, dag outs, dag ins, string asm, let CodeSize = 3; } +class RIi64_NOREX<bits<8> o, Format f, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : X86Inst<o, f, Imm64, outs, ins, asm, itin> { + let Pattern = pattern; + let CodeSize = 3; +} + class RSSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern, InstrItinClass itin = NoItinerary> : SSI<o, F, outs, ins, asm, pattern, itin>, REX_W; diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td index 2a72fb6..0b51521 100644 --- a/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -47,6 +47,8 @@ def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp, [SDNPCommutative, SDNPAssociative]>; def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp, [SDNPCommutative, SDNPAssociative]>; +def X86fandn : SDNode<"X86ISD::FANDN", SDTFPBinOp, + [SDNPCommutative, SDNPAssociative]>; def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>; def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>; def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>; @@ -136,6 +138,8 @@ def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>; def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>; def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>; +def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>; +def X86ktest : SDNode<"X86ISD::KTEST", SDTX86CmpPTest>; def X86pmuludq : SDNode<"X86ISD::PMULUDQ", SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, @@ -153,7 +157,9 @@ def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>, def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisInt<3>]>; -def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>; +def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>; +def SDTVBroadcastm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>]>; + def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>, SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>; @@ -192,6 +198,7 @@ def X86VPermi : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>; def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>; def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>; +def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>; def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>; def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>; @@ -405,28 +412,54 @@ def BYTE_imm : SDNodeXForm<imm, [{ return 
getI32Imm(N->getZExtValue() >> 3); }]>; -// EXTRACT_get_vextractf128_imm xform function: convert extract_subvector index -// to VEXTRACTF128 imm. -def EXTRACT_get_vextractf128_imm : SDNodeXForm<extract_subvector, [{ - return getI8Imm(X86::getExtractVEXTRACTF128Immediate(N)); +// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index +// to VEXTRACTF128/VEXTRACTI128 imm. +def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{ + return getI8Imm(X86::getExtractVEXTRACT128Immediate(N)); +}]>; + +// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to +// VINSERTF128/VINSERTI128 imm. +def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{ + return getI8Imm(X86::getInsertVINSERT128Immediate(N)); }]>; -// INSERT_get_vinsertf128_imm xform function: convert insert_subvector index to -// VINSERTF128 imm. -def INSERT_get_vinsertf128_imm : SDNodeXForm<insert_subvector, [{ - return getI8Imm(X86::getInsertVINSERTF128Immediate(N)); +// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index +// to VEXTRACTF64x4 imm. +def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{ + return getI8Imm(X86::getExtractVEXTRACT256Immediate(N)); }]>; -def vextractf128_extract : PatFrag<(ops node:$bigvec, node:$index), +// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to +// VINSERTF64x4 imm. +def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{ + return getI8Imm(X86::getInsertVINSERT256Immediate(N)); +}]>; + +def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index), + (extract_subvector node:$bigvec, + node:$index), [{ + return X86::isVEXTRACT128Index(N); +}], EXTRACT_get_vextract128_imm>; + +def vinsert128_insert : PatFrag<(ops node:$bigvec, node:$smallvec, + node:$index), + (insert_subvector node:$bigvec, node:$smallvec, + node:$index), [{ + return X86::isVINSERT128Index(N); +}], INSERT_get_vinsert128_imm>; + + +def vextract256_extract : PatFrag<(ops node:$bigvec, node:$index), (extract_subvector node:$bigvec, node:$index), [{ - return X86::isVEXTRACTF128Index(N); -}], EXTRACT_get_vextractf128_imm>; + return X86::isVEXTRACT256Index(N); +}], EXTRACT_get_vextract256_imm>; -def vinsertf128_insert : PatFrag<(ops node:$bigvec, node:$smallvec, +def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec, node:$index), (insert_subvector node:$bigvec, node:$smallvec, node:$index), [{ - return X86::isVINSERTF128Index(N); -}], INSERT_get_vinsertf128_imm>; + return X86::isVINSERT256Index(N); +}], INSERT_get_vinsert256_imm>; diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index df7b721..0443a93 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -3733,19 +3733,6 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { return false; } -MachineInstr* -X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const { - X86AddressMode AM; - AM.BaseType = X86AddressMode::FrameIndexBase; - AM.Base.FrameIndex = FrameIx; - MachineInstrBuilder MIB = BuildMI(MF, DL, get(X86::DBG_VALUE)); - addFullAddress(MIB, AM).addImm(Offset).addMetadata(MDPtr); - return &*MIB; -} - static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, const SmallVectorImpl<MachineOperand> &MOs, MachineInstr *MI, @@ -4660,6 +4647,167 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, return true; } +bool 
X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First, + MachineInstr *Second) const { + // Check if this processor supports macro-fusion. Since this is a minor + // heuristic, we haven't specifically reserved a feature. hasAVX is a decent + // proxy for SandyBridge+. + if (!TM.getSubtarget<X86Subtarget>().hasAVX()) + return false; + + enum { + FuseTest, + FuseCmp, + FuseInc + } FuseKind; + + switch(Second->getOpcode()) { + default: + return false; + case X86::JE_4: + case X86::JNE_4: + case X86::JL_4: + case X86::JLE_4: + case X86::JG_4: + case X86::JGE_4: + FuseKind = FuseInc; + break; + case X86::JB_4: + case X86::JBE_4: + case X86::JA_4: + case X86::JAE_4: + FuseKind = FuseCmp; + break; + case X86::JS_4: + case X86::JNS_4: + case X86::JP_4: + case X86::JNP_4: + case X86::JO_4: + case X86::JNO_4: + FuseKind = FuseTest; + break; + } + switch (First->getOpcode()) { + default: + return false; + case X86::TEST8rr: + case X86::TEST16rr: + case X86::TEST32rr: + case X86::TEST64rr: + case X86::TEST8ri: + case X86::TEST16ri: + case X86::TEST32ri: + case X86::TEST32i32: + case X86::TEST64i32: + case X86::TEST64ri32: + case X86::TEST8rm: + case X86::TEST16rm: + case X86::TEST32rm: + case X86::TEST64rm: + case X86::AND16i16: + case X86::AND16ri: + case X86::AND16ri8: + case X86::AND16rm: + case X86::AND16rr: + case X86::AND32i32: + case X86::AND32ri: + case X86::AND32ri8: + case X86::AND32rm: + case X86::AND32rr: + case X86::AND64i32: + case X86::AND64ri32: + case X86::AND64ri8: + case X86::AND64rm: + case X86::AND64rr: + case X86::AND8i8: + case X86::AND8ri: + case X86::AND8rm: + case X86::AND8rr: + return true; + case X86::CMP16i16: + case X86::CMP16ri: + case X86::CMP16ri8: + case X86::CMP16rm: + case X86::CMP16rr: + case X86::CMP32i32: + case X86::CMP32ri: + case X86::CMP32ri8: + case X86::CMP32rm: + case X86::CMP32rr: + case X86::CMP64i32: + case X86::CMP64ri32: + case X86::CMP64ri8: + case X86::CMP64rm: + case X86::CMP64rr: + case X86::CMP8i8: + case X86::CMP8ri: + case X86::CMP8rm: + case X86::CMP8rr: + case X86::ADD16i16: + case X86::ADD16ri: + case X86::ADD16ri8: + case X86::ADD16ri8_DB: + case X86::ADD16ri_DB: + case X86::ADD16rm: + case X86::ADD16rr: + case X86::ADD16rr_DB: + case X86::ADD32i32: + case X86::ADD32ri: + case X86::ADD32ri8: + case X86::ADD32ri8_DB: + case X86::ADD32ri_DB: + case X86::ADD32rm: + case X86::ADD32rr: + case X86::ADD32rr_DB: + case X86::ADD64i32: + case X86::ADD64ri32: + case X86::ADD64ri32_DB: + case X86::ADD64ri8: + case X86::ADD64ri8_DB: + case X86::ADD64rm: + case X86::ADD64rr: + case X86::ADD64rr_DB: + case X86::ADD8i8: + case X86::ADD8mi: + case X86::ADD8mr: + case X86::ADD8ri: + case X86::ADD8rm: + case X86::ADD8rr: + case X86::SUB16i16: + case X86::SUB16ri: + case X86::SUB16ri8: + case X86::SUB16rm: + case X86::SUB16rr: + case X86::SUB32i32: + case X86::SUB32ri: + case X86::SUB32ri8: + case X86::SUB32rm: + case X86::SUB32rr: + case X86::SUB64i32: + case X86::SUB64ri32: + case X86::SUB64ri8: + case X86::SUB64rm: + case X86::SUB64rr: + case X86::SUB8i8: + case X86::SUB8ri: + case X86::SUB8rm: + case X86::SUB8rr: + return FuseKind == FuseCmp || FuseKind == FuseInc; + case X86::INC16r: + case X86::INC32r: + case X86::INC64_16r: + case X86::INC64_32r: + case X86::INC64r: + case X86::INC8r: + case X86::DEC16r: + case X86::DEC32r: + case X86::DEC64_16r: + case X86::DEC64_32r: + case X86::DEC64r: + case X86::DEC8r: + return FuseKind == FuseInc; + } +} bool X86InstrInfo:: ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { diff --git 
a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h index 332874f..a0d1ba7 100644 --- a/lib/Target/X86/X86InstrInfo.h +++ b/lib/Target/X86/X86InstrInfo.h @@ -275,12 +275,6 @@ public: virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const; - virtual - MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const; - /// foldMemoryOperand - If this target supports it, fold a load or store of /// the specified stack slot into the specified machine instruction for the /// specified operand(s). If this is possible, the target should perform the @@ -345,6 +339,9 @@ public: int64_t Offset1, int64_t Offset2, unsigned NumLoads) const; + virtual bool shouldScheduleAdjacent(MachineInstr* First, + MachineInstr *Second) const LLVM_OVERRIDE; + virtual void getNoopForMachoTarget(MCInst &NopInst) const; virtual diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 817bd6c..0960a2a 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -317,6 +317,16 @@ def X86MemVY64Operand : AsmOperandClass { let Name = "MemVY64"; let PredicateMethod = "isMemVY64"; } +def X86MemVZ64Operand : AsmOperandClass { + let Name = "MemVZ64"; let PredicateMethod = "isMemVZ64"; +} +def X86MemVZ32Operand : AsmOperandClass { + let Name = "MemVZ32"; let PredicateMethod = "isMemVZ32"; +} +def X86Mem512AsmOperand : AsmOperandClass { + let Name = "Mem512"; let PredicateMethod = "isMem512"; +} + def X86AbsMemAsmOperand : AsmOperandClass { let Name = "AbsMem"; let SuperClasses = [X86MemAsmOperand]; @@ -345,6 +355,8 @@ def i128mem : X86MemOperand<"printi128mem"> { let ParserMatchClass = X86Mem128AsmOperand; } def i256mem : X86MemOperand<"printi256mem"> { let ParserMatchClass = X86Mem256AsmOperand; } +def i512mem : X86MemOperand<"printi512mem"> { + let ParserMatchClass = X86Mem512AsmOperand; } def f32mem : X86MemOperand<"printf32mem"> { let ParserMatchClass = X86Mem32AsmOperand; } def f64mem : X86MemOperand<"printf64mem"> { @@ -355,6 +367,12 @@ def f128mem : X86MemOperand<"printf128mem"> { let ParserMatchClass = X86Mem128AsmOperand; } def f256mem : X86MemOperand<"printf256mem">{ let ParserMatchClass = X86Mem256AsmOperand; } +def f512mem : X86MemOperand<"printf512mem">{ + let ParserMatchClass = X86Mem512AsmOperand; } +def v512mem : Operand<iPTR> { + let PrintMethod = "printf512mem"; + let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm); + let ParserMatchClass = X86Mem512AsmOperand; } // Gather mem operands def vx32mem : X86MemOperand<"printi32mem">{ @@ -369,6 +387,15 @@ def vx64mem : X86MemOperand<"printi64mem">{ def vy64mem : X86MemOperand<"printi64mem">{ let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm); let ParserMatchClass = X86MemVY64Operand; } +def vy64xmem : X86MemOperand<"printi64mem">{ + let MIOperandInfo = (ops ptr_rc, i8imm, VR256X, i32imm, i8imm); + let ParserMatchClass = X86MemVY64Operand; } +def vz32mem : X86MemOperand<"printi32mem">{ + let MIOperandInfo = (ops ptr_rc, i16imm, VR512, i32imm, i8imm); + let ParserMatchClass = X86MemVZ32Operand; } +def vz64mem : X86MemOperand<"printi64mem">{ + let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm); + let ParserMatchClass = X86MemVZ64Operand; } } // A version of i8mem for use on x86-64 that uses GR64_NOREX instead of @@ -590,11 +617,19 @@ def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">; def HasAVX : Predicate<"Subtarget->hasAVX()">; def HasAVX2 : Predicate<"Subtarget->hasAVX2()">; def HasAVX1Only 
: Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">; +def HasAVX512 : Predicate<"Subtarget->hasAVX512()">; +def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">; +def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">; +def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">; +def HasCDI : Predicate<"Subtarget->hasCDI()">; +def HasPFI : Predicate<"Subtarget->hasPFI()">; +def HasEMI : Predicate<"Subtarget->hasERI()">; def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">; def HasAES : Predicate<"Subtarget->hasAES()">; def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">; def HasFMA : Predicate<"Subtarget->hasFMA()">; +def UseFMAOnAVX : Predicate<"Subtarget->hasFMA() && !Subtarget->hasAVX512()">; def HasFMA4 : Predicate<"Subtarget->hasFMA4()">; def HasXOP : Predicate<"Subtarget->hasXOP()">; def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">; @@ -803,11 +838,11 @@ def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", [], IIC_POP_REG>; def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", [], IIC_POP_REG>, OpSize; -def POP16rmm: I<0x8F, MRM0m, (outs i16mem:$dst), (ins), "pop{w}\t$dst", [], +def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", [], IIC_POP_MEM>, OpSize; def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", [], IIC_POP_REG>; -def POP32rmm: I<0x8F, MRM0m, (outs i32mem:$dst), (ins), "pop{l}\t$dst", [], +def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", [], IIC_POP_MEM>; def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>, OpSize; @@ -851,7 +886,7 @@ def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", [], IIC_POP_REG>; def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", [], IIC_POP_REG>; -def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", [], +def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", [], IIC_POP_MEM>; } // mayLoad, SchedRW let mayStore = 1, SchedRW = [WriteStore] in { @@ -1041,40 +1076,52 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), /// 32-bit offset from the PC. These are only valid in x86-32 mode. 
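The movabs definitions that follow carry a full 64-bit absolute address in the instruction itself, replacing the commented-out broken forms. A hedged sketch of the byte layout for the accumulator loads (simplified: no segment or address-size prefixes):

    #include <cstdint>
    #include <vector>

    // Encode "movabs moffs64 -> accumulator" (the 0xA0/0xA1 opcode family).
    // width is the destination width: 8 (al), 16 (ax), 32 (eax) or 64 (rax).
    std::vector<uint8_t> encodeMovAbsLoad(unsigned width, uint64_t addr) {
      std::vector<uint8_t> bytes;
      if (width == 16) bytes.push_back(0x66);      // operand-size prefix
      if (width == 64) bytes.push_back(0x48);      // REX.W
      bytes.push_back(width == 8 ? 0xA0 : 0xA1);   // opcode
      for (int i = 0; i < 8; ++i)                  // little-endian moffs64
        bytes.push_back(uint8_t(addr >> (8 * i)));
      return bytes;
    }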
let SchedRW = [WriteALU] in { def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src), - "mov{b}\t{$src, %al|AL, $src}", [], IIC_MOV_MEM>, + "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>, Requires<[In32BitMode]>; def MOV16o16a : Ii32 <0xA1, RawFrm, (outs), (ins offset16:$src), - "mov{w}\t{$src, %ax|AL, $src}", [], IIC_MOV_MEM>, OpSize, + "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>, OpSize, Requires<[In32BitMode]>; def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src), - "mov{l}\t{$src, %eax|EAX, $src}", [], IIC_MOV_MEM>, + "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>, Requires<[In32BitMode]>; def MOV8ao8 : Ii32 <0xA2, RawFrm, (outs offset8:$dst), (ins), - "mov{b}\t{%al, $dst|$dst, AL}", [], IIC_MOV_MEM>, + "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, Requires<[In32BitMode]>; def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins), - "mov{w}\t{%ax, $dst|$dst, AL}", [], IIC_MOV_MEM>, OpSize, + "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>, OpSize, Requires<[In32BitMode]>; def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins), - "mov{l}\t{%eax, $dst|$dst, EAX}", [], IIC_MOV_MEM>, + "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>, Requires<[In32BitMode]>; } -// FIXME: These definitions are utterly broken -// Just leave them commented out for now because they're useless outside -// of the large code model, and most compilers won't generate the instructions -// in question. -/* -def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src), - "mov{q}\t{$src, %rax|RAX, $src}", []>; -def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src), - "mov{q}\t{$src, %rax|RAX, $src}", []>; -def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins), - "mov{q}\t{%rax, $dst|$dst, RAX}", []>; -def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins), - "mov{q}\t{%rax, $dst|$dst, RAX}", []>; -*/ - +// These forms all have full 64-bit absolute addresses in their instructions +// and use the movabs mnemonic to indicate this specific form. +def MOV64o8a : RIi64_NOREX<0xA0, RawFrm, (outs), (ins offset64:$src), + "movabs{b}\t{$src, %al|al, $src}", []>, + Requires<[In64BitMode]>; +def MOV64o16a : RIi64_NOREX<0xA1, RawFrm, (outs), (ins offset64:$src), + "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize, + Requires<[In64BitMode]>; +def MOV64o32a : RIi64_NOREX<0xA1, RawFrm, (outs), (ins offset64:$src), + "movabs{l}\t{$src, %eax|eax, $src}", []>, + Requires<[In64BitMode]>; +def MOV64o64a : RIi64<0xA1, RawFrm, (outs), (ins offset64:$src), + "movabs{q}\t{$src, %rax|rax, $src}", []>, + Requires<[In64BitMode]>; + +def MOV64ao8 : RIi64_NOREX<0xA2, RawFrm, (outs offset64:$dst), (ins), + "movabs{b}\t{%al, $dst|$dst, al}", []>, + Requires<[In64BitMode]>; +def MOV64ao16 : RIi64_NOREX<0xA3, RawFrm, (outs offset64:$dst), (ins), + "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize, + Requires<[In64BitMode]>; +def MOV64ao32 : RIi64_NOREX<0xA3, RawFrm, (outs offset64:$dst), (ins), + "movabs{l}\t{%eax, $dst|$dst, eax}", []>, + Requires<[In64BitMode]>; +def MOV64ao64 : RIi64<0xA3, RawFrm, (outs offset64:$dst), (ins), + "movabs{q}\t{%rax, $dst|$dst, rax}", []>, + Requires<[In64BitMode]>; let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in { def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src), @@ -1407,17 +1454,17 @@ def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src), // Swap between EAX and other registers. 
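A detail behind XCHG32ar64 just below: in 64-bit mode, any 32-bit register write zero-extends into the full 64-bit register, so xchg %eax, %eax cannot share the 0x90 encoding (which is NOP). Stated as a one-liner:

    #include <cstdint>

    // Writing a 32-bit sub-register in 64-bit mode zeroes bits 63:32.
    uint64_t writeEAX(uint32_t eax) { return (uint64_t)eax; }
    // Hence xchg %eax, %eax must really execute in 64-bit mode (clearing
    // RAX's upper half), while in 32-bit mode 0x90 can be treated as a NOP.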
def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src), - "xchg{w}\t{$src, %ax|AX, $src}", [], IIC_XCHG_REG>, OpSize; + "xchg{w}\t{$src, %ax|ax, $src}", [], IIC_XCHG_REG>, OpSize; def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src), - "xchg{l}\t{$src, %eax|EAX, $src}", [], IIC_XCHG_REG>, + "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>, Requires<[In32BitMode]>; // Uses GR32_NOAX in 64-bit mode to prevent encoding using the 0x90 NOP encoding. // xchg %eax, %eax needs to clear upper 32-bits of RAX so is not a NOP. def XCHG32ar64 : I<0x90, AddRegFrm, (outs), (ins GR32_NOAX:$src), - "xchg{l}\t{$src, %eax|EAX, $src}", [], IIC_XCHG_REG>, + "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>, Requires<[In64BitMode]>; def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src), - "xchg{q}\t{$src, %rax|RAX, $src}", [], IIC_XCHG_REG>; + "xchg{q}\t{$src, %rax|rax, $src}", [], IIC_XCHG_REG>; } // SchedRW let SchedRW = [WriteALU] in { @@ -1814,6 +1861,7 @@ include "X86InstrXOP.td" // SSE, MMX and 3DNow! vector support. include "X86InstrSSE.td" +include "X86InstrAVX512.td" include "X86InstrMMX.td" include "X86Instr3DNow.td" @@ -1921,29 +1969,31 @@ def : MnemonicAlias<"fucomip", "fucompi", "att">; def : MnemonicAlias<"fwait", "wait", "att">; -class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond> +class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond, + string VariantName> : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix), - !strconcat(Prefix, NewCond, Suffix)>; + !strconcat(Prefix, NewCond, Suffix), VariantName>; /// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of /// MnemonicAlias's that canonicalize the condition code in a mnemonic, for /// example "setz" -> "sete". -multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix> { - def C : CondCodeAlias<Prefix, Suffix, "c", "b">; // setc -> setb - def Z : CondCodeAlias<Prefix, Suffix, "z" , "e">; // setz -> sete - def NA : CondCodeAlias<Prefix, Suffix, "na", "be">; // setna -> setbe - def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae">; // setnb -> setae - def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae">; // setnc -> setae - def NG : CondCodeAlias<Prefix, Suffix, "ng", "le">; // setng -> setle - def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge">; // setnl -> setge - def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne">; // setnz -> setne - def PE : CondCodeAlias<Prefix, Suffix, "pe", "p">; // setpe -> setp - def PO : CondCodeAlias<Prefix, Suffix, "po", "np">; // setpo -> setnp - - def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b">; // setnae -> setb - def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a">; // setnbe -> seta - def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l">; // setnge -> setl - def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g">; // setnle -> setg +multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix, + string V = ""> { + def C : CondCodeAlias<Prefix, Suffix, "c", "b", V>; // setc -> setb + def Z : CondCodeAlias<Prefix, Suffix, "z" , "e", V>; // setz -> sete + def NA : CondCodeAlias<Prefix, Suffix, "na", "be", V>; // setna -> setbe + def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae", V>; // setnb -> setae + def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae", V>; // setnc -> setae + def NG : CondCodeAlias<Prefix, Suffix, "ng", "le", V>; // setng -> setle + def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge", V>; // setnl -> setge + def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne", V>; // setnz -> setne + def PE : 
CondCodeAlias<Prefix, Suffix, "pe", "p", V>; // setpe -> setp + def PO : CondCodeAlias<Prefix, Suffix, "po", "np", V>; // setpo -> setnp + + def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b", V>; // setnae -> setb + def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a", V>; // setnbe -> seta + def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l", V>; // setnge -> setl + def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g", V>; // setnle -> setg } // Aliases for set<CC> @@ -1951,9 +2001,11 @@ defm : IntegerCondCodeMnemonicAlias<"set", "">; // Aliases for j<CC> defm : IntegerCondCodeMnemonicAlias<"j", "">; // Aliases for cmov<CC>{w,l,q} -defm : IntegerCondCodeMnemonicAlias<"cmov", "w">; -defm : IntegerCondCodeMnemonicAlias<"cmov", "l">; -defm : IntegerCondCodeMnemonicAlias<"cmov", "q">; +defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">; +defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">; +defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">; +// No size suffix for intel-style asm. +defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">; //===----------------------------------------------------------------------===// @@ -1965,75 +2017,83 @@ def : InstAlias<"aad", (AAD8i8 10)>; def : InstAlias<"aam", (AAM8i8 10)>; // Disambiguate the mem/imm form of bt-without-a-suffix as btl. -def : InstAlias<"bt $imm, $mem", (BT32mi8 i32mem:$mem, i32i8imm:$imm)>; +// Likewise for btc/btr/bts. +def : InstAlias<"bt {$imm, $mem|$mem, $imm}", + (BT32mi8 i32mem:$mem, i32i8imm:$imm), 0>; +def : InstAlias<"btc {$imm, $mem|$mem, $imm}", + (BTC32mi8 i32mem:$mem, i32i8imm:$imm), 0>; +def : InstAlias<"btr {$imm, $mem|$mem, $imm}", + (BTR32mi8 i32mem:$mem, i32i8imm:$imm), 0>; +def : InstAlias<"bts {$imm, $mem|$mem, $imm}", + (BTS32mi8 i32mem:$mem, i32i8imm:$imm), 0>; // clr aliases. -def : InstAlias<"clrb $reg", (XOR8rr GR8 :$reg, GR8 :$reg)>; -def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg)>; -def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg)>; -def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg)>; +def : InstAlias<"clrb $reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>; +def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg), 0>; +def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg), 0>; +def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg), 0>; // div and idiv aliases for explicit A register. 
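The div/idiv aliases below accept a redundant accumulator operand because DIV and IDIV encode only the divisor; the dividend and both results are implicit. Their semantics, as a C++ sketch (the #DE cases, divisor zero or quotient overflow, are left out):

    #include <cstdint>

    // div r/m32: divide EDX:EAX by src; quotient -> EAX, remainder -> EDX.
    void div32(uint32_t &eax, uint32_t &edx, uint32_t src) {
      uint64_t dividend = ((uint64_t)edx << 32) | eax;
      eax = (uint32_t)(dividend / src);
      edx = (uint32_t)(dividend % src);
    }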
-def : InstAlias<"divb $src, %al", (DIV8r GR8 :$src)>; -def : InstAlias<"divw $src, %ax", (DIV16r GR16:$src)>; -def : InstAlias<"divl $src, %eax", (DIV32r GR32:$src)>; -def : InstAlias<"divq $src, %rax", (DIV64r GR64:$src)>; -def : InstAlias<"divb $src, %al", (DIV8m i8mem :$src)>; -def : InstAlias<"divw $src, %ax", (DIV16m i16mem:$src)>; -def : InstAlias<"divl $src, %eax", (DIV32m i32mem:$src)>; -def : InstAlias<"divq $src, %rax", (DIV64m i64mem:$src)>; -def : InstAlias<"idivb $src, %al", (IDIV8r GR8 :$src)>; -def : InstAlias<"idivw $src, %ax", (IDIV16r GR16:$src)>; -def : InstAlias<"idivl $src, %eax", (IDIV32r GR32:$src)>; -def : InstAlias<"idivq $src, %rax", (IDIV64r GR64:$src)>; -def : InstAlias<"idivb $src, %al", (IDIV8m i8mem :$src)>; -def : InstAlias<"idivw $src, %ax", (IDIV16m i16mem:$src)>; -def : InstAlias<"idivl $src, %eax", (IDIV32m i32mem:$src)>; -def : InstAlias<"idivq $src, %rax", (IDIV64m i64mem:$src)>; +def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>; +def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>; +def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>; +def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>; +def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m i8mem :$src)>; +def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>; +def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>; +def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>; +def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r GR8 :$src)>; +def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>; +def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>; +def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>; +def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m i8mem :$src)>; +def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>; +def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>; +def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>; // Various unary fpstack operations default to operating on on ST1. 
// For example, "fxch" -> "fxch %st(1)" def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>; -def : InstAlias<"fsubp", (SUBR_FPrST0 ST1)>; -def : InstAlias<"fsubrp", (SUB_FPrST0 ST1)>; -def : InstAlias<"fmulp", (MUL_FPrST0 ST1)>; -def : InstAlias<"fdivp", (DIVR_FPrST0 ST1)>; -def : InstAlias<"fdivrp", (DIV_FPrST0 ST1)>; -def : InstAlias<"fxch", (XCH_F ST1)>; -def : InstAlias<"fcom", (COM_FST0r ST1)>; -def : InstAlias<"fcomp", (COMP_FST0r ST1)>; -def : InstAlias<"fcomi", (COM_FIr ST1)>; -def : InstAlias<"fcompi", (COM_FIPr ST1)>; -def : InstAlias<"fucom", (UCOM_Fr ST1)>; -def : InstAlias<"fucomp", (UCOM_FPr ST1)>; -def : InstAlias<"fucomi", (UCOM_FIr ST1)>; -def : InstAlias<"fucompi", (UCOM_FIPr ST1)>; +def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>; +def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>; +def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>; +def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>; +def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>; +def : InstAlias<"fxch", (XCH_F ST1), 0>; +def : InstAlias<"fcom", (COM_FST0r ST1), 0>; +def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>; +def : InstAlias<"fcomi", (COM_FIr ST1), 0>; +def : InstAlias<"fcompi", (COM_FIPr ST1), 0>; +def : InstAlias<"fucom", (UCOM_Fr ST1), 0>; +def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>; +def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>; +def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>; // Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op. // For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate // instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with // gas. multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> { - def : InstAlias<!strconcat(Mnemonic, " $op, %st(0)"), + def : InstAlias<!strconcat(Mnemonic, "\t{$op, %st(0)|st(0), $op}"), (Inst RST:$op), EmitAlias>; - def : InstAlias<!strconcat(Mnemonic, " %st(0), %st(0)"), + def : InstAlias<!strconcat(Mnemonic, "\t{%st(0), %st(0)|st(0), st(0)}"), (Inst ST0), EmitAlias>; } defm : FpUnaryAlias<"fadd", ADD_FST0r>; defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>; defm : FpUnaryAlias<"fsub", SUB_FST0r>; -defm : FpUnaryAlias<"fsubp", SUBR_FPrST0>; +defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0>; defm : FpUnaryAlias<"fsubr", SUBR_FST0r>; -defm : FpUnaryAlias<"fsubrp", SUB_FPrST0>; +defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0>; defm : FpUnaryAlias<"fmul", MUL_FST0r>; defm : FpUnaryAlias<"fmulp", MUL_FPrST0>; defm : FpUnaryAlias<"fdiv", DIV_FST0r>; -defm : FpUnaryAlias<"fdivp", DIVR_FPrST0>; +defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0>; defm : FpUnaryAlias<"fdivr", DIVR_FST0r>; -defm : FpUnaryAlias<"fdivrp", DIV_FPrST0>; +defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0>; defm : FpUnaryAlias<"fcomi", COM_FIr, 0>; defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>; defm : FpUnaryAlias<"fcompi", COM_FIPr>; @@ -2043,16 +2103,16 @@ defm : FpUnaryAlias<"fucompi", UCOM_FIPr>; // Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they // commute. We also allow fdiv[r]p/fsubrp even though they don't commute, // solely because gas supports it. 
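The {|r} markers above exist because AT&T and Intel syntax name the reversed x87 subtract/divide forms oppositely; the operation itself is unchanged. For orientation, a C++ model of what a bare faddp (aliased to ADD_FPrST0 ST1) does to the register stack:

    #include <vector>

    // faddp with no operand defaults to %st(1): st(1) += st(0), then pop,
    // leaving the sum in the new %st(0). back() plays the role of %st(0).
    void faddp(std::vector<double> &st) {
      double top = st.back();
      st.pop_back();
      st.back() += top;
    }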
-def : InstAlias<"faddp %st(0), $op", (ADD_FPrST0 RST:$op), 0>; -def : InstAlias<"fmulp %st(0), $op", (MUL_FPrST0 RST:$op)>; -def : InstAlias<"fsubp %st(0), $op", (SUBR_FPrST0 RST:$op)>; -def : InstAlias<"fsubrp %st(0), $op", (SUB_FPrST0 RST:$op)>; -def : InstAlias<"fdivp %st(0), $op", (DIVR_FPrST0 RST:$op)>; -def : InstAlias<"fdivrp %st(0), $op", (DIV_FPrST0 RST:$op)>; +def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op), 0>; +def : InstAlias<"fmulp\t{%st(0), $op|$op, st(0)}", (MUL_FPrST0 RST:$op)>; +def : InstAlias<"fsub{|r}p\t{%st(0), $op|$op, st(0)}", (SUBR_FPrST0 RST:$op)>; +def : InstAlias<"fsub{r|}p\t{%st(0), $op|$op, st(0)}", (SUB_FPrST0 RST:$op)>; +def : InstAlias<"fdiv{|r}p\t{%st(0), $op|$op, st(0)}", (DIVR_FPrST0 RST:$op)>; +def : InstAlias<"fdiv{r|}p\t{%st(0), $op|$op, st(0)}", (DIV_FPrST0 RST:$op)>; // We accept "fnstsw %eax" even though it only writes %ax. -def : InstAlias<"fnstsw %eax", (FNSTSW16r)>; -def : InstAlias<"fnstsw %al" , (FNSTSW16r)>; +def : InstAlias<"fnstsw\t{%eax|eax}", (FNSTSW16r)>; +def : InstAlias<"fnstsw\t{%al|al}" , (FNSTSW16r)>; def : InstAlias<"fnstsw" , (FNSTSW16r)>; // lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but @@ -2071,12 +2131,12 @@ def : InstAlias<"imulq $imm, $r",(IMUL64rri32 GR64:$r, GR64:$r,i64i32imm:$imm)>; def : InstAlias<"imulq $imm, $r", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm)>; // inb %dx -> inb %al, %dx -def : InstAlias<"inb %dx", (IN8rr)>; -def : InstAlias<"inw %dx", (IN16rr)>; -def : InstAlias<"inl %dx", (IN32rr)>; -def : InstAlias<"inb $port", (IN8ri i8imm:$port)>; -def : InstAlias<"inw $port", (IN16ri i8imm:$port)>; -def : InstAlias<"inl $port", (IN32ri i8imm:$port)>; +def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>; +def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>; +def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>; +def : InstAlias<"inb\t$port", (IN8ri i8imm:$port), 0>; +def : InstAlias<"inw\t$port", (IN16ri i8imm:$port), 0>; +def : InstAlias<"inl\t$port", (IN32ri i8imm:$port), 0>; // jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp @@ -2104,7 +2164,7 @@ def : InstAlias<"movq $src, $dst", // movsd with no operands (as opposed to the SSE scalar move of a double) is an // alias for movsl. (as in rep; movsd) -def : InstAlias<"movsd", (MOVSD)>; +def : InstAlias<"movsd", (MOVSD), 0>; // movsx aliases def : InstAlias<"movsx $src, $dst", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>; @@ -2125,12 +2185,12 @@ def : InstAlias<"movzx $src, $dst", (MOVZX64rr16_Q GR64:$dst, GR16:$src), 0>; // Note: No GR32->GR64 movzx form. // outb %dx -> outb %al, %dx -def : InstAlias<"outb %dx", (OUT8rr)>; -def : InstAlias<"outw %dx", (OUT16rr)>; -def : InstAlias<"outl %dx", (OUT32rr)>; -def : InstAlias<"outb $port", (OUT8ir i8imm:$port)>; -def : InstAlias<"outw $port", (OUT16ir i8imm:$port)>; -def : InstAlias<"outl $port", (OUT32ir i8imm:$port)>; +def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>; +def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>; +def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>; +def : InstAlias<"outb\t$port", (OUT8ir i8imm:$port), 0>; +def : InstAlias<"outw\t$port", (OUT16ir i8imm:$port), 0>; +def : InstAlias<"outl\t$port", (OUT32ir i8imm:$port), 0>; // 'sldt <mem>' can be encoded with either sldtw or sldtq with the same // effect (both store to a 16-bit mem). 
Force to sldtw to avoid ambiguity @@ -2138,19 +2198,19 @@ def : InstAlias<"outl $port", (OUT32ir i8imm:$port)>; def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem)>; // shld/shrd op,op -> shld op, op, CL -def : InstAlias<"shldw $r2, $r1", (SHLD16rrCL GR16:$r1, GR16:$r2)>; -def : InstAlias<"shldl $r2, $r1", (SHLD32rrCL GR32:$r1, GR32:$r2)>; -def : InstAlias<"shldq $r2, $r1", (SHLD64rrCL GR64:$r1, GR64:$r2)>; -def : InstAlias<"shrdw $r2, $r1", (SHRD16rrCL GR16:$r1, GR16:$r2)>; -def : InstAlias<"shrdl $r2, $r1", (SHRD32rrCL GR32:$r1, GR32:$r2)>; -def : InstAlias<"shrdq $r2, $r1", (SHRD64rrCL GR64:$r1, GR64:$r2)>; - -def : InstAlias<"shldw $reg, $mem", (SHLD16mrCL i16mem:$mem, GR16:$reg)>; -def : InstAlias<"shldl $reg, $mem", (SHLD32mrCL i32mem:$mem, GR32:$reg)>; -def : InstAlias<"shldq $reg, $mem", (SHLD64mrCL i64mem:$mem, GR64:$reg)>; -def : InstAlias<"shrdw $reg, $mem", (SHRD16mrCL i16mem:$mem, GR16:$reg)>; -def : InstAlias<"shrdl $reg, $mem", (SHRD32mrCL i32mem:$mem, GR32:$reg)>; -def : InstAlias<"shrdq $reg, $mem", (SHRD64mrCL i64mem:$mem, GR64:$reg)>; +def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>; +def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>; +def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>; +def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>; +def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>; +def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>; + +def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>; +def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>; +def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>; +def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>; +def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>; +def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>; /* FIXME: This is disabled because the asm matcher is currently incapable of * matching a fixed immediate like $1. @@ -2181,19 +2241,19 @@ defm : ShiftRotateByOneAlias<"ror", "ROR">; FIXME */ // test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms. -def : InstAlias<"testb $val, $mem", (TEST8rm GR8 :$val, i8mem :$mem)>; -def : InstAlias<"testw $val, $mem", (TEST16rm GR16:$val, i16mem:$mem)>; -def : InstAlias<"testl $val, $mem", (TEST32rm GR32:$val, i32mem:$mem)>; -def : InstAlias<"testq $val, $mem", (TEST64rm GR64:$val, i64mem:$mem)>; +def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}", (TEST8rm GR8 :$val, i8mem :$mem)>; +def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}", (TEST16rm GR16:$val, i16mem:$mem)>; +def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}", (TEST32rm GR32:$val, i32mem:$mem)>; +def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}", (TEST64rm GR64:$val, i64mem:$mem)>; // xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms. 
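The reg,mem and mem,reg spellings below are accepted as synonyms because XCHG has no direction bit. XCHG with a memory operand is also implicitly locked, which is why a sequentially consistent exchange in C++ lowers to a bare xchg on x86. An illustration, assuming an x86 target:

    #include <atomic>
    #include <cstdint>

    // Compiles to a single xchg instruction on x86; no explicit lock prefix
    // is needed because the memory form is implicitly locked.
    uint32_t swapWord(std::atomic<uint32_t> &slot, uint32_t v) {
      return slot.exchange(v);
    }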
-def : InstAlias<"xchgb $mem, $val", (XCHG8rm GR8 :$val, i8mem :$mem)>; -def : InstAlias<"xchgw $mem, $val", (XCHG16rm GR16:$val, i16mem:$mem)>; -def : InstAlias<"xchgl $mem, $val", (XCHG32rm GR32:$val, i32mem:$mem)>; -def : InstAlias<"xchgq $mem, $val", (XCHG64rm GR64:$val, i64mem:$mem)>; +def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}", (XCHG8rm GR8 :$val, i8mem :$mem)>; +def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}", (XCHG16rm GR16:$val, i16mem:$mem)>; +def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}", (XCHG32rm GR32:$val, i32mem:$mem)>; +def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}", (XCHG64rm GR64:$val, i64mem:$mem)>; // xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms. -def : InstAlias<"xchgw %ax, $src", (XCHG16ar GR16:$src)>; -def : InstAlias<"xchgl %eax, $src", (XCHG32ar GR32:$src)>, Requires<[In32BitMode]>; -def : InstAlias<"xchgl %eax, $src", (XCHG32ar64 GR32_NOAX:$src)>, Requires<[In64BitMode]>; -def : InstAlias<"xchgq %rax, $src", (XCHG64ar GR64:$src)>; +def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src)>; +def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src)>, Requires<[In32BitMode]>; +def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar64 GR32_NOAX:$src)>, Requires<[In64BitMode]>; +def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src)>; diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index 07314a0..cb12956 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -189,13 +189,14 @@ multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag, string asm, Domain d> { - def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst),(ins DstRC:$src1, SrcRC:$src2), - asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], - NoItinerary, d>; - def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst), - (ins DstRC:$src1, x86memop:$src2), asm, - [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], - NoItinerary, d>; + def irr : MMXPI<opc, MRMSrcReg, (outs DstRC:$dst), + (ins DstRC:$src1, SrcRC:$src2), asm, + [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], + NoItinerary, d>; + def irm : MMXPI<opc, MRMSrcMem, (outs DstRC:$dst), + (ins DstRC:$src1, x86memop:$src2), asm, + [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], + NoItinerary, d>; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 79b1ca3..a86006a 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -2843,8 +2843,8 @@ defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for, defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor, SSE_BIT_ITINS_P>; -let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in - defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef, +let isCommutable = 0 in + defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", X86fandn, SSE_BIT_ITINS_P>; /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops @@ -5477,12 +5477,12 @@ def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", TB, Requires<[HasSSE3]>; } // SchedRW -def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>; -def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>; +def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, 
Requires<[In32BitMode]>; +def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>; -def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>, +def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>, Requires<[In32BitMode]>; -def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>, +def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>, Requires<[In64BitMode]>; //===----------------------------------------------------------------------===// @@ -6139,11 +6139,11 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> { } let ExeDomain = SSEPackedSingle in { - let Predicates = [HasAVX] in { + let Predicates = [UseAVX] in { defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX; def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst), (ins VR128:$src1, i32i8imm:$src2), - "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}", + "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX; } defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">; @@ -7016,17 +7016,17 @@ defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem, int_x86_sse41_pblendvb>; // Aliases with the implicit xmm0 argument -def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", +def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}", (BLENDVPDrr0 VR128:$dst, VR128:$src2)>; -def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", +def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}", (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>; -def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", +def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}", (BLENDVPSrr0 VR128:$dst, VR128:$src2)>; -def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", +def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}", (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>; -def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", +def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}", (PBLENDVBrr0 VR128:$dst, VR128:$src2)>; -def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", +def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}", (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>; let Predicates = [UseSSE41] in { @@ -7266,62 +7266,62 @@ let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in { let Constraints = "$src1 = $dst" in { def CRC32r32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i8mem:$src2), - "crc32{b} \t{$src2, $src1|$src1, $src2}", + "crc32{b}\t{$src2, $src1|$src1, $src2}", [(set GR32:$dst, (int_x86_sse42_crc32_32_8 GR32:$src1, (load addr:$src2)))]>; def CRC32r32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR8:$src2), - "crc32{b} \t{$src2, $src1|$src1, $src2}", + "crc32{b}\t{$src2, $src1|$src1, $src2}", [(set GR32:$dst, (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>; def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i16mem:$src2), - "crc32{w} \t{$src2, $src1|$src1, $src2}", + "crc32{w}\t{$src2, $src1|$src1, $src2}", [(set GR32:$dst, (int_x86_sse42_crc32_32_16 GR32:$src1, (load addr:$src2)))]>, OpSize; def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR16:$src2), - "crc32{w} \t{$src2, $src1|$src1, $src2}", + "crc32{w}\t{$src2, $src1|$src1, $src2}", [(set GR32:$dst, (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>, OpSize; def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst), 
(ins GR32:$src1, i32mem:$src2), - "crc32{l} \t{$src2, $src1|$src1, $src2}", + "crc32{l}\t{$src2, $src1|$src1, $src2}", [(set GR32:$dst, (int_x86_sse42_crc32_32_32 GR32:$src1, (load addr:$src2)))]>; def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "crc32{l} \t{$src2, $src1|$src1, $src2}", + "crc32{l}\t{$src2, $src1|$src1, $src2}", [(set GR32:$dst, (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>; def CRC32r64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i8mem:$src2), - "crc32{b} \t{$src2, $src1|$src1, $src2}", + "crc32{b}\t{$src2, $src1|$src1, $src2}", [(set GR64:$dst, (int_x86_sse42_crc32_64_8 GR64:$src1, (load addr:$src2)))]>, REX_W; def CRC32r64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR8:$src2), - "crc32{b} \t{$src2, $src1|$src1, $src2}", + "crc32{b}\t{$src2, $src1|$src1, $src2}", [(set GR64:$dst, (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>, REX_W; def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "crc32{q} \t{$src2, $src1|$src1, $src2}", + "crc32{q}\t{$src2, $src1|$src1, $src2}", [(set GR64:$dst, (int_x86_sse42_crc32_64_64 GR64:$src1, (load addr:$src2)))]>, REX_W; def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "crc32{q} \t{$src2, $src1|$src1, $src2}", + "crc32{q}\t{$src2, $src1|$src1, $src2}", [(set GR64:$dst, (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>, REX_W; @@ -7586,62 +7586,62 @@ def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst), } let Predicates = [HasAVX] in { -def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2), +def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2), (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2), (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; + (INSERT_get_vinsert128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2), +def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2), (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2), (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; + (INSERT_get_vinsert128_imm VR256:$ins))>; } let Predicates = [HasAVX1Only] in { -def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2), +def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2), (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2), (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : 
Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2), (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2), (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; + (INSERT_get_vinsert128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2), +def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2), (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (bc_v4i32 (memopv2i64 addr:$src2)), (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (bc_v16i8 (memopv2i64 addr:$src2)), (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (bc_v8i16 (memopv2i64 addr:$src2)), (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; + (INSERT_get_vinsert128_imm VR256:$ins))>; } //===----------------------------------------------------------------------===// @@ -7661,59 +7661,59 @@ def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs), // AVX1 patterns let Predicates = [HasAVX] in { -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v4f32 (VEXTRACTF128rr (v8f32 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), + (EXTRACT_get_vextract128_imm VR128:$ext)))>; +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v2f64 (VEXTRACTF128rr (v4f64 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; + (EXTRACT_get_vextract128_imm VR128:$ext)))>; -def : Pat<(alignedstore (v4f32 (vextractf128_extract:$ext (v8f32 VR256:$src1), +def : Pat<(alignedstore (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTF128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; -def : Pat<(alignedstore (v2f64 (vextractf128_extract:$ext (v4f64 VR256:$src1), + (EXTRACT_get_vextract128_imm VR128:$ext))>; +def : Pat<(alignedstore (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTF128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; + (EXTRACT_get_vextract128_imm VR128:$ext))>; } let Predicates = [HasAVX1Only] in { -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v2i64 (VEXTRACTF128rr (v4i64 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), + (EXTRACT_get_vextract128_imm VR128:$ext)))>; +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v4i32 (VEXTRACTF128rr (v8i32 VR256:$src1), - 
(EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), + (EXTRACT_get_vextract128_imm VR128:$ext)))>; +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v8i16 (VEXTRACTF128rr (v16i16 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), + (EXTRACT_get_vextract128_imm VR128:$ext)))>; +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v16i8 (VEXTRACTF128rr (v32i8 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; + (EXTRACT_get_vextract128_imm VR128:$ext)))>; -def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1), +def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTF128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; -def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1), + (EXTRACT_get_vextract128_imm VR128:$ext))>; +def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTF128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; -def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1), + (EXTRACT_get_vextract128_imm VR128:$ext))>; +def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTF128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; -def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1), + (EXTRACT_get_vextract128_imm VR128:$ext))>; +def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTF128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; + (EXTRACT_get_vextract128_imm VR128:$ext))>; } //===----------------------------------------------------------------------===// @@ -8191,42 +8191,42 @@ def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst), } let Predicates = [HasAVX2] in { -def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2), +def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2), (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2), (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2), (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2), (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; + (INSERT_get_vinsert128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2), +def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2), (iPTR imm)), (VINSERTI128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm 
VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (bc_v4i32 (memopv2i64 addr:$src2)), (iPTR imm)), (VINSERTI128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (bc_v16i8 (memopv2i64 addr:$src2)), (iPTR imm)), (VINSERTI128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), + (INSERT_get_vinsert128_imm VR256:$ins))>; +def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (bc_v8i16 (memopv2i64 addr:$src2)), (iPTR imm)), (VINSERTI128rm VR256:$src1, addr:$src2, - (INSERT_get_vinsertf128_imm VR256:$ins))>; + (INSERT_get_vinsert128_imm VR256:$ins))>; } //===----------------------------------------------------------------------===// @@ -8245,39 +8245,39 @@ def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs), VEX, VEX_L; let Predicates = [HasAVX2] in { -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v2i64 (VEXTRACTI128rr (v4i64 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), + (EXTRACT_get_vextract128_imm VR128:$ext)))>; +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v4i32 (VEXTRACTI128rr (v8i32 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), + (EXTRACT_get_vextract128_imm VR128:$ext)))>; +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v8i16 (VEXTRACTI128rr (v16i16 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), + (EXTRACT_get_vextract128_imm VR128:$ext)))>; +def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)), (v16i8 (VEXTRACTI128rr (v32i8 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; + (EXTRACT_get_vextract128_imm VR128:$ext)))>; -def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1), +def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTI128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; -def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1), + (EXTRACT_get_vextract128_imm VR128:$ext))>; +def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTI128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; -def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1), + (EXTRACT_get_vextract128_imm VR128:$ext))>; +def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTI128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; -def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1), + (EXTRACT_get_vextract128_imm VR128:$ext))>; +def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1), (iPTR imm))), addr:$dst), (VEXTRACTI128mr addr:$dst, VR256:$src1, - (EXTRACT_get_vextractf128_imm VR128:$ext))>; + (EXTRACT_get_vextract128_imm VR128:$ext))>; } 
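The pattern fragments used by the VINSERTF128/VEXTRACTF128 and VINSERTI128/VEXTRACTI128 hunks above are renamed to lane-size-only forms (vinsert128_insert, vextract128_extract) so the AVX floating-point and AVX2 integer instructions can share them. The companion imm transforms turn the insert/extract element index into the instruction's 128-bit lane immediate; a hedged sketch of that arithmetic, with an illustrative name and signature rather than LLVM's SDNodeXForm, is:

    #include <cassert>
    #include <cstdint>

    // Convert the element index of an insert/extract point into the
    // 128-bit-lane immediate that VINSERT[F/I]128 and VEXTRACT[F/I]128
    // encode.
    uint8_t lane128Immediate(unsigned EltIdx, unsigned EltSizeInBits) {
      unsigned BitIdx = EltIdx * EltSizeInBits;
      assert(BitIdx % 128 == 0 && "subvector must start on a 128-bit lane");
      return static_cast<uint8_t>(BitIdx / 128); // 0 = low lane, 1 = high lane
    }

    int main() {
      // Inserting a v4f32 at element 4 of a v8f32 targets the high lane.
      return lane128Immediate(4, 32) == 1 ? 0 : 1;
    }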
//===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrSVM.td b/lib/Target/X86/X86InstrSVM.td index 757dcd0..0191c01 100644 --- a/lib/Target/X86/X86InstrSVM.td +++ b/lib/Target/X86/X86InstrSVM.td @@ -26,37 +26,37 @@ def CLGI : I<0x01, MRM_DD, (outs), (ins), "clgi", []>, TB; // 0F 01 DE let Uses = [EAX] in -def SKINIT : I<0x01, MRM_DE, (outs), (ins), "skinit\t{%eax|EAX}", []>, TB; +def SKINIT : I<0x01, MRM_DE, (outs), (ins), "skinit\t{%eax|eax}", []>, TB; // 0F 01 D8 let Uses = [EAX] in def VMRUN32 : I<0x01, MRM_D8, (outs), (ins), - "vmrun\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>; + "vmrun\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>; let Uses = [RAX] in def VMRUN64 : I<0x01, MRM_D8, (outs), (ins), - "vmrun\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>; + "vmrun\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>; // 0F 01 DA let Uses = [EAX] in def VMLOAD32 : I<0x01, MRM_DA, (outs), (ins), - "vmload\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>; + "vmload\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>; let Uses = [RAX] in def VMLOAD64 : I<0x01, MRM_DA, (outs), (ins), - "vmload\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>; + "vmload\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>; // 0F 01 DB let Uses = [EAX] in def VMSAVE32 : I<0x01, MRM_DB, (outs), (ins), - "vmsave\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>; + "vmsave\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>; let Uses = [RAX] in def VMSAVE64 : I<0x01, MRM_DB, (outs), (ins), - "vmsave\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>; + "vmsave\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>; // 0F 01 DF let Uses = [EAX, ECX] in def INVLPGA32 : I<0x01, MRM_DF, (outs), (ins), - "invlpga\t{%ecx, %eax|EAX, ECX}", []>, TB, Requires<[In32BitMode]>; + "invlpga\t{%ecx, %eax|eax, ecx}", []>, TB, Requires<[In32BitMode]>; let Uses = [RAX, ECX] in def INVLPGA64 : I<0x01, MRM_DF, (outs), (ins), - "invlpga\t{%ecx, %rax|RAX, ECX}", []>, TB, Requires<[In64BitMode]>; + "invlpga\t{%ecx, %rax|rax, ecx}", []>, TB, Requires<[In64BitMode]>; diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td index 89c1a689..1937770 100644 --- a/lib/Target/X86/X86InstrShiftRotate.td +++ b/lib/Target/X86/X86InstrShiftRotate.td @@ -18,16 +18,16 @@ let Defs = [EFLAGS] in { let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1), - "shl{b}\t{%cl, $dst|$dst, CL}", + "shl{b}\t{%cl, $dst|$dst, cl}", [(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>; def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1), - "shl{w}\t{%cl, $dst|$dst, CL}", + "shl{w}\t{%cl, $dst|$dst, cl}", [(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize; def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1), - "shl{l}\t{%cl, $dst|$dst, CL}", + "shl{l}\t{%cl, $dst|$dst, cl}", [(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>; def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1), - "shl{q}\t{%cl, $dst|$dst, CL}", + "shl{q}\t{%cl, $dst|$dst, cl}", [(set GR64:$dst, (shl GR64:$src1, CL))], IIC_SR>; } // Uses = [CL] @@ -70,17 +70,17 @@ let SchedRW = [WriteShiftLd, WriteRMW] in { // using CL? 
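All of the shift and rotate definitions continuing below read their count from the fixed register CL, which is why CL appears in the asm strings (now lower-cased on the Intel side) but never in the (ins) lists: it is an implicit operand declared through Uses = [CL]. As a behavioral sketch of the semantics involved, assuming the architectural 5-bit count mask that applies to 8/16/32-bit operands:

    #include <cassert>
    #include <cstdint>

    // shl{b} %cl, r/m8: the hardware masks the count to 5 bits, so an
    // 8-bit operand shifted by 8..31 becomes zero.
    uint8_t shl8_by_cl(uint8_t Dst, uint8_t CL) {
      unsigned Count = CL & 31;
      return Count >= 8 ? 0 : static_cast<uint8_t>(Dst << Count);
    }

    int main() {
      assert(shl8_by_cl(0x81, 1) == 0x02);  // top bit shifted out
      assert(shl8_by_cl(0x01, 33) == 0x02); // 33 & 31 == 1
      return 0;
    }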
let Uses = [CL] in { def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst), - "shl{b}\t{%cl, $dst|$dst, CL}", + "shl{b}\t{%cl, $dst|$dst, cl}", [(store (shl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>; def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst), - "shl{w}\t{%cl, $dst|$dst, CL}", + "shl{w}\t{%cl, $dst|$dst, cl}", [(store (shl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>, OpSize; def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst), - "shl{l}\t{%cl, $dst|$dst, CL}", + "shl{l}\t{%cl, $dst|$dst, cl}", [(store (shl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>; def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst), - "shl{q}\t{%cl, $dst|$dst, CL}", + "shl{q}\t{%cl, $dst|$dst, cl}", [(store (shl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>; } def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src), @@ -124,16 +124,16 @@ def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst), let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1), - "shr{b}\t{%cl, $dst|$dst, CL}", + "shr{b}\t{%cl, $dst|$dst, cl}", [(set GR8:$dst, (srl GR8:$src1, CL))], IIC_SR>; def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1), - "shr{w}\t{%cl, $dst|$dst, CL}", + "shr{w}\t{%cl, $dst|$dst, cl}", [(set GR16:$dst, (srl GR16:$src1, CL))], IIC_SR>, OpSize; def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1), - "shr{l}\t{%cl, $dst|$dst, CL}", + "shr{l}\t{%cl, $dst|$dst, cl}", [(set GR32:$dst, (srl GR32:$src1, CL))], IIC_SR>; def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1), - "shr{q}\t{%cl, $dst|$dst, CL}", + "shr{q}\t{%cl, $dst|$dst, cl}", [(set GR64:$dst, (srl GR64:$src1, CL))], IIC_SR>; } @@ -171,17 +171,17 @@ def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1), let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst), - "shr{b}\t{%cl, $dst|$dst, CL}", + "shr{b}\t{%cl, $dst|$dst, cl}", [(store (srl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>; def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst), - "shr{w}\t{%cl, $dst|$dst, CL}", + "shr{w}\t{%cl, $dst|$dst, cl}", [(store (srl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>, OpSize; def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst), - "shr{l}\t{%cl, $dst|$dst, CL}", + "shr{l}\t{%cl, $dst|$dst, cl}", [(store (srl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>; def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst), - "shr{q}\t{%cl, $dst|$dst, CL}", + "shr{q}\t{%cl, $dst|$dst, cl}", [(store (srl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>; } def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src), @@ -224,19 +224,19 @@ def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst), let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1), - "sar{b}\t{%cl, $dst|$dst, CL}", + "sar{b}\t{%cl, $dst|$dst, cl}", [(set GR8:$dst, (sra GR8:$src1, CL))], IIC_SR>; def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1), - "sar{w}\t{%cl, $dst|$dst, CL}", + "sar{w}\t{%cl, $dst|$dst, cl}", [(set GR16:$dst, (sra GR16:$src1, CL))], IIC_SR>, OpSize; def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1), - "sar{l}\t{%cl, $dst|$dst, CL}", + "sar{l}\t{%cl, $dst|$dst, cl}", [(set GR32:$dst, (sra GR32:$src1, CL))], IIC_SR>; def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1), - "sar{q}\t{%cl, $dst|$dst, CL}", + 
"sar{q}\t{%cl, $dst|$dst, cl}", [(set GR64:$dst, (sra GR64:$src1, CL))], IIC_SR>; } @@ -283,19 +283,19 @@ def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1), let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst), - "sar{b}\t{%cl, $dst|$dst, CL}", + "sar{b}\t{%cl, $dst|$dst, cl}", [(store (sra (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>; def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst), - "sar{w}\t{%cl, $dst|$dst, CL}", + "sar{w}\t{%cl, $dst|$dst, cl}", [(store (sra (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>, OpSize; def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst), - "sar{l}\t{%cl, $dst|$dst, CL}", + "sar{l}\t{%cl, $dst|$dst, cl}", [(store (sra (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>; def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst), - "sar{q}\t{%cl, $dst|$dst, CL}", + "sar{q}\t{%cl, $dst|$dst, cl}", [(store (sra (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>; } @@ -349,7 +349,7 @@ def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt), "rcl{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>; let Uses = [CL] in def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1), - "rcl{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1), "rcl{w}\t$dst", [], IIC_SR>, OpSize; @@ -357,7 +357,7 @@ def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt), "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize; let Uses = [CL] in def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1), - "rcl{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize; + "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize; def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1), "rcl{l}\t$dst", [], IIC_SR>; @@ -365,7 +365,7 @@ def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt), "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>; let Uses = [CL] in def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1), - "rcl{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1), @@ -374,7 +374,7 @@ def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt), "rcl{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>; let Uses = [CL] in def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1), - "rcl{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcl{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1), @@ -383,7 +383,7 @@ def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt), "rcr{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>; let Uses = [CL] in def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1), - "rcr{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1), "rcr{w}\t$dst", [], IIC_SR>, OpSize; @@ -391,7 +391,7 @@ def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt), "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize; let Uses = [CL] in def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1), - "rcr{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize; + "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize; def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1), "rcr{l}\t$dst", [], IIC_SR>; @@ -399,7 +399,7 @@ def RCR32ri : Ii8<0xC1, MRM3r, 
(outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt), "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>; let Uses = [CL] in def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1), - "rcr{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1), "rcr{q}\t$dst", [], IIC_SR>; @@ -407,7 +407,7 @@ def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt), "rcr{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>; let Uses = [CL] in def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1), - "rcr{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcr{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; } // Constraints = "$src = $dst" @@ -448,22 +448,22 @@ def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt), let Uses = [CL] in { def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst), - "rcl{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst), - "rcl{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize; + "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize; def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst), - "rcl{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst), - "rcl{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcl{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst), - "rcr{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst), - "rcr{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize; + "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize; def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst), - "rcr{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst), - "rcr{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; + "rcr{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>; } } // SchedRW } // hasSideEffects = 0 @@ -472,16 +472,16 @@ let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { // FIXME: provide shorter instructions when imm8 == 1 let Uses = [CL] in { def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1), - "rol{b}\t{%cl, $dst|$dst, CL}", + "rol{b}\t{%cl, $dst|$dst, cl}", [(set GR8:$dst, (rotl GR8:$src1, CL))], IIC_SR>; def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1), - "rol{w}\t{%cl, $dst|$dst, CL}", + "rol{w}\t{%cl, $dst|$dst, cl}", [(set GR16:$dst, (rotl GR16:$src1, CL))], IIC_SR>, OpSize; def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1), - "rol{l}\t{%cl, $dst|$dst, CL}", + "rol{l}\t{%cl, $dst|$dst, cl}", [(set GR32:$dst, (rotl GR32:$src1, CL))], IIC_SR>; def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1), - "rol{q}\t{%cl, $dst|$dst, CL}", + "rol{q}\t{%cl, $dst|$dst, cl}", [(set GR64:$dst, (rotl GR64:$src1, CL))], IIC_SR>; } @@ -525,19 +525,19 @@ def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1), let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst), - "rol{b}\t{%cl, $dst|$dst, CL}", + "rol{b}\t{%cl, $dst|$dst, cl}", [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>; def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst), - "rol{w}\t{%cl, $dst|$dst, CL}", + "rol{w}\t{%cl, $dst|$dst, cl}", [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>, 
OpSize; def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst), - "rol{l}\t{%cl, $dst|$dst, CL}", + "rol{l}\t{%cl, $dst|$dst, cl}", [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>; def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst), - "rol{q}\t{%cl, $dst|$dst, %cl}", + "rol{q}\t{%cl, $dst|$dst, cl}", [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>; } @@ -582,16 +582,16 @@ def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst), let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1), - "ror{b}\t{%cl, $dst|$dst, CL}", + "ror{b}\t{%cl, $dst|$dst, cl}", [(set GR8:$dst, (rotr GR8:$src1, CL))], IIC_SR>; def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1), - "ror{w}\t{%cl, $dst|$dst, CL}", + "ror{w}\t{%cl, $dst|$dst, cl}", [(set GR16:$dst, (rotr GR16:$src1, CL))], IIC_SR>, OpSize; def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1), - "ror{l}\t{%cl, $dst|$dst, CL}", + "ror{l}\t{%cl, $dst|$dst, cl}", [(set GR32:$dst, (rotr GR32:$src1, CL))], IIC_SR>; def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1), - "ror{q}\t{%cl, $dst|$dst, CL}", + "ror{q}\t{%cl, $dst|$dst, cl}", [(set GR64:$dst, (rotr GR64:$src1, CL))], IIC_SR>; } @@ -635,19 +635,19 @@ def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1), let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst), - "ror{b}\t{%cl, $dst|$dst, CL}", + "ror{b}\t{%cl, $dst|$dst, cl}", [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>; def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst), - "ror{w}\t{%cl, $dst|$dst, CL}", + "ror{w}\t{%cl, $dst|$dst, cl}", [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>, OpSize; def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst), - "ror{l}\t{%cl, $dst|$dst, CL}", + "ror{l}\t{%cl, $dst|$dst, cl}", [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>; def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst), - "ror{q}\t{%cl, $dst|$dst, CL}", + "ror{q}\t{%cl, $dst|$dst, cl}", [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>; } @@ -699,35 +699,35 @@ let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))], IIC_SHD16_REG_CL>, TB, OpSize; def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))], IIC_SHD16_REG_CL>, TB, OpSize; def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))], IIC_SHD32_REG_CL>, TB; def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))], IIC_SHD32_REG_CL>, TB; def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "shld{q}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shld{q}\t{%cl, $src2, $dst|$dst, 
$src2, cl}", [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))], IIC_SHD64_REG_CL>, TB; def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))], IIC_SHD64_REG_CL>, TB; @@ -782,29 +782,29 @@ def SHRD64rri8 : RIi8<0xAC, MRMDestReg, let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2), - "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL), addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize; def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2), - "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL), addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize; def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2), - "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL), addr:$dst)], IIC_SHD32_MEM_CL>, TB; def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2), - "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL), addr:$dst)], IIC_SHD32_MEM_CL>, TB; def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), - "shld{q}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL), addr:$dst)], IIC_SHD64_MEM_CL>, TB; def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), - "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, CL}", + "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}", [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL), addr:$dst)], IIC_SHD64_MEM_CL>, TB; } diff --git a/lib/Target/X86/X86InstrSystem.td b/lib/Target/X86/X86InstrSystem.td index bab3cdd..2196dc3 100644 --- a/lib/Target/X86/X86InstrSystem.td +++ b/lib/Target/X86/X86InstrSystem.td @@ -77,43 +77,43 @@ def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>, let SchedRW = [WriteSystem] in { let Defs = [AL], Uses = [DX] in def IN8rr : I<0xEC, RawFrm, (outs), (ins), - "in{b}\t{%dx, %al|AL, DX}", [], IIC_IN_RR>; + "in{b}\t{%dx, %al|al, dx}", [], IIC_IN_RR>; let Defs = [AX], Uses = [DX] in def IN16rr : I<0xED, RawFrm, (outs), (ins), - "in{w}\t{%dx, %ax|AX, DX}", [], IIC_IN_RR>, OpSize; + "in{w}\t{%dx, %ax|ax, dx}", [], IIC_IN_RR>, OpSize; let Defs = [EAX], Uses = [DX] in def IN32rr : I<0xED, RawFrm, (outs), (ins), - "in{l}\t{%dx, %eax|EAX, DX}", [], IIC_IN_RR>; + "in{l}\t{%dx, %eax|eax, dx}", [], IIC_IN_RR>; let Defs = [AL] in def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i8imm:$port), - "in{b}\t{$port, %al|AL, $port}", [], IIC_IN_RI>; + "in{b}\t{$port, %al|al, $port}", [], IIC_IN_RI>; let Defs = [AX] in def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port), - "in{w}\t{$port, %ax|AX, $port}", [], IIC_IN_RI>, OpSize; + "in{w}\t{$port, %ax|ax, $port}", [], IIC_IN_RI>, OpSize; let Defs = [EAX] in def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port), - "in{l}\t{$port, %eax|EAX, $port}", [], IIC_IN_RI>; + "in{l}\t{$port, %eax|eax, $port}", [], IIC_IN_RI>; let Uses = [DX, AL] in def OUT8rr : I<0xEE, RawFrm, (outs), (ins), - 
"out{b}\t{%al, %dx|DX, AL}", [], IIC_OUT_RR>; + "out{b}\t{%al, %dx|dx, al}", [], IIC_OUT_RR>; let Uses = [DX, AX] in def OUT16rr : I<0xEF, RawFrm, (outs), (ins), - "out{w}\t{%ax, %dx|DX, AX}", [], IIC_OUT_RR>, OpSize; + "out{w}\t{%ax, %dx|dx, ax}", [], IIC_OUT_RR>, OpSize; let Uses = [DX, EAX] in def OUT32rr : I<0xEF, RawFrm, (outs), (ins), - "out{l}\t{%eax, %dx|DX, EAX}", [], IIC_OUT_RR>; + "out{l}\t{%eax, %dx|dx, eax}", [], IIC_OUT_RR>; let Uses = [AL] in def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i8imm:$port), - "out{b}\t{%al, $port|$port, AL}", [], IIC_OUT_IR>; + "out{b}\t{%al, $port|$port, al}", [], IIC_OUT_IR>; let Uses = [AX] in def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port), - "out{w}\t{%ax, $port|$port, AX}", [], IIC_OUT_IR>, OpSize; + "out{w}\t{%ax, $port|$port, ax}", [], IIC_OUT_IR>, OpSize; let Uses = [EAX] in def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port), - "out{l}\t{%eax, $port|$port, EAX}", [], IIC_OUT_IR>; + "out{l}\t{%eax, $port|$port, eax}", [], IIC_OUT_IR>; def IN8 : I<0x6C, RawFrm, (outs), (ins), "ins{b}", [], IIC_INS>; def IN16 : I<0x6D, RawFrm, (outs), (ins), "ins{w}", [], IIC_INS>, OpSize; @@ -248,75 +248,75 @@ def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src), "ltr{w}\t$src", [], IIC_LTR>, TB; def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins), - "push{w}\t{%cs|CS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, + "push{w}\t{%cs|cs}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, OpSize; def PUSHCS32 : I<0x0E, RawFrm, (outs), (ins), - "push{l}\t{%cs|CS}", [], IIC_PUSH_CS>, Requires<[In32BitMode]>; + "push{l}\t{%cs|cs}", [], IIC_PUSH_CS>, Requires<[In32BitMode]>; def PUSHSS16 : I<0x16, RawFrm, (outs), (ins), - "push{w}\t{%ss|SS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, + "push{w}\t{%ss|ss}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, OpSize; def PUSHSS32 : I<0x16, RawFrm, (outs), (ins), - "push{l}\t{%ss|SS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>; + "push{l}\t{%ss|ss}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>; def PUSHDS16 : I<0x1E, RawFrm, (outs), (ins), - "push{w}\t{%ds|DS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, + "push{w}\t{%ds|ds}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, OpSize; def PUSHDS32 : I<0x1E, RawFrm, (outs), (ins), - "push{l}\t{%ds|DS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>; + "push{l}\t{%ds|ds}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>; def PUSHES16 : I<0x06, RawFrm, (outs), (ins), - "push{w}\t{%es|ES}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, + "push{w}\t{%es|es}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>, OpSize; def PUSHES32 : I<0x06, RawFrm, (outs), (ins), - "push{l}\t{%es|ES}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>; + "push{l}\t{%es|es}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>; def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins), - "push{w}\t{%fs|FS}", [], IIC_PUSH_SR>, OpSize, TB; + "push{w}\t{%fs|fs}", [], IIC_PUSH_SR>, OpSize, TB; def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins), - "push{l}\t{%fs|FS}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>; + "push{l}\t{%fs|fs}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>; def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins), - "push{w}\t{%gs|GS}", [], IIC_PUSH_SR>, OpSize, TB; + "push{w}\t{%gs|gs}", [], IIC_PUSH_SR>, OpSize, TB; def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins), - "push{l}\t{%gs|GS}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>; + "push{l}\t{%gs|gs}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>; def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins), - "push{q}\t{%fs|FS}", [], IIC_PUSH_SR>, TB; + "push{q}\t{%fs|fs}", [], IIC_PUSH_SR>, TB; def PUSHGS64 : I<0xa8, 
RawFrm, (outs), (ins), - "push{q}\t{%gs|GS}", [], IIC_PUSH_SR>, TB; + "push{q}\t{%gs|gs}", [], IIC_PUSH_SR>, TB; // No "pop cs" instruction. def POPSS16 : I<0x17, RawFrm, (outs), (ins), - "pop{w}\t{%ss|SS}", [], IIC_POP_SR_SS>, + "pop{w}\t{%ss|ss}", [], IIC_POP_SR_SS>, OpSize, Requires<[In32BitMode]>; def POPSS32 : I<0x17, RawFrm, (outs), (ins), - "pop{l}\t{%ss|SS}", [], IIC_POP_SR_SS>, + "pop{l}\t{%ss|ss}", [], IIC_POP_SR_SS>, Requires<[In32BitMode]>; def POPDS16 : I<0x1F, RawFrm, (outs), (ins), - "pop{w}\t{%ds|DS}", [], IIC_POP_SR>, + "pop{w}\t{%ds|ds}", [], IIC_POP_SR>, OpSize, Requires<[In32BitMode]>; def POPDS32 : I<0x1F, RawFrm, (outs), (ins), - "pop{l}\t{%ds|DS}", [], IIC_POP_SR>, + "pop{l}\t{%ds|ds}", [], IIC_POP_SR>, Requires<[In32BitMode]>; def POPES16 : I<0x07, RawFrm, (outs), (ins), - "pop{w}\t{%es|ES}", [], IIC_POP_SR>, + "pop{w}\t{%es|es}", [], IIC_POP_SR>, OpSize, Requires<[In32BitMode]>; def POPES32 : I<0x07, RawFrm, (outs), (ins), - "pop{l}\t{%es|ES}", [], IIC_POP_SR>, + "pop{l}\t{%es|es}", [], IIC_POP_SR>, Requires<[In32BitMode]>; def POPFS16 : I<0xa1, RawFrm, (outs), (ins), - "pop{w}\t{%fs|FS}", [], IIC_POP_SR>, OpSize, TB; + "pop{w}\t{%fs|fs}", [], IIC_POP_SR>, OpSize, TB; def POPFS32 : I<0xa1, RawFrm, (outs), (ins), - "pop{l}\t{%fs|FS}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>; + "pop{l}\t{%fs|fs}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>; def POPFS64 : I<0xa1, RawFrm, (outs), (ins), - "pop{q}\t{%fs|FS}", [], IIC_POP_SR>, TB; + "pop{q}\t{%fs|fs}", [], IIC_POP_SR>, TB; def POPGS16 : I<0xa9, RawFrm, (outs), (ins), - "pop{w}\t{%gs|GS}", [], IIC_POP_SR>, OpSize, TB; + "pop{w}\t{%gs|gs}", [], IIC_POP_SR>, OpSize, TB; def POPGS32 : I<0xa9, RawFrm, (outs), (ins), - "pop{l}\t{%gs|GS}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>; + "pop{l}\t{%gs|gs}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>; def POPGS64 : I<0xa9, RawFrm, (outs), (ins), - "pop{q}\t{%gs|GS}", [], IIC_POP_SR>, TB; + "pop{q}\t{%gs|gs}", [], IIC_POP_SR>, TB; def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), diff --git a/lib/Target/X86/X86InstrTSX.td b/lib/Target/X86/X86InstrTSX.td index 363a190..59a6f1e 100644 --- a/lib/Target/X86/X86InstrTSX.td +++ b/lib/Target/X86/X86InstrTSX.td @@ -37,3 +37,10 @@ def XTEST : I<0x01, MRM_D6, (outs), (ins), def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm), "xabort\t$imm", [(int_x86_xabort imm:$imm)]>, Requires<[HasRTM]>; + +// HLE prefixes + +def XACQUIRE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "xacquire", []>, Requires<[HasHLE]>; + +def XRELEASE_PREFIX : I<0xF3, RawFrm, (outs), (ins), "xrelease", []>, Requires<[HasHLE]>; + diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp index a453245..c7c00b5 100644 --- a/lib/Target/X86/X86MCInstLower.cpp +++ b/lib/Target/X86/X86MCInstLower.cpp @@ -254,6 +254,34 @@ static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { Inst.addOperand(Saved); } +/// \brief If a movsx instruction has a shorter encoding for the used register +/// simplify the instruction to use it instead. 
+static void SimplifyMOVSX(MCInst &Inst) { + unsigned NewOpcode = 0; + unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg(); + switch (Inst.getOpcode()) { + default: + llvm_unreachable("Unexpected instruction!"); + case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw + if (Op0 == X86::AX && Op1 == X86::AL) + NewOpcode = X86::CBW; + break; + case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl + if (Op0 == X86::EAX && Op1 == X86::AX) + NewOpcode = X86::CWDE; + break; + case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq + if (Op0 == X86::RAX && Op1 == X86::EAX) + NewOpcode = X86::CDQE; + break; + } + + if (NewOpcode != 0) { + Inst = MCInst(); + Inst.setOpcode(NewOpcode); + } +} + /// \brief Simplify things like MOV32rm to MOV32o32a. static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, unsigned Opcode) { @@ -557,6 +585,13 @@ ReSimplify: case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break; case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break; + // Try to shrink some forms of movsx. + case X86::MOVSX16rr8: + case X86::MOVSX32rr16: + case X86::MOVSX64rr32: + SimplifyMOVSX(OutMI); + break; + case X86::MORESTACK_RET: OutMI.setOpcode(X86::RET); break; @@ -654,13 +689,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) { X86MCInstLower MCInstLowering(Mang, *MF, *this); switch (MI->getOpcode()) { case TargetOpcode::DBG_VALUE: - if (isVerbose() && OutStreamer.hasRawTextSupport()) { - std::string TmpStr; - raw_string_ostream OS(TmpStr); - PrintDebugValueComment(MI, OS); - OutStreamer.EmitRawText(StringRef(OS.str())); - } - return; + llvm_unreachable("Should be handled target independently"); // Emit nothing here but a comment if we can. case X86::Int_MemBarrier: diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index eacae2c..0923310 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -241,6 +241,11 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { case CallingConv::Intel_OCL_BI: { bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX(); + bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512(); + if (HasAVX512 && IsWin64) + return CSR_Win64_Intel_OCL_BI_AVX512_SaveList; + if (HasAVX512 && Is64Bit) + return CSR_64_Intel_OCL_BI_AVX512_SaveList; if (HasAVX && IsWin64) return CSR_Win64_Intel_OCL_BI_AVX_SaveList; if (HasAVX && Is64Bit) @@ -275,8 +280,13 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { const uint32_t* X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX(); + bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512(); if (CC == CallingConv::Intel_OCL_BI) { + if (IsWin64 && HasAVX512) + return CSR_Win64_Intel_OCL_BI_AVX512_RegMask; + if (Is64Bit && HasAVX512) + return CSR_64_Intel_OCL_BI_AVX512_RegMask; if (IsWin64 && HasAVX) return CSR_Win64_Intel_OCL_BI_AVX_RegMask; if (Is64Bit && HasAVX) @@ -344,14 +354,8 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { Reserved.set(X86::GS); // Mark the floating point stack registers as reserved. - Reserved.set(X86::ST0); - Reserved.set(X86::ST1); - Reserved.set(X86::ST2); - Reserved.set(X86::ST3); - Reserved.set(X86::ST4); - Reserved.set(X86::ST5); - Reserved.set(X86::ST6); - Reserved.set(X86::ST7); + for (unsigned n = 0; n != 8; ++n) + Reserved.set(X86::ST0 + n); // Reserve the registers that only exist in 64-bit mode. 
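The SimplifyMOVSX hook added above is only legal because each rewritten pair is semantically identical: cbtw/cwtl/cltq sign-extend the accumulator exactly as the corresponding movs-form with accumulator operands does, while taking one opcode byte instead of three or four. A standalone equivalence check for the cwtl case (plain C++, not LLVM code):

    #include <cassert>
    #include <cstdint>

    // "movswl %ax, %eax" and "cwtl" both sign-extend AX into EAX, so the
    // substitution is behavior-preserving.
    uint32_t movswl_ax_eax(uint16_t AX) {
      return static_cast<uint32_t>(
          static_cast<int32_t>(static_cast<int16_t>(AX)));
    }

    int main() {
      assert(movswl_ax_eax(0x8000) == 0xFFFF8000u); // sign bit propagates
      assert(movswl_ax_eax(0x1234) == 0x00001234u);
      return 0;
    }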
if (!Is64Bit) { @@ -364,19 +368,17 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { for (unsigned n = 0; n != 8; ++n) { // R8, R9, ... - static const uint16_t GPR64[] = { - X86::R8, X86::R9, X86::R10, X86::R11, - X86::R12, X86::R13, X86::R14, X86::R15 - }; - for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI) + for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI) Reserved.set(*AI); // XMM8, XMM9, ... - static const uint16_t XMMReg[] = { - X86::XMM8, X86::XMM9, X86::XMM10, X86::XMM11, - X86::XMM12, X86::XMM13, X86::XMM14, X86::XMM15 - }; - for (MCRegAliasIterator AI(XMMReg[n], this, true); AI.isValid(); ++AI) + for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI) + Reserved.set(*AI); + } + } + if (!Is64Bit || !TM.getSubtarget<X86Subtarget>().hasAVX512()) { + for (unsigned n = 16; n != 32; ++n) { + for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI) Reserved.set(*AI); } } @@ -409,10 +411,11 @@ bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const { } bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const { + if (MF.getFunction()->hasFnAttribute("no-realign-stack")) + return false; + const MachineFrameInfo *MFI = MF.getFrameInfo(); const MachineRegisterInfo *MRI = &MF.getRegInfo(); - if (!MF.getTarget().Options.RealignStack) - return false; // Stack realignment requires a frame pointer. If we already started // register allocation with frame pointer elimination, it is too late now. @@ -690,4 +693,15 @@ unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT, } } } + +unsigned get512BitSuperRegister(unsigned Reg) { + if (Reg >= X86::XMM0 && Reg <= X86::XMM31) + return X86::ZMM0 + (Reg - X86::XMM0); + if (Reg >= X86::YMM0 && Reg <= X86::YMM31) + return X86::ZMM0 + (Reg - X86::YMM0); + if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31) + return Reg; + llvm_unreachable("Unexpected SIMD register"); +} + } diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h index 6a1b328..fb17682 100644 --- a/lib/Target/X86/X86RegisterInfo.h +++ b/lib/Target/X86/X86RegisterInfo.h @@ -137,6 +137,9 @@ public: // e.g. 
getX86SubSuperRegister(X86::EAX, MVT::i16) return X86:AX unsigned getX86SubSuperRegister(unsigned, MVT::SimpleValueType, bool High=false); +//get512BitRegister - X86 utility - returns 512-bit super register +unsigned get512BitSuperRegister(unsigned Reg); + } // End llvm namespace #endif diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td index fbbb257..b802728 100644 --- a/lib/Target/X86/X86RegisterInfo.td +++ b/lib/Target/X86/X86RegisterInfo.td @@ -26,6 +26,7 @@ let Namespace = "X86" in { def sub_16bit : SubRegIndex<16>; def sub_32bit : SubRegIndex<32>; def sub_xmm : SubRegIndex<128>; + def sub_ymm : SubRegIndex<256>; } //===----------------------------------------------------------------------===// @@ -186,28 +187,53 @@ def XMM12: X86Reg<"xmm12", 12>, DwarfRegNum<[29, -2, -2]>; def XMM13: X86Reg<"xmm13", 13>, DwarfRegNum<[30, -2, -2]>; def XMM14: X86Reg<"xmm14", 14>, DwarfRegNum<[31, -2, -2]>; def XMM15: X86Reg<"xmm15", 15>, DwarfRegNum<[32, -2, -2]>; + +def XMM16: X86Reg<"xmm16", 16>, DwarfRegNum<[60, -2, -2]>; +def XMM17: X86Reg<"xmm17", 17>, DwarfRegNum<[61, -2, -2]>; +def XMM18: X86Reg<"xmm18", 18>, DwarfRegNum<[62, -2, -2]>; +def XMM19: X86Reg<"xmm19", 19>, DwarfRegNum<[63, -2, -2]>; +def XMM20: X86Reg<"xmm20", 20>, DwarfRegNum<[64, -2, -2]>; +def XMM21: X86Reg<"xmm21", 21>, DwarfRegNum<[65, -2, -2]>; +def XMM22: X86Reg<"xmm22", 22>, DwarfRegNum<[66, -2, -2]>; +def XMM23: X86Reg<"xmm23", 23>, DwarfRegNum<[67, -2, -2]>; +def XMM24: X86Reg<"xmm24", 24>, DwarfRegNum<[68, -2, -2]>; +def XMM25: X86Reg<"xmm25", 25>, DwarfRegNum<[69, -2, -2]>; +def XMM26: X86Reg<"xmm26", 26>, DwarfRegNum<[70, -2, -2]>; +def XMM27: X86Reg<"xmm27", 27>, DwarfRegNum<[71, -2, -2]>; +def XMM28: X86Reg<"xmm28", 28>, DwarfRegNum<[72, -2, -2]>; +def XMM29: X86Reg<"xmm29", 29>, DwarfRegNum<[73, -2, -2]>; +def XMM30: X86Reg<"xmm30", 30>, DwarfRegNum<[74, -2, -2]>; +def XMM31: X86Reg<"xmm31", 31>, DwarfRegNum<[75, -2, -2]>; + } // CostPerUse -// YMM Registers, used by AVX instructions +// YMM0-15 registers, used by AVX instructions and +// YMM16-31 registers, used by AVX-512 instructions. let SubRegIndices = [sub_xmm] in { -def YMM0: X86Reg<"ymm0", 0, [XMM0]>, DwarfRegAlias<XMM0>; -def YMM1: X86Reg<"ymm1", 1, [XMM1]>, DwarfRegAlias<XMM1>; -def YMM2: X86Reg<"ymm2", 2, [XMM2]>, DwarfRegAlias<XMM2>; -def YMM3: X86Reg<"ymm3", 3, [XMM3]>, DwarfRegAlias<XMM3>; -def YMM4: X86Reg<"ymm4", 4, [XMM4]>, DwarfRegAlias<XMM4>; -def YMM5: X86Reg<"ymm5", 5, [XMM5]>, DwarfRegAlias<XMM5>; -def YMM6: X86Reg<"ymm6", 6, [XMM6]>, DwarfRegAlias<XMM6>; -def YMM7: X86Reg<"ymm7", 7, [XMM7]>, DwarfRegAlias<XMM7>; -def YMM8: X86Reg<"ymm8", 8, [XMM8]>, DwarfRegAlias<XMM8>; -def YMM9: X86Reg<"ymm9", 9, [XMM9]>, DwarfRegAlias<XMM9>; -def YMM10: X86Reg<"ymm10", 10, [XMM10]>, DwarfRegAlias<XMM10>; -def YMM11: X86Reg<"ymm11", 11, [XMM11]>, DwarfRegAlias<XMM11>; -def YMM12: X86Reg<"ymm12", 12, [XMM12]>, DwarfRegAlias<XMM12>; -def YMM13: X86Reg<"ymm13", 13, [XMM13]>, DwarfRegAlias<XMM13>; -def YMM14: X86Reg<"ymm14", 14, [XMM14]>, DwarfRegAlias<XMM14>; -def YMM15: X86Reg<"ymm15", 15, [XMM15]>, DwarfRegAlias<XMM15>; + foreach Index = 0-31 in { + def YMM#Index : X86Reg<"ymm"#Index, Index, [!cast<X86Reg>("XMM"#Index)]>, + DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>; + } +} + +// ZMM Registers, used by AVX-512 instructions. 
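get512BitSuperRegister, declared above, folds any xmm/ymm/zmm register onto its 512-bit super-register by enum-offset arithmetic, which is valid only because each 32-register block is laid out contiguously by the definitions here and in the ZMM block below. A sketch of the same rebasing, with made-up enum bases standing in for the generated X86:: values:

    #include <cassert>

    enum : unsigned { XMM0 = 1000, YMM0 = 2000, ZMM0 = 3000 }; // stand-in bases

    // Rebase an index within the XMM or YMM block onto the ZMM block,
    // mirroring the subtraction-and-add the real function performs.
    unsigned get512Super(unsigned Reg) {
      if (Reg >= XMM0 && Reg < XMM0 + 32) return ZMM0 + (Reg - XMM0);
      if (Reg >= YMM0 && Reg < YMM0 + 32) return ZMM0 + (Reg - YMM0);
      return Reg; // assume already a ZMM register
    }

    int main() {
      assert(get512Super(XMM0 + 17) == ZMM0 + 17); // xmm17 -> zmm17
      assert(get512Super(YMM0 + 17) == ZMM0 + 17); // ymm17 -> zmm17
      return 0;
    }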
+let SubRegIndices = [sub_ymm] in { + foreach Index = 0-31 in { + def ZMM#Index : X86Reg<"zmm"#Index, Index, [!cast<X86Reg>("YMM"#Index)]>, + DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>; + } } + // Mask Registers, used by AVX-512 instructions. + def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, -2, -2]>; + def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, -2, -2]>; + def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, -2, -2]>; + def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, -2, -2]>; + def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, -2, -2]>; + def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, -2, -2]>; + def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, -2, -2]>; + def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, -2, -2]>; + class STRegister<string n, bits<16> Enc, list<Register> A> : X86Reg<n, Enc> { let Aliases = A; } @@ -421,3 +447,25 @@ def FPCCR : RegisterClass<"X86", [i16], 16, (add FPSW)> { let CopyCost = -1; // Don't allow copying of status registers. let isAllocatable = 0; } + +// AVX-512 vector/mask registers. +def VR512 : RegisterClass<"X86", [v16f32, v8f64, v16i32, v8i64], 512, + (sequence "ZMM%u", 0, 31)>; + +// Scalar AVX-512 floating point registers. +def FR32X : RegisterClass<"X86", [f32], 32, (sequence "XMM%u", 0, 31)>; + +def FR64X : RegisterClass<"X86", [f64], 64, (add FR32X)>; + +// Extended VR128 and VR256 for AVX-512 instructions +def VR128X : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], + 128, (add FR32X)>; +def VR256X : RegisterClass<"X86", [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], + 256, (sequence "YMM%u", 0, 31)>; + +def VK8 : RegisterClass<"X86", [v8i1], 8, (sequence "K%u", 0, 7)>; +def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)>; + +def VK8WM : RegisterClass<"X86", [v8i1], 8, (sub VK8, K0)>; +def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)>; + diff --git a/lib/Target/X86/X86SchedHaswell.td b/lib/Target/X86/X86SchedHaswell.td index 84c9203..62ba2bc 100644 --- a/lib/Target/X86/X86SchedHaswell.td +++ b/lib/Target/X86/X86SchedHaswell.td @@ -16,9 +16,8 @@ def HaswellModel : SchedMachineModel { // All x86 instructions are modeled as a single micro-op, and HW can decode 4 // instructions per cycle. let IssueWidth = 4; - let MinLatency = 0; // 0 = Out-of-order execution. + let MicroOpBufferSize = 192; // Based on the reorder buffer. let LoadLatency = 4; - let ILPWindow = 30; let MispredictPenalty = 16; } @@ -50,6 +49,12 @@ def HWPort15 : ProcResGroup<[HWPort1, HWPort5]>; def HWPort015 : ProcResGroup<[HWPort0, HWPort1, HWPort5]>; def HWPort0156: ProcResGroup<[HWPort0, HWPort1, HWPort5, HWPort6]>; +// 60 Entry Unified Scheduler +def HWPortAny : ProcResGroup<[HWPort0, HWPort1, HWPort2, HWPort3, HWPort4, + HWPort5, HWPort6, HWPort7]> { + let BufferSize=60; +} + // Integer division issued on port 0. def HWDivider : ProcResource<1>; @@ -86,6 +91,7 @@ def : WriteRes<WriteZero, []>; defm : HWWriteResPair<WriteALU, HWPort0156, 1>; defm : HWWriteResPair<WriteIMul, HWPort1, 3>; +def : WriteRes<WriteIMulH, []> { let Latency = 3; } defm : HWWriteResPair<WriteShift, HWPort056, 1>; defm : HWWriteResPair<WriteJump, HWPort5, 1>; diff --git a/lib/Target/X86/X86SchedSandyBridge.td b/lib/Target/X86/X86SchedSandyBridge.td index b36b3ad..52ead94 100644 --- a/lib/Target/X86/X86SchedSandyBridge.td +++ b/lib/Target/X86/X86SchedSandyBridge.td @@ -17,9 +17,8 @@ def SandyBridgeModel : SchedMachineModel { // instructions per cycle. // FIXME: Identify instructions that aren't a single fused micro-op. let IssueWidth = 4; - let MinLatency = 0; // 0 = Out-of-order execution. 
+ let MicroOpBufferSize = 168; // Based on the reorder buffer. let LoadLatency = 4; - let ILPWindow = 20; let MispredictPenalty = 16; } @@ -46,6 +45,11 @@ def SBPort05 : ProcResGroup<[SBPort0, SBPort5]>; def SBPort15 : ProcResGroup<[SBPort1, SBPort5]>; def SBPort015 : ProcResGroup<[SBPort0, SBPort1, SBPort5]>; +// 54 Entry Unified Scheduler +def SBPortAny : ProcResGroup<[SBPort0, SBPort1, SBPort23, SBPort4, SBPort5]> { + let BufferSize=54; +} + // Integer division issued on port 0. def SBDivider : ProcResource<1>; @@ -82,6 +86,7 @@ def : WriteRes<WriteZero, []>; defm : SBWriteResPair<WriteALU, SBPort015, 1>; defm : SBWriteResPair<WriteIMul, SBPort1, 3>; +def : WriteRes<WriteIMulH, []> { let Latency = 3; } defm : SBWriteResPair<WriteShift, SBPort05, 1>; defm : SBWriteResPair<WriteJump, SBPort5, 1>; diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td index 9f2c781..ceb2e05 100644 --- a/lib/Target/X86/X86Schedule.td +++ b/lib/Target/X86/X86Schedule.td @@ -42,6 +42,7 @@ multiclass X86SchedWritePair { // Arithmetic. defm WriteALU : X86SchedWritePair; // Simple integer ALU op. defm WriteIMul : X86SchedWritePair; // Integer multiplication. +def WriteIMulH : SchedWrite; // Integer multiplication, high part. defm WriteIDiv : X86SchedWritePair; // Integer division. def WriteLEA : SchedWrite; // LEA instructions can't fold loads. @@ -550,8 +551,9 @@ def IIC_NOP : InstrItinClass; // Resources beyond the decoder operate on micro-ops and are buffered // so adjacent micro-ops don't directly compete. // -// MinLatency=0 indicates that RAW dependencies can be decoded in the -// same cycle. +// MicroOpBufferSize > 1 indicates that RAW dependencies can be +// decoded in the same cycle. The value 32 is a reasonably arbitrary +// number of in-flight instructions. // // HighLatency=10 is optimistic. X86InstrInfo::isHighLatencyDef // indicates high latency opcodes. Alternatively, InstrItinData @@ -559,17 +561,12 @@ def IIC_NOP : InstrItinClass; // latencies. Since these latencies are not used for pipeline hazards, // they do not need to be exact. // -// ILPWindow=10 is an arbitrary threshold that approximates cycles of -// latency hidden by instruction buffers. The actual value is not very -// important but should be zero for inorder and nonzero for OOO processors. -// // The GenericModel contains no instruction itineraries. def GenericModel : SchedMachineModel { let IssueWidth = 4; - let MinLatency = 0; + let MicroOpBufferSize = 32; let LoadLatency = 4; let HighLatency = 10; - let ILPWindow = 10; } include "X86ScheduleAtom.td" diff --git a/lib/Target/X86/X86ScheduleAtom.td b/lib/Target/X86/X86ScheduleAtom.td index cb0960a..14a1471 100644 --- a/lib/Target/X86/X86ScheduleAtom.td +++ b/lib/Target/X86/X86ScheduleAtom.td @@ -525,11 +525,9 @@ def AtomItineraries : ProcessorItineraries< // Atom machine model. def AtomModel : SchedMachineModel { let IssueWidth = 2; // Allows 2 instructions per scheduling group. - let MinLatency = 1; // InstrStage cycles overrides MinLatency. - // OperandCycles may be used for expected latency. + let MicroOpBufferSize = 0; // In-order execution, always hide latency. let LoadLatency = 3; // Expected cycles, may be overridden by OperandCycles. let HighLatency = 30; // Expected, may be overridden by OperandCycles. - let ILPWindow = 0; // Always try to hide expected latency.
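The models above illustrate the MicroOpBufferSize contract that replaces MinLatency/ILPWindow: 0 means in-order (Atom), so the scheduler must account for every stall, while a large value (168 for Sandy Bridge, 192 for Haswell, both sized to the reorder buffer) means the out-of-order window is assumed to hide RAW latency. A paraphrased sketch of how a scheduling strategy might branch on the field (not the exact MachineScheduler code):

// Paraphrase: a zero-size micro-op buffer means an in-order core, so
// stalls must be modeled precisely; a buffered core is scheduled for
// throughput because the reorder buffer hides latency in the window.
bool scheduleForStalls(const llvm::TargetSchedModel &SchedModel) {
  return SchedModel.getMicroOpBufferSize() == 0;
}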
let Itineraries = AtomItineraries; } diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 74da2a9..fae90f2 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -477,6 +477,9 @@ void X86Subtarget::initializeEnvironment() { HasBMI2 = false; HasRTM = false; HasHLE = false; + HasERI = false; + HasCDI = false; + HasPFI = false; HasADX = false; HasPRFCHW = false; HasRDSEED = false; diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h index 66832b9..8793238 100644 --- a/lib/Target/X86/X86Subtarget.h +++ b/lib/Target/X86/X86Subtarget.h @@ -42,7 +42,7 @@ enum Style { class X86Subtarget : public X86GenSubtargetInfo { protected: enum X86SSEEnum { - NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2 + NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512 }; enum X863DNowEnum { @@ -169,6 +169,15 @@ protected: /// address generation (AG) time. bool LEAUsesAG; + /// Processor has AVX-512 PreFetch Instructions + bool HasPFI; + + /// Processor has AVX-512 Exponential and Reciprocal Instructions + bool HasERI; + + /// Processor has AVX-512 Conflict Detection Instructions + bool HasCDI; + /// stackAlignment - The minimum alignment known to hold of the stack frame on /// entry to the function and which must be maintained by every function. unsigned stackAlignment; @@ -249,6 +258,7 @@ public: bool hasSSE42() const { return X86SSELevel >= SSE42; } bool hasAVX() const { return X86SSELevel >= AVX; } bool hasAVX2() const { return X86SSELevel >= AVX2; } + bool hasAVX512() const { return X86SSELevel >= AVX512; } bool hasFp256() const { return hasAVX(); } bool hasInt256() const { return hasAVX2(); } bool hasSSE4A() const { return HasSSE4A; } @@ -282,6 +292,9 @@ public: bool padShortFunctions() const { return PadShortFunctions; } bool callRegIndirect() const { return CallRegIndirect; } bool LEAusesAG() const { return LEAUsesAG; } + bool hasCDI() const { return HasCDI; } + bool hasPFI() const { return HasPFI; } + bool hasERI() const { return HasERI; } bool isAtom() const { return X86ProcFamily == IntelAtom; } @@ -338,7 +351,13 @@ public: } bool isPICStyleStubAny() const { return PICStyle == PICStyles::StubDynamicNoPIC || - PICStyle == PICStyles::StubPIC; } + PICStyle == PICStyles::StubPIC; + } + + bool isCallingConvWin64(CallingConv::ID CC) const { + return (isTargetWin64() && CC != CallingConv::X86_64_SysV) || + CC == CallingConv::X86_64_Win64; + } /// ClassifyGlobalReference - Classify a global variable reference for the /// current subtarget according to how we should reference it in a non-pcrel diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index 0422a61..49ebd1a 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -132,7 +132,7 @@ void X86TargetMachine::addAnalysisPasses(PassManagerBase &PM) { // Add first the target-independent BasicTTI pass, then our X86 pass. This // allows the X86 pass to delegate to the target independent layer when // appropriate.
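The new X86Subtarget predicates above are the usual feature-gating hooks. A hypothetical use (the helper function is invented for illustration; the predicates are the real ones added in this hunk):

// Hypothetical gate for lowering to the AVX-512 CDI conflict-detection
// instructions (VPCONFLICTD/Q): both the ISA level and the CDI subset
// must be present on the target.
static bool canUseVPCONFLICT(const X86Subtarget &ST) {
  return ST.hasAVX512() && ST.hasCDI();
}

isCallingConvWin64, meanwhile, centralizes the mixed-ABI rule: on a Win64 target every convention except an explicit X86_64_SysV uses the Win64 argument layout, and on any other target only an explicit X86_64_Win64 does.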
- PM.add(createBasicTargetTransformInfoPass(getTargetLowering())); + PM.add(createBasicTargetTransformInfoPass(this)); PM.add(createX86TargetTransformInfoPass(this)); } diff --git a/lib/Target/X86/X86TargetObjectFile.cpp b/lib/Target/X86/X86TargetObjectFile.cpp index 871dacd..a19c5a6 100644 --- a/lib/Target/X86/X86TargetObjectFile.cpp +++ b/lib/Target/X86/X86TargetObjectFile.cpp @@ -47,3 +47,9 @@ X86LinuxTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM) { TargetLoweringObjectFileELF::Initialize(Ctx, TM); InitializeELF(TM.Options.UseInitArray); } + +const MCExpr * +X86LinuxTargetObjectFile::getDebugThreadLocalSymbol( + const MCSymbol *Sym) const { + return MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_DTPOFF, getContext()); +} diff --git a/lib/Target/X86/X86TargetObjectFile.h b/lib/Target/X86/X86TargetObjectFile.h index 9d26d38..79c861d 100644 --- a/lib/Target/X86/X86TargetObjectFile.h +++ b/lib/Target/X86/X86TargetObjectFile.h @@ -36,6 +36,9 @@ namespace llvm { /// and x86-64. class X86LinuxTargetObjectFile : public TargetLoweringObjectFileELF { virtual void Initialize(MCContext &Ctx, const TargetMachine &TM); + + /// \brief Describe a TLS variable address within debug info. + virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const; }; } // end namespace llvm diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp index eba9d78..3bbddad 100644 --- a/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/lib/Target/X86/X86TargetTransformInfo.cpp @@ -33,7 +33,6 @@ void initializeX86TTIPass(PassRegistry &); namespace { class X86TTI : public ImmutablePass, public TargetTransformInfo { - const X86TargetMachine *TM; const X86Subtarget *ST; const X86TargetLowering *TLI; @@ -42,12 +41,12 @@ class X86TTI : public ImmutablePass, public TargetTransformInfo { unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const; public: - X86TTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) { + X86TTI() : ImmutablePass(ID), ST(0), TLI(0) { llvm_unreachable("This pass cannot be directly constructed"); } X86TTI(const X86TargetMachine *TM) - : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()), + : ImmutablePass(ID), ST(TM->getSubtargetImpl()), TLI(TM->getTargetLowering()) { initializeX86TTIPass(*PassRegistry::getPassRegistry()); } @@ -101,6 +100,8 @@ public: unsigned Alignment, unsigned AddressSpace) const; + virtual unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex) const; + /// @} }; @@ -196,6 +197,16 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, { ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized. { ISD::SRA, MVT::v16i16, 16*10 }, // Scalarized. { ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized. + + // Vectorizing division is a bad idea. See the SSE2 table for more comments. + { ISD::SDIV, MVT::v32i8, 32*20 }, + { ISD::SDIV, MVT::v16i16, 16*20 }, + { ISD::SDIV, MVT::v8i32, 8*20 }, + { ISD::SDIV, MVT::v4i64, 4*20 }, + { ISD::UDIV, MVT::v32i8, 32*20 }, + { ISD::UDIV, MVT::v16i16, 16*20 }, + { ISD::UDIV, MVT::v8i32, 8*20 }, + { ISD::UDIV, MVT::v4i64, 4*20 }, }; // Look for AVX2 lowering tricks. @@ -258,6 +269,21 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, { ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized. { ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized. { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized. + + // It is not a good idea to vectorize division. We have to scalarize it and + // in the process we will often end up having to spill regular + // registers.
The overhead of division is going to dominate most kernels + // anyway so try hard to prevent vectorization of division - it is + // generally a bad idea. Assume somewhat arbitrarily that we have to be able + // to hide "20 cycles" for each lane. + { ISD::SDIV, MVT::v16i8, 16*20 }, + { ISD::SDIV, MVT::v8i16, 8*20 }, + { ISD::SDIV, MVT::v4i32, 4*20 }, + { ISD::SDIV, MVT::v2i64, 2*20 }, + { ISD::UDIV, MVT::v16i8, 16*20 }, + { ISD::UDIV, MVT::v8i16, 8*20 }, + { ISD::UDIV, MVT::v4i32, 4*20 }, + { ISD::UDIV, MVT::v2i64, 2*20 }, }; if (ST->hasSSE2()) { @@ -467,19 +493,22 @@ unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, }; if (ST->hasAVX2()) { - int Idx = CostTableLookup<MVT>(AVX2CostTbl, array_lengthof(AVX2CostTbl), ISD, MTy); + int Idx = CostTableLookup<MVT>(AVX2CostTbl, array_lengthof(AVX2CostTbl), + ISD, MTy); if (Idx != -1) return LT.first * AVX2CostTbl[Idx].Cost; } if (ST->hasAVX()) { - int Idx = CostTableLookup<MVT>(AVX1CostTbl, array_lengthof(AVX1CostTbl), ISD, MTy); + int Idx = CostTableLookup<MVT>(AVX1CostTbl, array_lengthof(AVX1CostTbl), + ISD, MTy); if (Idx != -1) return LT.first * AVX1CostTbl[Idx].Cost; } if (ST->hasSSE42()) { - int Idx = CostTableLookup<MVT>(SSE42CostTbl, array_lengthof(SSE42CostTbl), ISD, MTy); + int Idx = CostTableLookup<MVT>(SSE42CostTbl, array_lengthof(SSE42CostTbl), + ISD, MTy); if (Idx != -1) return LT.first * SSE42CostTbl[Idx].Cost; } @@ -511,8 +540,51 @@ unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val, return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index); } +unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert, + bool Extract) const { + assert(Ty->isVectorTy() && "Can only scalarize vectors"); + unsigned Cost = 0; + + for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { + if (Insert) + Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); + if (Extract) + Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i); + } + + return Cost; +} + unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, unsigned AddressSpace) const { + // Handle non-power-of-two vectors such as <3 x float> + if (VectorType *VTy = dyn_cast<VectorType>(Src)) { + unsigned NumElem = VTy->getVectorNumElements(); + + // Handle a few common cases: + // <3 x float> + if (NumElem == 3 && VTy->getScalarSizeInBits() == 32) + // Cost = 64 bit store + extract + 32 bit store. + return 3; + + // <3 x double> + if (NumElem == 3 && VTy->getScalarSizeInBits() == 64) + // Cost = 128 bit store + unpack + 64 bit store. + return 3; + + // Assume that all other non-power-of-two numbers are scalarized. + if (!isPowerOf2_32(NumElem)) { + unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode, + VTy->getScalarType(), + Alignment, + AddressSpace); + unsigned SplitCost = getScalarizationOverhead(Src, + Opcode == Instruction::Load, + Opcode == Instruction::Store); + return NumElem * Cost + SplitCost; + } + } + // Legalize the type. std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src); assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && @@ -528,3 +600,16 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, return Cost; } + +unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const { + // Address computations in vectorized code with non-consecutive addresses will + // likely result in more instructions compared to scalar code where the + // computation can more often be merged into the index mode.
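Reading the division tables above: each entry prices the operation at lanes x 20, so { ISD::SDIV, MVT::v4i32, 4*20 } makes a <4 x i32> division cost 80, far above a single scalar sdiv, which is what steers the vectorizers away from vector division. A sketch of the query a cost-model client would issue (illustrative only; it assumes an existing TargetTransformInfo reference TTI and LLVMContext Ctx, and relies on the default operand-kind arguments):

// Price a v4i32 signed division; with the SSE2 table above this comes
// back as LT.first * 80, well above the scalar alternative.
Type *V4I32 = VectorType::get(Type::getInt32Ty(Ctx), 4);
unsigned VecCost = TTI.getArithmeticInstrCost(Instruction::SDiv, V4I32);

The non-power-of-two path in getMemoryOpCost follows the same spirit: a <3 x float> or <3 x double> store is priced at a flat 3 (two stores plus an extract or unpack), and any other odd element count pays one scalar memory op per lane plus the insert/extract overhead computed by getScalarizationOverhead.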
The resulting + // extra micro-ops can significantly decrease throughput. + unsigned NumVectorInstToHideOverhead = 10; + + if (Ty->isVectorTy() && IsComplex) + return NumVectorInstToHideOverhead; + + return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex); +} diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp index 0f77948..477f75a 100644 --- a/lib/Target/X86/X86VZeroUpper.cpp +++ b/lib/Target/X86/X86VZeroUpper.cpp @@ -105,23 +105,28 @@ FunctionPass *llvm::createX86IssueVZeroUpperPass() { } static bool isYmmReg(unsigned Reg) { - if (Reg >= X86::YMM0 && Reg <= X86::YMM15) - return true; + return (Reg >= X86::YMM0 && Reg <= X86::YMM31); +} - return false; +static bool isZmmReg(unsigned Reg) { + return (Reg >= X86::ZMM0 && Reg <= X86::ZMM31); } static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) { for (MachineRegisterInfo::livein_iterator I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) - if (isYmmReg(I->first)) + if (isYmmReg(I->first) || isZmmReg(I->first)) return true; return false; } static bool clobbersAllYmmRegs(const MachineOperand &MO) { - for (unsigned reg = X86::YMM0; reg < X86::YMM15; ++reg) { + for (unsigned reg = X86::YMM0; reg < X86::YMM31; ++reg) { + if (!MO.clobbersPhysReg(reg)) + return false; + } + for (unsigned reg = X86::ZMM0; reg < X86::ZMM31; ++reg) { if (!MO.clobbersPhysReg(reg)) return false; } diff --git a/lib/Target/XCore/CMakeLists.txt b/lib/Target/XCore/CMakeLists.txt index d5bfddc..85d2a1d 100644 --- a/lib/Target/XCore/CMakeLists.txt +++ b/lib/Target/XCore/CMakeLists.txt @@ -25,7 +25,7 @@ add_llvm_target(XCoreCodeGen XCoreSelectionDAGInfo.cpp ) -add_dependencies(LLVMXCoreCodeGen intrinsics_gen) +add_dependencies(LLVMXCoreCodeGen XCoreCommonTableGen intrinsics_gen) add_subdirectory(Disassembler) add_subdirectory(InstPrinter) diff --git a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp index dcc0955..9c20abd 100644 --- a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp +++ b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp @@ -29,7 +29,7 @@ namespace { /// \brief A disassembler class for XCore. 
class XCoreDisassembler : public MCDisassembler { - const MCRegisterInfo *RegInfo; + OwningPtr<const MCRegisterInfo> RegInfo; public: XCoreDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info) : MCDisassembler(STI), RegInfo(Info) {} @@ -42,7 +42,7 @@ public: raw_ostream &vStream, raw_ostream &cStream) const; - const MCRegisterInfo *getRegInfo() const { return RegInfo; } + const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); } }; } diff --git a/lib/Target/XCore/README.txt b/lib/Target/XCore/README.txt index b69205b..28d551a 100644 --- a/lib/Target/XCore/README.txt +++ b/lib/Target/XCore/README.txt @@ -5,3 +5,4 @@ To-do * Tailcalls * Investigate loop alignment * Add builtins + diff --git a/lib/Target/XCore/XCoreAsmPrinter.cpp b/lib/Target/XCore/XCoreAsmPrinter.cpp index e177ad3..35ba299 100644 --- a/lib/Target/XCore/XCoreAsmPrinter.cpp +++ b/lib/Target/XCore/XCoreAsmPrinter.cpp @@ -49,7 +49,6 @@ namespace { class XCoreAsmPrinter : public AsmPrinter { const XCoreSubtarget &Subtarget; XCoreMCInstLower MCInstLowering; - void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); public: explicit XCoreAsmPrinter(TargetMachine &TM, MCStreamer &Streamer) : AsmPrinter(TM, Streamer), Subtarget(TM.getSubtarget<XCoreSubtarget>()), @@ -76,7 +75,6 @@ namespace { void EmitInstruction(const MachineInstr *MI); void EmitFunctionBodyStart(); void EmitFunctionBodyEnd(); - virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const; }; } // end of anonymous namespace @@ -85,12 +83,15 @@ void XCoreAsmPrinter::emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV) { GV->hasWeakLinkage()) || GV->hasLinkOnceLinkage()) && "Unexpected linkage"); if (ArrayType *ATy = dyn_cast<ArrayType>( - cast<PointerType>(GV->getType())->getElementType())) { - OutStreamer.EmitSymbolAttribute(Sym, MCSA_Global); - // FIXME: MCStreamerize. - OutStreamer.EmitRawText(StringRef(".globound")); - OutStreamer.EmitRawText("\t.set\t" + Twine(Sym->getName())); - OutStreamer.EmitRawText(".globound," + Twine(ATy->getNumElements())); + cast<PointerType>(GV->getType())->getElementType())) { + + MCSymbol *SymGlob = OutContext.GetOrCreateSymbol( + Twine(Sym->getName() + StringRef(".globound"))); + OutStreamer.EmitSymbolAttribute(SymGlob, MCSA_Global); + + OutStreamer.EmitRawText("\t.set\t" + Twine(Sym->getName()) + + ".globound," + Twine(ATy->getNumElements())); + if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()) { // TODO Use COMDAT groups for LinkOnceLinkage OutStreamer.EmitRawText(MAI->getWeakDefDirective() +Twine(Sym->getName())+ @@ -242,45 +243,14 @@ void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum, bool XCoreAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant,const char *ExtraCode, raw_ostream &O) { - // Does this asm operand have a single letter operand modifier? - if (ExtraCode && ExtraCode[0]) - if (ExtraCode[1] != 0) return true; // Unknown modifier. - - switch (ExtraCode[0]) { - default: - // See if this is a generic print operand - return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O); - } - - printOperand(MI, OpNo, O); - return false; -} - -void XCoreAsmPrinter::PrintDebugValueComment(const MachineInstr *MI, - raw_ostream &OS) { - unsigned NOps = MI->getNumOperands(); - assert(NOps == 4); - OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: "; - // cast away const; DIetc do not take const operands for some reason. 
- DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata())); - OS << V.getName(); - OS << " <- "; - // Frame address. Currently handles register +- offset only. - assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm()); - OS << '['; printOperand(MI, 0, OS); OS << '+'; printOperand(MI, 1, OS); - OS << ']'; - OS << "+"; - printOperand(MI, NOps-2, OS); -} + // Print the operand if there is no operand modifier. + if (!ExtraCode || !ExtraCode[0]) { + printOperand(MI, OpNo, O); + return false; + } -MachineLocation XCoreAsmPrinter:: -getDebugValueLocation(const MachineInstr *MI) const { - // Handles frame addresses emitted in XCoreInstrInfo::emitFrameIndexDebugValue. - assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!"); - assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() && - "Unexpected MachineOperand types"); - return MachineLocation(MI->getOperand(0).getReg(), - MI->getOperand(1).getImm()); + // Otherwise fallback on the default implementation. + return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O); } void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) { @@ -288,15 +258,8 @@ void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) { raw_svector_ostream O(Str); switch (MI->getOpcode()) { - case XCore::DBG_VALUE: { - if (isVerbose() && OutStreamer.hasRawTextSupport()) { - SmallString<128> TmpStr; - raw_svector_ostream OS(TmpStr); - PrintDebugValueComment(MI, OS); - OutStreamer.EmitRawText(StringRef(OS.str())); - } - return; - } + case XCore::DBG_VALUE: + llvm_unreachable("Should be handled target independently"); case XCore::ADD_2rus: if (MI->getOperand(2).getImm() == 0) { O << "\tmov " diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp index 736a4ef..b57cf9d 100644 --- a/lib/Target/XCore/XCoreFrameLowering.cpp +++ b/lib/Target/XCore/XCoreFrameLowering.cpp @@ -223,7 +223,9 @@ void XCoreFrameLowering::emitEpilogue(MachineFunction &MF, assert(MBBI->getOpcode() == XCore::RETSP_u6 || MBBI->getOpcode() == XCore::RETSP_lu6); int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6; - BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize); + MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize); + for (unsigned i = 3, e = MBBI->getNumOperands(); i < e; ++i) + MIB->addOperand(MBBI->getOperand(i)); // copy any variadic operands MBB.erase(MBBI); } else { int Opcode = (isU6) ? 
XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6; diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp index ee183aa..e28f84f 100644 --- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp +++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp @@ -37,13 +37,11 @@ using namespace llvm; /// namespace { class XCoreDAGToDAGISel : public SelectionDAGISel { - const XCoreTargetLowering &Lowering; const XCoreSubtarget &Subtarget; public: XCoreDAGToDAGISel(XCoreTargetMachine &TM, CodeGenOpt::Level OptLevel) : SelectionDAGISel(TM, OptLevel), - Lowering(*TM.getTargetLowering()), Subtarget(*TM.getSubtargetImpl()) { } SDNode *Select(SDNode *N); @@ -117,7 +115,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) { if (immMskBitp(N)) { // Transformation function: get the size of a mask // Look for the first non-zero bit - SDValue MskSize = getI32Imm(32 - countLeadingZeros(Val)); + SDValue MskSize = getI32Imm(32 - countLeadingZeros((uint32_t)Val)); return CurDAG->getMachineNode(XCore::MKMSK_rus, dl, MVT::i32, MskSize); } @@ -125,7 +123,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) { SDValue CPIdx = CurDAG->getTargetConstantPool(ConstantInt::get( Type::getInt32Ty(*CurDAG->getContext()), Val), - TLI->getPointerTy()); + getTargetLowering()->getPointerTy()); SDNode *node = CurDAG->getMachineNode(XCore::LDWCP_lru6, dl, MVT::i32, MVT::Other, CPIdx, CurDAG->getEntryNode()); diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index 7b89b1a..6fc7eef5 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -707,24 +707,26 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const SDValue XCoreTargetLowering:: LowerVAARG(SDValue Op, SelectionDAG &DAG) const { - llvm_unreachable("unimplemented"); - // FIXME Arguments passed by reference need a extra dereference. + // Whilst LLVM does not support aggregate varargs, we can ignore + // the possibility of the ValueType being an implicit byVal vararg.
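The rewritten LowerVAARG body that follows implements the textbook va_arg sequence. As a plain C++ sketch of the four operations it builds as DAG nodes (illustrative only, assuming a byte-addressed va_list and a 4-byte argument):

// load pointer -> bump pointer -> store pointer back -> load value
int va_arg_i32(char *&VAList) {
  char *P = VAList;                    // load the current vararg pointer
  VAList = P + sizeof(int);            // store the incremented pointer
  return *reinterpret_cast<int *>(P);  // load the actual argument
}

Note that the increment is VT.getSizeInBits() / 8 bytes; the previous implementation was an unreachable stub.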
SDNode *Node = Op.getNode(); + EVT VT = Node->getValueType(0); // not an aggregate + SDValue InChain = Node->getOperand(0); + SDValue VAListPtr = Node->getOperand(1); + EVT PtrVT = VAListPtr.getValueType(); + const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); SDLoc dl(Node); - const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); - EVT VT = Node->getValueType(0); - SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0), - Node->getOperand(1), MachinePointerInfo(V), + SDValue VAList = DAG.getLoad(PtrVT, dl, InChain, + VAListPtr, MachinePointerInfo(SV), false, false, false, 0); // Increment the pointer, VAList, to the next vararg - SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList, - DAG.getConstant(VT.getSizeInBits(), - getPointerTy())); + SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList, + DAG.getIntPtrConstant(VT.getSizeInBits() / 8)); // Store the incremented VAList to the legalized pointer - Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1), - MachinePointerInfo(V), false, false, 0); + InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr, + MachinePointerInfo(SV), false, false, 0); // Load the actual argument out of the pointer VAList - return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(), + return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(), false, false, false, 0); } @@ -847,10 +849,10 @@ SDValue XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - SDLoc &dl = CLI.DL; - SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDLoc &dl = CLI.DL; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; bool &isTailCall = CLI.IsTailCall; @@ -1031,6 +1033,10 @@ XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, // Formal Arguments Calling Convention Implementation //===----------------------------------------------------------------------===// +namespace { + struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; }; +} + /// XCore formal arguments implementation SDValue XCoreTargetLowering::LowerFormalArguments(SDValue Chain, @@ -1080,9 +1086,22 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain, unsigned LRSaveSize = StackSlotSize; + // All getCopyFromReg ops must precede any getMemcpys to prevent the + // scheduler clobbering a register before it has been copied. + // The stages are: + // 1. CopyFromReg (and load) arg & vararg registers. + // 2. Chain CopyFromReg nodes into a TokenFactor. + // 3. Memcpy 'byVal' args & push final InVals. + // 4. Chain mem ops nodes into a TokenFactor. + SmallVector<SDValue, 4> CFRegNode; + SmallVector<ArgDataPair, 4> ArgData; + SmallVector<SDValue, 4> MemOps; + + // 1a. CopyFromReg (and load) arg registers. 
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; + SDValue ArgIn; if (VA.isRegLoc()) { // Arguments passed in registers @@ -1099,7 +1118,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain, case MVT::i32: unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); - InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); + ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT); + CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1)); } } else { // sanity check @@ -1119,14 +1139,17 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain, // Create the SelectionDAG nodes corresponding to a load // from this parameter SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); - InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, - MachinePointerInfo::getFixedStack(FI), - false, false, false, 0)); + ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, + MachinePointerInfo::getFixedStack(FI), + false, false, false, 0); } + const ArgDataPair ADP = { ArgIn, Ins[i].Flags }; + ArgData.push_back(ADP); } + // 1b. CopyFromReg vararg registers. if (isVarArg) { - /* Argument registers */ + // Argument registers static const uint16_t ArgRegs[] = { XCore::R0, XCore::R1, XCore::R2, XCore::R3 }; @@ -1134,7 +1157,6 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain, unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs, array_lengthof(ArgRegs)); if (FirstVAReg < array_lengthof(ArgRegs)) { - SmallVector<SDValue, 4> MemOps; int offset = 0; // Save remaining registers, storing higher register numbers at a higher // address @@ -1150,14 +1172,12 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain, unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass); RegInfo.addLiveIn(ArgRegs[i], VReg); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); + CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1)); // Move argument from virt reg -> stack SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo(), false, false, 0); MemOps.push_back(Store); } - if (!MemOps.empty()) - Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &MemOps[0], MemOps.size()); } else { // This will point to the next argument passed via stack. XFI->setVarArgsFrameIndex( @@ -1166,6 +1186,42 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain, } } + // 2. Chain CopyFromReg nodes into a TokenFactor. + if (!CFRegNode.empty()) + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &CFRegNode[0], + CFRegNode.size()); + + // 3. Memcpy 'byVal' args & push final InVals. + // Aggregates passed "byVal" need to be copied by the callee. + // The callee will use a pointer to this copy, rather than the original + // pointer. + for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(), + ArgDE = ArgData.end(); + ArgDI != ArgDE; ++ArgDI) { + if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) { + unsigned Size = ArgDI->Flags.getByValSize(); + unsigned Align = ArgDI->Flags.getByValAlign(); + // Create a new object on the stack and copy the pointee into it. + int FI = MFI->CreateStackObject(Size, Align, false, false); + SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); + InVals.push_back(FIN); + MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV, + DAG.getConstant(Size, MVT::i32), + Align, false, false, + MachinePointerInfo(), + MachinePointerInfo())); + } else { + InVals.push_back(ArgDI->SDV); + } + } + + // 4. Chain mem ops nodes into a TokenFactor.
+ if (!MemOps.empty()) { + MemOps.push_back(Chain); + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0], + MemOps.size()); + } + return Chain; } @@ -1582,7 +1638,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM, std::pair<unsigned, const TargetRegisterClass*> XCoreTargetLowering:: getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const { + MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { default : break; diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h index f765f02..7761b7c 100644 --- a/lib/Target/XCore/XCoreISelLowering.h +++ b/lib/Target/XCore/XCoreISelLowering.h @@ -158,7 +158,7 @@ namespace llvm { // Inline asm support std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - EVT VT) const; + MVT VT) const; // Expand specifics SDValue TryExpandADDWithMul(SDNode *Op, SelectionDAG &DAG) const; diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp index eb7a936..d6b8c2d 100644 --- a/lib/Target/XCore/XCoreInstrInfo.cpp +++ b/lib/Target/XCore/XCoreInstrInfo.cpp @@ -386,15 +386,6 @@ void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, .addImm(0); } -MachineInstr* -XCoreInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx, - uint64_t Offset, const MDNode *MDPtr, - DebugLoc DL) const { - MachineInstrBuilder MIB = BuildMI(MF, DL, get(XCore::DBG_VALUE)) - .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr); - return &*MIB; -} - /// ReverseBranchCondition - Return the inverse opcode of the /// specified Branch instruction. bool XCoreInstrInfo:: diff --git a/lib/Target/XCore/XCoreInstrInfo.h b/lib/Target/XCore/XCoreInstrInfo.h index 42eeed8..51d66a1 100644 --- a/lib/Target/XCore/XCoreInstrInfo.h +++ b/lib/Target/XCore/XCoreInstrInfo.h @@ -78,12 +78,6 @@ public: const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const; - virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, - int FrameIx, - uint64_t Offset, - const MDNode *MDPtr, - DebugLoc DL) const; - virtual bool ReverseBranchCondition( SmallVectorImpl<MachineOperand> &Cond) const; }; diff --git a/lib/Target/XCore/XCoreInstrInfo.td b/lib/Target/XCore/XCoreInstrInfo.td index e06419a..81fa84d 100644 --- a/lib/Target/XCore/XCoreInstrInfo.td +++ b/lib/Target/XCore/XCoreInstrInfo.td @@ -84,7 +84,7 @@ def msksize_xform : SDNodeXForm<imm, [{ // Transformation function: get the size of a mask assert(isMask_32(N->getZExtValue())); // look for the first non-zero bit - return getI32Imm(32 - countLeadingZeros(N->getZExtValue())); + return getI32Imm(32 - countLeadingZeros((uint32_t)N->getZExtValue())); }]>; def neg_xform : SDNodeXForm<imm, [{ @@ -279,12 +279,6 @@ multiclass FRU6_LRU6_backwards_branch<bits<6> opc, string OpcStr> { !strconcat(OpcStr, " $a, $b"), []>; } -multiclass FRU6_LRU6_cp<bits<6> opc, string OpcStr> { - def _ru6: _FRU6<opc, (outs RRegs:$a), (ins i32imm:$b), - !strconcat(OpcStr, " $a, cp[$b]"), []>; - def _lru6: _FLRU6<opc, (outs RRegs:$a), (ins i32imm:$b), - !strconcat(OpcStr, " $a, cp[$b]"), []>; -} // U6 multiclass FU6_LU6<bits<10> opc, string OpcStr, SDNode OpNode> { @@ -539,8 +533,13 @@ def STWDP_lru6 : _FLRU6<0b010100, (outs), (ins RRegs:$a, i32imm:$b), [(store RRegs:$a, (dprelwrapper tglobaladdr:$b))]>; //let Uses = [CP] in .. 
-let mayLoad = 1, isReMaterializable = 1, neverHasSideEffects = 1 in -defm LDWCP : FRU6_LRU6_cp<0b011011, "ldw">; +let mayLoad = 1, isReMaterializable = 1, neverHasSideEffects = 1 in { +def LDWCP_ru6 : _FRU6<0b011011, (outs RRegs:$a), (ins i32imm:$b), + "ldw $a, cp[$b]", []>; +def LDWCP_lru6: _FLRU6<0b011011, (outs RRegs:$a), (ins i32imm:$b), + "ldw $a, cp[$b]", + [(set RRegs:$a, (load (cprelwrapper tglobaladdr:$b)))]>; +} let Uses = [SP] in { let mayStore=1 in {
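Splitting LDWCP out of the now-deleted FRU6_LRU6_cp multiclass lets the long form carry its own selection pattern. For reference, this is the same node the instruction selector builds by hand for out-of-range immediates in the XCoreISelDAGToDAG.cpp hunk earlier in this diff:

// From XCoreDAGToDAGISel::Select above: an explicit LDWCP_lru6
// constant-pool load, now also reachable via the pattern on the
// definition added here.
SDValue CPIdx = CurDAG->getTargetConstantPool(
    ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
    getTargetLowering()->getPointerTy());
SDNode *node = CurDAG->getMachineNode(XCore::LDWCP_lru6, dl, MVT::i32,
                                      MVT::Other, CPIdx,
                                      CurDAG->getEntryNode());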