author     Stephen Hines <srhines@google.com>   2013-06-12 13:32:42 -0700
committer  Stephen Hines <srhines@google.com>   2013-06-12 13:32:42 -0700
commit     1878f9a7874b1ff569d745c0269f49d3daf7203d
tree       19a8dbaaedf6a056c617e87596b32d3f452af137 /lib/Target/X86
parent     7a57f27b857ec4b243d83d392a399f02fc196c0a
parent     100fbdd06be7590b23c4707a98cd605bdb519498
Merge commit '100fbdd06be7590b23c4707a98cd605bdb519498' into merge_20130612
Diffstat (limited to 'lib/Target/X86')
37 files changed, 1507 insertions, 996 deletions
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index fef5cfe..263eb5e 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -219,7 +219,9 @@ private:
     const MCExpr *getSym() { return Sym; }
     StringRef getSymName() { return SymName; }
     int64_t getImm() { return Imm + IC.execute(); }
-    bool isValidEndState() { return State == IES_RBRAC; }
+    bool isValidEndState() {
+      return State == IES_RBRAC || State == IES_INTEGER;
+    }
     bool getStopOnLBrac() { return StopOnLBrac; }
     bool getAddImmPrefix() { return AddImmPrefix; }
     bool hadError() { return State == IES_ERROR; }
@@ -477,7 +479,7 @@ private:
   MCAsmLexer &getLexer() const { return Parser.getLexer(); }

   bool Error(SMLoc L, const Twine &Msg,
-             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>(),
+             ArrayRef<SMRange> Ranges = None,
              bool MatchingInlineAsm = false) {
     if (MatchingInlineAsm) return true;
     return Parser.Error(L, Msg, Ranges);
   }
@@ -500,7 +502,8 @@ private:
   X86Operand *ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
                                        int64_t ImmDisp, unsigned Size);
   X86Operand *ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier,
-                                   InlineAsmIdentifierInfo &Info, SMLoc &End);
+                                   InlineAsmIdentifierInfo &Info,
+                                   bool IsUnevaluatedOperand, SMLoc &End);

   X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
@@ -1193,6 +1196,7 @@ RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites,
       }
     }
     assert (Found && "Unable to rewrite ImmDisp.");
+    (void)Found;
   } else {
     // We have a symbolic and an immediate displacement, but no displacement
     // before the bracketed expression. Put the immediate displacement
@@ -1267,7 +1271,8 @@ X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
         return ErrorOperand(Tok.getLoc(), "Unexpected identifier!");
       } else {
         InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
-        if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End))
+        if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info,
+                                                   /*Unevaluated*/ false, End))
           return Err;
       }
       SM.onIdentifierExpr(Val, Identifier);
@@ -1367,27 +1372,26 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
 X86Operand *X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
                                                StringRef &Identifier,
                                                InlineAsmIdentifierInfo &Info,
+                                               bool IsUnevaluatedOperand,
                                                SMLoc &End) {
   assert (isParsingInlineAsm() && "Expected to be parsing inline assembly.");
   Val = 0;

   StringRef LineBuf(Identifier.data());
-  SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info);
-  unsigned BufLen = LineBuf.size();
-  assert (BufLen && "Expected a non-zero length identifier.");
+  SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);

-  // Advance the token stream based on what the frontend parsed.
   const AsmToken &Tok = Parser.getTok();
-  AsmToken IdentEnd = Tok;
-  while (BufLen > 0) {
-    IdentEnd = Tok;
-    BufLen -= Tok.getString().size();
-    getLexer().Lex(); // Consume the token.
+
+  // Advance the token stream until the end of the current token is
+  // after the end of what the frontend claimed.
+  const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size();
+  while (true) {
+    End = Tok.getEndLoc();
+    getLexer().Lex();
+
+    assert(End.getPointer() <= EndPtr && "frontend claimed part of a token?");
+    if (End.getPointer() == EndPtr) break;
   }
-  if (BufLen != 0)
-    return ErrorOperand(IdentEnd.getLoc(),
-                        "Frontend parser mismatch with asm lexer!");
-  End = IdentEnd.getEndLoc();

   // Create the symbol reference.
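The new loop above replaces token-length bookkeeping with raw buffer positions: an MS inline-asm identifier such as Foo.Bar arrives from the frontend as one string but comes out of the asm lexer as three tokens (Foo, ., Bar), and the loop simply eats tokens until one ends exactly where the frontend said the identifier ends. A self-contained analogue of that invariant, assuming contiguous tokens over a shared buffer (all names hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Eat tokens until one ends exactly at Start + IdentLen, mirroring the
    // EndPtr comparison in ParseIntelIdentifier above.
    const char *consumeIdentifier(const std::vector<std::string> &Toks,
                                  const char *Start, std::size_t IdentLen) {
      const char *EndPtr = Start + IdentLen;
      const char *Pos = Start;
      for (const std::string &Tok : Toks) {
        Pos += Tok.size();                   // analogue of Tok.getEndLoc()
        assert(Pos <= EndPtr && "frontend claimed part of a token?");
        if (Pos == EndPtr)
          break;                             // lexer and frontend agree
      }
      return Pos;
    }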
Identifier = LineBuf; @@ -1447,7 +1451,8 @@ X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, InlineAsmIdentifierInfo Info; StringRef Identifier = Tok.getString(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End)) + if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated*/ false, End)) return Err; return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0, /*Scale=*/1, Start, End, Size, Identifier, Info); @@ -1506,7 +1511,8 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() { InlineAsmIdentifierInfo Info; SMLoc Start = Tok.getLoc(), End; StringRef Identifier = Tok.getString(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End)) + if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated*/ false, End)) return Err; // Don't emit the offset operator. @@ -1541,7 +1547,8 @@ X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) { InlineAsmIdentifierInfo Info; SMLoc Start = Tok.getLoc(), End; StringRef Identifier = Tok.getString(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End)) + if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated*/ true, End)) return Err; unsigned CVal = 0; @@ -2200,7 +2207,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, assert(!Operands.empty() && "Unexpect empty operand list!"); X86Operand *Op = static_cast<X86Operand*>(Operands[0]); assert(Op->isToken() && "Leading operand should always be a mnemonic!"); - ArrayRef<SMRange> EmptyRanges = ArrayRef<SMRange>(); + ArrayRef<SMRange> EmptyRanges = None; // First, handle aliases that expand to multiple instructions. // FIXME: This should be replaced with a real .td file alias mechanism. @@ -2302,25 +2309,25 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, unsigned Match1, Match2, Match3, Match4; Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, - isParsingIntelSyntax()); + MatchingInlineAsm, isParsingIntelSyntax()); // If this returned as a missing feature failure, remember that. if (Match1 == Match_MissingFeature) ErrorInfoMissingFeature = ErrorInfoIgnore; Tmp[Base.size()] = Suffixes[1]; Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, - isParsingIntelSyntax()); + MatchingInlineAsm, isParsingIntelSyntax()); // If this returned as a missing feature failure, remember that. if (Match2 == Match_MissingFeature) ErrorInfoMissingFeature = ErrorInfoIgnore; Tmp[Base.size()] = Suffixes[2]; Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, - isParsingIntelSyntax()); + MatchingInlineAsm, isParsingIntelSyntax()); // If this returned as a missing feature failure, remember that. if (Match3 == Match_MissingFeature) ErrorInfoMissingFeature = ErrorInfoIgnore; Tmp[Base.size()] = Suffixes[3]; Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, - isParsingIntelSyntax()); + MatchingInlineAsm, isParsingIntelSyntax()); // If this returned as a missing feature failure, remember that. 
if (Match4 == Match_MissingFeature) ErrorInfoMissingFeature = ErrorInfoIgnore; diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp index ca6f80c..ca71c4f 100644 --- a/lib/Target/X86/Disassembler/X86Disassembler.cpp +++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp @@ -190,94 +190,8 @@ static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch, uint64_t Address, uint64_t Offset, uint64_t Width, MCInst &MI, const MCDisassembler *Dis) { - LLVMOpInfoCallback getOpInfo = Dis->getLLVMOpInfoCallback(); - struct LLVMOpInfo1 SymbolicOp; - memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1)); - SymbolicOp.Value = Value; - void *DisInfo = Dis->getDisInfoBlock(); - - if (!getOpInfo || - !getOpInfo(DisInfo, Address, Offset, Width, 1, &SymbolicOp)) { - // Clear SymbolicOp.Value from above and also all other fields. - memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1)); - LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback(); - if (!SymbolLookUp) - return false; - uint64_t ReferenceType; - if (isBranch) - ReferenceType = LLVMDisassembler_ReferenceType_In_Branch; - else - ReferenceType = LLVMDisassembler_ReferenceType_InOut_None; - const char *ReferenceName; - const char *Name = SymbolLookUp(DisInfo, Value, &ReferenceType, Address, - &ReferenceName); - if (Name) { - SymbolicOp.AddSymbol.Name = Name; - SymbolicOp.AddSymbol.Present = true; - } - // For branches always create an MCExpr so it gets printed as hex address. - else if (isBranch) { - SymbolicOp.Value = Value; - } - if(ReferenceType == LLVMDisassembler_ReferenceType_Out_SymbolStub) - (*Dis->CommentStream) << "symbol stub for: " << ReferenceName; - if (!Name && !isBranch) - return false; - } - - MCContext *Ctx = Dis->getMCContext(); - const MCExpr *Add = NULL; - if (SymbolicOp.AddSymbol.Present) { - if (SymbolicOp.AddSymbol.Name) { - StringRef Name(SymbolicOp.AddSymbol.Name); - MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name); - Add = MCSymbolRefExpr::Create(Sym, *Ctx); - } else { - Add = MCConstantExpr::Create((int)SymbolicOp.AddSymbol.Value, *Ctx); - } - } - - const MCExpr *Sub = NULL; - if (SymbolicOp.SubtractSymbol.Present) { - if (SymbolicOp.SubtractSymbol.Name) { - StringRef Name(SymbolicOp.SubtractSymbol.Name); - MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name); - Sub = MCSymbolRefExpr::Create(Sym, *Ctx); - } else { - Sub = MCConstantExpr::Create((int)SymbolicOp.SubtractSymbol.Value, *Ctx); - } - } - - const MCExpr *Off = NULL; - if (SymbolicOp.Value != 0) - Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx); - - const MCExpr *Expr; - if (Sub) { - const MCExpr *LHS; - if (Add) - LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx); - else - LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx); - if (Off != 0) - Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx); - else - Expr = LHS; - } else if (Add) { - if (Off != 0) - Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx); - else - Expr = Add; - } else { - if (Off != 0) - Expr = Off; - else - Expr = MCConstantExpr::Create(0, *Ctx); - } - - MI.addOperand(MCOperand::CreateExpr(Expr)); - - return true; + return Dis->tryAddingSymbolicOperand(MI, Value, Address, isBranch, + Offset, Width); } /// tryAddingPcLoadReferenceComment - trys to add a comment as to what is being @@ -290,15 +204,7 @@ static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch, static void tryAddingPcLoadReferenceComment(uint64_t Address, uint64_t Value, const void *Decoder) { const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder); - 
LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback(); - if (SymbolLookUp) { - void *DisInfo = Dis->getDisInfoBlock(); - uint64_t ReferenceType = LLVMDisassembler_ReferenceType_In_PCrel_Load; - const char *ReferenceName; - (void)SymbolLookUp(DisInfo, Value, &ReferenceType, Address, &ReferenceName); - if(ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr) - (*Dis->CommentStream) << "literal pool for: " << ReferenceName; - } + Dis->tryAddingPcLoadReferenceComment(Value, Address); } /// translateImmediate - Appends an immediate operand to an MCInst. diff --git a/lib/Target/X86/MCTargetDesc/CMakeLists.txt b/lib/Target/X86/MCTargetDesc/CMakeLists.txt index 1c240e5..2eb5f25 100644 --- a/lib/Target/X86/MCTargetDesc/CMakeLists.txt +++ b/lib/Target/X86/MCTargetDesc/CMakeLists.txt @@ -6,6 +6,8 @@ add_llvm_library(LLVMX86Desc X86MachObjectWriter.cpp X86ELFObjectWriter.cpp X86WinCOFFObjectWriter.cpp + X86MachORelocationInfo.cpp + X86ELFRelocationInfo.cpp ) add_dependencies(LLVMX86Desc X86CommonTableGen) diff --git a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp new file mode 100644 index 0000000..8f4ab46 --- /dev/null +++ b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp @@ -0,0 +1,135 @@ +//===-- X86ELFRelocationInfo.cpp ----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/X86MCTargetDesc.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCRelocationInfo.h" +#include "llvm/Object/ELF.h" +#include "llvm/Support/ELF.h" + +using namespace llvm; +using namespace object; +using namespace ELF; + +namespace { +class X86_64ELFRelocationInfo : public MCRelocationInfo { +public: + X86_64ELFRelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {} + + const MCExpr *createExprForRelocation(RelocationRef Rel) { + uint64_t RelType; Rel.getType(RelType); + symbol_iterator SymI = Rel.getSymbol(); + + StringRef SymName; SymI->getName(SymName); + uint64_t SymAddr; SymI->getAddress(SymAddr); + uint64_t SymSize; SymI->getSize(SymSize); + int64_t Addend; getELFRelocationAddend(Rel, Addend); + + MCSymbol *Sym = Ctx.GetOrCreateSymbol(SymName); + // FIXME: check that the value is actually the same. + if (Sym->isVariable() == false) + Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx)); + + const MCExpr *Expr = 0; + // If hasAddend is true, then we need to add Addend (r_addend) to Expr. + bool hasAddend = false; + + // The AMD64 SysV ABI says: + // A: the addend used to compute the value of the relocatable field. + // B: the base address at which a shared object has been loaded into memory + // during execution. Generally, a shared object is built with a 0 base + // virtual address, but the execution address will be different. + // G: the offset into the global offset table at which the relocation + // entry's symbol will reside during execution. + // GOT: the address of the global offset table. + // L: the place (section offset or address) of the Procedure Linkage Table + // entry for a symbol. + // P: the place (section offset or address) of the storage unit being + // relocated (computed using r_offset). 
+ // S: the value of the symbol whose index resides in the relocation entry. + // Z: the size of the symbol whose index resides in the relocation entry. + + switch(RelType) { + case R_X86_64_NONE: + case R_X86_64_COPY: + // none + break; + case R_X86_64_64: + case R_X86_64_16: + case R_X86_64_8: + // S + A + case R_X86_64_32: + case R_X86_64_32S: + // S + A (We don't care about the result not fitting in 32 bits.) + case R_X86_64_PC32: + case R_X86_64_PC16: + case R_X86_64_PC8: + case R_X86_64_PC64: + // S + A - P (P/pcrel is implicit) + hasAddend = true; + Expr = MCSymbolRefExpr::Create(Sym, Ctx); + break; + case R_X86_64_GOT32: + case R_X86_64_GOT64: + case R_X86_64_GOTPC32: + case R_X86_64_GOTPC64: + case R_X86_64_GOTPLT64: + // G + A + hasAddend = true; + Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOT, Ctx); + break; + case R_X86_64_PLT32: + // L + A - P -> S@PLT + A + hasAddend = true; + Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_PLT, Ctx); + break; + case R_X86_64_GLOB_DAT: + case R_X86_64_JUMP_SLOT: + // S + Expr = MCSymbolRefExpr::Create(Sym, Ctx); + break; + case R_X86_64_GOTPCREL: + case R_X86_64_GOTPCREL64: + // G + GOT + A - P -> S@GOTPCREL + A + hasAddend = true; + Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx); + break; + case R_X86_64_GOTOFF64: + // S + A - GOT + Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTOFF, Ctx); + break; + case R_X86_64_PLTOFF64: + // L + A - GOT + break; + case R_X86_64_SIZE32: + case R_X86_64_SIZE64: + // Z + A + Expr = MCConstantExpr::Create(SymSize, Ctx); + break; + default: + Expr = MCSymbolRefExpr::Create(Sym, Ctx); + break; + } + if (Expr && hasAddend && Addend != 0) + Expr = MCBinaryExpr::CreateAdd(Expr, + MCConstantExpr::Create(Addend, Ctx), + Ctx); + return Expr; + } +}; +} // End unnamed namespace + +/// createX86ELFRelocationInfo - Construct an X86 Mach-O RelocationInfo. +MCRelocationInfo *llvm::createX86_64ELFRelocationInfo(MCContext &Ctx) { + // We only handle x86-64 for now. + return new X86_64ELFRelocationInfo(Ctx); +} diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp index 5e84530..bd23ce4 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp @@ -263,7 +263,7 @@ static MCRegisterInfo *createX86MCRegisterInfo(StringRef TT) { return X; } -static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) { +static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { Triple TheTriple(TT); bool is64Bit = TheTriple.getArch() == Triple::x86_64; @@ -290,14 +290,16 @@ static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) { int stackGrowth = is64Bit ? -8 : -4; // Initial state of the frame pointer is esp+stackGrowth. - MachineLocation Dst(MachineLocation::VirtualFP); - MachineLocation Src(is64Bit ? X86::RSP : X86::ESP, stackGrowth); - MAI->addInitialFrameState(0, Dst, Src); + unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP; + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa( + 0, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth); + MAI->addInitialFrameState(Inst); // Add return address to move list - MachineLocation CSDst(is64Bit ? X86::RSP : X86::ESP, stackGrowth); - MachineLocation CSSrc(is64Bit ? X86::RIP : X86::EIP); - MAI->addInitialFrameState(0, CSDst, CSSrc); + unsigned InstPtr = is64Bit ? 
X86::RIP : X86::EIP; + MCCFIInstruction Inst2 = MCCFIInstruction::createOffset( + 0, MRI.getDwarfRegNum(InstPtr, true), stackGrowth); + MAI->addInitialFrameState(Inst2); return MAI; } @@ -382,6 +384,17 @@ static MCInstPrinter *createX86MCInstPrinter(const Target &T, return 0; } +static MCRelocationInfo *createX86MCRelocationInfo(StringRef TT, + MCContext &Ctx) { + Triple TheTriple(TT); + if (TheTriple.isEnvironmentMachO() && TheTriple.getArch() == Triple::x86_64) + return createX86_64MachORelocationInfo(Ctx); + else if (TheTriple.isOSBinFormatELF()) + return createX86_64ELFRelocationInfo(Ctx); + // Default to the stock relocation info. + return llvm::createMCRelocationInfo(TT, Ctx); +} + static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) { return new MCInstrAnalysis(Info); } @@ -439,4 +452,10 @@ extern "C" void LLVMInitializeX86TargetMC() { createX86MCInstPrinter); TargetRegistry::RegisterMCInstPrinter(TheX86_64Target, createX86MCInstPrinter); + + // Register the MC relocation info. + TargetRegistry::RegisterMCRelocationInfo(TheX86_32Target, + createX86MCRelocationInfo); + TargetRegistry::RegisterMCRelocationInfo(TheX86_64Target, + createX86MCRelocationInfo); } diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h index 981aa1a..2f459b4 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h +++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h @@ -25,6 +25,7 @@ class MCInstrInfo; class MCObjectWriter; class MCRegisterInfo; class MCSubtargetInfo; +class MCRelocationInfo; class Target; class StringRef; class raw_ostream; @@ -94,6 +95,12 @@ MCObjectWriter *createX86ELFObjectWriter(raw_ostream &OS, uint16_t EMachine); /// createX86WinCOFFObjectWriter - Construct an X86 Win COFF object writer. MCObjectWriter *createX86WinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit); + +/// createX86_64MachORelocationInfo - Construct X86-64 Mach-O relocation info. +MCRelocationInfo *createX86_64MachORelocationInfo(MCContext &Ctx); + +/// createX86_64ELFORelocationInfo - Construct X86-64 ELF relocation info. +MCRelocationInfo *createX86_64ELFRelocationInfo(MCContext &Ctx); } // End llvm namespace diff --git a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp new file mode 100644 index 0000000..75b5acf --- /dev/null +++ b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp @@ -0,0 +1,116 @@ +//===-- X86MachORelocationInfo.cpp ----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
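To make the S/A/P legend from the ELF relocation switch above concrete: for a call to foo, the assembler leaves a 4-byte R_X86_64_PC32 field whose final value is S + A - P. A worked instance with invented addresses:

    #include <cstdint>

    // S = symbol value, P = place of the 4-byte field, and A = -4 because
    // the CPU measures the displacement from the end of the field (the
    // next instruction), not from the field itself.
    constexpr std::uint64_t S = 0x401200; // address of foo
    constexpr std::uint64_t P = 0x401034; // address of the relocated field
    constexpr std::int64_t  A = -4;       // r_addend
    static_assert(S + A - P == 0x1c8, "PC-relative displacement patched in");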
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/X86MCTargetDesc.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCRelocationInfo.h" +#include "llvm/Object/MachO.h" + +using namespace llvm; +using namespace object; +using namespace macho; + +namespace { +class X86_64MachORelocationInfo : public MCRelocationInfo { +public: + X86_64MachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {} + + const MCExpr *createExprForRelocation(RelocationRef Rel) { + const MachOObjectFile *Obj = cast<MachOObjectFile>(Rel.getObjectFile()); + + uint64_t RelType; Rel.getType(RelType); + symbol_iterator SymI = Rel.getSymbol(); + + StringRef SymName; SymI->getName(SymName); + uint64_t SymAddr; SymI->getAddress(SymAddr); + + RelocationEntry RE = Obj->getRelocation(Rel.getRawDataRefImpl()); + bool isPCRel = Obj->getAnyRelocationPCRel(RE); + + MCSymbol *Sym = Ctx.GetOrCreateSymbol(SymName); + // FIXME: check that the value is actually the same. + if (Sym->isVariable() == false) + Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx)); + const MCExpr *Expr = 0; + + switch(RelType) { + case RIT_X86_64_TLV: + Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx); + break; + case RIT_X86_64_Signed4: + Expr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Sym, Ctx), + MCConstantExpr::Create(4, Ctx), + Ctx); + break; + case RIT_X86_64_Signed2: + Expr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Sym, Ctx), + MCConstantExpr::Create(2, Ctx), + Ctx); + break; + case RIT_X86_64_Signed1: + Expr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Sym, Ctx), + MCConstantExpr::Create(1, Ctx), + Ctx); + break; + case RIT_X86_64_GOTLoad: + Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx); + break; + case RIT_X86_64_GOT: + Expr = MCSymbolRefExpr::Create(Sym, isPCRel ? + MCSymbolRefExpr::VK_GOTPCREL : + MCSymbolRefExpr::VK_GOT, + Ctx); + break; + case RIT_X86_64_Subtractor: + { + RelocationRef RelNext; + Obj->getRelocationNext(Rel.getRawDataRefImpl(), RelNext); + RelocationEntry RENext = Obj->getRelocation(RelNext.getRawDataRefImpl()); + + // X86_64_SUBTRACTOR must be followed by a relocation of type + // X86_64_RELOC_UNSIGNED . + // NOTE: Scattered relocations don't exist on x86_64. + unsigned RType = Obj->getAnyRelocationType(RENext); + if (RType != RIT_X86_64_Unsigned) + report_fatal_error("Expected X86_64_RELOC_UNSIGNED after " + "X86_64_RELOC_SUBTRACTOR."); + + const MCExpr *LHS = MCSymbolRefExpr::Create(Sym, Ctx); + + symbol_iterator RSymI = RelNext.getSymbol(); + uint64_t RSymAddr; + RSymI->getAddress(RSymAddr); + StringRef RSymName; + RSymI->getName(RSymName); + + MCSymbol *RSym = Ctx.GetOrCreateSymbol(RSymName); + if (RSym->isVariable() == false) + RSym->setVariableValue(MCConstantExpr::Create(RSymAddr, Ctx)); + + const MCExpr *RHS = MCSymbolRefExpr::Create(RSym, Ctx); + + Expr = MCBinaryExpr::CreateSub(LHS, RHS, Ctx); + break; + } + default: + Expr = MCSymbolRefExpr::Create(Sym, Ctx); + break; + } + return Expr; + } +}; +} // End unnamed namespace + +/// createX86_64MachORelocationInfo - Construct an X86-64 Mach-O RelocationInfo. 
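The RIT_X86_64_Subtractor case above is the inverse of how the assembler encodes a symbol difference: a directive like .quad _end - _start emits a SUBTRACTOR/UNSIGNED relocation pair on a single word, and createExprForRelocation folds the pair back into one subtraction. The core of that reconstruction, using the same MC calls as the hunk (helper name hypothetical):

    #include "llvm/MC/MCContext.h"
    #include "llvm/MC/MCExpr.h"
    using namespace llvm;

    // Rebuild "Sym - RSym" from a SUBTRACTOR relocation (Sym) and the
    // X86_64_RELOC_UNSIGNED relocation that must follow it (RSym).
    static const MCExpr *rebuildDifference(MCSymbol *Sym, MCSymbol *RSym,
                                           MCContext &Ctx) {
      const MCExpr *LHS = MCSymbolRefExpr::Create(Sym, Ctx);
      const MCExpr *RHS = MCSymbolRefExpr::Create(RSym, Ctx);
      return MCBinaryExpr::CreateSub(LHS, RHS, Ctx);
    }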
+MCRelocationInfo *llvm::createX86_64MachORelocationInfo(MCContext &Ctx) { + return new X86_64MachORelocationInfo(Ctx); +} diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index 87bb68d..c865500 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -255,11 +255,16 @@ def : Proc<"amdfam10", [FeatureSSE4A, // Bobcat def : Proc<"btver1", [FeatureSSSE3, FeatureSSE4A, FeatureCMPXCHG16B, FeatureLZCNT, FeaturePOPCNT]>; +// Jaguar +def : Proc<"btver2", [FeatureAVX, FeatureSSE4A, FeatureCMPXCHG16B, + FeatureAES, FeaturePCLMUL, FeatureBMI, + FeatureF16C, FeatureMOVBE, FeatureLZCNT, + FeaturePOPCNT]>; // Bulldozer def : Proc<"bdver1", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B, FeatureAES, FeaturePCLMUL, FeatureLZCNT, FeaturePOPCNT]>; -// Enhanced Bulldozer +// Piledriver def : Proc<"bdver2", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B, FeatureAES, FeaturePCLMUL, FeatureF16C, FeatureLZCNT, diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp index 8fea6ed..5d72b44 100644 --- a/lib/Target/X86/X86CodeEmitter.cpp +++ b/lib/Target/X86/X86CodeEmitter.cpp @@ -53,13 +53,8 @@ namespace { static char ID; explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce) : MachineFunctionPass(ID), II(0), TD(0), TM(tm), - MCE(mce), PICBaseOffset(0), Is64BitMode(false), - IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} - Emitter(X86TargetMachine &tm, CodeEmitter &mce, - const X86InstrInfo &ii, const DataLayout &td, bool is64) - : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm), - MCE(mce), PICBaseOffset(0), Is64BitMode(is64), - IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} + MCE(mce), PICBaseOffset(0), Is64BitMode(false), + IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} bool runOnMachineFunction(MachineFunction &MF); @@ -1270,7 +1265,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI, unsigned rt = Is64BitMode ? X86::reloc_pcrel_word : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word); - if (Opcode == X86::MOV64ri64i32) + if (Opcode == X86::MOV32ri64) rt = X86::reloc_absolute_word; // FIXME: add X86II flag? // This should not occur on Darwin for relocatable objects. if (Opcode == X86::MOV64ri) diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index cf44bd0..295a577 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -45,10 +45,6 @@ class X86FastISel : public FastISel { /// make the right decision when generating code for different targets. const X86Subtarget *Subtarget; - /// RegInfo - X86 register info. - /// - const X86RegisterInfo *RegInfo; - /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 /// floating point ops. /// When SSE is available, use it for f32 operations. @@ -63,7 +59,6 @@ public: Subtarget = &TM.getSubtarget<X86Subtarget>(); X86ScalarSSEf64 = Subtarget->hasSSE2(); X86ScalarSSEf32 = Subtarget->hasSSE1(); - RegInfo = static_cast<const X86RegisterInfo*>(TM.getRegisterInfo()); } virtual bool TargetSelectInstruction(const Instruction *I); @@ -1005,10 +1000,6 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { } bool X86FastISel::X86SelectZExt(const Instruction *I) { - // Handle zero-extension from i1 to i8, which is common. - if (!I->getOperand(0)->getType()->isIntegerTy(1)) - return false; - EVT DstVT = TLI.getValueType(I->getType()); if (!TLI.isTypeLegal(DstVT)) return false; @@ -1017,12 +1008,37 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) { if (ResultReg == 0) return false; - // Set the high bits to zero. 
- ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); - if (ResultReg == 0) - return false; + // Handle zero-extension from i1 to i8, which is common. + MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()).getSimpleVT(); + if (SrcVT.SimpleTy == MVT::i1) { + // Set the high bits to zero. + ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); + SrcVT = MVT::i8; + + if (ResultReg == 0) + return false; + } + + if (DstVT == MVT::i64) { + // Handle extension to 64-bits via sub-register shenanigans. + unsigned MovInst; - if (DstVT != MVT::i8) { + switch (SrcVT.SimpleTy) { + case MVT::i8: MovInst = X86::MOVZX32rr8; break; + case MVT::i16: MovInst = X86::MOVZX32rr16; break; + case MVT::i32: MovInst = X86::MOV32rr; break; + default: llvm_unreachable("Unexpected zext to i64 source type"); + } + + unsigned Result32 = createResultReg(&X86::GR32RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovInst), Result32) + .addReg(ResultReg); + + ResultReg = createResultReg(&X86::GR64RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::SUBREG_TO_REG), + ResultReg) + .addImm(0).addReg(Result32).addImm(X86::sub_32bit); + } else if (DstVT != MVT::i8) { ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND, ResultReg, /*Kill=*/true); if (ResultReg == 0) @@ -1273,8 +1289,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { { &X86::GR16RegClass, X86::AX, X86::DX, { { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem - { X86::DIV16r, X86::MOV16r0, Copy, X86::AX, U }, // UDiv - { X86::DIV16r, X86::MOV16r0, Copy, X86::DX, U }, // URem + { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv + { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem } }, // i16 { &X86::GR32RegClass, X86::EAX, X86::EDX, { @@ -1287,8 +1303,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { { &X86::GR64RegClass, X86::RAX, X86::RDX, { { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem - { X86::DIV64r, X86::MOV64r0, Copy, X86::RAX, U }, // UDiv - { X86::DIV64r, X86::MOV64r0, Copy, X86::RDX, U }, // URem + { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv + { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem } }, // i64 }; @@ -1334,9 +1350,28 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { if (OpEntry.IsOpSigned) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpEntry.OpSignExtend)); - else + else { + unsigned Zero32 = createResultReg(&X86::GR32RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, - TII.get(OpEntry.OpSignExtend), TypeEntry.HighInReg); + TII.get(X86::MOV32r0), Zero32); + + // Copy the zero into the appropriate sub/super/identical physical + // register. Unfortunately the operations needed are not uniform enough to + // fit neatly into the table above. + if (VT.SimpleTy == MVT::i16) { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Copy), TypeEntry.HighInReg) + .addReg(Zero32, 0, X86::sub_16bit); + } else if (VT.SimpleTy == MVT::i32) { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Copy), TypeEntry.HighInReg) + .addReg(Zero32); + } else if (VT.SimpleTy == MVT::i64) { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg) + .addImm(0).addReg(Zero32).addImm(X86::sub_32bit); + } + } } // Generate the DIV/IDIV instruction. 
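Both X86FastISel changes above exploit the same architectural rule: writing a 32-bit x86-64 register clears bits 63:32 of the full register, so MOV32r0 plus a cost-free SUBREG_TO_REG re-typing covers the 64-bit cases and the dedicated MOV16r0/MOV64r0 zero idioms become unnecessary. A small demonstration of the rule itself:

    #include <cstdint>

    // Compilers lower this to a single movzwl: the 32-bit write already
    // zeroes the upper half of the 64-bit register, so widening to 64 bits
    // needs no further instruction (SUBREG_TO_REG emits no code).
    std::uint64_t zext16to64(std::uint16_t X) {
      std::uint32_t Low = X; // movzwl %di, %eax; also clears %rax[63:32]
      return Low;            // reinterpret %eax as %rax, for free
    }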
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, @@ -1985,6 +2020,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { } else { unsigned LocMemOffset = VA.getLocMemOffset(); X86AddressMode AM; + const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo*>( + getTargetMachine()->getRegisterInfo()); AM.Base.Reg = RegInfo->getStackRegister(); AM.Disp = LocMemOffset; const Value *ArgVal = ArgVals[VA.getValNo()]; diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp index 0dd034c..d21cb8a 100644 --- a/lib/Target/X86/X86FixupLEAs.cpp +++ b/lib/Target/X86/X86FixupLEAs.cpp @@ -135,8 +135,8 @@ FunctionPass *llvm::createX86FixupLEAs() { bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) { MF = &Func; - TII = Func.getTarget().getInstrInfo(); TM = &MF->getTarget(); + TII = TM->getInstrInfo(); DEBUG(dbgs() << "Start X86FixupLEAs\n";); // Process all basic blocks. diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 0585b43..8522c8c 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -893,8 +893,8 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) { // Produce implicit-defs for free by using killed registers. while (Kills && Defs) { - unsigned KReg = CountTrailingZeros_32(Kills); - unsigned DReg = CountTrailingZeros_32(Defs); + unsigned KReg = countTrailingZeros(Kills); + unsigned DReg = countTrailingZeros(Defs); DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n"); std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]); std::swap(RegMap[KReg], RegMap[DReg]); @@ -917,7 +917,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) { // Manually kill the rest. while (Kills) { - unsigned KReg = CountTrailingZeros_32(Kills); + unsigned KReg = countTrailingZeros(Kills); DEBUG(dbgs() << "Killing %FP" << KReg << "\n"); freeStackSlotBefore(I, KReg); Kills &= ~(1 << KReg); @@ -925,7 +925,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) { // Load zeros for all the imp-defs. while(Defs) { - unsigned DReg = CountTrailingZeros_32(Defs); + unsigned DReg = countTrailingZeros(Defs); DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n"); BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0)); pushReg(DReg); @@ -1636,7 +1636,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { // Note: this might be a non-optimal pop sequence. We might be able to do // better by trying to pop in stack order or something. while (FPKills) { - unsigned FPReg = CountTrailingZeros_32(FPKills); + unsigned FPReg = countTrailingZeros(FPKills); if (isLive(FPReg)) freeStackSlotAfter(InsertPt, FPReg); FPKills &= ~(1U << FPReg); diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 16e1e42..3061117 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -307,12 +307,12 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, unsigned FramePtr) const { MachineFrameInfo *MFI = MF.getFrameInfo(); MachineModuleInfo &MMI = MF.getMMI(); + const MCRegisterInfo &MRI = MMI.getContext().getRegisterInfo(); // Add callee saved registers to move list. 
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); if (CSI.empty()) return; - std::vector<MachineMove> &Moves = MMI.getFrameMoves(); const X86RegisterInfo *RegInfo = TM.getRegisterInfo(); bool HasFP = hasFP(MF); @@ -360,16 +360,22 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, if (HasFP && FramePtr == Reg) continue; - MachineLocation CSDst(MachineLocation::VirtualFP, Offset); - MachineLocation CSSrc(Reg); - Moves.push_back(MachineMove(Label, CSDst, CSSrc)); + unsigned DwarfReg = MRI.getDwarfRegNum(Reg, true); + MMI.addFrameInst(MCCFIInstruction::createOffset(Label, DwarfReg, Offset)); } } /// getCompactUnwindRegNum - Get the compact unwind number for a given /// register. The number corresponds to the enum lists in /// compact_unwind_encoding.h. -static int getCompactUnwindRegNum(const uint16_t *CURegs, unsigned Reg) { +static int getCompactUnwindRegNum(unsigned Reg, bool is64Bit) { + static const uint16_t CU32BitRegs[] = { + X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 + }; + static const uint16_t CU64BitRegs[] = { + X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 + }; + const uint16_t *CURegs = is64Bit ? CU64BitRegs : CU32BitRegs; for (int Idx = 1; *CURegs; ++CURegs, ++Idx) if (*CURegs == Reg) return Idx; @@ -398,16 +404,8 @@ encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS], // 4 3 // 5 3 // - static const uint16_t CU32BitRegs[] = { - X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 - }; - static const uint16_t CU64BitRegs[] = { - X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 - }; - const uint16_t *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs); - for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) { - int CUReg = getCompactUnwindRegNum(CURegs, SavedRegs[i]); + int CUReg = getCompactUnwindRegNum(SavedRegs[i], Is64Bit); if (CUReg == -1) return ~0U; SavedRegs[i] = CUReg; } @@ -466,14 +464,6 @@ encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS], static uint32_t encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS], bool Is64Bit) { - static const uint16_t CU32BitRegs[] = { - X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 - }; - static const uint16_t CU64BitRegs[] = { - X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 - }; - const uint16_t *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs); - // Encode the registers in the order they were saved, 3-bits per register. The // registers are numbered from 1 to CU_NUM_SAVED_REGS. uint32_t RegEnc = 0; @@ -481,7 +471,7 @@ encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS], unsigned Reg = SavedRegs[I]; if (Reg == 0) continue; - int CURegNum = getCompactUnwindRegNum(CURegs, Reg); + int CURegNum = getCompactUnwindRegNum(Reg, Is64Bit); if (CURegNum == -1) return ~0U; // Encode the 3-bit register number in order, skipping over 3-bits for each @@ -534,6 +524,12 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { // If there are too many saved registers, we cannot use compact encoding. if (SavedRegIdx >= CU_NUM_SAVED_REGS) return CU::UNWIND_MODE_DWARF; + unsigned Reg = MI.getOperand(0).getReg(); + if (Reg == (Is64Bit ? 
X86::RAX : X86::EAX)) { + ExpectEnd = true; + continue; + } + SavedRegs[SavedRegIdx++] = MI.getOperand(0).getReg(); StackAdjust += OffsetSize; InstrOffset += PushInstrSize; @@ -735,7 +731,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // REG < 64 => DW_CFA_offset + Reg // ELSE => DW_CFA_offset_extended - std::vector<MachineMove> &Moves = MMI.getFrameMoves(); uint64_t NumBytes = 0; int stackGrowth = -SlotSize; @@ -768,20 +763,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { .addSym(FrameLabel); // Define the current CFA rule to use the provided offset. - if (StackSize) { - MachineLocation SPDst(MachineLocation::VirtualFP); - MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth); - Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc)); - } else { - MachineLocation SPDst(StackPtr); - MachineLocation SPSrc(StackPtr, stackGrowth); - Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc)); - } + assert(StackSize); + MMI.addFrameInst( + MCCFIInstruction::createDefCfaOffset(FrameLabel, 2 * stackGrowth)); // Change the rule for the FramePtr to be an "offset" rule. - MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth); - MachineLocation FPSrc(FramePtr); - Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc)); + unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true); + MMI.addFrameInst(MCCFIInstruction::createOffset(FrameLabel, DwarfFramePtr, + 2 * stackGrowth)); } // Update EBP with the new base value. @@ -797,9 +786,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { .addSym(FrameLabel); // Define the current CFA to use the EBP/RBP register. - MachineLocation FPDst(FramePtr); - MachineLocation FPSrc(MachineLocation::VirtualFP); - Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc)); + unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true); + MMI.addFrameInst( + MCCFIInstruction::createDefCfaRegister(FrameLabel, DwarfFramePtr)); } // Mark the FramePtr as live-in in every block except the entry. @@ -827,10 +816,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label); // Define the current CFA rule to use the provided offset. - unsigned Ptr = StackSize ? MachineLocation::VirtualFP : StackPtr; - MachineLocation SPDst(Ptr); - MachineLocation SPSrc(Ptr, StackOffset); - Moves.push_back(MachineMove(Label, SPDst, SPSrc)); + assert(StackSize); + MMI.addFrameInst( + MCCFIInstruction::createDefCfaOffset(Label, StackOffset)); StackOffset += stackGrowth; } } @@ -964,16 +952,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { if (!HasFP && NumBytes) { // Define the current CFA rule to use the provided offset. - if (StackSize) { - MachineLocation SPDst(MachineLocation::VirtualFP); - MachineLocation SPSrc(MachineLocation::VirtualFP, - -StackSize + stackGrowth); - Moves.push_back(MachineMove(Label, SPDst, SPSrc)); - } else { - MachineLocation SPDst(StackPtr); - MachineLocation SPSrc(StackPtr, stackGrowth); - Moves.push_back(MachineMove(Label, SPDst, SPSrc)); - } + assert(StackSize); + MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset( + Label, -StackSize + stackGrowth)); } // Emit DWARF info specifying the offsets of the callee-saved registers. 
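The frame-lowering rewrite above retires the paired MachineLocation records: each MachineMove becomes a single self-describing MCCFIInstruction fed to MachineModuleInfo. Condensed from the calls the patch itself makes, a standard 64-bit prologue now records its unwind info roughly like this (stackGrowth = -8; surrounding variables as in emitPrologue):

    // After "push %rbp": the CFA is 16 bytes above %rsp, and the caller's
    // %rbp is saved at CFA-16.
    MMI.addFrameInst(
        MCCFIInstruction::createDefCfaOffset(FrameLabel, 2 * stackGrowth));
    unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
    MMI.addFrameInst(MCCFIInstruction::createOffset(FrameLabel, DwarfFramePtr,
                                                    2 * stackGrowth));

    // After "mov %rsp, %rbp": the CFA is computed off %rbp from here on.
    MMI.addFrameInst(
        MCCFIInstruction::createDefCfaRegister(FrameLabel, DwarfFramePtr));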
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 968b358..4ffffa1 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -200,9 +200,13 @@ namespace { bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment); + bool SelectMOV64Imm32(SDValue N, SDValue &Imm); bool SelectLEAAddr(SDValue N, SDValue &Base, SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment); + bool SelectLEA64_32Addr(SDValue N, SDValue &Base, + SDValue &Scale, SDValue &Index, SDValue &Disp, + SDValue &Segment); bool SelectTLSADDRAddr(SDValue N, SDValue &Base, SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment); @@ -229,14 +233,14 @@ namespace { SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment) { Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? - CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) : + CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI->getPointerTy()) : AM.Base_Reg; Scale = getI8Imm(AM.Scale); Index = AM.IndexReg; // These are 32-bit even in 64-bit mode since RIP relative offset // is 32-bit. if (AM.GV) - Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(), + Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(), MVT::i32, AM.Disp, AM.SymbolFlags); else if (AM.CP) @@ -373,7 +377,7 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, else Ops.push_back(Chain.getOperand(i)); SDValue NewChain = - CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(), + CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, &Ops[0], Ops.size()); Ops.clear(); Ops.push_back(NewChain); @@ -524,7 +528,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() { MemVT = SrcIsSSE ? SrcVT : DstVT; SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // FIXME: optimize the case where the src/dest is a load or store? SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, @@ -782,7 +786,7 @@ static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N, return true; EVT VT = N.getValueType(); - DebugLoc DL = N.getDebugLoc(); + SDLoc DL(N); SDValue Eight = DAG.getConstant(8, MVT::i8); SDValue NewMask = DAG.getConstant(0xff, VT); SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight); @@ -830,7 +834,7 @@ static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N, return true; EVT VT = N.getValueType(); - DebugLoc DL = N.getDebugLoc(); + SDLoc DL(N); SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT); SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask); SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1)); @@ -886,8 +890,8 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, return true; unsigned ShiftAmt = Shift.getConstantOperandVal(1); - unsigned MaskLZ = CountLeadingZeros_64(Mask); - unsigned MaskTZ = CountTrailingZeros_64(Mask); + unsigned MaskLZ = countLeadingZeros(Mask); + unsigned MaskTZ = countTrailingZeros(Mask); // The amount of shift we're trying to fit into the addressing mode is taken // from the trailing zeros of the mask. @@ -932,11 +936,11 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, if (ReplacingAnyExtend) { assert(X.getValueType() != VT); // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND. 
- SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X); + SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X); InsertDAGNode(DAG, N, NewX); X = NewX; } - DebugLoc DL = N.getDebugLoc(); + SDLoc DL(N); SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8); SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt); SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8); @@ -960,7 +964,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, unsigned Depth) { - DebugLoc dl = N.getDebugLoc(); + SDLoc dl(N); DEBUG({ dbgs() << "MatchAddress: "; AM.dump(); @@ -1380,6 +1384,71 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root, } +bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) { + if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { + uint64_t ImmVal = CN->getZExtValue(); + if ((uint32_t)ImmVal != (uint64_t)ImmVal) + return false; + + Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64); + return true; + } + + // In static codegen with small code model, we can get the address of a label + // into a register with 'movl'. TableGen has already made sure we're looking + // at a label of some kind. + assert(N->getOpcode() == X86ISD::Wrapper && + "Unexpected node type for MOV32ri64"); + N = N.getOperand(0); + + if (N->getOpcode() != ISD::TargetConstantPool && + N->getOpcode() != ISD::TargetJumpTable && + N->getOpcode() != ISD::TargetGlobalAddress && + N->getOpcode() != ISD::TargetExternalSymbol && + N->getOpcode() != ISD::TargetBlockAddress) + return false; + + Imm = N; + return TM.getCodeModel() == CodeModel::Small; +} + +bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base, + SDValue &Scale, SDValue &Index, + SDValue &Disp, SDValue &Segment) { + if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment)) + return false; + + SDLoc DL(N); + RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base); + if (RN && RN->getReg() == 0) + Base = CurDAG->getRegister(0, MVT::i64); + else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(N)) { + // Base could already be %rip, particularly in the x32 ABI. + Base = SDValue(CurDAG->getMachineNode( + TargetOpcode::SUBREG_TO_REG, DL, MVT::i64, + CurDAG->getTargetConstant(0, MVT::i64), + Base, + CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)), + 0); + } + + RN = dyn_cast<RegisterSDNode>(Index); + if (RN && RN->getReg() == 0) + Index = CurDAG->getRegister(0, MVT::i64); + else { + assert(Index.getValueType() == MVT::i32 && + "Expect to be extending 32-bit registers for use in LEA"); + Index = SDValue(CurDAG->getMachineNode( + TargetOpcode::SUBREG_TO_REG, DL, MVT::i64, + CurDAG->getTargetConstant(0, MVT::i64), + Index, + CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)), + 0); + } + + return true; +} + /// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing /// mode it matches can be cost effectively emitted as an LEA instruction. 
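SelectMOV64Imm32 above admits exactly those 64-bit immediates whose value survives zero-extension from 32 bits, so instruction selection can emit the 5-byte movl instead of the 10-byte movabsq. The predicate in isolation (hypothetical free function):

    #include <cstdint>

    // True when a 64-bit immediate can be materialized by a 32-bit mov,
    // i.e. when zero-extending its low 32 bits reproduces the whole value.
    bool fitsInMov32ri64(std::uint64_t ImmVal) {
      return static_cast<std::uint64_t>(static_cast<std::uint32_t>(ImmVal)) ==
             ImmVal;
    }
    // fitsInMov32ri64(0x0000000012345678) -> true  (movl $0x12345678, %eax)
    // fitsInMov32ri64(0xffffffff12345678) -> false (still needs movabsq)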
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N, @@ -1487,7 +1556,7 @@ bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N, /// SDNode *X86DAGToDAGISel::getGlobalBaseReg() { unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); - return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode(); + return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode(); } SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { @@ -1502,7 +1571,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain}; - SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), + SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), MVT::i32, MVT::i32, MVT::Other, Ops); cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); return ResNode; @@ -1637,7 +1706,7 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { // + empty, the operand is not needed any more with the new op selected. // + non-empty, otherwise. static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG, - DebugLoc dl, + SDLoc dl, enum AtomicOpc &Op, EVT NVT, SDValue Val) { if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) { @@ -1689,7 +1758,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { if (Node->hasAnyUseOfValue(0)) return 0; - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); // Optimize common patterns for __sync_or_and_fetch and similar arith // operations where the result is not used. This allows us to use the "lock" @@ -1920,7 +1989,7 @@ static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc, if (ChainCheck) // Make a new TokenFactor with all the other input chains except // for the load. - InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(), + InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, &ChainOps[0], ChainOps.size()); } if (!ChainCheck) @@ -1968,7 +2037,7 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) { SDValue Segment = CurDAG->getRegister(0, MVT::i32); const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx, Disp, Segment, VMask, Chain}; - SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), VTs, Ops); + SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops); // Node has 2 outputs: VDst and MVT::Other. // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other. // We replace VDst of Node with VDst of ResNode, and Other of Node with Other @@ -1982,7 +2051,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { EVT NVT = Node->getValueType(0); unsigned Opc, MOpc; unsigned Opcode = Node->getOpcode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n'); @@ -2013,6 +2082,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { case Intrinsic::x86_avx2_gather_d_d_256: case Intrinsic::x86_avx2_gather_q_d: case Intrinsic::x86_avx2_gather_q_d_256: { + if (!Subtarget->hasAVX2()) + break; unsigned Opc; switch (IntNo) { default: llvm_unreachable("Impossible intrinsic"); @@ -2335,9 +2406,6 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n'); } - // Propagate ordering to the last node, for now. 
- CurDAG->AssignOrdering(InFlag.getNode(), CurDAG->GetOrdering(Node)); - return NULL; } @@ -2366,27 +2434,24 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { } unsigned LoReg, HiReg, ClrReg; - unsigned ClrOpcode, SExtOpcode; + unsigned SExtOpcode; switch (NVT.getSimpleVT().SimpleTy) { default: llvm_unreachable("Unsupported VT!"); case MVT::i8: LoReg = X86::AL; ClrReg = HiReg = X86::AH; - ClrOpcode = 0; SExtOpcode = X86::CBW; break; case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; - ClrOpcode = X86::MOV16r0; ClrReg = X86::DX; + ClrReg = X86::DX; SExtOpcode = X86::CWD; break; case MVT::i32: LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; - ClrOpcode = X86::MOV32r0; SExtOpcode = X86::CDQ; break; case MVT::i64: LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; - ClrOpcode = X86::MOV64r0; SExtOpcode = X86::CQO; break; } @@ -2424,8 +2489,29 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0); } else { // Zero out the high part, effectively zero extending the input. - SDValue ClrNode = - SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0); + SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0); + switch (NVT.getSimpleVT().SimpleTy) { + case MVT::i16: + ClrNode = + SDValue(CurDAG->getMachineNode( + TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode, + CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)), + 0); + break; + case MVT::i32: + break; + case MVT::i64: + ClrNode = + SDValue(CurDAG->getMachineNode( + TargetOpcode::SUBREG_TO_REG, dl, MVT::i64, + CurDAG->getTargetConstant(0, MVT::i64), ClrNode, + CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)), + 0); + break; + default: + llvm_unreachable("Unexpected division source"); + } + InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg, ClrNode, InFlag).getValue(1); } @@ -2666,7 +2752,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { EVT LdVT = LoadNode->getMemoryVT(); unsigned newOpc = getFusedLdStOpcode(LdVT, Opc); MachineSDNode *Result = CurDAG->getMachineNode(newOpc, - Node->getDebugLoc(), + SDLoc(Node), MVT::i32, MVT::Other, Ops); Result->setMemRefs(MemOp, MemOp + 2); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index b587336..346dfbb 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -55,7 +55,7 @@ using namespace llvm; STATISTIC(NumTailCalls, "Number of tail calls"); // Forward declarations. -static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2); /// Generate a DAG to grab 128-bits from a vector > 128 bits. This @@ -64,7 +64,7 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, /// want. It need not be aligned to a 128-bit bounday. That makes /// lowering EXTRACT_VECTOR_ELT operations easier. static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, - SelectionDAG &DAG, DebugLoc dl) { + SelectionDAG &DAG, SDLoc dl) { EVT VT = Vec.getValueType(); assert(VT.is256BitVector() && "Unexpected vector size!"); EVT ElVT = VT.getVectorElementType(); @@ -104,7 +104,7 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, /// lowering INSERT_VECTOR_ELT operations easier. 
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { // Inserting UNDEF is Result if (Vec.getOpcode() == ISD::UNDEF) return Result; @@ -134,7 +134,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, /// large BUILD_VECTORS. static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, unsigned NumElems, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); return Insert128BitVector(V, V2, NumElems/2, DAG, dl); } @@ -163,7 +163,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) Subtarget = &TM.getSubtarget<X86Subtarget>(); X86ScalarSSEf64 = Subtarget->hasSSE2(); X86ScalarSSEf32 = Subtarget->hasSSE1(); - RegInfo = TM.getRegisterInfo(); TD = getDataLayout(); resetOperationActions(); @@ -202,6 +201,8 @@ void X86TargetLowering::resetOperationActions() { setSchedulingPreference(Sched::ILP); else setSchedulingPreference(Sched::RegPressure); + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(TM.getRegisterInfo()); setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister()); // Bypass expensive divides on Atom when compiling with O2 @@ -1361,7 +1362,7 @@ void X86TargetLowering::resetOperationActions() { setPrefFunctionAlignment(4); // 2^4 bytes. } -EVT X86TargetLowering::getSetCCResultType(EVT VT) const { +EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { if (!VT.isVector()) return MVT::i8; return VT.changeVectorElementTypeToInteger(); } @@ -1504,9 +1505,9 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const { if (!Subtarget->is64Bit()) - // This doesn't have DebugLoc associated with it, but is not really the + // This doesn't have SDLoc associated with it, but is not really the // same as a Register. - return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()); + return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy()); return Table; } @@ -1593,7 +1594,7 @@ X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); @@ -1761,7 +1762,7 @@ SDValue X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Assign locations to each value returned by this call. 
@@ -1868,7 +1869,7 @@ argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), @@ -1906,7 +1907,7 @@ SDValue X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, MachineFrameInfo *MFI, unsigned i) const { @@ -1948,7 +1949,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { @@ -2225,7 +2226,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const { unsigned LocMemOffset = VA.getLocMemOffset(); @@ -2245,7 +2246,7 @@ SDValue X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall, bool Is64Bit, - int FPDiff, DebugLoc dl) const { + int FPDiff, SDLoc dl) const { // Adjust the Return address stack slot. EVT VT = getPointerTy(); OutRetAddr = getReturnAddressFrameIndex(DAG); @@ -2261,7 +2262,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, static SDValue EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, - unsigned SlotSize, int FPDiff, DebugLoc dl) { + unsigned SlotSize, int FPDiff, SDLoc dl) { // Store the return address to the appropriate stack slot. if (!FPDiff) return Chain; // Calculate the new stack slot for the return address. @@ -2278,7 +2279,7 @@ SDValue X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -2354,7 +2355,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } if (!IsSibcall) - Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); + Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), + dl); SDValue RetAddrFrIdx; // Load return address for tail calls. @@ -2368,6 +2370,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Walk the register/memloc assignments, inserting copies/loads. In the case // of tail call optimization arguments are handle later. + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; EVT RegVT = VA.getLocVT(); @@ -2443,7 +2447,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // GOT pointer. 
if (!isTailCall) { RegsToPass.push_back(std::make_pair(unsigned(X86::EBX), - DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()))); + DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy()))); } else { // If we are tail calling and generating PIC/GOT style code load the // address of the callee into ECX. The value in ecx is used as target of @@ -2640,7 +2644,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (!IsSibcall && isTailCall) { Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), - DAG.getIntPtrConstant(0, true), InFlag); + DAG.getIntPtrConstant(0, true), InFlag, dl); InFlag = Chain.getValue(1); } @@ -2699,7 +2703,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, DAG.getIntPtrConstant(NumBytes, true), DAG.getIntPtrConstant(NumBytesForCalleeToPush, true), - InFlag); + InFlag, dl); InFlag = Chain.getValue(1); } @@ -2747,6 +2751,8 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG& DAG) const { MachineFunction &MF = DAG.getMachineFunction(); const TargetMachine &TM = MF.getTarget(); + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(TM.getRegisterInfo()); const TargetFrameLowering &TFI = *TM.getFrameLowering(); unsigned StackAlignment = TFI.getStackAlignment(); uint64_t AlignMask = StackAlignment - 1; @@ -2859,6 +2865,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to // emit a special epilogue. + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); if (RegInfo->needsStackRealignment(MF)) return false; @@ -3062,7 +3070,7 @@ static bool isTargetShuffle(unsigned Opcode) { } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, SelectionDAG &DAG) { switch(Opc) { default: llvm_unreachable("Unknown x86 shuffle node"); @@ -3073,7 +3081,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, unsigned TargetMask, SelectionDAG &DAG) { switch(Opc) { @@ -3087,7 +3095,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) { switch(Opc) { @@ -3100,7 +3108,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG) { switch(Opc) { default: llvm_unreachable("Unknown x86 shuffle node"); @@ -3119,6 +3127,8 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); int ReturnAddrIndex = FuncInfo->getRAIndex(); @@ -3628,7 +3638,7 @@ static SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode 
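Related API churn visible in this hunk: getCALLSEQ_START and getCALLSEQ_END now take the location explicitly as a trailing argument. The shape after the change, assembled from the hunks above:

// Call-sequence markers now carry the call's SDLoc explicitly.
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), dl);
// ... argument copies and the call node itself ...
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                           DAG.getIntPtrConstant(0, true), InFlag, dl);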
*SVOp, SelectionDAG &DAG) { MVT VT = SVOp->getValueType(0).getSimpleVT(); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); if (VT != MVT::v8i32 && VT != MVT::v8f32) return SDValue(); @@ -4263,7 +4273,7 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, } MaskVec.push_back(Idx); } - return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), + return DAG.getVectorShuffle(VT, SDLoc(SVOp), SVOp->getOperand(1), SVOp->getOperand(0), &MaskVec[0]); } @@ -4396,7 +4406,7 @@ static bool isZeroShuffle(ShuffleVectorSDNode *N) { /// getZeroVector - Returns a vector of specified type with all zero elements. /// static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, - SelectionDAG &DAG, DebugLoc dl) { + SelectionDAG &DAG, SDLoc dl) { assert(VT.isVector() && "Expected a vector type"); // Always build SSE zero vectors as <4 x i32> bitcasted @@ -4435,7 +4445,7 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, /// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. /// Then bitcast to their original type, ensuring they get CSE'd. static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { assert(VT.isVector() && "Expected a vector type"); SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); @@ -4469,7 +4479,7 @@ static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) { /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd /// operation of specified width. -static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2) { unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 8> Mask; @@ -4480,7 +4490,7 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, } /// getUnpackl - Returns a vector_shuffle node for an unpackl operation. -static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2) { unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 8> Mask; @@ -4492,7 +4502,7 @@ static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, } /// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 
-static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2) { unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 8> Mask; @@ -4510,7 +4520,7 @@ static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { EVT VT = V.getValueType(); int NumElems = VT.getVectorNumElements(); - DebugLoc dl = V.getDebugLoc(); + SDLoc dl(V); while (NumElems > 4) { if (EltNo < NumElems/2) { @@ -4527,7 +4537,7 @@ static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { /// getLegalSplat - Generate a legal splat with supported x86 shuffles static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { EVT VT = V.getValueType(); - DebugLoc dl = V.getDebugLoc(); + SDLoc dl(V); if (VT.is128BitVector()) { V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); @@ -4554,7 +4564,7 @@ static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { EVT SrcVT = SV->getValueType(0); SDValue V1 = SV->getOperand(0); - DebugLoc dl = SV->getDebugLoc(); + SDLoc dl(SV); int EltNo = SV->getSplatIndex(); int NumElems = SrcVT.getVectorNumElements(); @@ -4599,13 +4609,13 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, SelectionDAG &DAG) { EVT VT = V2.getValueType(); SDValue V1 = IsZero - ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); + ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT); unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 16> MaskVec; for (unsigned i = 0; i != NumElems; ++i) // If this is the insertion idx, put the low elt of V2 here. MaskVec.push_back(i == Idx ? NumElems : i); - return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); + return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]); } /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the @@ -4756,19 +4766,27 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG, /// getNumOfConsecutiveZeros - Return the number of elements of a vector /// shuffle operation which come from a consecutively from a zero. The /// search can start in two different directions, from left or right. -static -unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems, - bool ZerosFromLeft, SelectionDAG &DAG) { - unsigned i; - for (i = 0; i != NumElems; ++i) { - unsigned Index = ZerosFromLeft ? i : NumElems-i-1; +/// We count undefs as zeros until PreferredNum is reached. +static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, + unsigned NumElems, bool ZerosFromLeft, + SelectionDAG &DAG, + unsigned PreferredNum = -1U) { + unsigned NumZeros = 0; + for (unsigned i = 0; i != NumElems; ++i) { + unsigned Index = ZerosFromLeft ? i : NumElems - i - 1; SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0); - if (!(Elt.getNode() && - (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt)))) + if (!Elt.getNode()) + break; + + if (X86::isZeroNode(Elt)) + ++NumZeros; + else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum. 
+ NumZeros = std::min(NumZeros + 1, PreferredNum); + else break; } - return i; + return NumZeros; } /// isShuffleMaskConsecutive - Check if the shuffle mask indicies [MaskI, MaskE) @@ -4806,8 +4824,9 @@ bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); - unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, - false /* check zeros from right */, DAG); + unsigned NumZeros = getNumOfConsecutiveZeros( + SVOp, NumElems, false /* check zeros from right */, DAG, + SVOp->getMaskElt(0)); unsigned OpSrc; if (!NumZeros) @@ -4839,8 +4858,9 @@ static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); - unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, - true /* check zeros from left */, DAG); + unsigned NumZeros = getNumOfConsecutiveZeros( + SVOp, NumElems, true /* check zeros from left */, DAG, + NumElems - SVOp->getMaskElt(NumElems - 1) - 1); unsigned OpSrc; if (!NumZeros) @@ -4893,7 +4913,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, if (NumNonZero > 8) return SDValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 16; ++i) { @@ -4941,7 +4961,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, if (NumNonZero > 4) return SDValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 8; ++i) { @@ -4967,7 +4987,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, /// static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits, SelectionDAG &DAG, - const TargetLowering &TLI, DebugLoc dl) { + const TargetLowering &TLI, SDLoc dl) { assert(VT.is128BitVector() && "Unknown type for VShift"); EVT ShVT = MVT::v2i64; unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; @@ -4979,7 +4999,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, } SDValue -X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, +X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl, SelectionDAG &DAG) const { // Check if the scalar load can be widened into a vector load. And if @@ -5032,7 +5052,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, return SDValue(); int64_t StartOffset = Offset & ~(RequiredAlign-1); if (StartOffset) - Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), + Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(), Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); int EltNo = (Offset - StartOffset) >> 2; @@ -5063,7 +5083,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, /// rather than undef via VZEXT_LOAD, but we do not detect that case today. /// There's even a handy isZeroNode for that purpose. 
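The rewritten getNumOfConsecutiveZeros above changes how undefs are counted: a genuine zero always counts, but an undef only counts as a zero until the total reaches PreferredNum, which the two callers derive from the mask (the first mask element for a right shift, NumElems minus the last mask element minus one for a left shift). A standalone model of just the counting rule, with an enum standing in for the getShuffleScalarElt / X86::isZeroNode checks:

#include <algorithm>
#include <cstdio>
#include <vector>

enum Elt { Zero, Undef, Other };   // stand-ins for the SDValue checks

static unsigned countZeroLike(const std::vector<Elt> &Elts,
                              unsigned PreferredNum) {
  unsigned NumZeros = 0;
  for (Elt E : Elts) {
    if (E == Zero)
      ++NumZeros;                                      // real zeros always count
    else if (E == Undef)
      NumZeros = std::min(NumZeros + 1, PreferredNum); // undef-as-zero, clamped
    else
      break;
  }
  return NumZeros;
}

int main() {
  std::vector<Elt> M = {Undef, Undef, Undef, Other};
  std::printf("%u\n", countZeroLike(M, -1U)); // 3: old behaviour, undef == zero
  std::printf("%u\n", countZeroLike(M, 2));   // 2: capped at what the mask needs
}

With the cap, a run of undefs can no longer inflate the zero count past the shift amount the mask actually requires.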
static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, - DebugLoc &DL, SelectionDAG &DAG) { + SDLoc &DL, SelectionDAG &DAG) { EVT EltVT = VT.getVectorElementType(); unsigned NumElems = Elts.size(); @@ -5099,15 +5119,27 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, // load of the entire vector width starting at the base pointer. If we found // consecutive loads for the low half, generate a vzext_load node. if (LastLoadedElt == NumElems - 1) { + SDValue NewLd = SDValue(); if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) - return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), - LDBase->getPointerInfo(), - LDBase->isVolatile(), LDBase->isNonTemporal(), - LDBase->isInvariant(), 0); - return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), - LDBase->getPointerInfo(), - LDBase->isVolatile(), LDBase->isNonTemporal(), - LDBase->isInvariant(), LDBase->getAlignment()); + NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), + LDBase->getPointerInfo(), + LDBase->isVolatile(), LDBase->isNonTemporal(), + LDBase->isInvariant(), 0); + NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), + LDBase->getPointerInfo(), + LDBase->isVolatile(), LDBase->isNonTemporal(), + LDBase->isInvariant(), LDBase->getAlignment()); + + if (LDBase->hasAnyUseOfValue(1)) { + SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, + SDValue(LDBase, 1), + SDValue(NewLd.getNode(), 1)); + DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); + DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), + SDValue(NewLd.getNode(), 1)); + } + + return NewLd; } if (NumElems == 4 && LastLoadedElt == 1 && DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { @@ -5150,7 +5182,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { return SDValue(); MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); assert((VT.is128BitVector() || VT.is256BitVector()) && "Unsupported vector type for broadcast."); @@ -5277,7 +5309,7 @@ X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) return SDValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned NumElems = Op.getNumOperands(); SDValue VecIn1; @@ -5345,7 +5377,7 @@ X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT ExtVT = VT.getVectorElementType(); @@ -5404,7 +5436,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { // Special case for single non-zero, non-undef, element. if (NumNonZero == 1) { - unsigned Idx = CountTrailingZeros_32(NonZeros); + unsigned Idx = countTrailingZeros(NonZeros); SDValue Item = Op.getOperand(Idx); // If this is an insertion of an i64 value on x86-32, and if the top bits of @@ -5513,7 +5545,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> // Check if it's possible to issue this instead. 
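A note on the EltsFromConsecutiveLoads hunk above, which is more than the SDLoc rename: the old code returned a fresh wide load and dropped the original load's chain result on the floor. A load node has two results, the data (value 0) and a chain (value 1) that later memory operations are ordered against, so users of the old chain must be rewired onto the replacement. Restating the added fixup with comments:

// Restatement of the fixup in the hunk above, with explanatory comments.
if (LDBase->hasAnyUseOfValue(1)) {
  // Merge the old chain and the new load's chain so existing users keep
  // their ordering relative to the replacement load.
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                 SDValue(LDBase, 1),
                                 SDValue(NewLd.getNode(), 1));
  // Point every user of the old chain at the merged chain. This also
  // rewrites the TokenFactor's own first operand (creating a self-loop)...
  DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
  // ...so restore the TokenFactor's intended operands afterwards.
  DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
                         SDValue(NewLd.getNode(), 1));
}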
// shuffle (vload ptr)), undef, <1, 1, 1, 1> - unsigned Idx = CountTrailingZeros_32(NonZeros); + unsigned Idx = countTrailingZeros(NonZeros); SDValue Item = Op.getOperand(Idx); if (Op.getNode()->isOnlyUserOf(Item.getNode())) return LowerAsSplatVectorLoad(Item, VT, dl, DAG); @@ -5548,7 +5580,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { if (EVTBits == 64) { if (NumNonZero == 1) { // One half is zero or undef. - unsigned Idx = CountTrailingZeros_32(NonZeros); + unsigned Idx = countTrailingZeros(NonZeros); SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(Idx)); return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); @@ -5678,7 +5710,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction // to create 256-bit vectors from two other 128-bit ones. static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT ResVT = Op.getValueType().getSimpleVT(); assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); @@ -5704,7 +5736,7 @@ LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, const X86Subtarget *Subtarget, SelectionDAG &DAG) { SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); MVT VT = SVOp->getValueType(0).getSimpleVT(); MVT EltVT = VT.getVectorElementType(); unsigned NumElems = VT.getVectorNumElements(); @@ -5765,7 +5797,7 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); SmallVector<int, 8> MaskVals; // Determine if more than 1 of the words in each of the low and high quadwords @@ -6020,7 +6052,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, const X86TargetLowering &TLI) { SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); ArrayRef<int> MaskVals = SVOp->getMask(); // Promote splats to a larger type which usually leads to more efficient code. 
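CountTrailingZeros_32 becomes countTrailingZeros at several sites above; these callers use it to turn the NonZeros bitmask into the index of the single non-zero element. A portable stand-in for the 32-bit case:

#include <cstdint>
#include <cstdio>

// Stand-in for the renamed llvm::countTrailingZeros; the GCC/Clang builtin
// is undefined at 0, so that case is guarded the way the LLVM helper is.
static unsigned ctz32(uint32_t V) { return V ? __builtin_ctz(V) : 32; }

int main() {
  uint32_t NonZeros = 1u << 5;                // bit i set <=> element i non-zero
  std::printf("Idx = %u\n", ctz32(NonZeros)); // Idx = 5
}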
@@ -6149,7 +6181,7 @@ SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, MVT VT = SVOp->getValueType(0).getSimpleVT(); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; @@ -6195,7 +6227,7 @@ static SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { MVT VT = SVOp->getValueType(0).getSimpleVT(); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); unsigned NumElems = VT.getVectorNumElements(); MVT NewVT; unsigned Scale; @@ -6233,7 +6265,7 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, /// static SDValue getVZextMovL(MVT VT, EVT OpVT, SDValue SrcOp, SelectionDAG &DAG, - const X86Subtarget *Subtarget, DebugLoc dl) { + const X86Subtarget *Subtarget, SDLoc dl) { if (VT == MVT::v2f64 || VT == MVT::v4f32) { LoadSDNode *LD = NULL; if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) @@ -6278,7 +6310,7 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { unsigned NumElems = VT.getVectorNumElements(); unsigned NumLaneElems = NumElems / 2; - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); MVT EltVT = VT.getVectorElementType(); MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems); SDValue Output[2]; @@ -6384,7 +6416,7 @@ static SDValue LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); MVT VT = SVOp->getValueType(0).getSimpleVT(); assert(VT.is128BitVector() && "Unsupported vector size"); @@ -6535,7 +6567,7 @@ static bool MayFoldVectorLoad(SDValue V) { } static -SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { +SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) { EVT VT = Op.getValueType(); // Canonizalize to v2f64. @@ -6546,7 +6578,7 @@ SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { } static -SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, +SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); @@ -6565,7 +6597,7 @@ SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, } static -SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { +SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); EVT VT = Op.getValueType(); @@ -6581,7 +6613,7 @@ SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { } static -SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { +SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); EVT VT = Op.getValueType(); @@ -6651,7 +6683,7 @@ X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const { return SDValue(); ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); unsigned NumElems = VT.getVectorNumElements(); @@ -6712,10 +6744,10 @@ X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const { // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast (extract_subvector x)). 
unsigned Ratio = V.getValueSizeInBits() / V1.getValueSizeInBits(); EVT FullVT = V.getValueType(); - EVT SubVecVT = EVT::getVectorVT(*Context, + EVT SubVecVT = EVT::getVectorVT(*Context, FullVT.getVectorElementType(), FullVT.getVectorNumElements()/Ratio); - V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, V, + V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, V, DAG.getIntPtrConstant(0)); } V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V); @@ -6730,7 +6762,7 @@ SDValue X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); @@ -6789,7 +6821,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned NumElems = VT.getVectorNumElements(); bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; @@ -6871,6 +6903,11 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { TargetMask, DAG); } + if (isPALIGNRMask(M, VT, Subtarget)) + return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2, + getShufflePALIGNRImmediate(SVOp), + DAG); + // Check if this can be converted into a logical shift. bool isLeft = false; unsigned ShAmt = 0; @@ -6988,11 +7025,6 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { // inlined here right now to enable us to directly emit target specific // nodes, and remove one by one until they don't return Op anymore. - if (isPALIGNRMask(M, VT, Subtarget)) - return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2, - getShufflePALIGNRImmediate(SVOp), - DAG); - if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && SVOp->getSplatIndex() == 0 && V2IsUndef) { if (VT == MVT::v2f64 || VT == MVT::v2i64) @@ -7100,7 +7132,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (!Op.getOperand(0).getValueType().getSimpleVT().is128BitVector()) return SDValue(); @@ -7172,7 +7204,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, // If this is a 256-bit vector result, first extract the 128-bit vector and // then extract the element from the 128-bit vector. if (VecVT.is256BitVector()) { - DebugLoc dl = Op.getNode()->getDebugLoc(); + SDLoc dl(Op.getNode()); unsigned NumElems = VecVT.getVectorNumElements(); SDValue Idx = Op.getOperand(1); unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); @@ -7195,7 +7227,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, } MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // TODO: handle v16i8. 
if (VT.getSizeInBits() == 16) { SDValue Vec = Op.getOperand(0); @@ -7254,7 +7286,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT.getVectorElementType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue N0 = Op.getOperand(0); SDValue N1 = Op.getOperand(1); @@ -7309,7 +7341,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT.getVectorElementType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue N0 = Op.getOperand(0); SDValue N1 = Op.getOperand(1); SDValue N2 = Op.getOperand(2); @@ -7354,7 +7386,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { LLVMContext *Context = DAG.getContext(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT OpVT = Op.getValueType().getSimpleVT(); // If this is a 256-bit vector result, first insert into a 128-bit @@ -7387,7 +7419,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { if (Subtarget->hasFp256()) { - DebugLoc dl = Op.getNode()->getDebugLoc(); + SDLoc dl(Op.getNode()); SDValue Vec = Op.getNode()->getOperand(0); SDValue Idx = Op.getNode()->getOperand(1); @@ -7407,7 +7439,7 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { if (Subtarget->hasFp256()) { - DebugLoc dl = Op.getNode()->getDebugLoc(); + SDLoc dl(Op.getNode()); SDValue Vec = Op.getNode()->getOperand(0); SDValue SubVec = Op.getNode()->getOperand(1); SDValue Idx = Op.getNode()->getOperand(2); @@ -7449,13 +7481,13 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), CP->getAlignment(), CP->getOffset(), OpFlag); - DebugLoc DL = CP->getDebugLoc(); + SDLoc DL(CP); Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. if (OpFlag) { Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Result); } @@ -7481,14 +7513,14 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), OpFlag); - DebugLoc DL = JT->getDebugLoc(); + SDLoc DL(JT); Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. if (OpFlag) Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Result); return Result; @@ -7519,7 +7551,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. 
@@ -7527,7 +7559,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { !Subtarget->is64Bit()) { Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Result); } @@ -7548,7 +7580,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { CodeModel::Model M = getTargetMachine().getCodeModel(); const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset, OpFlags); @@ -7569,7 +7601,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { } SDValue -X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, +X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl, int64_t Offset, SelectionDAG &DAG) const { // Create the TargetGlobalAddress node, folding in the constant // offset if it is legal. @@ -7618,7 +7650,7 @@ SDValue X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); - return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); + return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG); } static SDValue @@ -7627,7 +7659,7 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, unsigned char OperandFlags, bool LocalDynamic = false) { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0), GA->getOffset(), @@ -7656,10 +7688,10 @@ static SDValue LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT) { SDValue InFlag; - DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better + SDLoc dl(GA); // ? function entry point might be better SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), PtrVT), InFlag); + SDLoc(), PtrVT), InFlag); InFlag = Chain.getValue(1); return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); @@ -7677,7 +7709,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT, bool is64Bit) { - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); // Get the start address of the TLS block for this module. X86MachineFunctionInfo* MFI = DAG.getMachineFunction() @@ -7691,7 +7723,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, } else { SDValue InFlag; SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, - DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag); + DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag); InFlag = Chain.getValue(1); Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSLDM, /*LocalDynamic=*/true); @@ -7716,7 +7748,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT, TLSModel::Model model, bool is64Bit, bool isPIC) { - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 
Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), @@ -7755,7 +7787,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, if (model == TLSModel::InitialExec) { if (isPIC && !is64Bit) { Offset = DAG.getNode(ISD::ADD, dl, PtrVT, - DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), + DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Offset); } @@ -7809,7 +7841,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { OpFlag = X86II::MO_TLVP_PIC_BASE; else OpFlag = X86II::MO_TLVP; - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, GA->getValueType(0), GA->getOffset(), OpFlag); @@ -7819,7 +7851,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { if (PIC32) Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Offset); // Lowering the machine isd will make sure everything is in the right @@ -7856,7 +7888,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { // thread-localness. if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) GV = GA->resolveAliasedGlobal(false); - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); SDValue Chain = DAG.getEntryNode(); // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or @@ -7914,7 +7946,7 @@ SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ assert(Op.getNumOperands() == 3 && "Not a double-shift!"); EVT VT = Op.getValueType(); unsigned VTBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; SDValue ShOpLo = Op.getOperand(0); SDValue ShOpHi = Op.getOperand(1); @@ -7973,7 +8005,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, return Op; } - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Size = SrcVT.getSizeInBits()/8; MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); @@ -7989,7 +8021,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot, SelectionDAG &DAG) const { // Build the FILD - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDVTList Tys; bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); if (useSSE) @@ -8064,7 +8096,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, #endif */ - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); LLVMContext *Context = DAG.getContext(); // Build some magic constants. @@ -8118,7 +8150,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // FP constant to bias correct the final result. 
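LowerShiftParts above is only touched for the SDLoc change, but what it expands is worth a model: a 2N-bit shift expressed as lo/hi parts plus an amount, which x86 typically realizes as an SHLD/SHRD pair and a test on bit 5 of the amount. A standalone sketch of the left-shift case under that assumption:

#include <cstdint>
#include <cstdio>

// Model of SHL_PARTS for N = 32 (assumed SHLD-style expansion).
static void shl64_parts(uint32_t Lo, uint32_t Hi, unsigned Amt,
                        uint32_t &OutLo, uint32_t &OutHi) {
  Amt &= 63;
  if (Amt == 0) { OutLo = Lo; OutHi = Hi; return; }
  if (Amt < 32) {                       // shld: hi takes spill bits from lo
    OutHi = (Hi << Amt) | (Lo >> (32 - Amt));
    OutLo = Lo << Amt;
  } else {                              // amount bit 5 set: lo moves into hi
    OutHi = Lo << (Amt - 32);
    OutLo = 0;
  }
}

int main() {
  uint32_t Lo, Hi;
  shl64_parts(0x80000001u, 0, 1, Lo, Hi);
  std::printf("%08x:%08x\n", Hi, Lo);   // 00000001:00000002
}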
SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), MVT::f64); @@ -8166,7 +8198,7 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const { SDValue N0 = Op.getOperand(0); EVT SVT = N0.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 || SVT == MVT::v8i8 || SVT == MVT::v8i16) && @@ -8181,7 +8213,7 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { SDValue N0 = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Op.getValueType().isVector()) return lowerUINT_TO_FP_vec(Op, DAG); @@ -8240,7 +8272,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, APInt FF(32, 0x5F800000ULL); // Check whether the sign bit is set. - SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), + SDValue SignSet = DAG.getSetCC(dl, + getSetCCResultType(*DAG.getContext(), MVT::i64), Op.getOperand(0), DAG.getConstant(0, MVT::i64), ISD::SETLT); @@ -8269,7 +8302,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, std::pair<SDValue,SDValue> X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT DstTy = Op.getValueType(); @@ -8366,7 +8399,7 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG, MVT VT = Op->getValueType(0).getSimpleVT(); SDValue In = Op->getOperand(0); MVT InVT = In.getValueType().getSimpleVT(); - DebugLoc dl = Op->getDebugLoc(); + SDLoc dl(Op); // Optimize vectors in AVX mode: // @@ -8415,7 +8448,7 @@ SDValue X86TargetLowering::LowerANY_EXTEND(SDValue Op, } SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); MVT VT = Op.getValueType().getSimpleVT(); SDValue In = Op.getOperand(0); MVT SVT = In.getValueType().getSimpleVT(); @@ -8447,7 +8480,7 @@ SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op, } SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); MVT VT = Op.getValueType().getSimpleVT(); SDValue In = Op.getOperand(0); MVT SVT = In.getValueType().getSimpleVT(); @@ -8568,8 +8601,8 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, MVT VT = Op.getValueType().getSimpleVT(); if (VT.isVector()) { if (VT == MVT::v8i16) - return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), VT, - DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(), + return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT, + DAG.getNode(ISD::FP_TO_SINT, SDLoc(Op), MVT::v8i32, Op.getOperand(0))); return SDValue(); } @@ -8582,7 +8615,7 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, if (StackSlot.getNode()) // Load the result. - return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), + return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MachinePointerInfo(), false, false, false, 0); @@ -8599,7 +8632,7 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, if (StackSlot.getNode()) // Load the result. 
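Two magic constants in these UINT_TO_FP paths deserve decoding. 0x4330000000000000 is the double 2^52: OR a 32-bit integer into the low mantissa bits and the bit pattern reads as 2^52 + x, so subtracting 2^52 recovers x as a double. 0x5F800000 is the float 2^64: converting a u64 through the signed path is off by exactly 2^64 when the top bit is set, which is what the SETLT sign check above selects on. Both facts checked standalone:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // 0x4330000000000000 == 2^52; splice a u32 into the mantissa.
  uint32_t X = 123456789u;
  uint64_t Bits = 0x4330000000000000ULL | X;
  double D;
  std::memcpy(&D, &Bits, sizeof D);
  std::printf("%.0f\n", D - 4503599627370496.0);   // 123456789

  // 0x5F800000 == 2^64 as a float; adding it back corrects the signed
  // conversion of a u64 whose top bit was set.
  uint64_t U = 0x8000000000000001ULL;
  double ViaSigned = (double)(int64_t)U;           // off by -2^64
  std::printf("%d\n", ViaSigned + 18446744073709551616.0 == (double)U); // 1
}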
- return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), + return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MachinePointerInfo(), false, false, false, 0); @@ -8608,7 +8641,7 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, } static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); MVT VT = Op.getValueType().getSimpleVT(); SDValue In = Op.getOperand(0); MVT SVT = In.getValueType().getSimpleVT(); @@ -8622,7 +8655,7 @@ static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { LLVMContext *Context = DAG.getContext(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT; unsigned NumElts = VT == MVT::f64 ? 2 : 4; @@ -8656,7 +8689,7 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { LLVMContext *Context = DAG.getContext(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT; unsigned NumElts = VT == MVT::f64 ? 2 : 4; @@ -8693,7 +8726,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { LLVMContext *Context = DAG.getContext(); SDValue Op0 = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT SrcVT = Op1.getValueType().getSimpleVT(); @@ -8770,7 +8803,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { SDValue N0 = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). @@ -8792,7 +8825,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, return SDValue(); SDNode *N = Op.getNode(); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SmallVector<SDValue, 8> Opnds; DenseMap<SDValue, unsigned> VecInMap; @@ -8876,7 +8909,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, /// equivalent. SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // CF and OF aren't always set the way we want. Determine which // of these we need. @@ -9091,7 +9124,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, if (C->getAPIntValue() == 0) return EmitTest(Op0, X86CC, DAG); - DebugLoc dl = Op0.getDebugLoc(); + SDLoc dl(Op0); if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { // Use SUB instead of CMP to enable CSE between SUB and CMP. @@ -9118,7 +9151,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence // build an SDNode sequence that transfers the result from FPSW into EFLAGS: // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) - DebugLoc dl = Cmp.getDebugLoc(); + SDLoc dl(Cmp); SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, @@ -9135,7 +9168,7 @@ static bool isAllOnes(SDValue V) { /// LowerToBT - Result of 'and' is compared against zero. 
Turn it into a BT node /// if it's possible. SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { SDValue Op0 = And.getOperand(0); SDValue Op1 = And.getOperand(1); if (Op0.getOpcode() == ISD::TRUNCATE) @@ -9180,14 +9213,6 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, } if (LHS.getNode()) { - // If the LHS is of the form (x ^ -1) then replace the LHS with x and flip - // the condition code later. - bool Invert = false; - if (LHS.getOpcode() == ISD::XOR && isAllOnes(LHS.getOperand(1))) { - Invert = true; - LHS = LHS.getOperand(0); - } - // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT // instruction. Since the shift amount is in-range-or-undefined, we know // that doing a bittest on the i32 value is ok. We extend to i32 because @@ -9204,9 +9229,6 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; - // Flip the condition if the LHS was a not instruction - if (Invert) - Cond = X86::GetOppositeBranchCondition(Cond); return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, DAG.getConstant(Cond, MVT::i8), BT); } @@ -9223,7 +9245,7 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { "Unsupported value type for operation"); unsigned NumElems = VT.getVectorNumElements(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue CC = Op.getOperand(2); // Extract the LHS vectors @@ -9253,7 +9275,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, MVT VT = Op.getValueType().getSimpleVT(); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); bool isFP = Op.getOperand(1).getValueType().getSimpleVT().isFloatingPoint(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (isFP) { #ifndef NDEBUG @@ -9347,29 +9369,31 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, if (Swap) std::swap(Op0, Op1); - // Since SSE has no unsigned integer comparisons, we need to flip the sign - // bits of the inputs before performing those operations. - if (FlipSigns) { - EVT EltVT = VT.getVectorElementType(); - SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), - EltVT); - std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); - SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], - SignBits.size()); - Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); - Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); - } - // Check that the operation in question is available (most are plain SSE2, // but PCMPGTQ and PCMPEQQ have different requirements). if (VT == MVT::v2i64) { if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) { assert(Subtarget->hasSSE2() && "Don't know how to lower!"); - // First cast everything to the right type, + // First cast everything to the right type. Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); + // Since SSE has no unsigned integer comparisons, we need to flip the sign + // bits of the inputs before performing those operations. The lower + // compare is always unsigned. 
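Two things in the LowerToBT hunk. First, the transform itself: (and X, 1 << N) compared against zero becomes a BT node, and since BT copies bit N of X into CF, SETEQ maps to COND_AE (CF == 0) and SETNE to COND_B (CF == 1). Second, the deleted lines had additionally folded (x ^ -1) by flipping the branch condition; this change takes that fold back out. A scalar model of what BT computes:

#include <cstdint>
#include <cstdio>

// Scalar model of X86ISD::BT on a 32-bit register operand: CF gets bit
// BitNo of Src, with the index taken modulo the operand width.
static bool btCarry(uint32_t Src, uint32_t BitNo) {
  return (Src >> (BitNo & 31)) & 1;
}

int main() {
  uint32_t X = 0b1010;
  std::printf("CF(bit1)=%d CF(bit2)=%d\n", btCarry(X, 1), btCarry(X, 2)); // 1 0
}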
+ SDValue SB; + if (FlipSigns) { + SB = DAG.getConstant(0x80000000U, MVT::v4i32); + } else { + SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32); + SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32); + SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + Sign, Zero, Sign, Zero); + } + Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB); + Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB); + // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)) SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1); SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1); @@ -9395,7 +9419,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, // pcmpeqd + pshufd + pand. assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!"); - // First cast everything to the right type, + // First cast everything to the right type. Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); @@ -9414,6 +9438,15 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, } } + // Since SSE has no unsigned integer comparisons, we need to flip the sign + // bits of the inputs before performing those operations. + if (FlipSigns) { + EVT EltVT = VT.getVectorElementType(); + SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT); + Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB); + Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB); + } + SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); // If the logical-not of the result is required, perform that now. @@ -9432,7 +9465,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { assert(VT == MVT::i8 && "SetCC type must be 8-bit integer"); SDValue Op0 = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); // Optimize to BT if possible. @@ -9526,7 +9559,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Cond = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); SDValue Op2 = Op.getOperand(2); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue CC; if (Cond.getOpcode() == ISD::SETCC) { @@ -9716,7 +9749,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND(SDValue Op, MVT VT = Op->getValueType(0).getSimpleVT(); SDValue In = Op->getOperand(0); MVT InVT = In.getValueType().getSimpleVT(); - DebugLoc dl = Op->getDebugLoc(); + SDLoc dl(Op); if ((VT != MVT::v4i64 || InVT != MVT::v4i32) && (VT != MVT::v8i32 || InVT != MVT::v8i16)) @@ -9789,7 +9822,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Cond = Op.getOperand(1); SDValue Dest = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue CC; bool Inverted = false; @@ -10059,7 +10092,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, "This should be used only on Windows targets or when segmented stacks " "are being used"); assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Get the inputs. 
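This is the interesting hunk in LowerVSETCC: the v2i64 PCMPGT emulation for pre-SSE4.2 targets now folds the sign-bit flip into its 32-bit pieces. PCMPGTD is a signed compare, so the low halves are always XORed with 0x80000000 to get the unsigned low-half compare the emulation needs, while the high halves are flipped only when the original 64-bit compare was itself unsigned; that is why the FlipSigns == false constant is <Sign, 0, Sign, 0> (lanes 0 and 2 are the low halves on little-endian x86). A scalar model of the signed case:

#include <cstdint>
#include <cstdio>

// Per-element model of the SSE2 PCMPGTQ emulation above (signed compare):
// (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)), low halves compared unsigned.
static bool sgt64_via_32(int64_t A, int64_t B) {
  int32_t AHi = (int32_t)(A >> 32), BHi = (int32_t)(B >> 32);
  // Flipping the sign bit makes the signed lane compare act unsigned.
  int32_t ALo = (int32_t)((uint32_t)A ^ 0x80000000u);
  int32_t BLo = (int32_t)((uint32_t)B ^ 0x80000000u);
  bool GT = AHi > BHi;                    // pcmpgtd on the high lanes
  bool EQ = AHi == BHi;                   // pcmpeqd on the high lanes
  return GT || (EQ && ALo > BLo);         // the GT | (EQ & GT_lo) combine
}

int main() {
  std::printf("%d %d %d\n",
              sgt64_via_32(-1, -2),       // 1
              sgt64_via_32(1LL << 40, 5), // 1
              sgt64_via_32(-5, 3));       // 0
}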
SDValue Chain = Op.getOperand(0); @@ -10104,6 +10137,8 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); Flag = Chain.getValue(1); + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), SPTy).getValue(1); @@ -10117,7 +10152,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { // vastart just stores the address of the VarArgsFrameIndex slot into the @@ -10184,7 +10219,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { SDValue SrcPtr = Op.getOperand(1); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); unsigned Align = Op.getConstantOperandVal(3); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT ArgVT = Op.getNode()->getValueType(0); Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); @@ -10250,7 +10285,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, SDValue SrcPtr = Op.getOperand(2); const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, @@ -10260,7 +10295,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, // getTargetVShiftNode - Handle vector element shifts where the shift amount // may or may not be a constant. Takes immediate version of shift as input. -static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, EVT VT, SDValue SrcOp, SDValue ShAmt, SelectionDAG &DAG) { assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); @@ -10304,7 +10339,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, } static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. @@ -10949,7 +10984,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { } static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. 
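The constants in LowerVACOPY above (a 24-byte copy at 8-byte alignment) are the SysV x86-64 va_list layout that LowerVASTART and LowerVAARG also manipulate. As a compile-time check, valid on an x86-64 host:

#include <cstddef>

// The 24-byte memcpy in LowerVACOPY matches this standard layout.
struct VaListX8664 {
  unsigned gp_offset;       // offset 0: next general-purpose register slot
  unsigned fp_offset;       // offset 4: next floating-point register slot
  void *overflow_arg_area;  // offset 8: arguments passed on the stack
  void *reg_save_area;      // offset 16: spilled argument registers
};
static_assert(sizeof(VaListX8664) == 24, "size copied by LowerVACOPY");
static_assert(alignof(VaListX8664) == 8, "alignment passed to getMemcpy");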
@@ -11004,13 +11039,14 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, MFI->setReturnAddressIsTaken(true); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT PtrVT = getPointerTy(); if (Depth > 0) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); - SDValue Offset = - DAG.getConstant(RegInfo->getSlotSize(), PtrVT); + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); + SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT); return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), @@ -11028,8 +11064,10 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful + SDLoc dl(Op); // FIXME probably not meaningful unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); assert(((FrameReg == X86::RBP && VT == MVT::i64) || (FrameReg == X86::EBP && VT == MVT::i32)) && @@ -11044,6 +11082,8 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const { + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); } @@ -11051,9 +11091,11 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Offset = Op.getOperand(1); SDValue Handler = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl (Op); EVT PtrVT = getPointerTy(); + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) || (FrameReg == X86::EBP && PtrVT == MVT::i32)) && @@ -11074,7 +11116,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1)); @@ -11082,7 +11124,7 @@ SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1)); } @@ -11097,7 +11139,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, SDValue Trmp = Op.getOperand(1); // trampoline SDValue FPtr = Op.getOperand(2); // nested function SDValue Nest = Op.getOperand(3); // 'nest' parameter value - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl (Op); const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); @@ -11267,7 +11309,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, const TargetFrameLowering &TFI = *TM.getFrameLowering(); unsigned StackAlignment = 
TFI.getStackAlignment(); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // Save FP Control Word to stack slot int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); @@ -11314,7 +11356,7 @@ static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); EVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); Op = Op.getOperand(0); if (VT == MVT::i8) { @@ -11348,7 +11390,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); EVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); Op = Op.getOperand(0); if (VT == MVT::i8) { @@ -11372,7 +11414,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); unsigned NumBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); Op = Op.getOperand(0); // Issue a bsf (scan bits forward) which also sets EFLAGS. @@ -11398,7 +11440,7 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { "Unsupported value type for operation"); unsigned NumElems = VT.getVectorNumElements(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Extract the LHS vectors SDValue LHS = Op.getOperand(0); @@ -11434,7 +11476,7 @@ static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT VT = Op.getValueType(); // Decompose 256-bit ops into smaller 128-bit ops. @@ -11510,7 +11552,7 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { EVT EltTy = VT.getVectorElementType(); unsigned NumElts = VT.getVectorNumElements(); SDValue N0 = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Lower sdiv X, pow2-const. 
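The CTLZ/CTTZ lowerings above change only their SDLoc, but for orientation: CTTZ maps to BSF (the "Issue a bsf" comment), and CTLZ is usually built from BSR plus an XOR, since BSR reports the index of the highest set bit rather than the zero count. A scalar model, with the zero-input fallback standing in for the CMOV the non-ZERO_UNDEF variants need:

#include <cstdint>
#include <cstdio>

// bsf/bsr stand-ins via builtins; both are undefined at 0, like the
// instructions, so the zero case is patched up separately.
static unsigned cttz32(uint32_t X) { return X ? __builtin_ctz(X) : 32; }
static unsigned ctlz32(uint32_t X) {
  if (X == 0) return 32;
  unsigned Bsr = 31 - __builtin_clz(X); // index of the highest set bit
  return Bsr ^ 31;                      // ctlz = 31 - bsr for i32
}

int main() {
  std::printf("%u %u\n", cttz32(0x50), ctlz32(0x50)); // 4 25
}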
BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(Op.getOperand(1)); @@ -11551,7 +11593,7 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, const X86Subtarget *Subtarget) { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue R = Op.getOperand(0); SDValue Amt = Op.getOperand(1); @@ -11717,7 +11759,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, const X86Subtarget* Subtarget) { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue R = Op.getOperand(0); SDValue Amt = Op.getOperand(1); @@ -11853,7 +11895,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue R = Op.getOperand(0); SDValue Amt = Op.getOperand(1); SDValue V; @@ -11989,7 +12031,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { SDValue RHS = N->getOperand(1); unsigned BaseOp = 0; unsigned Cond = 0; - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); switch (Op.getOpcode()) { default: llvm_unreachable("Unknown ovf instruction!"); case ISD::SADDO: @@ -12056,7 +12098,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); EVT VT = Op.getValueType(); @@ -12123,7 +12165,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); SynchronizationScope FenceScope = static_cast<SynchronizationScope>( @@ -12160,7 +12202,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { EVT T = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned Reg = 0; unsigned size = 0; switch(T.getSimpleVT().SimpleTy) { @@ -12194,7 +12236,7 @@ static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, assert(Subtarget->is64Bit() && "Result not type legalized?"); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue TheChain = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, @@ -12230,7 +12272,7 @@ SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { SDNode *Node = Op.getNode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); EVT T = Node->getValueType(0); SDValue negOp = DAG.getNode(ISD::SUB, dl, T, DAG.getConstant(0, T), Node->getOperand(2)); @@ -12246,7 +12288,7 @@ static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { SDNode *Node = Op.getNode(); - DebugLoc dl = 
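LowerXALUO above pairs each overflow-checked node with the x86 flag it maps to, e.g. SADDO is an ADD that checks OF and UADDO one that checks CF, materialized with a SETCC on that condition. The semantics being implemented, in portable form via GCC/Clang builtins:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t R;
  bool OF = __builtin_add_overflow(INT32_MAX, 1, &R);      // signed: OF
  uint32_t UR;
  bool CF = __builtin_add_overflow(0xFFFFFFFFu, 1u, &UR);  // unsigned: CF
  std::printf("OF=%d R=%d CF=%d UR=%u\n", OF, R, CF, UR);
  // OF=1 R=-2147483648 CF=1 UR=0
}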
Node->getDebugLoc(); + SDLoc dl(Node); EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); // Convert seq_cst store -> xchg @@ -12289,9 +12331,9 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { } if (!ExtraOp) - return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1)); - return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1), Op.getOperand(2)); } @@ -12301,7 +12343,7 @@ SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { // For MacOSX, we want to call an alternative entry point: __sincos_stret, // which returns the values as { float, float } (in XMM0) or // { double, double } (which is returned in XMM0, XMM1). - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Arg = Op.getOperand(0); EVT ArgVT = Arg.getValueType(); Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); @@ -12434,7 +12476,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { static void ReplaceATOMIC_LOAD(SDNode *Node, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) { - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); // Convert wide load -> cmpxchg8b/cmpxchg16b @@ -12455,7 +12497,7 @@ static void ReplaceATOMIC_LOAD(SDNode *Node, static void ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, SelectionDAG &DAG, unsigned NewOp) { - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); assert (Node->getValueType(0) == MVT::i64 && "Only know how to expand i64 atomics"); @@ -12480,7 +12522,7 @@ ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, void X86TargetLowering::ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, SelectionDAG &DAG) const { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); switch (N->getOpcode()) { default: @@ -14589,6 +14631,9 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, // Setup MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup)) .addMBB(restoreMBB); + + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); MIB.addRegMask(RegInfo->getNoPreservedMask()); thisMBB->addSuccessor(mainMBB); thisMBB->addSuccessor(restoreMBB); @@ -14634,6 +14679,8 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; unsigned Tmp = MRI.createVirtualRegister(RC); // Since FP is only updated here but NOT referenced, it's treated as GPR. + const X86RegisterInfo *RegInfo = + static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); unsigned FP = (PVT == MVT::i64) ? 
X86::RBP : X86::EBP; unsigned SP = RegInfo->getStackRegister(); @@ -15034,7 +15081,7 @@ static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget* Subtarget) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); @@ -15130,7 +15177,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); // Don't create instructions with illegal types after legalize types has run. @@ -15249,7 +15296,7 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, // All checks match so transform back to vector_shuffle so that DAG combiner // can finish the job - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // Create shuffle node taking into account the case that its a unary shuffle SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1); @@ -15276,7 +15323,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST && InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx && InputVector.hasOneUse() && N->getValueType(0) == MVT::i32) - return DAG.getNode(X86ISD::MMX_MOVD2W, InputVector.getDebugLoc(), + return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector), N->getValueType(0), InputVector.getNode()->getOperand(0)); @@ -15321,7 +15368,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); // Ok, we've now decided to do the transformation. - DebugLoc dl = InputVector.getDebugLoc(); + SDLoc dl(InputVector); // Store the value to a temporary stack slot. SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); @@ -15432,7 +15479,7 @@ static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue Cond = N->getOperand(0); // Get the LHS/RHS of the select. SDValue LHS = N->getOperand(1); @@ -15700,7 +15747,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, case ISD::SETLT: case ISD::SETGT: { ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE; - Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(), + Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(), Cond.getOperand(0), Cond.getOperand(1), NewCC); return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS); } @@ -15976,7 +16023,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // If the flag operand isn't dead, don't touch this CMOV. 
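PerformEXTRACT_VECTOR_ELTCombine above decides, once several elements are extracted, to store the whole vector to a temporary stack slot and serve each extract with a cheap scalar load. A rough illustration of the shape of that transformation, with plain arrays standing in for a v4f32 register and the stack slot (not LLVM code):

    #include <cassert>
    #include <cstring>

    int main() {
      float Vec[4] = {1.0f, 2.0f, 3.0f, 4.0f};  // stands in for a v4f32 value
      float Slot[4];                            // the temporary stack slot
      std::memcpy(Slot, Vec, sizeof Vec);       // one store of the full vector
      float E2 = Slot[2];                       // per-element scalar loads
      float E3 = Slot[3];
      assert(E2 == 3.0f && E3 == 4.0f);
      return 0;
    }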
   if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
@@ -16179,7 +16226,7 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
   }
   if (MulAmt2 && (isPowerOf2_64(MulAmt2) ||
                   MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
-    DebugLoc DL = N->getDebugLoc();
+    SDLoc DL(N);
     if (isPowerOf2_64(MulAmt2) &&
         !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
@@ -16229,7 +16276,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
       APInt ShAmt = N1C->getAPIntValue();
       Mask = Mask.shl(ShAmt);
       if (Mask != 0)
-        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
+        return DAG.getNode(ISD::AND, SDLoc(N), VT,
                            N00, DAG.getConstant(Mask, VT));
     }
   }
@@ -16245,15 +16292,14 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
     // hardware support for this operation. This is better expressed as an ADD
     // of two values.
     if (N1C && (1 == N1C->getZExtValue())) {
-      return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0);
+      return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
     }
   }
   return SDValue();
 }
-/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
-/// when possible.
+/// PerformShiftCombine - Combine shifts.
 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
@@ -16280,7 +16326,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
   SDValue N1 = N->getOperand(1);
   SDValue CMP0 = N0->getOperand(1);
   SDValue CMP1 = N1->getOperand(1);
-  DebugLoc DL = N->getDebugLoc();
+  SDLoc DL(N);
   // The SETCCs should both refer to the same CMP.
   if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
@@ -16399,7 +16445,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
   SDValue N0 = Narrow->getOperand(0);
   SDValue N1 = Narrow->getOperand(1);
-  DebugLoc DL = Narrow->getDebugLoc();
+  SDLoc DL(Narrow);
   // The Left side has to be a trunc.
   if (N0.getOpcode() != ISD::TRUNCATE)
@@ -16471,7 +16517,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
   if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) {
     SDValue N0 = N->getOperand(0);
     SDValue N1 = N->getOperand(1);
-    DebugLoc DL = N->getDebugLoc();
+    SDLoc DL(N);
     // Check LHS for neg
     if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
@@ -16505,7 +16551,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
-  DebugLoc DL = N->getDebugLoc();
+  SDLoc DL(N);
   // Check LHS for vnot
   if (N0.getOpcode() == ISD::XOR &&
@@ -16589,7 +16635,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
   if ((SraAmt + 1) != EltBits)
     return SDValue();
-  DebugLoc DL = N->getDebugLoc();
+  SDLoc DL(N);
   // Now we know we at least have a pblendvb with the mask val. See if
   // we can form a psignb/w/d.
@@ -16638,7 +16684,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
   if (ShAmt1.getOpcode() == ISD::TRUNCATE)
     ShAmt1 = ShAmt1.getOperand(0);
-  DebugLoc DL = N->getDebugLoc();
+  SDLoc DL(N);
   unsigned Opc = X86ISD::SHLD;
   SDValue Op0 = N0.getOperand(0);
   SDValue Op1 = N1.getOperand(0);
@@ -16685,7 +16731,7 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
-  DebugLoc DL = N->getDebugLoc();
+  SDLoc DL(N);
   // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
   // and change it to SUB and CMOV.
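The PerformSHLCombine hunk above keeps rewriting a shift-left-by-one as an add of the value to itself when the target lacks hardware support for the shift. The identity it relies on holds for every two's-complement value; a quick self-check:

    #include <cassert>
    #include <cstdint>

    int main() {
      // For any x, (x << 1) == (x + x) under wrap-around arithmetic.
      for (uint32_t X : {0u, 1u, 7u, 0x80000000u})
        assert((X << 1) == X + X);
      return 0;
    }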
@@ -16735,7 +16781,7 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, // Create BLSMSK instructions by finding X ^ (X-1) SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && isAllOnes(N0.getOperand(1))) @@ -16755,7 +16801,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, LoadSDNode *Ld = cast<LoadSDNode>(N); EVT RegVT = Ld->getValueType(0); EVT MemVT = Ld->getMemoryVT(); - DebugLoc dl = Ld->getDebugLoc(); + SDLoc dl(Ld); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); unsigned RegSz = RegVT.getSizeInBits(); @@ -16950,7 +16996,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, StoreSDNode *St = cast<StoreSDNode>(N); EVT VT = St->getValue().getValueType(); EVT StVT = St->getMemoryVT(); - DebugLoc dl = St->getDebugLoc(); + SDLoc dl(St); SDValue StoredVal = St->getOperand(1); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -17113,8 +17159,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) return SDValue(); - DebugLoc LdDL = Ld->getDebugLoc(); - DebugLoc StDL = N->getDebugLoc(); + SDLoc LdDL(Ld); + SDLoc StDL(N); // If we are a 64-bit capable x86, lower to a single movq load/store pair. // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store // pair instead. @@ -17313,7 +17359,7 @@ static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && isHorizontalBinOp(LHS, RHS, true)) - return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); + return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS); return SDValue(); } @@ -17328,7 +17374,7 @@ static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && isHorizontalBinOp(LHS, RHS, false)) - return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); + return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS); return SDValue(); } @@ -17365,7 +17411,7 @@ static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; } - return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0), N->getOperand(0), N->getOperand(1)); } @@ -17409,12 +17455,12 @@ static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { if (Op.getOpcode() == X86ISD::VZEXT_LOAD && VT.getVectorElementType().getSizeInBits() == OpVT.getVectorElementType().getSizeInBits()) { - return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); + return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); } return SDValue(); } -static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, +static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, const X86Subtarget *Subtarget) { EVT VT = N->getValueType(0); if (!VT.isVector()) @@ -17423,7 +17469,7 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); EVT ExtraVT = cast<VTSDNode>(N1)->getVT(); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the // both SSE and AVX2 since 
there is no sign-extended shift right @@ -17434,14 +17480,14 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, N0.getOpcode() == ISD::SIGN_EXTEND)) { SDValue N00 = N0.getOperand(0); - // EXTLOAD has a better solution on AVX2, + // EXTLOAD has a better solution on AVX2, // it may be replaced with X86ISD::VSEXT node. if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256()) if (!ISD::isNormalLoad(N00.getNode())) return SDValue(); if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) { - SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, + SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1); return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp); } @@ -17470,7 +17516,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, const X86Subtarget* Subtarget) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); // Let legalize expand this if it isn't a legal type yet. @@ -17515,7 +17561,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, // (and (i32 x86isd::setcc_carry), 1) // This eliminates the zext. This transformation is necessary because // ISD::SETCC is always legalized to i8. - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); @@ -17553,17 +17599,17 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { - SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), + SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), LHS.getValueType(), RHS, LHS.getOperand(1)); - return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), + return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV, DAG.getConstant(0, addV.getValueType()), CC); } if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { - SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), + SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), RHS.getValueType(), LHS, RHS.getOperand(1)); - return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), + return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV, DAG.getConstant(0, addV.getValueType()), CC); } return SDValue(); @@ -17572,7 +17618,7 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { // Helper function of PerformSETCCCombine. It is to materialize "setb reg" // as "sbb reg,reg", since it can be extended without zext and produces // an all-ones bit which is more useful than 0/1 in some cases. 
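Before the function itself, a portable model of the setb-to-sbb trick the comment above describes: subtracting a register from itself with borrow yields 0 minus the borrow, i.e. 0 or all-ones, with no separate zero- or sign-extension step. The name sbb_same_reg is illustrative only, not an x86 or LLVM API:

    #include <cassert>
    #include <cstdint>

    // Models "sbb r, r": result = r - r - CF = 0 - CF.
    static uint32_t sbb_same_reg(uint32_t R, bool Carry) {
      return R - R - (Carry ? 1u : 0u);   // 0 or 0xFFFFFFFF
    }

    int main() {
      assert(sbb_same_reg(123u, false) == 0u);
      assert(sbb_same_reg(123u, true)  == 0xFFFFFFFFu);
      return 0;
    }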
-static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { +static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { return DAG.getNode(ISD::AND, DL, MVT::i8, DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS), @@ -17583,7 +17629,7 @@ static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); SDValue EFLAGS = N->getOperand(1); @@ -17597,7 +17643,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && EFLAGS.getValueType().isInteger() && !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { - SDValue NewSub = DAG.getNode(X86ISD::SUB, EFLAGS.getDebugLoc(), + SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(), EFLAGS.getOperand(1), EFLAGS.getOperand(0)); SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); @@ -17627,7 +17673,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue Chain = N->getOperand(0); SDValue Dest = N->getOperand(1); SDValue EFLAGS = N->getOperand(3); @@ -17652,7 +17698,7 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); @@ -17687,7 +17733,7 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, // We don't have a good way to replace an EFLAGS use, so only do this when // dead right now. SDValue(N, 1).use_empty()) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, @@ -17706,7 +17752,7 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, // (sub (sete X, 0), Y) -> sbb 0, Y // (sub (setne X, 0), Y) -> adc -1, Y static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // Look through ZExts. SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 
1 : 0); @@ -17752,7 +17798,7 @@ static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && isHorizontalBinOp(Op0, Op1, true)) - return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); + return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1); return OptimizeConditionalInDecrement(N, DAG); } @@ -17772,10 +17818,10 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, isa<ConstantSDNode>(Op1.getOperand(1))) { APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); EVT VT = Op0.getValueType(); - SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, + SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0), DAG.getConstant(~XorC, VT)); - return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, + return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor, DAG.getConstant(C->getAPIntValue()+1, VT)); } } @@ -17785,7 +17831,7 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && isHorizontalBinOp(Op0, Op1, true)) - return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); + return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1); return OptimizeConditionalInDecrement(N, DAG); } @@ -17802,7 +17848,7 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG, if (In.getOpcode() != X86ISD::VZEXT) return SDValue(); - return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(X86ISD::VZEXT, SDLoc(N), N->getValueType(0), In.getOperand(0)); } @@ -18362,7 +18408,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, getTargetMachine()))) return; - Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), + Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op), GA->getValueType(0), Offset); break; } diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 2727e22..c0e1015 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -511,7 +511,7 @@ namespace llvm { /// It returns EVT::Other if the type should be determined using generic /// target-independent logic. virtual EVT - getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, + getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const; @@ -563,7 +563,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the value type to use for ISD::SETCC. - virtual EVT getSetCCResultType(EVT VT) const; + virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const; /// computeMaskedBitsForTargetNode - Determine which of the bits specified /// in Mask are known to be either zero or one and return them in the @@ -734,7 +734,6 @@ namespace llvm { /// Subtarget - Keep a pointer to the X86Subtarget around so that we can /// make the right decision when generating code for different targets. 
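The PerformSubCombine hunk above turns sub C, (xor X, C2) into add (xor X, ~C2), C+1 using the two's-complement identity -a == ~a + 1. A quick check that C - (X ^ C2) == (X ^ ~C2) + (C + 1) under unsigned wrap-around:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t C = 100, C2 = 0x0F0F0F0Fu;
      for (uint32_t X : {0u, 42u, 0xFFFFFFFFu})
        // -(X ^ C2) == ~(X ^ C2) + 1 == (X ^ ~C2) + 1, so the sums agree.
        assert(C - (X ^ C2) == (X ^ ~C2) + (C + 1u));
      return 0;
    }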
const X86Subtarget *Subtarget; - const X86RegisterInfo *RegInfo; const DataLayout *TD; /// Used to store the TargetOptions so that we don't waste time resetting @@ -760,16 +759,16 @@ namespace llvm { SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv, const SmallVectorImpl<ISD::InputArg> &ArgInfo, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, MachineFrameInfo *MFI, unsigned i) const; SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const; @@ -791,7 +790,7 @@ namespace llvm { bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const; SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall, bool Is64Bit, - int FPDiff, DebugLoc dl) const; + int FPDiff, SDLoc dl) const; unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG) const; @@ -800,7 +799,7 @@ namespace llvm { bool isSigned, bool isReplace) const; - SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, + SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl, SelectionDAG &DAG) const; SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; @@ -808,7 +807,7 @@ namespace llvm { SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, + SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl, int64_t Offset, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; @@ -830,7 +829,7 @@ namespace llvm { SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; SDValue LowerToBT(SDValue And, ISD::CondCode CC, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; @@ -865,7 +864,7 @@ namespace llvm { LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue LowerCall(CallLoweringInfo &CLI, @@ -876,7 +875,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const; diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td index 225e972..fa2b2d8 100644 --- a/lib/Target/X86/X86InstrArithmetic.td +++ b/lib/Target/X86/X86InstrArithmetic.td @@ -294,7 +294,7 @@ def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8 // unsigned division/remainder let 
hasSideEffects = 1 in { // so that we don't speculatively execute let SchedRW = [WriteIDiv] in { -let Defs = [AL,EFLAGS,AX], Uses = [AX] in +let Defs = [AL,AH,EFLAGS], Uses = [AX] in def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH "div{b}\t$src", [], IIC_DIV8_REG>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in @@ -310,7 +310,7 @@ def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), } // SchedRW let mayLoad = 1 in { -let Defs = [AL,EFLAGS,AX], Uses = [AX] in +let Defs = [AL,AH,EFLAGS], Uses = [AX] in def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH "div{b}\t$src", [], IIC_DIV8_MEM>, SchedLoadReg<WriteIDivLd>; @@ -331,7 +331,7 @@ def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // Signed division/remainder. let SchedRW = [WriteIDiv] in { -let Defs = [AL,EFLAGS,AX], Uses = [AX] in +let Defs = [AL,AH,EFLAGS], Uses = [AX] in def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH "idiv{b}\t$src", [], IIC_IDIV8>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in @@ -347,7 +347,7 @@ def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), } // SchedRW let mayLoad = 1 in { -let Defs = [AL,EFLAGS,AX], Uses = [AX] in +let Defs = [AL,AH,EFLAGS], Uses = [AX] in def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH "idiv{b}\t$src", [], IIC_IDIV8>, SchedLoadReg<WriteIDivLd>; @@ -960,7 +960,7 @@ class BinOpMI8_F<string mnemonic, X86TypeInfo typeinfo, [(set EFLAGS, (opnode (load addr:$dst), typeinfo.Imm8Operator:$src))]>; -// BinOpAI - Instructions like "add %eax, %eax, imm". +// BinOpAI - Instructions like "add %eax, %eax, imm", that imp-def EFLAGS. class BinOpAI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo, Register areg, string operands> : ITy<opcode, RawFrm, typeinfo, @@ -968,10 +968,18 @@ class BinOpAI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo, mnemonic, operands, []>, Sched<[WriteALU]> { let ImmT = typeinfo.ImmEncoding; let Uses = [areg]; - let Defs = [areg]; + let Defs = [areg, EFLAGS]; let hasSideEffects = 0; } +// BinOpAI_FF - Instructions like "adc %eax, %eax, imm", that implicitly define +// and use EFLAGS. +class BinOpAI_FF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo, + Register areg, string operands> + : BinOpAI<opcode, mnemonic, typeinfo, areg, operands> { + let Uses = [areg, EFLAGS]; +} + /// ArithBinOp_RF - This is an arithmetic binary operator where the pattern is /// defined with "(set GPR:$dst, EFLAGS, (...". 
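The Defs changes above fix the implicit-def lists for the 8-bit divides: DIV8r/IDIV8r divide AX by the operand and leave the quotient in AL and the remainder in AH, so AH (rather than the whole of AX) is what the instruction defines alongside EFLAGS. The arithmetic being modeled, as a check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint16_t AX = 1000;          // dividend lives in AX
      uint8_t Divisor = 7;
      uint8_t AL = AX / Divisor;   // quotient  -> AL
      uint8_t AH = AX % Divisor;   // remainder -> AH
      assert(AL == 142 && AH == 6);
      return 0;
    }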
/// @@ -1030,16 +1038,16 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, def NAME#16mi : BinOpMI_RMW<mnemonic, Xi16, opnode, MemMRM>; def NAME#32mi : BinOpMI_RMW<mnemonic, Xi32, opnode, MemMRM>; def NAME#64mi32 : BinOpMI_RMW<mnemonic, Xi64, opnode, MemMRM>; + } // Defs = [EFLAGS] - def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL, - "{$src, %al|AL, $src}">; - def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX, - "{$src, %ax|AX, $src}">; - def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX, - "{$src, %eax|EAX, $src}">; - def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX, - "{$src, %rax|RAX, $src}">; - } + def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL, + "{$src, %al|AL, $src}">; + def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX, + "{$src, %ax|AX, $src}">; + def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX, + "{$src, %eax|EAX, $src}">; + def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX, + "{$src, %rax|RAX, $src}">; } /// ArithBinOp_RFF - This is an arithmetic binary operator where the pattern is @@ -1052,7 +1060,7 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, string mnemonic, Format RegMRM, Format MemMRM, SDNode opnode, bit CommutableRR, bit ConvertibleToThreeAddress> { - let Defs = [EFLAGS] in { + let Uses = [EFLAGS], Defs = [EFLAGS] in { let Constraints = "$src1 = $dst" in { let isCommutable = CommutableRR, isConvertibleToThreeAddress = ConvertibleToThreeAddress in { @@ -1101,16 +1109,16 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, def NAME#16mi : BinOpMI_RMW_FF<mnemonic, Xi16, opnode, MemMRM>; def NAME#32mi : BinOpMI_RMW_FF<mnemonic, Xi32, opnode, MemMRM>; def NAME#64mi32 : BinOpMI_RMW_FF<mnemonic, Xi64, opnode, MemMRM>; - - def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL, - "{$src, %al|AL, $src}">; - def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX, - "{$src, %ax|AX, $src}">; - def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX, - "{$src, %eax|EAX, $src}">; - def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX, - "{$src, %rax|RAX, $src}">; - } + } // Uses = [EFLAGS], Defs = [EFLAGS] + + def NAME#8i8 : BinOpAI_FF<BaseOpc4, mnemonic, Xi8 , AL, + "{$src, %al|AL, $src}">; + def NAME#16i16 : BinOpAI_FF<BaseOpc4, mnemonic, Xi16, AX, + "{$src, %ax|AX, $src}">; + def NAME#32i32 : BinOpAI_FF<BaseOpc4, mnemonic, Xi32, EAX, + "{$src, %eax|EAX, $src}">; + def NAME#64i32 : BinOpAI_FF<BaseOpc4, mnemonic, Xi64, RAX, + "{$src, %rax|RAX, $src}">; } /// ArithBinOp_F - This is an arithmetic binary operator where the pattern is @@ -1168,16 +1176,16 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, def NAME#16mi : BinOpMI_F<mnemonic, Xi16, opnode, MemMRM>; def NAME#32mi : BinOpMI_F<mnemonic, Xi32, opnode, MemMRM>; def NAME#64mi32 : BinOpMI_F<mnemonic, Xi64, opnode, MemMRM>; + } // Defs = [EFLAGS] - def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL, - "{$src, %al|AL, $src}">; - def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX, - "{$src, %ax|AX, $src}">; - def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX, - "{$src, %eax|EAX, $src}">; - def NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX, - "{$src, %rax|RAX, $src}">; - } + def NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL, + "{$src, %al|AL, $src}">; + def NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX, + "{$src, %ax|AX, $src}">; + def NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX, + "{$src, %eax|EAX, $src}">; + def NAME#64i32 : BinOpAI<BaseOpc4, 
mnemonic, Xi64, RAX, + "{$src, %rax|RAX, $src}">; } @@ -1195,12 +1203,10 @@ defm SUB : ArithBinOp_RF<0x28, 0x2A, 0x2C, "sub", MRM5r, MRM5m, } // Arithmetic. -let Uses = [EFLAGS] in { - defm ADC : ArithBinOp_RFF<0x10, 0x12, 0x14, "adc", MRM2r, MRM2m, X86adc_flag, - 1, 0>; - defm SBB : ArithBinOp_RFF<0x18, 0x1A, 0x1C, "sbb", MRM3r, MRM3m, X86sbb_flag, - 0, 0>; -} +defm ADC : ArithBinOp_RFF<0x10, 0x12, 0x14, "adc", MRM2r, MRM2m, X86adc_flag, + 1, 0>; +defm SBB : ArithBinOp_RFF<0x18, 0x1A, 0x1C, "sbb", MRM3r, MRM3m, X86sbb_flag, + 0, 0>; let isCompare = 1 in { defm CMP : ArithBinOp_F<0x38, 0x3A, 0x3C, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>; @@ -1215,28 +1221,36 @@ defm CMP : ArithBinOp_F<0x38, 0x3A, 0x3C, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>; def X86testpat : PatFrag<(ops node:$lhs, node:$rhs), (X86cmp (and_su node:$lhs, node:$rhs), 0)>; -let isCompare = 1, Defs = [EFLAGS] in { - let isCommutable = 1 in { - def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat, MRMSrcReg>; - def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat, MRMSrcReg>; - def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat, MRMSrcReg>; - def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat, MRMSrcReg>; - } // isCommutable - - def TEST8rm : BinOpRM_F<0x84, "test", Xi8 , X86testpat>; - def TEST16rm : BinOpRM_F<0x84, "test", Xi16, X86testpat>; - def TEST32rm : BinOpRM_F<0x84, "test", Xi32, X86testpat>; - def TEST64rm : BinOpRM_F<0x84, "test", Xi64, X86testpat>; - - def TEST8ri : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>; - def TEST16ri : BinOpRI_F<0xF6, "test", Xi16, X86testpat, MRM0r>; - def TEST32ri : BinOpRI_F<0xF6, "test", Xi32, X86testpat, MRM0r>; - def TEST64ri32 : BinOpRI_F<0xF6, "test", Xi64, X86testpat, MRM0r>; - - def TEST8mi : BinOpMI_F<"test", Xi8 , X86testpat, MRM0m, 0xF6>; - def TEST16mi : BinOpMI_F<"test", Xi16, X86testpat, MRM0m, 0xF6>; - def TEST32mi : BinOpMI_F<"test", Xi32, X86testpat, MRM0m, 0xF6>; - def TEST64mi32 : BinOpMI_F<"test", Xi64, X86testpat, MRM0m, 0xF6>; +let isCompare = 1 in { + let Defs = [EFLAGS] in { + let isCommutable = 1 in { + def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat, MRMSrcReg>; + def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat, MRMSrcReg>; + def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat, MRMSrcReg>; + def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat, MRMSrcReg>; + } // isCommutable + + def TEST8rm : BinOpRM_F<0x84, "test", Xi8 , X86testpat>; + def TEST16rm : BinOpRM_F<0x84, "test", Xi16, X86testpat>; + def TEST32rm : BinOpRM_F<0x84, "test", Xi32, X86testpat>; + def TEST64rm : BinOpRM_F<0x84, "test", Xi64, X86testpat>; + + def TEST8ri : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>; + def TEST16ri : BinOpRI_F<0xF6, "test", Xi16, X86testpat, MRM0r>; + def TEST32ri : BinOpRI_F<0xF6, "test", Xi32, X86testpat, MRM0r>; + def TEST64ri32 : BinOpRI_F<0xF6, "test", Xi64, X86testpat, MRM0r>; + + def TEST8mi : BinOpMI_F<"test", Xi8 , X86testpat, MRM0m, 0xF6>; + def TEST16mi : BinOpMI_F<"test", Xi16, X86testpat, MRM0m, 0xF6>; + def TEST32mi : BinOpMI_F<"test", Xi32, X86testpat, MRM0m, 0xF6>; + def TEST64mi32 : BinOpMI_F<"test", Xi64, X86testpat, MRM0m, 0xF6>; + + // When testing the result of EXTRACT_SUBREG sub_8bit_hi, make sure the + // register class is constrained to GR8_NOREX. 
+ let isPseudo = 1 in + def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask), + "", [], IIC_BIN_NONMEM>, Sched<[WriteALU]>; + } // Defs = [EFLAGS] def TEST8i8 : BinOpAI<0xA8, "test", Xi8 , AL, "{$src, %al|AL, $src}">; @@ -1246,13 +1260,7 @@ let isCompare = 1, Defs = [EFLAGS] in { "{$src, %eax|EAX, $src}">; def TEST64i32 : BinOpAI<0xA8, "test", Xi64, RAX, "{$src, %rax|RAX, $src}">; - - // When testing the result of EXTRACT_SUBREG sub_8bit_hi, make sure the - // register class is constrained to GR8_NOREX. - let isPseudo = 1 in - def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask), - "", [], IIC_BIN_NONMEM>, Sched<[WriteALU]>; -} +} // isCompare //===----------------------------------------------------------------------===// // ANDN Instruction diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td index d9ff0c6..8a7ee7d 100644 --- a/lib/Target/X86/X86InstrCompiler.td +++ b/lib/Target/X86/X86InstrCompiler.td @@ -216,48 +216,38 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), // Alias Instructions //===----------------------------------------------------------------------===// -// Alias instructions that map movr0 to xor. +// Alias instruction mapping movr0 to xor. // FIXME: remove when we can teach regalloc that xor reg, reg is ok. // FIXME: Set encoding to pseudo. let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1, - isCodeGenOnly = 1 in { -def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "", - [(set GR8:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>; - -// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller -// encoding and avoids a partial-register update sometimes, but doing so -// at isel time interferes with rematerialization in the current register -// allocator. For now, this is rewritten when the instruction is lowered -// to an MCInst. -def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins), - "", - [(set GR16:$dst, 0)], IIC_ALU_NONMEM>, OpSize, - Sched<[WriteZero]>; - -// FIXME: Set encoding to pseudo. + isCodeGenOnly = 1 in def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "", [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>; -} -// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a -// smaller encoding, but doing so at isel time interferes with rematerialization -// in the current register allocator. For now, this is rewritten when the -// instruction is lowered to an MCInst. -// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove -// when we have a better way to specify isel priority. -let Defs = [EFLAGS], isCodeGenOnly=1, - AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in -def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "", - [(set GR64:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>; +// Other widths can also make use of the 32-bit xor, which may have a smaller +// encoding and avoid partial register updates. +def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>; +def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>; +def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> { + let AddedComplexity = 20; +} // Materialize i64 constant where top 32-bits are zero. This could theoretically // use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however // that would make it more difficult to rematerialize. 
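The new (i8 0), (i16 0) and (i64 0) patterns above all funnel into the single MOV32r0 because an x86-64 write to a 32-bit register implicitly clears bits 63:32, so one 32-bit xor zeroes every width at once. A C++ model of that implicit zero-extension:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t RAX = 0xFFFFFFFFFFFFFFFFull;  // pretend RAX holds garbage
      uint32_t EAX = 0;                      // xor %eax, %eax
      RAX = EAX;                             // 32-bit writes zero bits 63:32
      assert((uint8_t)RAX == 0 && (uint16_t)RAX == 0 && RAX == 0);
      return 0;
    }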
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1, - isCodeGenOnly = 1 in -def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src), - "", [(set GR64:$dst, i64immZExt32:$src)], - IIC_ALU_NONMEM>, Sched<[WriteALU]>; + isCodeGenOnly = 1, neverHasSideEffects = 1 in +def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src), + "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>; + +// This 64-bit pseudo-move can be used for both a 64-bit constant that is +// actually the zero-extension of a 32-bit constant, and for labels in the +// x86-64 small code model. +def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>; + +let AddedComplexity = 1 in +def : Pat<(i64 mov64imm32:$src), + (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>; // Use sbb to materialize carry bit. let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in { @@ -942,20 +932,6 @@ def : Pat<(i64 (X86Wrapper texternalsym:$dst)), def : Pat<(i64 (X86Wrapper tblockaddress:$dst)), (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>; -// In static codegen with small code model, we can get the address of a label -// into a register with 'movl'. FIXME: This is a hack, the 'imm' predicate of -// the MOV64ri64i32 should accept these. -def : Pat<(i64 (X86Wrapper tconstpool :$dst)), - (MOV64ri64i32 tconstpool :$dst)>, Requires<[SmallCode]>; -def : Pat<(i64 (X86Wrapper tjumptable :$dst)), - (MOV64ri64i32 tjumptable :$dst)>, Requires<[SmallCode]>; -def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)), - (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>; -def : Pat<(i64 (X86Wrapper texternalsym:$dst)), - (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>; -def : Pat<(i64 (X86Wrapper tblockaddress:$dst)), - (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>; - // In kernel code model, we can get the address of a label // into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of // the MOV64ri32 should accept these. @@ -1119,7 +1095,8 @@ defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>; def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>; def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>; def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>; -def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; +def : Pat<(zextloadi64i1 addr:$src), + (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>; // extload bool -> extload byte // When extloading from 16-bit and smaller memory locations into 64-bit @@ -1133,14 +1110,16 @@ def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>; def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>; def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>; -def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; -def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>; -def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>; // For other extloads, use subregs, since the high contents of the register are // defined after an extload. +def : Pat<(extloadi64i1 addr:$src), + (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>; +def : Pat<(extloadi64i8 addr:$src), + (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>; +def : Pat<(extloadi64i16 addr:$src), + (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>; def : Pat<(extloadi64i32 addr:$src), - (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), - sub_32bit)>; + (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>; // anyext. 
Define these to do an explicit zero-extend to // avoid partial-register updates. @@ -1152,8 +1131,10 @@ def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>; def : Pat<(i32 (anyext GR16:$src)), (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>; -def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>; -def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>; +def : Pat<(i64 (anyext GR8 :$src)), + (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>; +def : Pat<(i64 (anyext GR16:$src)), + (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>; def : Pat<(i64 (anyext GR32:$src)), (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>; @@ -1318,13 +1299,19 @@ def : Pat<(and GR16:$src1, 0xff), // r & (2^32-1) ==> movz def : Pat<(and GR64:$src, 0x00000000FFFFFFFF), - (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>; + (SUBREG_TO_REG (i64 0), + (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)), + sub_32bit)>; // r & (2^16-1) ==> movz def : Pat<(and GR64:$src, 0xffff), - (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>; + (SUBREG_TO_REG (i64 0), + (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))), + sub_32bit)>; // r & (2^8-1) ==> movz def : Pat<(and GR64:$src, 0xff), - (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>; + (SUBREG_TO_REG (i64 0), + (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))), + sub_32bit)>; // r & (2^8-1) ==> movz def : Pat<(and GR32:$src1, 0xff), (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>, diff --git a/lib/Target/X86/X86InstrExtension.td b/lib/Target/X86/X86InstrExtension.td index 6dc7175..28954c6 100644 --- a/lib/Target/X86/X86InstrExtension.td +++ b/lib/Target/X86/X86InstrExtension.td @@ -149,38 +149,24 @@ def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), "movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>, TB, Sched<[WriteALULd]>; -// FIXME: These should be Pat patterns. -let isCodeGenOnly = 1 in { - -// Use movzbl instead of movzbq when the destination is a register; it's -// equivalent due to implicit zero-extending, and it has a smaller encoding. -def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src), - "", [(set GR64:$dst, (zext GR8:$src))], IIC_MOVZX>, TB, - Sched<[WriteALU]>; -def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src), - "", [(set GR64:$dst, (zextloadi64i8 addr:$src))], IIC_MOVZX>, - TB, Sched<[WriteALULd]>; -// Use movzwl instead of movzwq when the destination is a register; it's -// equivalent due to implicit zero-extending, and it has a smaller encoding. -def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src), - "", [(set GR64:$dst, (zext GR16:$src))], IIC_MOVZX>, TB, - Sched<[WriteALU]>; -def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), - "", [(set GR64:$dst, (zextloadi64i16 addr:$src))], - IIC_MOVZX>, TB, Sched<[WriteALULd]>; - -// There's no movzlq instruction, but movl can be used for this purpose, using -// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero -// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit -// zero-extension, however this isn't possible when the 32-bit value is -// defined by a truncate or is copied from something where the high bits aren't -// necessarily all zero. In such cases, we fall back to these explicit zext -// instructions. 
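The rewritten "r & (2^N-1) ==> movz" patterns above rest on the fact that masking with 0xff, 0xffff or 0xffffffff is exactly a zero-extension of the corresponding low subregister, which is why a 32-bit movzx wrapped in SUBREG_TO_REG can replace the 64-bit AND. A quick check of that equivalence:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Src = 0x1122334455667788ull;
      assert((Src & 0xffull)       == (uint64_t)(uint8_t)Src);
      assert((Src & 0xffffull)     == (uint64_t)(uint16_t)Src);
      assert((Src & 0xffffffffull) == (uint64_t)(uint32_t)Src);
      return 0;
    }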
-def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src), - "", [(set GR64:$dst, (zext GR32:$src))], IIC_MOVZX>, - Sched<[WriteALU]>; -def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src), - "", [(set GR64:$dst, (zextloadi64i32 addr:$src))], - IIC_MOVZX>, Sched<[WriteALULd]>; -} - +// 64-bit zero-extension patterns use SUBREG_TO_REG and an operation writing a +// 32-bit register. +def : Pat<(i64 (zext GR8:$src)), + (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8:$src), sub_32bit)>; +def : Pat<(zextloadi64i8 addr:$src), + (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>; + +def : Pat<(i64 (zext GR16:$src)), + (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16:$src), sub_32bit)>; +def : Pat<(zextloadi64i16 addr:$src), + (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>; + +// The preferred way to do 32-bit-to-64-bit zero extension on x86-64 is to use a +// SUBREG_TO_REG to utilize implicit zero-extension, however this isn't possible +// when the 32-bit value is defined by a truncate or is copied from something +// where the high bits aren't necessarily all zero. In such cases, we fall back +// to these explicit zext instructions. +def : Pat<(i64 (zext GR32:$src)), + (SUBREG_TO_REG (i64 0), (MOV32rr GR32:$src), sub_32bit)>; +def : Pat<(i64 (zextloadi64i32 addr:$src)), + (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>; diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index a71e024..1432414 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -292,13 +292,16 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm, } def __xs : XS; +def __xd : XD; // SI - SSE 1 & 2 scalar instructions class SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin> { let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX], - !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2])); + !if(!eq(Prefix, __xs.Prefix), [UseSSE1], + !if(!eq(Prefix, __xd.Prefix), [UseSSE2], + !if(hasOpSizePrefix, [UseSSE2], [UseSSE1])))); // AVX instructions have a 'v' prefix in the mnemonic let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); @@ -350,7 +353,7 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // PSI - SSE1 instructions with TB prefix. // PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix. // VSSI - SSE1 instructions with XS prefix in AVX form. -// VPSI - SSE1 instructions with TB prefix in AVX form. +// VPSI - SSE1 instructions with TB prefix in AVX form, packed single. class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern, InstrItinClass itin = NoItinerary> @@ -381,10 +384,13 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm, // SDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix. // S2SI - SSE2 instructions with XS prefix. // SSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix. -// PDI - SSE2 instructions with TB and OpSize prefixes. +// PDI - SSE2 instructions with TB and OpSize prefixes, packed double domain. // PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes. -// VSDI - SSE2 instructions with XD prefix in AVX form. -// VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form. +// VSDI - SSE2 scalar instructions with XD prefix in AVX form. +// VPDI - SSE2 vector instructions with TB and OpSize prefixes in AVX form, +// packed double domain. 
+// VS2I - SSE2 scalar instructions with TB and OpSize prefixes in AVX form. +// S2I - SSE2 scalar instructions with TB and OpSize prefixes. // MMXSDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix as well as // MMX operands. // MMXSSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix as well as @@ -422,6 +428,14 @@ class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>, TB, OpSize, Requires<[HasAVX]>; +class VS2I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, TB, + OpSize, Requires<[HasAVX]>; +class S2I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : I<o, F, outs, ins, asm, pattern, itin>, TB, + OpSize, Requires<[UseSSE2]>; class MMXSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>; @@ -626,6 +640,12 @@ class RPDI<bits<8> o, Format F, dag outs, dag ins, string asm, class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern, InstrItinClass itin = NoItinerary> : VPDI<o, F, outs, ins, asm, pattern, itin>, VEX_W; +class RS2I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : S2I<o, F, outs, ins, asm, pattern, itin>, REX_W; +class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern, InstrItinClass itin = NoItinerary> + : VS2I<o, F, outs, ins, asm, pattern, itin>, VEX_W; // MMX Instruction templates // diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 7c0423f..df7b721 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -97,7 +97,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) (tm.getSubtarget<X86Subtarget>().is64Bit() ? X86::ADJCALLSTACKUP64 : X86::ADJCALLSTACKUP32)), - TM(tm), RI(tm, *this) { + TM(tm), RI(tm) { static const X86OpTblEntry OpTbl2Addr[] = { { X86::ADC32ri, X86::ADC32mi, 0 }, @@ -451,9 +451,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::MOVZX32rr16, X86::MOVZX32rm16, 0 }, { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8, 0 }, { X86::MOVZX32rr8, X86::MOVZX32rm8, 0 }, - { X86::MOVZX64rr16, X86::MOVZX64rm16, 0 }, - { X86::MOVZX64rr32, X86::MOVZX64rm32, 0 }, - { X86::MOVZX64rr8, X86::MOVZX64rm8, 0 }, { X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 }, { X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 }, { X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 }, @@ -1381,7 +1378,6 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, case X86::MOVSX32rr8: case X86::MOVZX32rr8: case X86::MOVSX64rr8: - case X86::MOVZX64rr8: if (!TM.getSubtarget<X86Subtarget>().is64Bit()) // It's not always legal to reference the low 8-bit of the larger // register in 32-bit mode. @@ -1389,9 +1385,7 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, case X86::MOVSX32rr16: case X86::MOVZX32rr16: case X86::MOVSX64rr16: - case X86::MOVZX64rr16: - case X86::MOVSX64rr32: - case X86::MOVZX64rr32: { + case X86::MOVSX64rr32: { if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) // Be conservative. 
       return false;
@@ -1404,17 +1398,14 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
   case X86::MOVSX32rr8:
   case X86::MOVZX32rr8:
   case X86::MOVSX64rr8:
-  case X86::MOVZX64rr8:
     SubIdx = X86::sub_8bit;
     break;
   case X86::MOVSX32rr16:
   case X86::MOVZX32rr16:
   case X86::MOVSX64rr16:
-  case X86::MOVZX64rr16:
     SubIdx = X86::sub_16bit;
     break;
   case X86::MOVSX64rr32:
-  case X86::MOVZX64rr32:
     SubIdx = X86::sub_32bit;
     break;
   }
@@ -1722,37 +1713,16 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                  unsigned DestReg, unsigned SubIdx,
                                  const MachineInstr *Orig,
                                  const TargetRegisterInfo &TRI) const {
-  DebugLoc DL = Orig->getDebugLoc();
-
-  // MOV32r0 etc. are implemented with xor which clobbers condition code.
-  // Re-materialize them as movri instructions to avoid side effects.
-  bool Clone = true;
+  // MOV32r0 is implemented with an xor which clobbers the condition code.
+  // Re-materialize it as a movri instruction to avoid side effects.
   unsigned Opc = Orig->getOpcode();
-  switch (Opc) {
-  default: break;
-  case X86::MOV8r0:
-  case X86::MOV16r0:
-  case X86::MOV32r0:
-  case X86::MOV64r0: {
-    if (!isSafeToClobberEFLAGS(MBB, I)) {
-      switch (Opc) {
-      default: llvm_unreachable("Unreachable!");
-      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
-      case X86::MOV16r0: Opc = X86::MOV16ri; break;
-      case X86::MOV32r0: Opc = X86::MOV32ri; break;
-      case X86::MOV64r0: Opc = X86::MOV64ri64i32; break;
-      }
-      Clone = false;
-    }
-    break;
-  }
-  }
-
-  if (Clone) {
+  if (Opc == X86::MOV32r0 && !isSafeToClobberEFLAGS(MBB, I)) {
+    DebugLoc DL = Orig->getDebugLoc();
+    BuildMI(MBB, I, DL, get(X86::MOV32ri)).addOperand(Orig->getOperand(0))
+      .addImm(0);
+  } else {
     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
     MBB.insert(I, MI);
-  } else {
-    BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
   }
   MachineInstr *NewMI = prior(I);
@@ -1772,6 +1742,98 @@ static bool hasLiveCondCodeDef(MachineInstr *MI) {
   return false;
 }
+/// getTruncatedShiftCount - return the shift count for a machine operand,
+/// truncated to the width the hardware actually uses.
+inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
+                                              unsigned ShiftAmtOperandIdx) {
+  // The shift count is six bits with the REX.W prefix and five bits without.
+  unsigned ShiftCountMask = (MI->getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
+  unsigned Imm = MI->getOperand(ShiftAmtOperandIdx).getImm();
+  return Imm & ShiftCountMask;
+}
+
+/// isTruncatedShiftCountForLEA - check whether the given shift count can be
+/// represented by a LEA instruction.
+inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
+  // Left shift instructions can be transformed into load-effective-address
+  // instructions if we can encode them appropriately.
+  // A LEA instruction utilizes a SIB byte to encode its scale factor.
+  // The SIB.scale field is two bits wide, which means that we can encode any
+  // non-zero shift amount less than 4.
+  return ShAmt < 4 && ShAmt > 0;
+}
+
+bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
+                                  unsigned Opc, bool AllowSP,
+                                  unsigned &NewSrc, bool &isKill, bool &isUndef,
+                                  MachineOperand &ImplicitOp) const {
+  MachineFunction &MF = *MI->getParent()->getParent();
+  const TargetRegisterClass *RC;
+  if (AllowSP) {
+    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
+  } else {
+    RC = Opc != X86::LEA32r ?
+      &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
+  }
+  unsigned SrcReg = Src.getReg();
+
+  // For both LEA64 and LEA32 the register already has essentially the right
+  // type (32-bit or 64-bit); we may just need to forbid SP.
+  if (Opc != X86::LEA64_32r) {
+    NewSrc = SrcReg;
+    isKill = Src.isKill();
+    isUndef = Src.isUndef();
+
+    if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
+        !MF.getRegInfo().constrainRegClass(NewSrc, RC))
+      return false;
+
+    return true;
+  }
+
+  // This is for an LEA64_32r and incoming registers are 32-bit. One way or
+  // another we need to add 64-bit registers to the final MI.
+  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+    ImplicitOp = Src;
+    ImplicitOp.setImplicit();
+
+    NewSrc = getX86SubSuperRegister(Src.getReg(), MVT::i64);
+    MachineBasicBlock::LivenessQueryResult LQR =
+      MI->getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
+
+    switch (LQR) {
+    case MachineBasicBlock::LQR_Unknown:
+      // We can't give sane liveness flags to the instruction, abandon LEA
+      // formation.
+      return false;
+    case MachineBasicBlock::LQR_Live:
+      isKill = MI->killsRegister(SrcReg);
+      isUndef = false;
+      break;
+    default:
+      // The physreg itself is dead, so we have to use it as an <undef>.
+      isKill = false;
+      isUndef = true;
+      break;
+    }
+  } else {
+    // The virtual register is of the wrong class; we have to create a
+    // temporary 64-bit vreg to feed into the LEA.
+    NewSrc = MF.getRegInfo().createVirtualRegister(RC);
+    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+            get(TargetOpcode::COPY))
+      .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
+      .addOperand(Src);
+
+    // The copy is obviously going to be dead after we're done with it.
+    isKill = true;
+    isUndef = false;
+  }
+
+  // We've set all the parameters without issue.
+  return true;
+}
+
 /// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
 /// 16-bit LEA is disabled, using 32-bit LEA to form 3-address code by promoting
 /// to a 32-bit superregister and then truncating back down to a 16-bit
@@ -1787,11 +1849,16 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
   bool isDead = MI->getOperand(0).isDead();
   bool isKill = MI->getOperand(1).isKill();
-  unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
-    ? X86::LEA64_32r : X86::LEA32r;
   MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
-  unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
   unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
+  unsigned Opc, leaInReg;
+  if (TM.getSubtarget<X86Subtarget>().is64Bit()) {
+    Opc = X86::LEA64_32r;
+    leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
+  } else {
+    Opc = X86::LEA32r;
+    leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
+  }
   // Build and insert into an implicit UNDEF value. This is OK because
   // we'll be shifting and then extracting the lower 16 bits.
@@ -1841,7 +1908,10 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
     // just a single insert_subreg.
     addRegReg(MIB, leaInReg, true, leaInReg, false);
   } else {
-    leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
+    if (TM.getSubtarget<X86Subtarget>().is64Bit())
+      leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
+    else
+      leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
     // Build and insert into an implicit UNDEF value. This is OK because
     // we'll be shifting and then extracting the lower 16 bits.
BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2); @@ -1891,6 +1961,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const { MachineInstr *MI = MBBI; + + // The following opcodes also set the condition code register(s). Only + // convert them to an equivalent LEA if the condition code register defs + // are dead! + if (hasLiveCondCodeDef(MI)) + return 0; + MachineFunction &MF = *MI->getParent()->getParent(); // All instructions input are two-addr instructions. Get the known operands. const MachineOperand &Dest = MI->getOperand(0); @@ -1935,10 +2012,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, } case X86::SHL64ri: { assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); - // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses - // the flags produced by a shift yet, so this is safe. - unsigned ShAmt = MI->getOperand(2).getImm(); - if (ShAmt == 0 || ShAmt >= 4) return 0; + unsigned ShAmt = getTruncatedShiftCount(MI, 2); + if (!isTruncatedShiftCountForLEA(ShAmt)) return 0; // LEA can't handle RSP. if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && @@ -1953,29 +2028,34 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, } case X86::SHL32ri: { assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); - // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses - // the flags produced by a shift yet, so this is safe. - unsigned ShAmt = MI->getOperand(2).getImm(); - if (ShAmt == 0 || ShAmt >= 4) return 0; + unsigned ShAmt = getTruncatedShiftCount(MI, 2); + if (!isTruncatedShiftCountForLEA(ShAmt)) return 0; + + unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; // LEA can't handle ESP. - if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && - !MF.getRegInfo().constrainRegClass(Src.getReg(), - &X86::GR32_NOSPRegClass)) + bool isKill, isUndef; + unsigned SrcReg; + MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, + SrcReg, isKill, isUndef, ImplicitOp)) return 0; - unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; - NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc)) + MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) .addOperand(Dest) - .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); + .addReg(0).addImm(1 << ShAmt) + .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)) + .addImm(0).addReg(0); + if (ImplicitOp.getReg() != 0) + MIB.addOperand(ImplicitOp); + NewMI = MIB; + break; } case X86::SHL16ri: { assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); - // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses - // the flags produced by a shift yet, so this is safe. - unsigned ShAmt = MI->getOperand(2).getImm(); - if (ShAmt == 0 || ShAmt >= 4) return 0; + unsigned ShAmt = getTruncatedShiftCount(MI, 2); + if (!isTruncatedShiftCountForLEA(ShAmt)) return 0; if (DisableLEA16) return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; @@ -1985,11 +2065,6 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, break; } default: { - // The following opcodes also sets the condition code register(s). Only - // convert them to equivalent lea if the condition code register def's - // are dead!
- if (hasLiveCondCodeDef(MI)) - return 0; switch (MIOpc) { default: return 0; @@ -1999,17 +2074,20 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r : (is64Bit ? X86::LEA64_32r : X86::LEA32r); - const TargetRegisterClass *RC = MIOpc == X86::INC64r ? - (const TargetRegisterClass*)&X86::GR64_NOSPRegClass : - (const TargetRegisterClass*)&X86::GR32_NOSPRegClass; - - // LEA can't handle RSP. - if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && - !MF.getRegInfo().constrainRegClass(Src.getReg(), RC)) + bool isKill, isUndef; + unsigned SrcReg; + MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, + SrcReg, isKill, isUndef, ImplicitOp)) return 0; - NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addOperand(Dest).addOperand(Src), 1); + MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) + .addOperand(Dest) + .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)); + if (ImplicitOp.getReg() != 0) + MIB.addOperand(ImplicitOp); + + NewMI = addOffset(MIB, 1); break; } case X86::INC16r: @@ -2026,16 +2104,22 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r : (is64Bit ? X86::LEA64_32r : X86::LEA32r); - const TargetRegisterClass *RC = MIOpc == X86::DEC64r ? - (const TargetRegisterClass*)&X86::GR64_NOSPRegClass : - (const TargetRegisterClass*)&X86::GR32_NOSPRegClass; - // LEA can't handle RSP. - if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && - !MF.getRegInfo().constrainRegClass(Src.getReg(), RC)) + + bool isKill, isUndef; + unsigned SrcReg; + MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, + SrcReg, isKill, isUndef, ImplicitOp)) return 0; - NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addOperand(Dest).addOperand(Src), -1); + MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) + .addOperand(Dest) + .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); + if (ImplicitOp.getReg() != 0) + MIB.addOperand(ImplicitOp); + + NewMI = addOffset(MIB, -1); + break; } case X86::DEC16r: @@ -2052,36 +2136,41 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::ADD32rr_DB: { assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); unsigned Opc; - const TargetRegisterClass *RC; - if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) { + if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) Opc = X86::LEA64r; - RC = &X86::GR64_NOSPRegClass; - } else { + else Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; - RC = &X86::GR32_NOSPRegClass; - } + bool isKill, isUndef; + unsigned SrcReg; + MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, + SrcReg, isKill, isUndef, ImplicitOp)) + return 0; - unsigned Src2 = MI->getOperand(2).getReg(); - bool isKill2 = MI->getOperand(2).isKill(); - - // LEA can't handle RSP. 
- if (TargetRegisterInfo::isVirtualRegister(Src2) && - !MF.getRegInfo().constrainRegClass(Src2, RC)) + const MachineOperand &Src2 = MI->getOperand(2); + bool isKill2, isUndef2; + unsigned SrcReg2; + MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false); + if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false, + SrcReg2, isKill2, isUndef2, ImplicitOp2)) return 0; - NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addOperand(Dest), - Src.getReg(), Src.isKill(), Src2, isKill2); + MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) + .addOperand(Dest); + if (ImplicitOp.getReg() != 0) + MIB.addOperand(ImplicitOp); + if (ImplicitOp2.getReg() != 0) + MIB.addOperand(ImplicitOp2); + + NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2); // Preserve undefness of the operands. - bool isUndef = MI->getOperand(1).isUndef(); - bool isUndef2 = MI->getOperand(2).isUndef(); NewMI->getOperand(1).setIsUndef(isUndef); NewMI->getOperand(3).setIsUndef(isUndef2); - if (LV && isKill2) - LV->replaceKillInstruction(Src2, MI, NewMI); + if (LV && Src2.isKill()) + LV->replaceKillInstruction(SrcReg2, MI, NewMI); break; } case X86::ADD16rr: @@ -2120,9 +2209,21 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::ADD32ri8_DB: { assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; - NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addOperand(Dest).addOperand(Src), - MI->getOperand(2).getImm()); + + bool isKill, isUndef; + unsigned SrcReg; + MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); + if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, + SrcReg, isKill, isUndef, ImplicitOp)) + return 0; + + MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) + .addOperand(Dest) + .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); + if (ImplicitOp.getReg() != 0) + MIB.addOperand(ImplicitOp); + + NewMI = addOffset(MIB, MI->getOperand(2).getImm()); break; } case X86::ADD16ri: @@ -3171,6 +3272,25 @@ inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg, inline static bool isDefConvertible(MachineInstr *MI) { switch (MI->getOpcode()) { default: return false; + + // The shift instructions only modify ZF if their shift count is non-zero. + // N.B.: The processor truncates the shift count depending on the encoding. + case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri: + case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri: + return getTruncatedShiftCount(MI, 2) != 0; + + // Some left shift instructions can be turned into LEA instructions but only + // if their flags aren't used. Avoid transforming such instructions. 
+ case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{ + unsigned ShAmt = getTruncatedShiftCount(MI, 2); + if (isTruncatedShiftCountForLEA(ShAmt)) return false; + return ShAmt != 0; + } + + case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8: + case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8: + return getTruncatedShiftCount(MI, 3) != 0; + case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: @@ -3200,8 +3320,37 @@ inline static bool isDefConvertible(MachineInstr *MI) { case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: + case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r: + case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1: + case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1: + case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1: + case X86::ADC32ri: case X86::ADC32ri8: + case X86::ADC32rr: case X86::ADC64ri32: + case X86::ADC64ri8: case X86::ADC64rr: + case X86::SBB32ri: case X86::SBB32ri8: + case X86::SBB32rr: case X86::SBB64ri32: + case X86::SBB64ri8: case X86::SBB64rr: case X86::ANDN32rr: case X86::ANDN32rm: case X86::ANDN64rr: case X86::ANDN64rm: + case X86::BEXTR32rr: case X86::BEXTR64rr: + case X86::BEXTR32rm: case X86::BEXTR64rm: + case X86::BLSI32rr: case X86::BLSI32rm: + case X86::BLSI64rr: case X86::BLSI64rm: + case X86::BLSMSK32rr:case X86::BLSMSK32rm: + case X86::BLSMSK64rr:case X86::BLSMSK64rm: + case X86::BLSR32rr: case X86::BLSR32rm: + case X86::BLSR64rr: case X86::BLSR64rm: + case X86::BZHI32rr: case X86::BZHI32rm: + case X86::BZHI64rr: case X86::BZHI64rm: + case X86::LZCNT16rr: case X86::LZCNT16rm: + case X86::LZCNT32rr: case X86::LZCNT32rm: + case X86::LZCNT64rr: case X86::LZCNT64rm: + case X86::POPCNT16rr:case X86::POPCNT16rm: + case X86::POPCNT32rr:case X86::POPCNT32rm: + case X86::POPCNT64rr:case X86::POPCNT64rm: + case X86::TZCNT16rr: case X86::TZCNT16rm: + case X86::TZCNT32rr: case X86::TZCNT32rm: + case X86::TZCNT64rr: case X86::TZCNT64rm: return true; } } @@ -3308,10 +3457,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // MOV32r0 etc. are implemented with xor which clobbers condition code. // They are safe to move up, if the definition to EFLAGS is dead and // earlier instructions do not read or write EFLAGS. - if (!Movr0Inst && (Instr->getOpcode() == X86::MOV8r0 || - Instr->getOpcode() == X86::MOV16r0 || - Instr->getOpcode() == X86::MOV32r0 || - Instr->getOpcode() == X86::MOV64r0) && + if (!Movr0Inst && Instr->getOpcode() == X86::MOV32r0 && Instr->registerDefIsDead(X86::EFLAGS, TRI)) { Movr0Inst = Instr; continue; @@ -3420,20 +3566,38 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // The instruction to be updated is either Sub or MI. Sub = IsCmpZero ? MI : Sub; - // Move Movr0Inst to the place right before Sub. + // Move Movr0Inst to the appropriate place before Sub. if (Movr0Inst) { - Sub->getParent()->remove(Movr0Inst); - Sub->getParent()->insert(MachineBasicBlock::iterator(Sub), Movr0Inst); + // Look backwards until we find a def that doesn't use the current EFLAGS. 
+ Def = Sub; + MachineBasicBlock::reverse_iterator + InsertI = MachineBasicBlock::reverse_iterator(++Def), + InsertE = Sub->getParent()->rend(); + for (; InsertI != InsertE; ++InsertI) { + MachineInstr *Instr = &*InsertI; + if (!Instr->readsRegister(X86::EFLAGS, TRI) && + Instr->modifiesRegister(X86::EFLAGS, TRI)) { + Sub->getParent()->remove(Movr0Inst); + Instr->getParent()->insert(MachineBasicBlock::iterator(Instr), + Movr0Inst); + break; + } + } + if (InsertI == InsertE) + return false; } // Make sure Sub instruction defines EFLAGS and mark the def live. - unsigned LastOperand = Sub->getNumOperands() - 1; - assert(Sub->getNumOperands() >= 2 && - Sub->getOperand(LastOperand).isReg() && - Sub->getOperand(LastOperand).getReg() == X86::EFLAGS && - "EFLAGS should be the last operand of SUB, ADD, OR, XOR, AND"); - Sub->getOperand(LastOperand).setIsDef(true); - Sub->getOperand(LastOperand).setIsDead(false); + unsigned i = 0, e = Sub->getNumOperands(); + for (; i != e; ++i) { + MachineOperand &MO = Sub->getOperand(i); + if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) { + MO.setIsDead(false); + break; + } + } + assert(i != e && "Unable to locate a def EFLAGS operand"); + CmpInstr->eraseFromParent(); // Modify the condition code of instructions in OpsToUpdate. @@ -3686,18 +3850,11 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, OpcodeTablePtr = &RegOp2MemOpTable2Addr; isTwoAddrFold = true; } else if (i == 0) { // If operand 0 - unsigned Opc = 0; - switch (MI->getOpcode()) { - default: break; - case X86::MOV64r0: Opc = X86::MOV64mi32; break; - case X86::MOV32r0: Opc = X86::MOV32mi; break; - case X86::MOV16r0: Opc = X86::MOV16mi; break; - case X86::MOV8r0: Opc = X86::MOV8mi; break; + if (MI->getOpcode() == X86::MOV32r0) { + NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI); + if (NewMI) + return NewMI; } - if (Opc) - NewMI = MakeM0Inst(*this, Opc, MOs, MI); - if (NewMI) - return NewMI; OpcodeTablePtr = &RegOp2MemOpTable0; } else if (i == 1) { @@ -4083,13 +4240,9 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI, if (isTwoAddr && NumOps >= 2 && OpNum < 2) { OpcodeTablePtr = &RegOp2MemOpTable2Addr; } else if (OpNum == 0) { // If operand 0 - switch (Opc) { - case X86::MOV8r0: - case X86::MOV16r0: - case X86::MOV32r0: - case X86::MOV64r0: return true; - default: break; - } + if (Opc == X86::MOV32r0) + return true; + OpcodeTablePtr = &RegOp2MemOpTable0; } else if (OpNum == 1) { OpcodeTablePtr = &RegOp2MemOpTable1; @@ -4250,7 +4403,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, std::vector<SDValue> AddrOps; std::vector<SDValue> BeforeOps; std::vector<SDValue> AfterOps; - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); unsigned NumOps = N->getNumOperands(); for (unsigned i = 0; i != NumOps-1; ++i) { SDValue Op = N->getOperand(i); diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h index 260f054..332874f 100644 --- a/lib/Target/X86/X86InstrInfo.h +++ b/lib/Target/X86/X86InstrInfo.h @@ -192,6 +192,19 @@ public: const MachineInstr *Orig, const TargetRegisterInfo &TRI) const; + /// Given an operand within a MachineInstr, insert preceding code to put it + /// into the right format for a particular kind of LEA instruction. This may + /// involve using an appropriate super-register instead (with an implicit use + /// of the original) or creating a new virtual register and inserting COPY + /// instructions to get the data into the right class. 
+ /// + /// Reference parameters are set to indicate how the caller should add this + /// operand to the LEA instruction. + bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src, + unsigned LEAOpcode, bool AllowSP, + unsigned &NewSrc, bool &isKill, + bool &isUndef, MachineOperand &ImplicitOp) const; + /// convertToThreeAddress - This method must be implemented by targets that /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target /// may be able to convert a two-address instruction into a true diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 3380d8c..817bd6c 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -523,8 +523,7 @@ def i64i8imm : Operand<i64> { def lea64_32mem : Operand<i32> { let PrintMethod = "printi32mem"; - let AsmOperandLowerMethod = "lower_lea64_32mem"; - let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm); + let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm); let ParserMatchClass = X86MemAsmOperand; } @@ -546,7 +545,7 @@ def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr", [add, sub, mul, X86mul_imm, shl, or, frameindex], []>; // In 64-bit mode 32-bit LEAs can use RIP-relative addressing. -def lea64_32addr : ComplexPattern<i32, 5, "SelectLEAAddr", +def lea64_32addr : ComplexPattern<i32, 5, "SelectLEA64_32Addr", [add, sub, mul, X86mul_imm, shl, or, frameindex, X86WrapperRIP], []>; @@ -884,12 +883,12 @@ def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>, let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP], mayLoad = 1, neverHasSideEffects = 1, SchedRW = [WriteLoad] in { -def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l|d}", [], IIC_POP_A>, +def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", [], IIC_POP_A>, Requires<[In32BitMode]>; } let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in { -def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l|d}", [], IIC_PUSH_A>, +def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", [], IIC_PUSH_A>, Requires<[In32BitMode]>; } @@ -1867,6 +1866,9 @@ def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>; def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>; def : MnemonicAlias<"pushfd", "pushfl", "att">; +def : MnemonicAlias<"popad", "popa", "intel">, Requires<[In32BitMode]>; +def : MnemonicAlias<"pushad", "pusha", "intel">, Requires<[In32BitMode]>; + def : MnemonicAlias<"repe", "rep", "att">; def : MnemonicAlias<"repz", "rep", "att">; def : MnemonicAlias<"repnz", "repne", "att">; diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index 49721df..07314a0 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -357,21 +357,21 @@ defm MMX_PHADDSW : SS3I_binop_rm_int_mm<0x03, "phaddsw",int_x86_ssse3_phadd_sw, defm MMX_PSUBB : MMXI_binop_rm_int<0xF8, "psubb", int_x86_mmx_psub_b, MMX_INTALU_ITINS>; defm MMX_PSUBW : MMXI_binop_rm_int<0xF9, "psubw", int_x86_mmx_psub_w, - MMX_INTALU_ITINS, 1>; + MMX_INTALU_ITINS>; defm MMX_PSUBD : MMXI_binop_rm_int<0xFA, "psubd", int_x86_mmx_psub_d, - MMX_INTALU_ITINS, 1>; + MMX_INTALU_ITINS>; defm MMX_PSUBQ : MMXI_binop_rm_int<0xFB, "psubq", int_x86_mmx_psub_q, - MMX_INTALUQ_ITINS, 1>; + MMX_INTALUQ_ITINS>; defm MMX_PSUBSB : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b, - MMX_INTALU_ITINS, 1>; + MMX_INTALU_ITINS>; defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" ,
int_x86_mmx_psubs_w, - MMX_INTALU_ITINS, 1>; + MMX_INTALU_ITINS>; defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b, - MMX_INTALU_ITINS, 1>; + MMX_INTALU_ITINS>; defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w, - MMX_INTALU_ITINS, 1>; + MMX_INTALU_ITINS>; defm MMX_PHSUBW : SS3I_binop_rm_int_mm<0x05, "phsubw", int_x86_ssse3_phsub_w, MMX_PHADDSUBW>; diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index cce938b..79b1ca3 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -2342,65 +2342,62 @@ let Constraints = "$src1 = $dst" in { // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode, ValueType vt, X86MemOperand x86memop, - PatFrag ld_frag, string OpcodeStr, Domain d> { - def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2), + PatFrag ld_frag, string OpcodeStr> { + def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"), [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], - IIC_SSE_COMIS_RR, d>, + IIC_SSE_COMIS_RR>, Sched<[WriteFAdd]>; - def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2), + def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"), [(set EFLAGS, (OpNode (vt RC:$src1), (ld_frag addr:$src2)))], - IIC_SSE_COMIS_RM, d>, + IIC_SSE_COMIS_RM>, Sched<[WriteFAddLd, ReadAfterLd]>; } let Defs = [EFLAGS] in { defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32, - "ucomiss", SSEPackedSingle>, TB, VEX, VEX_LIG; + "ucomiss">, TB, VEX, VEX_LIG; defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64, - "ucomisd", SSEPackedDouble>, TB, OpSize, VEX, - VEX_LIG; + "ucomisd">, TB, OpSize, VEX, VEX_LIG; let Pattern = []<dag> in { defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load, - "comiss", SSEPackedSingle>, TB, VEX, - VEX_LIG; + "comiss">, TB, VEX, VEX_LIG; defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load, - "comisd", SSEPackedDouble>, TB, OpSize, VEX, - VEX_LIG; + "comisd">, TB, OpSize, VEX, VEX_LIG; } defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem, - load, "ucomiss", SSEPackedSingle>, TB, VEX; + load, "ucomiss">, TB, VEX; defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem, - load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX; + load, "ucomisd">, TB, OpSize, VEX; defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, - load, "comiss", SSEPackedSingle>, TB, VEX; + load, "comiss">, TB, VEX; defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, - load, "comisd", SSEPackedDouble>, TB, OpSize, VEX; + load, "comisd">, TB, OpSize, VEX; defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32, - "ucomiss", SSEPackedSingle>, TB; + "ucomiss">, TB; defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64, - "ucomisd", SSEPackedDouble>, TB, OpSize; + "ucomisd">, TB, OpSize; let Pattern = []<dag> in { defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load, - "comiss", SSEPackedSingle>, TB; + "comiss">, TB; defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load, - "comisd", SSEPackedDouble>, TB, OpSize; + "comisd">, TB, OpSize; } defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem, - load, "ucomiss", SSEPackedSingle>, TB; + load, "ucomiss">, TB; defm 
Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem, - load, "ucomisd", SSEPackedDouble>, TB, OpSize; + load, "ucomisd">, TB, OpSize; defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load, - "comiss", SSEPackedSingle>, TB; + "comiss">, TB; defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load, - "comisd", SSEPackedDouble>, TB, OpSize; + "comisd">, TB, OpSize; } // Defs = [EFLAGS] // sse12_cmp_packed - sse 1 & 2 compare packed instructions @@ -3049,12 +3046,20 @@ let isCodeGenOnly = 1 in { /// And, we have a special variant form for a full-vector intrinsic form. let Sched = WriteFSqrt in { -def SSE_SQRTP : OpndItins< - IIC_SSE_SQRTP_RR, IIC_SSE_SQRTP_RM +def SSE_SQRTPS : OpndItins< + IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM >; -def SSE_SQRTS : OpndItins< - IIC_SSE_SQRTS_RR, IIC_SSE_SQRTS_RM +def SSE_SQRTSS : OpndItins< + IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM +>; + +def SSE_SQRTPD : OpndItins< + IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM +>; + +def SSE_SQRTSD : OpndItins< + IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM >; } @@ -3319,18 +3324,18 @@ let Predicates = [HasAVX] in { // Square root. defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss, - SSE_SQRTS>, - sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTP>, + SSE_SQRTSS>, + sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS>, sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd, - SSE_SQRTS>, - sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTP>; + SSE_SQRTSD>, + sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>; // Reciprocal approximations. Note that these typically require refinement // in order to obtain suitable precision. -defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>, - sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTP>, +defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, SSE_SQRTSS>, + sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTPS>, sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps, - int_x86_avx_rsqrt_ps_256, SSE_SQRTP>; + int_x86_avx_rsqrt_ps_256, SSE_SQRTPS>; defm RCP : sse1_fp_unop_rw<0x53, "rcp", X86frcp, SSE_RCPS>, sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP>, sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps, @@ -4369,43 +4374,43 @@ def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask), //===---------------------------------------------------------------------===// // Move Int Doubleword to Packed Double Int // -def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), +def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>; -def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), +def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (scalar_to_vector (loadi32 addr:$src))))], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>; -def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), +def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2i64 (scalar_to_vector GR64:$src)))], IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>; -def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src), +def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (bitconvert GR64:$src))], 
IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>; -def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), +def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>, Sched<[WriteMove]>; -def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), +def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (scalar_to_vector (loadi32 addr:$src))))], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>; -def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), +def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2i64 (scalar_to_vector GR64:$src)))], IIC_SSE_MOVDQ>, Sched<[WriteMove]>; -def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src), +def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (bitconvert GR64:$src))], IIC_SSE_MOVDQ>, Sched<[WriteMove]>; @@ -4413,22 +4418,22 @@ def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src), //===---------------------------------------------------------------------===// // Move Int Doubleword to Single Scalar // -def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src), +def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (bitconvert GR32:$src))], IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>; -def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), +def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>; -def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src), +def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (bitconvert GR32:$src))], IIC_SSE_MOVDQ>, Sched<[WriteMove]>; -def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), +def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>; @@ -4436,23 +4441,23 @@ def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), //===---------------------------------------------------------------------===// // Move Packed Doubleword Int to Packed Double Int // -def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src), +def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src), "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (vector_extract (v4i32 VR128:$src), (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>; -def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs), +def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src), "movd\t{$src, $dst|$dst, $src}", [(store (i32 (vector_extract (v4i32 VR128:$src), (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>; -def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src), +def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src), "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (vector_extract (v4i32 VR128:$src), (iPTR 0)))], 
IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>; -def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src), +def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src), "movd\t{$src, $dst|$dst, $src}", [(store (i32 (vector_extract (v4i32 VR128:$src), (iPTR 0))), addr:$dst)], @@ -4462,14 +4467,14 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src), // Move Packed Doubleword Int first element to Doubleword Int // let SchedRW = [WriteMove] in { -def VMOVPQIto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), +def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (vector_extract (v2i64 VR128:$src), (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX; -def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), +def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (vector_extract (v2i64 VR128:$src), (iPTR 0)))], @@ -4484,11 +4489,11 @@ def VMOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src), "vmovq\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>, VEX, Sched<[WriteLoad]>; -def VMOVSDto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src), +def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (bitconvert FR64:$src))], IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>; -def VMOVSDto64mr : VRPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src), +def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src), "movq\t{$src, $dst|$dst, $src}", [(store (i64 (bitconvert FR64:$src)), addr:$dst)], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>; @@ -4497,11 +4502,11 @@ def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src), "movq\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>; -def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src), +def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (bitconvert FR64:$src))], IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>; -def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src), +def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src), "movq\t{$src, $dst|$dst, $src}", [(store (i64 (bitconvert FR64:$src)), addr:$dst)], IIC_SSE_MOVDQ>, Sched<[WriteStore]>; @@ -4509,19 +4514,19 @@ def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src), //===---------------------------------------------------------------------===// // Move Scalar Single to Double Int // -def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src), +def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (bitconvert FR32:$src))], IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>; -def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), +def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), "movd\t{$src, $dst|$dst, $src}", [(store (i32 (bitconvert FR32:$src)), addr:$dst)], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>; -def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src), +def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src), 
"movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (bitconvert FR32:$src))], IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>; -def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), +def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), "movd\t{$src, $dst|$dst, $src}", [(store (i32 (bitconvert FR32:$src)), addr:$dst)], IIC_SSE_MOVDQ>, Sched<[WriteStore]>; @@ -4531,12 +4536,12 @@ def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), // let SchedRW = [WriteMove] in { let AddedComplexity = 15 in { -def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), +def VMOVZDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))))], IIC_SSE_MOVDQ>, VEX; -def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), +def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))))], @@ -4544,12 +4549,12 @@ def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), VEX, VEX_W; } let AddedComplexity = 15 in { -def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), +def MOVZDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))))], IIC_SSE_MOVDQ>; -def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), +def MOVZQI2PQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))))], @@ -4558,13 +4563,13 @@ def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), } // SchedRW let AddedComplexity = 20, SchedRW = [WriteLoad] in { -def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), +def VMOVZDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))))], IIC_SSE_MOVDQ>, VEX; -def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), +def MOVZDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (X86vzmovl (v4i32 (scalar_to_vector @@ -4638,12 +4643,12 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), // Move Packed Quadword Int to Quadword Int // let SchedRW = [WriteStore] in { -def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), +def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), "movq\t{$src, $dst|$dst, $src}", [(store (i64 (vector_extract (v2i64 VR128:$src), (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>, VEX; -def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), +def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), "movq\t{$src, $dst|$dst, $src}", [(store (i64 (vector_extract (v2i64 VR128:$src), (iPTR 0))), addr:$dst)], @@ -4653,11 +4658,11 @@ def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), //===---------------------------------------------------------------------===// // Store / copy lower 64-bits of a XMM register. 
// -def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), +def VMOVLQ128mr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), "movq\t{$src, $dst|$dst, $src}", [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX, Sched<[WriteStore]>; -def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), +def MOVLQ128mr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), "movq\t{$src, $dst|$dst, $src}", [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)], IIC_SSE_MOVDQ>, Sched<[WriteStore]>; @@ -4758,14 +4763,14 @@ let AddedComplexity = 20 in { // Instructions to match in the assembler let SchedRW = [WriteMove] in { -def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), +def VMOVQs64rr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>, VEX, VEX_W; -def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), +def VMOVQd64rr : VS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>, VEX, VEX_W; // Recognize "movd" with GR64 destination, but encode as a "movq" -def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), +def VMOVQd64rr_alt : VS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), "movd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>, VEX, VEX_W; } // SchedRW @@ -8367,7 +8372,9 @@ multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256, []>, VEX_4VOp3, VEX_L; } -let mayLoad = 1, Constraints = "$src1 = $dst, $mask = $mask_wb" in { +let mayLoad = 1, Constraints + = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb" + in { defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W; defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W; defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>; diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td index 5b6298b..89c1a689 100644 --- a/lib/Target/X86/X86InstrShiftRotate.td +++ b/lib/Target/X86/X86InstrShiftRotate.td @@ -34,7 +34,7 @@ def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1), def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), "shl{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))], IIC_SR>; - + let isConvertibleToThreeAddress = 1 in { // Can transform into LEA. def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2), "shl{w}\t{$src2, $dst|$dst, $src2}", @@ -43,7 +43,7 @@ def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2), def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2), "shl{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))], IIC_SR>; -def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), +def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), "shl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))], diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp index 44d8cce..fc86e1e 100644 --- a/lib/Target/X86/X86JITInfo.cpp +++ b/lib/Target/X86/X86JITInfo.cpp @@ -339,6 +339,7 @@ extern "C" { /// must locate the start of the stub or call site and pass it into the JIT /// compiler function. extern "C" { +LLVM_ATTRIBUTE_USED // Referenced from inline asm. 
LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) { intptr_t *RetAddrLoc = &StackPtr[1]; diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp index a8a9fd8..a453245 100644 --- a/lib/Target/X86/X86MCInstLower.cpp +++ b/lib/Target/X86/X86MCInstLower.cpp @@ -225,32 +225,6 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, } - -static void lower_subreg32(MCInst *MI, unsigned OpNo) { - // Convert registers in the addr mode according to subreg32. - unsigned Reg = MI->getOperand(OpNo).getReg(); - if (Reg != 0) - MI->getOperand(OpNo).setReg(getX86SubSuperRegister(Reg, MVT::i32)); -} - -static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) { - // Convert registers in the addr mode according to subreg64. - for (unsigned i = 0; i != 4; ++i) { - if (!MI->getOperand(OpNo+i).isReg()) continue; - - unsigned Reg = MI->getOperand(OpNo+i).getReg(); - // LEAs can use RIP-relative addressing, and RIP has no sub/super register. - if (Reg == 0 || Reg == X86::RIP) continue; - - MI->getOperand(OpNo+i).setReg(getX86SubSuperRegister(Reg, MVT::i64)); - } -} - -/// LowerSubReg32_Op0 - Things like MOVZX16rr8 -> MOVZX32rr8. -static void LowerSubReg32_Op0(MCInst &OutMI, unsigned NewOpc) { - OutMI.setOpcode(NewOpc); - lower_subreg32(&OutMI, 0); -} /// LowerUnaryToTwoAddr - R = setb -> R = sbb R, R static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) { OutMI.setOpcode(NewOpc); @@ -376,9 +350,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { // Handle a few special cases to eliminate operand modifiers. ReSimplify: switch (OutMI.getOpcode()) { - case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand. - lower_lea64_32mem(&OutMI, 1); - // FALL THROUGH. 
+ case X86::LEA64_32r: case X86::LEA64r: case X86::LEA16r: case X86::LEA32r: @@ -388,23 +360,10 @@ ReSimplify: assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && "LEA has segment specified!"); break; - case X86::MOVZX64rr32: LowerSubReg32_Op0(OutMI, X86::MOV32rr); break; - case X86::MOVZX64rm32: LowerSubReg32_Op0(OutMI, X86::MOV32rm); break; - case X86::MOV64ri64i32: LowerSubReg32_Op0(OutMI, X86::MOV32ri); break; - case X86::MOVZX64rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break; - case X86::MOVZX64rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break; - case X86::MOVZX64rr16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break; - case X86::MOVZX64rm16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break; - case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break; case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break; - case X86::MOV16r0: - LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0 - LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr - break; - case X86::MOV64r0: - LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV64r0 -> MOV32r0 - LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr + case X86::MOV32ri64: + OutMI.setOpcode(X86::MOV32ri); break; // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index 16886e4..eacae2c 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -54,15 +54,14 @@ static cl::opt<bool> EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true), cl::desc("Enable use of a base pointer for complex stack frames")); -X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm, - const TargetInstrInfo &tii) +X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm) : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit() ? X86::RIP : X86::EIP), X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false), X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true), (tm.getSubtarget<X86Subtarget>().is64Bit() ? X86::RIP : X86::EIP)), - TM(tm), TII(tii) { + TM(tm) { X86_MC::InitLLVM2SEHRegisterMapping(this); // Cache some information. @@ -306,19 +305,19 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); // Set the stack-pointer register and its aliases as reserved. - Reserved.set(X86::RSP); - for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I) + for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid(); + ++I) Reserved.set(*I); // Set the instruction pointer register and its aliases as reserved. - Reserved.set(X86::RIP); - for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I) + for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid(); + ++I) Reserved.set(*I); // Set the frame-pointer register and its aliases as reserved if needed. 
if (TFI->hasFP(MF)) { - Reserved.set(X86::RBP); - for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I) + for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid(); + ++I) Reserved.set(*I); } @@ -331,8 +330,8 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { "Stack realignment in presence of dynamic allocas is not supported with" "this calling convention."); - Reserved.set(getBaseRegister()); - for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I) + for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true); + I.isValid(); ++I) Reserved.set(*I); } @@ -373,8 +372,11 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { Reserved.set(*AI); // XMM8, XMM9, ... - assert(X86::XMM15 == X86::XMM8+7); - for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI) + static const uint16_t XMMReg[] = { + X86::XMM8, X86::XMM9, X86::XMM10, X86::XMM11, + X86::XMM12, X86::XMM13, X86::XMM14, X86::XMM15 + }; + for (MCRegAliasIterator AI(XMMReg[n], this, true); AI.isValid(); ++AI) Reserved.set(*AI); } } diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h index b9d7b8c..6a1b328 100644 --- a/lib/Target/X86/X86RegisterInfo.h +++ b/lib/Target/X86/X86RegisterInfo.h @@ -27,7 +27,6 @@ namespace llvm { class X86RegisterInfo : public X86GenRegisterInfo { public: X86TargetMachine &TM; - const TargetInstrInfo &TII; private: /// Is64Bit - Is the target 64-bits. @@ -56,7 +55,7 @@ private: unsigned BasePtr; public: - X86RegisterInfo(X86TargetMachine &tm, const TargetInstrInfo &tii); + X86RegisterInfo(X86TargetMachine &tm); // FIXME: This should be tablegen'd like getDwarfRegNum is int getSEHRegNum(unsigned i) const; diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td index be6282a..fbbb257 100644 --- a/lib/Target/X86/X86RegisterInfo.td +++ b/lib/Target/X86/X86RegisterInfo.td @@ -21,11 +21,11 @@ class X86Reg<string n, bits<16> Enc, list<Register> subregs = []> : Register<n> // Subregister indices. 
let Namespace = "X86" in { - def sub_8bit : SubRegIndex; - def sub_8bit_hi : SubRegIndex; - def sub_16bit : SubRegIndex; - def sub_32bit : SubRegIndex; - def sub_xmm : SubRegIndex; + def sub_8bit : SubRegIndex<8>; + def sub_8bit_hi : SubRegIndex<8, 8>; + def sub_16bit : SubRegIndex<16>; + def sub_32bit : SubRegIndex<32>; + def sub_xmm : SubRegIndex<128>; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td index 9fbde88..9f2c781 100644 --- a/lib/Target/X86/X86Schedule.td +++ b/lib/Target/X86/X86Schedule.td @@ -266,10 +266,14 @@ def IIC_SSE_PINSRW : InstrItinClass; def IIC_SSE_PABS_RR : InstrItinClass; def IIC_SSE_PABS_RM : InstrItinClass; -def IIC_SSE_SQRTP_RR : InstrItinClass; -def IIC_SSE_SQRTP_RM : InstrItinClass; -def IIC_SSE_SQRTS_RR : InstrItinClass; -def IIC_SSE_SQRTS_RM : InstrItinClass; +def IIC_SSE_SQRTPS_RR : InstrItinClass; +def IIC_SSE_SQRTPS_RM : InstrItinClass; +def IIC_SSE_SQRTSS_RR : InstrItinClass; +def IIC_SSE_SQRTSS_RM : InstrItinClass; +def IIC_SSE_SQRTPD_RR : InstrItinClass; +def IIC_SSE_SQRTPD_RM : InstrItinClass; +def IIC_SSE_SQRTSD_RR : InstrItinClass; +def IIC_SSE_SQRTSD_RM : InstrItinClass; def IIC_SSE_RCPP_RR : InstrItinClass; def IIC_SSE_RCPP_RM : InstrItinClass; diff --git a/lib/Target/X86/X86ScheduleAtom.td b/lib/Target/X86/X86ScheduleAtom.td index cce8f1b..cb0960a 100644 --- a/lib/Target/X86/X86ScheduleAtom.td +++ b/lib/Target/X86/X86ScheduleAtom.td @@ -211,10 +211,15 @@ def AtomItineraries : ProcessorItineraries< InstrItinData<IIC_SSE_UNPCK, [InstrStage<1, [Port0]>] >, - InstrItinData<IIC_SSE_SQRTP_RR, [InstrStage<13, [Port0, Port1]>] >, - InstrItinData<IIC_SSE_SQRTP_RM, [InstrStage<14, [Port0, Port1]>] >, - InstrItinData<IIC_SSE_SQRTS_RR, [InstrStage<11, [Port0, Port1]>] >, - InstrItinData<IIC_SSE_SQRTS_RM, [InstrStage<12, [Port0, Port1]>] >, + InstrItinData<IIC_SSE_SQRTPS_RR, [InstrStage<70, [Port0, Port1]>] >, + InstrItinData<IIC_SSE_SQRTPS_RM, [InstrStage<70, [Port0, Port1]>] >, + InstrItinData<IIC_SSE_SQRTSS_RR, [InstrStage<34, [Port0, Port1]>] >, + InstrItinData<IIC_SSE_SQRTSS_RM, [InstrStage<34, [Port0, Port1]>] >, + + InstrItinData<IIC_SSE_SQRTPD_RR, [InstrStage<125, [Port0, Port1]>] >, + InstrItinData<IIC_SSE_SQRTPD_RM, [InstrStage<125, [Port0, Port1]>] >, + InstrItinData<IIC_SSE_SQRTSD_RR, [InstrStage<62, [Port0, Port1]>] >, + InstrItinData<IIC_SSE_SQRTSD_RM, [InstrStage<62, [Port0, Port1]>] >, InstrItinData<IIC_SSE_RCPP_RR, [InstrStage<9, [Port0, Port1]>] >, InstrItinData<IIC_SSE_RCPP_RM, [InstrStage<10, [Port0, Port1]>] >, diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp index f934fdd..d1db79f 100644 --- a/lib/Target/X86/X86SelectionDAGInfo.cpp +++ b/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -27,7 +27,7 @@ X86SelectionDAGInfo::~X86SelectionDAGInfo() { } SDValue -X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, +X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, @@ -175,7 +175,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, } SDValue -X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, +X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline, diff --git a/lib/Target/X86/X86SelectionDAGInfo.h 
b/lib/Target/X86/X86SelectionDAGInfo.h index d1d66fe..d728af5 100644 --- a/lib/Target/X86/X86SelectionDAGInfo.h +++ b/lib/Target/X86/X86SelectionDAGInfo.h @@ -34,7 +34,7 @@ public: ~X86SelectionDAGInfo(); virtual - SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, + SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, @@ -42,7 +42,7 @@ public: MachinePointerInfo DstPtrInfo) const; virtual - SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 448d2e6..74da2a9 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -170,6 +170,26 @@ bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const { return isTargetELF() || TM.getRelocationModel() == Reloc::Static; } +static bool OSHasAVXSupport() { +#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\ + || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64) +#if defined(__GNUC__) + // Check xgetbv; this uses a .byte sequence instead of the instruction + // directly because older assemblers do not include support for xgetbv and + // there is no easy way to conditionally compile based on the assembler used. + int rEAX, rEDX; + __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a" (rEAX), "=d" (rEDX) : "c" (0)); +#elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK) + unsigned long long rEAX = _xgetbv(_XCR_XFEATURE_ENABLED_MASK); +#else + int rEAX = 0; // Ensures we return false +#endif + return (rEAX & 6) == 6; +#else + return false; +#endif +} + void X86Subtarget::AutoDetectSubtargetFeatures() { unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0; unsigned MaxLevel; @@ -192,7 +212,9 @@ void X86Subtarget::AutoDetectSubtargetFeatures() { if ((ECX >> 9) & 1) { X86SSELevel = SSSE3; ToggleFeature(X86::FeatureSSSE3);} if ((ECX >> 19) & 1) { X86SSELevel = SSE41; ToggleFeature(X86::FeatureSSE41);} if ((ECX >> 20) & 1) { X86SSELevel = SSE42; ToggleFeature(X86::FeatureSSE42);} - if ((ECX >> 28) & 1) { X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX); } + if (((ECX >> 27) & 1) && ((ECX >> 28) & 1) && OSHasAVXSupport()) { + X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX); + } bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0; bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0; diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index 00fa47f..0422a61 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -49,6 +49,7 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT, TLInfo(*this), TSInfo(*this), JITInfo(*this) { + initAsmInfo(); } void X86_64TargetMachine::anchor() { } @@ -69,6 +70,7 @@ X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT, TLInfo(*this), TSInfo(*this), JITInfo(*this) { + initAsmInfo(); } /// X86TargetMachine ctor - Create an X86 target. |
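The shift-count helpers added to X86InstrInfo.cpp in this commit rest on two hardware facts that several hunks above rely on: the processor truncates an immediate shift count to six bits under REX.W (five bits otherwise), and LEA's two-bit SIB.scale field can only express scales of 2, 4 and 8, i.e. left shifts of 1 through 3. The following is a minimal, self-contained C++ sketch of that logic; the function names mirror the patch's getTruncatedShiftCount and isTruncatedShiftCountForLEA, but everything here is illustrative scaffolding, not LLVM source.

#include <cstdint>
#include <cstdio>

// The processor masks an immediate shift count to 6 bits for 64-bit
// (REX.W) operations and to 5 bits for narrower ones.
static unsigned truncatedShiftCount(uint8_t Imm, bool HasREXW) {
  unsigned Mask = HasREXW ? 63 : 31;
  return Imm & Mask;
}

// LEA encodes its scale in the 2-bit SIB.scale field, so the only scales
// are 1, 2, 4 and 8. A left shift by 1..3 maps onto scales 2, 4 and 8;
// a shift by 0, or by 4 and up, cannot be expressed as an LEA scale.
static bool shiftCountFitsLEA(unsigned ShAmt) {
  return ShAmt > 0 && ShAmt < 4;
}

int main() {
  // A 64-bit shift whose immediate byte holds 65 executes as a shift by
  // 65 & 63 == 1, which an LEA with scale 2 can express.
  unsigned ShAmt = truncatedShiftCount(65, /*HasREXW=*/true);
  std::printf("truncated count: %u, fits LEA: %d\n", ShAmt,
              shiftCountFitsLEA(ShAmt));
  // A shift by 4 would need a scale of 16, which SIB cannot encode.
  std::printf("shift 4 fits LEA: %d\n", shiftCountFitsLEA(4));
  return 0;
}

This is also why the patched isDefConvertible tests the truncated count rather than the raw immediate: an over-wide immediate such as 65 still shifts, and still sets flags, exactly as a count of 1 would.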