Diffstat (limited to 'lib/Target/X86')
-rw-r--r--  lib/Target/X86/AsmParser/X86AsmParser.cpp                2
-rw-r--r--  lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp        49
-rw-r--r--  lib/Target/X86/InstPrinter/X86ATTInstPrinter.h          11
-rw-r--r--  lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp       3
-rw-r--r--  lib/Target/X86/InstPrinter/X86IntelInstPrinter.h         3
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp           12
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp       8
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp         2
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp          9
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h           34
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp      5
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp   4
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp       8
-rw-r--r--  lib/Target/X86/X86.td                                   20
-rw-r--r--  lib/Target/X86/X86AsmPrinter.cpp                        23
-rw-r--r--  lib/Target/X86/X86FastISel.cpp                           2
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp                       7
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp                     138
-rw-r--r--  lib/Target/X86/X86ISelLowering.h                         3
-rw-r--r--  lib/Target/X86/X86InstrAVX512.td                       155
-rw-r--r--  lib/Target/X86/X86InstrArithmetic.td                     8
-rw-r--r--  lib/Target/X86/X86InstrCompiler.td                      11
-rw-r--r--  lib/Target/X86/X86InstrFragmentsSIMD.td                 28
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp                         32
-rw-r--r--  lib/Target/X86/X86InstrSSE.td                           63
-rw-r--r--  lib/Target/X86/X86IntrinsicsInfo.h                      14
-rw-r--r--  lib/Target/X86/X86SelectionDAGInfo.cpp                   5
27 files changed, 383 insertions, 276 deletions
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index c24805a..93c6ea0 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2571,7 +2571,7 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallString<16> Tmp;
Tmp += Base;
Tmp += ' ';
- Op.setTokenValue(Tmp.str());
+ Op.setTokenValue(Tmp);
// If this instruction starts with an 'f', then it is a floating point stack
// instruction. These come in up to three forms for 32-bit, 64-bit, and
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index 65461af..f265f1d 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -33,15 +33,12 @@ using namespace llvm;
#define PRINT_ALIAS_INSTR
#include "X86GenAsmWriter.inc"
-void X86ATTInstPrinter::printRegName(raw_ostream &OS,
- unsigned RegNo) const {
- OS << markup("<reg:")
- << '%' << getRegisterName(RegNo)
- << markup(">");
+void X86ATTInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
+ OS << markup("<reg:") << '%' << getRegisterName(RegNo) << markup(">");
}
void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
- StringRef Annot) {
+ StringRef Annot, const MCSubtargetInfo &STI) {
const MCInstrDesc &Desc = MII.get(MI->getOpcode());
uint64_t TSFlags = Desc.TSFlags;
@@ -60,7 +57,7 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
// InstrInfo.td as soon as Requires clause is supported properly
// for InstAlias.
if (MI->getOpcode() == X86::CALLpcrel32 &&
- (getAvailableFeatures() & X86::Mode64Bit) != 0) {
+ (STI.getFeatureBits() & X86::Mode64Bit) != 0) {
OS << "\tcallq\t";
printPCRelImm(MI, 0, OS);
}
@@ -169,8 +166,7 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
printRegName(O, Op.getReg());
} else if (Op.isImm()) {
// Print X86 immediates as signed values.
- O << markup("<imm:")
- << '$' << formatImm((int64_t)Op.getImm())
+ O << markup("<imm:") << '$' << formatImm((int64_t)Op.getImm())
<< markup(">");
// If there are no instruction-specific comments, add a comment clarifying
@@ -182,24 +178,22 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
- O << markup("<imm:")
- << '$' << *Op.getExpr()
- << markup(">");
+ O << markup("<imm:") << '$' << *Op.getExpr() << markup(">");
}
}
void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- const MCOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg);
- const MCOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg);
- const MCOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp);
- const MCOperand &SegReg = MI->getOperand(Op+X86::AddrSegmentReg);
+ const MCOperand &BaseReg = MI->getOperand(Op + X86::AddrBaseReg);
+ const MCOperand &IndexReg = MI->getOperand(Op + X86::AddrIndexReg);
+ const MCOperand &DispSpec = MI->getOperand(Op + X86::AddrDisp);
+ const MCOperand &SegReg = MI->getOperand(Op + X86::AddrSegmentReg);
O << markup("<mem:");
// If this has a segment register, print it.
if (SegReg.getReg()) {
- printOperand(MI, Op+X86::AddrSegmentReg, O);
+ printOperand(MI, Op + X86::AddrSegmentReg, O);
O << ':';
}
@@ -215,16 +209,14 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
if (IndexReg.getReg() || BaseReg.getReg()) {
O << '(';
if (BaseReg.getReg())
- printOperand(MI, Op+X86::AddrBaseReg, O);
+ printOperand(MI, Op + X86::AddrBaseReg, O);
if (IndexReg.getReg()) {
O << ',';
- printOperand(MI, Op+X86::AddrIndexReg, O);
- unsigned ScaleVal = MI->getOperand(Op+X86::AddrScaleAmt).getImm();
+ printOperand(MI, Op + X86::AddrIndexReg, O);
+ unsigned ScaleVal = MI->getOperand(Op + X86::AddrScaleAmt).getImm();
if (ScaleVal != 1) {
- O << ','
- << markup("<imm:")
- << ScaleVal // never printed in hex.
+ O << ',' << markup("<imm:") << ScaleVal // never printed in hex.
<< markup(">");
}
}
@@ -236,13 +228,13 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
void X86ATTInstPrinter::printSrcIdx(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- const MCOperand &SegReg = MI->getOperand(Op+1);
+ const MCOperand &SegReg = MI->getOperand(Op + 1);
O << markup("<mem:");
// If this has a segment register, print it.
if (SegReg.getReg()) {
- printOperand(MI, Op+1, O);
+ printOperand(MI, Op + 1, O);
O << ':';
}
@@ -267,13 +259,13 @@ void X86ATTInstPrinter::printDstIdx(const MCInst *MI, unsigned Op,
void X86ATTInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &DispSpec = MI->getOperand(Op);
- const MCOperand &SegReg = MI->getOperand(Op+1);
+ const MCOperand &SegReg = MI->getOperand(Op + 1);
O << markup("<mem:");
// If this has a segment register, print it.
if (SegReg.getReg()) {
- printOperand(MI, Op+1, O);
+ printOperand(MI, Op + 1, O);
O << ':';
}
@@ -289,7 +281,6 @@ void X86ATTInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
void X86ATTInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- O << markup("<imm:")
- << '$' << formatImm(MI->getOperand(Op).getImm() & 0xff)
+ O << markup("<imm:") << '$' << formatImm(MI->getOperand(Op).getImm() & 0xff)
<< markup(">");
}
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index f71cb81..62b6b73 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
@@ -24,14 +24,12 @@ class MCOperand;
class X86ATTInstPrinter final : public MCInstPrinter {
public:
X86ATTInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
- const MCRegisterInfo &MRI, const MCSubtargetInfo &STI)
- : MCInstPrinter(MAI, MII, MRI) {
- // Initialize the set of available features.
- setAvailableFeatures(STI.getFeatureBits());
- }
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
void printRegName(raw_ostream &OS, unsigned RegNo) const override;
- void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) override;
+ void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
// Autogenerated by tblgen, returns true if we successfully printed an
// alias.
@@ -142,7 +140,6 @@ public:
private:
bool HasCustomInstComment;
};
-
}
#endif
diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
index 91d1828..4d92daf 100644
--- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
@@ -33,7 +33,8 @@ void X86IntelInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
}
void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
- StringRef Annot) {
+ StringRef Annot,
+ const MCSubtargetInfo &STI) {
const MCInstrDesc &Desc = MII.get(MI->getOpcode());
uint64_t TSFlags = Desc.TSFlags;
diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index 2150144..6e371da 100644
--- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
@@ -28,7 +28,8 @@ public:
: MCInstPrinter(MAI, MII, MRI) {}
void printRegName(raw_ostream &OS, unsigned RegNo) const override;
- void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) override;
+ void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index a400d46..b84c983 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -360,7 +360,7 @@ public:
ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
: ELFX86AsmBackend(T, OSABI, CPU) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
}
};
@@ -370,7 +370,7 @@ public:
ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
: ELFX86AsmBackend(T, OSABI, CPU) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI,
ELF::EM_X86_64);
}
@@ -381,7 +381,7 @@ public:
ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
: ELFX86AsmBackend(T, OSABI, CPU) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
}
};
@@ -395,7 +395,7 @@ public:
, Is64Bit(is64Bit) {
}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86WinCOFFObjectWriter(OS, Is64Bit);
}
};
@@ -752,7 +752,7 @@ public:
StringRef CPU)
: DarwinX86AsmBackend(T, MRI, CPU, false) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
MachO::CPU_TYPE_I386,
MachO::CPU_SUBTYPE_I386_ALL);
@@ -772,7 +772,7 @@ public:
StringRef CPU, MachO::CPUSubTypeX86 st)
: DarwinX86AsmBackend(T, MRI, CPU, true), Subtype(st) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
MachO::CPU_TYPE_X86_64, Subtype);
}
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index 76a9d2b..4508883 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -22,7 +22,8 @@ namespace {
public:
X86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine);
- virtual ~X86ELFObjectWriter();
+ ~X86ELFObjectWriter() override;
+
protected:
unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsPCRel) const override;
@@ -248,9 +249,8 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
return getRelocType32(Modifier, getType32(Type), IsPCRel);
}
-MCObjectWriter *llvm::createX86ELFObjectWriter(raw_ostream &OS,
- bool IsELF64,
- uint8_t OSABI,
+MCObjectWriter *llvm::createX86ELFObjectWriter(raw_pwrite_stream &OS,
+ bool IsELF64, uint8_t OSABI,
uint16_t EMachine) {
MCELFObjectTargetWriter *MOTW =
new X86ELFObjectWriter(IsELF64, OSABI, EMachine);
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 9b98a3e..e27b7cb 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -39,7 +39,7 @@ public:
: MCII(mcii), Ctx(ctx) {
}
- ~X86MCCodeEmitter() {}
+ ~X86MCCodeEmitter() override {}
bool is64BitMode(const MCSubtargetInfo &STI) const {
return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 0946326..5bdd844 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -80,7 +80,7 @@ MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(StringRef TT, StringRef CPU,
std::string ArchFS = X86_MC::ParseX86Triple(TT);
if (!FS.empty()) {
if (!ArchFS.empty())
- ArchFS = ArchFS + "," + FS.str();
+ ArchFS = (Twine(ArchFS) + "," + FS).str();
else
ArchFS = FS;
}
@@ -207,14 +207,13 @@ static MCCodeGenInfo *createX86MCCodeGenInfo(StringRef TT, Reloc::Model RM,
return X;
}
-static MCInstPrinter *createX86MCInstPrinter(const Target &T,
+static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
const MCInstrInfo &MII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI) {
+ const MCRegisterInfo &MRI) {
if (SyntaxVariant == 0)
- return new X86ATTInstPrinter(MAI, MII, MRI, STI);
+ return new X86ATTInstPrinter(MAI, MII, MRI);
if (SyntaxVariant == 1)
return new X86IntelInstPrinter(MAI, MII, MRI);
return nullptr;
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 6f50f11..dcdae1d 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -31,10 +31,11 @@ class Target;
class Triple;
class StringRef;
class raw_ostream;
+class raw_pwrite_stream;
extern Target TheX86_32Target, TheX86_64Target;
-/// DWARFFlavour - Flavour of dwarf regnumbers
+/// Flavour of dwarf regnumbers
///
namespace DWARFFlavour {
enum {
@@ -42,7 +43,7 @@ namespace DWARFFlavour {
};
}
-/// N86 namespace - Native X86 register numbers
+/// Native X86 register numbers
///
namespace N86 {
enum {
@@ -57,9 +58,8 @@ namespace X86_MC {
void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
- /// createX86MCSubtargetInfo - Create a X86 MCSubtargetInfo instance.
- /// This is exposed so Asm parser, etc. do not need to go through
- /// TargetRegistry.
+ /// Create a X86 MCSubtargetInfo instance. This is exposed so Asm parser, etc.
+ /// do not need to go through TargetRegistry.
MCSubtargetInfo *createX86MCSubtargetInfo(StringRef TT, StringRef CPU,
StringRef FS);
}
@@ -78,27 +78,25 @@ MCAsmBackend *createX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
///
/// Takes ownership of \p AB and \p CE.
MCStreamer *createX86WinCOFFStreamer(MCContext &C, MCAsmBackend &AB,
- raw_ostream &OS, MCCodeEmitter *CE,
+ raw_pwrite_stream &OS, MCCodeEmitter *CE,
bool RelaxAll);
-/// createX86MachObjectWriter - Construct an X86 Mach-O object writer.
-MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS,
- bool Is64Bit,
+/// Construct an X86 Mach-O object writer.
+MCObjectWriter *createX86MachObjectWriter(raw_pwrite_stream &OS, bool Is64Bit,
uint32_t CPUType,
uint32_t CPUSubtype);
-/// createX86ELFObjectWriter - Construct an X86 ELF object writer.
-MCObjectWriter *createX86ELFObjectWriter(raw_ostream &OS,
- bool IsELF64,
- uint8_t OSABI,
- uint16_t EMachine);
-/// createX86WinCOFFObjectWriter - Construct an X86 Win COFF object writer.
-MCObjectWriter *createX86WinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit);
+/// Construct an X86 ELF object writer.
+MCObjectWriter *createX86ELFObjectWriter(raw_pwrite_stream &OS, bool IsELF64,
+ uint8_t OSABI, uint16_t EMachine);
+/// Construct an X86 Win COFF object writer.
+MCObjectWriter *createX86WinCOFFObjectWriter(raw_pwrite_stream &OS,
+ bool Is64Bit);
-/// createX86_64MachORelocationInfo - Construct X86-64 Mach-O relocation info.
+/// Construct X86-64 Mach-O relocation info.
MCRelocationInfo *createX86_64MachORelocationInfo(MCContext &Ctx);
-/// createX86_64ELFORelocationInfo - Construct X86-64 ELF relocation info.
+/// Construct X86-64 ELF relocation info.
MCRelocationInfo *createX86_64ELFRelocationInfo(MCContext &Ctx);
} // End llvm namespace
diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 7a83f4c..38539cd 100644
--- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -575,9 +575,8 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
}
-MCObjectWriter *llvm::createX86MachObjectWriter(raw_ostream &OS,
- bool Is64Bit,
- uint32_t CPUType,
+MCObjectWriter *llvm::createX86MachObjectWriter(raw_pwrite_stream &OS,
+ bool Is64Bit, uint32_t CPUType,
uint32_t CPUSubtype) {
return createMachObjectWriter(new X86MachObjectWriter(Is64Bit,
CPUType,
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index e1df5c2..bd1bc99 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -25,7 +25,7 @@ namespace {
class X86WinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
public:
X86WinCOFFObjectWriter(bool Is64Bit);
- virtual ~X86WinCOFFObjectWriter();
+ ~X86WinCOFFObjectWriter() override;
unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsCrossSection,
@@ -90,7 +90,7 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
llvm_unreachable("Unsupported COFF machine type.");
}
-MCObjectWriter *llvm::createX86WinCOFFObjectWriter(raw_ostream &OS,
+MCObjectWriter *llvm::createX86WinCOFFObjectWriter(raw_pwrite_stream &OS,
bool Is64Bit) {
MCWinCOFFObjectTargetWriter *MOTW = new X86WinCOFFObjectWriter(Is64Bit);
return createWinCOFFObjectWriter(MOTW, OS);
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
index 5690efe..92f42b6 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
@@ -18,8 +18,8 @@ class X86WinCOFFStreamer : public MCWinCOFFStreamer {
Win64EH::UnwindEmitter EHStreamer;
public:
X86WinCOFFStreamer(MCContext &C, MCAsmBackend &AB, MCCodeEmitter *CE,
- raw_ostream &OS)
- : MCWinCOFFStreamer(C, AB, *CE, OS) { }
+ raw_pwrite_stream &OS)
+ : MCWinCOFFStreamer(C, AB, *CE, OS) {}
void EmitWinEHHandlerData() override;
void EmitWindowsUnwindTables() override;
@@ -49,8 +49,8 @@ void X86WinCOFFStreamer::FinishImpl() {
}
MCStreamer *llvm::createX86WinCOFFStreamer(MCContext &C, MCAsmBackend &AB,
- raw_ostream &OS, MCCodeEmitter *CE,
- bool RelaxAll) {
+ raw_pwrite_stream &OS,
+ MCCodeEmitter *CE, bool RelaxAll) {
X86WinCOFFStreamer *S = new X86WinCOFFStreamer(C, AB, CE, OS);
S->getAssembler().setRelaxAll(RelaxAll);
return S;
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index 4f9836d..d13f155 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -273,17 +273,15 @@ def : SilvermontProc<"silvermont">;
def : SilvermontProc<"slm">; // Legacy alias.
// "Arrandale" along with corei3 and corei5
-class NehalemProc<string Name, list<SubtargetFeature> AdditionalFeatures>
- : ProcessorModel<Name, SandyBridgeModel, !listconcat([
- FeatureSSE42,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeatureFastUAMem,
- FeaturePOPCNT
- ],
- AdditionalFeatures)>;
-def : NehalemProc<"nehalem", []>;
-def : NehalemProc<"corei7", [FeatureAES]>;
+class NehalemProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureSSE42,
+ FeatureCMPXCHG16B,
+ FeatureSlowBTMem,
+ FeatureFastUAMem,
+ FeaturePOPCNT
+ ]>;
+def : NehalemProc<"nehalem">;
+def : NehalemProc<"corei7">;
// Westmere is a similar machine to nehalem with some additional features.
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index f6033a7..2ed4975 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -523,7 +523,6 @@ void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
// must be registered in .sxdata. Use of any unregistered handlers will
// cause the process to terminate immediately. LLVM does not know how to
// register any SEH handlers, so its object files should be safe.
- S->setAbsolute();
OutStreamer.EmitSymbolAttribute(S, MCSA_Global);
OutStreamer.EmitAssignment(
S, MCConstantExpr::Create(int64_t(1), MMI->getContext()));
@@ -723,28 +722,8 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
}
- if (TT.isOSBinFormatELF()) {
- const TargetLoweringObjectFileELF &TLOFELF =
- static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
-
- MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
-
- // Output stubs for external and common global variables.
- MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
- if (!Stubs.empty()) {
- OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const DataLayout *TD = TM.getDataLayout();
-
- for (const auto &Stub : Stubs) {
- OutStreamer.EmitLabel(Stub.first);
- OutStreamer.EmitSymbolValue(Stub.second.getPointer(),
- TD->getPointerSize());
- }
- Stubs.clear();
- }
-
+ if (TT.isOSBinFormatELF())
SM.serializeToStackMapSection();
- }
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index cba140f..cdf10a7 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -2417,6 +2417,8 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
// FIXME may need to add RegState::Debug to any registers produced,
// although ESP/EBP should be the only ones at the moment.
+ assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+ "Expected inlined-at fields to agree");
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)
.addImm(0)
.addMetadata(DI->getVariable())
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index fb12ce5..5da7acf 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2187,7 +2187,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
break;
- unsigned ShlOp, Op;
+ unsigned ShlOp, AddOp, Op;
MVT CstVT = NVT;
// Check the minimum bitwidth for the new constant.
@@ -2208,6 +2208,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case MVT::i32:
assert(CstVT == MVT::i8);
ShlOp = X86::SHL32ri;
+ AddOp = X86::ADD32rr;
switch (Opcode) {
default: llvm_unreachable("Impossible opcode");
@@ -2219,6 +2220,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case MVT::i64:
assert(CstVT == MVT::i8 || CstVT == MVT::i32);
ShlOp = X86::SHL64ri;
+ AddOp = X86::ADD64rr;
switch (Opcode) {
default: llvm_unreachable("Impossible opcode");
@@ -2232,6 +2234,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Emit the smaller op and the shift.
SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
+ if (ShlVal == 1)
+ return CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
+ SDValue(New, 0));
return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
getI8Imm(ShlVal));
}
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 8b92e70..c32412a 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -32,6 +32,7 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
@@ -2142,6 +2143,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile*/false, /*AlwaysInline=*/true,
+ /*isTailCall*/false,
MachinePointerInfo(), MachinePointerInfo());
}
@@ -2277,6 +2279,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
const Function* Fn = MF.getFunction();
if (Fn->hasExternalLinkage() &&
@@ -2416,6 +2419,13 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
MFI->CreateFixedObject(1, StackSize, true));
}
+ MachineModuleInfo &MMI = MF.getMMI();
+ const Function *WinEHParent = nullptr;
+ if (IsWin64 && MMI.hasWinEHFuncInfo(Fn))
+ WinEHParent = MMI.getWinEHParent(Fn);
+ bool IsWinEHOutlined = WinEHParent && WinEHParent != Fn;
+ bool IsWinEHParent = WinEHParent && WinEHParent == Fn;
+
// Figure out if XMM registers are in use.
assert(!(MF.getTarget().Options.UseSoftFloat &&
Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
@@ -2452,7 +2462,6 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
}
if (IsWin64) {
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
// Get to the caller-allocated home save location. Add 8 to account
// for the return address.
int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
@@ -2505,6 +2514,27 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
+ } else if (IsWinEHOutlined) {
+ // Get to the caller-allocated home save location. Add 8 to account
+ // for the return address.
+ int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
+ FuncInfo->setRegSaveFrameIndex(MFI->CreateFixedObject(
+ /*Size=*/1, /*SPOffset=*/HomeOffset + 8, /*Immutable=*/false));
+
+ MMI.getWinEHFuncInfo(Fn)
+ .CatchHandlerParentFrameObjIdx[const_cast<Function *>(Fn)] =
+ FuncInfo->getRegSaveFrameIndex();
+
+ // Store the second integer parameter (rdx) into rsp+16 relative to the
+ // stack pointer at the entry of the function.
+ SDValue RSFIN =
+ DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), getPointerTy());
+ unsigned GPR = MF.addLiveIn(X86::RDX, &X86::GR64RegClass);
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64);
+ Chain = DAG.getStore(
+ Val.getValue(1), dl, Val, RSFIN,
+ MachinePointerInfo::getFixedStack(FuncInfo->getRegSaveFrameIndex()),
+ /*isVolatile=*/true, /*isNonTemporal=*/false, /*Alignment=*/0);
}
if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
@@ -2571,6 +2601,17 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
FuncInfo->setArgumentStackSize(StackSize);
+ if (IsWinEHParent) {
+ int UnwindHelpFI = MFI->CreateStackObject(8, 8, /*isSS=*/false);
+ SDValue StackSlot = DAG.getFrameIndex(UnwindHelpFI, MVT::i64);
+ MMI.getWinEHFuncInfo(MF.getFunction()).UnwindHelpFrameIdx = UnwindHelpFI;
+ SDValue Neg2 = DAG.getConstant(-2, MVT::i64);
+ Chain = DAG.getStore(Chain, dl, Neg2, StackSlot,
+ MachinePointerInfo::getFixedStack(UnwindHelpFI),
+ /*isVolatile=*/true,
+ /*isNonTemporal=*/false, /*Alignment=*/0);
+ }
+
return Chain;
}
@@ -4420,6 +4461,29 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
SDLoc dl(Op);
SDValue V;
bool First = true;
+
+ // SSE4.1 - use PINSRB to insert each byte directly.
+ if (Subtarget->hasSSE41()) {
+ for (unsigned i = 0; i < 16; ++i) {
+ bool isNonZero = (NonZeros & (1 << i)) != 0;
+ if (isNonZero) {
+ if (First) {
+ if (NumZero)
+ V = getZeroVector(MVT::v16i8, Subtarget, DAG, dl);
+ else
+ V = DAG.getUNDEF(MVT::v16i8);
+ First = false;
+ }
+ V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
+ MVT::v16i8, V, Op.getOperand(i),
+ DAG.getIntPtrConstant(i));
+ }
+ }
+
+ return V;
+ }
+
+ // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
for (unsigned i = 0; i < 16; ++i) {
bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
if (ThisIsNonZero && First) {
@@ -5650,14 +5714,24 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
}
+ // We can't directly insert an i8 or i16 into a vector, so zero extend
+ // it to i32 first.
if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
- Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
if (VT.is256BitVector()) {
- SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
- Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
+ if (Subtarget->hasAVX()) {
+ Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v8i32, Item);
+ Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
+ } else {
+ // Without AVX, we need to extend to a 128-bit vector and then
+ // insert into the 256-bit vector.
+ Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
+ SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
+ Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
+ }
} else {
assert(VT.is128BitVector() && "Expected an SSE value type!");
+ Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
}
return DAG.getNode(ISD::BITCAST, dl, VT, Item);
@@ -5877,7 +5951,7 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
unsigned NumElems = ResVT.getVectorNumElements();
- if(ResVT.is256BitVector())
+ if (ResVT.is256BitVector())
return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
if (Op.getNumOperands() == 4) {
@@ -9281,15 +9355,6 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (isShuffleEquivalent(V1, V2, Mask, {5, 1, 7, 3}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
- // If we have a single input to the zero element, insert that into V1 if we
- // can do so cheaply.
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
- if (NumV2Elements == 1 && Mask[0] >= 4)
- if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
- DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
- return Insertion;
-
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
Subtarget, DAG))
return Blend;
@@ -9432,15 +9497,6 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
- // If we have a single input to the zero element, insert that into V1 if we
- // can do so cheaply.
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 8; });
- if (NumV2Elements == 1 && Mask[0] >= 8)
- if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
- DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
- return Insertion;
-
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
Subtarget, DAG))
return Blend;
@@ -9811,6 +9867,18 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
ArrayRef<int> Mask = SVOp->getMask();
+ // If we have a single input to the zero element, insert that into V1 if we
+ // can do so cheaply.
+ int NumElts = VT.getVectorNumElements();
+ int NumV2Elements = std::count_if(Mask.begin(), Mask.end(), [NumElts](int M) {
+ return M >= NumElts;
+ });
+
+ if (NumV2Elements == 1 && Mask[0] >= NumElts)
+ if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+ DL, VT, V1, V2, Mask, Subtarget, DAG))
+ return Insertion;
+
// There is a really nice hard cut-over between AVX1 and AVX2 that means we can
// check for those subtargets here and avoid much of the subtarget querying in
// the per-vector-type lowering routines. With AVX1 we have essentially *zero*
@@ -11903,7 +11971,7 @@ static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
// Now we have only mask extension
assert(InVT.getVectorElementType() == MVT::i1);
SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
- const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+ const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -11979,7 +12047,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
}
SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
- const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+ const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -12750,6 +12818,16 @@ SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
return SDValue();
}
+/// If we have at least two divisions that use the same divisor, convert to
+/// multiplication by a reciprocal. This may need to be adjusted for a given
+/// CPU if a division's cost is not at least twice the cost of a multiplication.
+/// This is because we still need one division to calculate the reciprocal and
+/// then we need two multiplies by that reciprocal as replacements for the
+/// original divisions.
+bool X86TargetLowering::combineRepeatedFPDivisors(unsigned NumUsers) const {
+ return NumUsers > 1;
+}
+
static bool isAllOnes(SDValue V) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
return C && C->isAllOnesValue();
@@ -14427,7 +14505,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
- false,
+ false, false,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
@@ -15220,10 +15298,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
}
case PREFETCH: {
SDValue Hint = Op.getOperand(6);
- unsigned HintVal;
- if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
- (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
- llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
+ unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
+ assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
SDValue Chain = Op.getOperand(0);
SDValue Mask = Op.getOperand(2);
@@ -24175,7 +24251,7 @@ TargetLowering::ConstraintWeight
break;
case 'G':
case 'C':
- if (dyn_cast<ConstantFP>(CallOperandVal)) {
+ if (isa<ConstantFP>(CallOperandVal)) {
weight = CW_Constant;
}
break;
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index dd20ec2..5130c37 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -1072,6 +1072,9 @@ namespace llvm {
/// Use rcp* to speed up fdiv calculations.
SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
unsigned &RefinementSteps) const override;
+
+ /// Reassociate floating point divisions into multiply by reciprocal.
+ bool combineRepeatedFPDivisors(unsigned NumUsers) const override;
};
namespace X86 {
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 509602f..0959162 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -2971,60 +2971,36 @@ multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
itins, HasBWI, IsCommutable>;
}
-multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
- ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
- PatFrag memop_frag, X86MemOperand x86memop,
- PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
- string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
- let isCommutable = IsCommutable in
- {
- def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V;
- def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, RC:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
- [], itins.rr>, EVEX_4V, EVEX_K;
- def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}} {z}" ,
- "|$dst {${mask}} {z}, $src1, $src2}"),
- [], itins.rr>, EVEX_4V, EVEX_KZ;
- }
+multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
+ SDNode OpNode,X86VectorVTInfo _Src,
+ X86VectorVTInfo _Dst, bit IsCommutable = 0> {
+ defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
+ "$src2, $src1","$src1, $src2",
+ (_Dst.VT (OpNode
+ (_Src.VT _Src.RC:$src1),
+ (_Src.VT _Src.RC:$src2))),
+ "",itins.rr, IsCommutable>,
+ AVX512BIBase, EVEX_4V;
let mayLoad = 1 in {
- def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V;
- def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
- [], itins.rm>, EVEX_4V, EVEX_K;
- def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
- [], itins.rm>, EVEX_4V, EVEX_KZ;
- def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
- ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
- [], itins.rm>, EVEX_4V, EVEX_B;
- def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
- ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
- BrdcstStr, "}"),
- [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
- def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
- ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
- BrdcstStr, "}"),
- [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
+ (bitconvert (_Src.LdFrag addr:$src2)))),
+ "", itins.rm>,
+ AVX512BIBase, EVEX_4V;
+
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Dst.ScalarMemOp:$src2),
+ OpcodeStr,
+ "${src2}"##_Dst.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_Dst.BroadcastStr,
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bc_v16i32
+ (_Dst.VT (X86VBroadcast
+ (_Dst.ScalarLdFrag addr:$src2)))))),
+ "", itins.rm>,
+ AVX512BIBase, EVEX_4V, EVEX_B;
}
}
@@ -3039,24 +3015,13 @@ defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmull", mul,
defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
-defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
- loadv8i64, i512mem, loadi64, i64mem, "{1to8}",
- SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
- EVEX_CD8<64, CD8VF>, VEX_W;
-
-defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
- loadv8i64, i512mem, loadi64, i64mem, "{1to8}",
- SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
+defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", SSE_INTALU_ITINS_P,
+ X86pmuldq, v16i32_info, v8i64_info, 1>,
+ T8PD, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
-def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
- (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
-
-def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
- (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
-def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
- (VPMULDQZrr VR512:$src1, VR512:$src2)>;
+defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", SSE_INTMUL_ITINS_P,
+ X86pmuludq, v16i32_info, v8i64_info, 1>,
+ EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxs", X86smax,
SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
@@ -3208,7 +3173,7 @@ defm VPOR : avx512_binop_rm_vl_dq<0xEB, 0xEB, "vpor", or,
defm VPXOR : avx512_binop_rm_vl_dq<0xEF, 0xEF, "vpxor", xor,
SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPANDN : avx512_binop_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp,
- SSE_INTALU_ITINS_P, HasAVX512, 1>;
+ SSE_INTALU_ITINS_P, HasAVX512, 0>;
//===----------------------------------------------------------------------===//
// AVX-512 FP arithmetic
@@ -3743,16 +3708,19 @@ multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.ScalarMemOp:$src3),
- OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"), !strconcat("$src2, ${src3}", _.BroadcastStr ),
- (OpNode _.RC:$src1, _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr ),
+ (OpNode _.RC:$src1,
+ _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
AVX512FMA3Base, EVEX_B;
}
} // Constraints = "$src1 = $dst"
let Constraints = "$src1 = $dst" in {
// Omitting the parameter OpNode (= null_frag) disables ISel pattern matching.
-multiclass avx512_fma3_round_rrb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
- SDPatternOperator OpNode> {
+multiclass avx512_fma3_round_rrb<bits<8> opc, string OpcodeStr,
+ X86VectorVTInfo _,
+ SDPatternOperator OpNode> {
defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
@@ -3772,7 +3740,6 @@ multiclass avx512_fma3p_forms<bits<8> opc213, bits<8> opc231,
SDPatternOperator OpNode> {
defm v213r : avx512_fma3p_rm<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
VTI, OpNode>, EVEX_CD8<VTI.EltSize, CD8VF>;
-
defm v231r : avx512_fma3p_rm<opc231, !strconcat(OpcodeStr, "231", VTI.Suffix),
VTI>, EVEX_CD8<VTI.EltSize, CD8VF>;
}
@@ -3794,12 +3761,14 @@ let ExeDomain = SSEPackedSingle in {
let ExeDomain = SSEPackedDouble in {
defm NAME##PDZ : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
v8f64_info, OpNode>,
- avx512_fma3_round_forms<opc213, OpcodeStr,
- v8f64_info, OpNodeRnd>, EVEX_V512, VEX_W;
+ avx512_fma3_round_forms<opc213, OpcodeStr, v8f64_info,
+ OpNodeRnd>, EVEX_V512, VEX_W;
defm NAME##PDZ256 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
- v4f64x_info, OpNode>, EVEX_V256, VEX_W;
+ v4f64x_info, OpNode>,
+ EVEX_V256, VEX_W;
defm NAME##PDZ128 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
- v2f64x_info, OpNode>, EVEX_V128, VEX_W;
+ v2f64x_info, OpNode>,
+ EVEX_V128, VEX_W;
}
}
@@ -3830,26 +3799,29 @@ multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
} // Constraints = "$src1 = $dst"
-
-multiclass avx512_fma3p_m132_f<bits<8> opc,
- string OpcodeStr,
- SDNode OpNode> {
+multiclass avx512_fma3p_m132_f<bits<8> opc, string OpcodeStr, SDNode OpNode> {
let ExeDomain = SSEPackedSingle in {
defm NAME##PSZ : avx512_fma3p_m132<opc, OpcodeStr##ps,
- OpNode,v16f32_info>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ OpNode,v16f32_info>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
defm NAME##PSZ256 : avx512_fma3p_m132<opc, OpcodeStr##ps,
- OpNode, v8f32x_info>, EVEX_V256, EVEX_CD8<32, CD8VF>;
+ OpNode, v8f32x_info>, EVEX_V256,
+ EVEX_CD8<32, CD8VF>;
defm NAME##PSZ128 : avx512_fma3p_m132<opc, OpcodeStr##ps,
- OpNode, v4f32x_info>, EVEX_V128, EVEX_CD8<32, CD8VF>;
+ OpNode, v4f32x_info>, EVEX_V128,
+ EVEX_CD8<32, CD8VF>;
}
let ExeDomain = SSEPackedDouble in {
defm NAME##PDZ : avx512_fma3p_m132<opc, OpcodeStr##pd,
- OpNode, v8f64_info>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VF>;
+ OpNode, v8f64_info>, EVEX_V512,
+ VEX_W, EVEX_CD8<32, CD8VF>;
defm NAME##PDZ256 : avx512_fma3p_m132<opc, OpcodeStr##pd,
- OpNode, v4f64x_info>, EVEX_V256, VEX_W, EVEX_CD8<32, CD8VF>;
+ OpNode, v4f64x_info>, EVEX_V256,
+ VEX_W, EVEX_CD8<32, CD8VF>;
defm NAME##PDZ128 : avx512_fma3p_m132<opc, OpcodeStr##pd,
- OpNode, v2f64x_info>, EVEX_V128, VEX_W, EVEX_CD8<32, CD8VF>;
+ OpNode, v2f64x_info>, EVEX_V128,
+ VEX_W, EVEX_CD8<32, CD8VF>;
}
}
@@ -3860,7 +3832,6 @@ defm VFMSUBADD132 : avx512_fma3p_m132_f<0x97, "vfmsubadd132", X86Fmsubadd>;
defm VFNMADD132 : avx512_fma3p_m132_f<0x9C, "vfnmadd132", X86Fnmadd>;
defm VFNMSUB132 : avx512_fma3p_m132_f<0x9E, "vfnmsub132", X86Fnmsub>;
-
// Scalar FMA
let Constraints = "$src1 = $dst" in {
multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -3883,7 +3854,6 @@ multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(OpVT (OpNode RC:$src2, RC:$src1,
(mem_frag addr:$src3))))]>;
}
-
} // Constraints = "$src1 = $dst"
defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
@@ -3920,6 +3890,7 @@ let hasSideEffects = 0 in {
EVEX_4V;
} // hasSideEffects = 0
}
+
let Predicates = [HasAVX512] in {
defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td
index 78efc4d..5e19ad4 100644
--- a/lib/Target/X86/X86InstrArithmetic.td
+++ b/lib/Target/X86/X86InstrArithmetic.td
@@ -1216,10 +1216,10 @@ def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
let isCompare = 1 in {
let Defs = [EFLAGS] in {
let isCommutable = 1 in {
- def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat, MRMSrcReg>;
- def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat, MRMSrcReg>;
- def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat, MRMSrcReg>;
- def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat, MRMSrcReg>;
+ def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat>;
+ def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat>;
+ def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat>;
+ def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat>;
} // isCommutable
def TEST8rm : BinOpRM_F<0x84, "test", Xi8 , X86testpat>;
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index 18bbe5d..45e6d0a 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -1232,7 +1232,11 @@ def : Pat<(store (add (loadi64 addr:$dst), 0x00000000800000000), addr:$dst),
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of a 8 bit constant, use that.
+// This can also reduce instruction size by eliminating the need for the REX
+// prefix.
+// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
+let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
(SUBREG_TO_REG
(i64 0),
@@ -1248,8 +1252,13 @@ def : Pat<(and GR64:$src, i64immZExt32:$imm),
(EXTRACT_SUBREG GR64:$src, sub_32bit),
(i32 (GetLo32XForm imm:$imm))),
sub_32bit)>;
+} // AddedComplexity = 1
+// AddedComplexity is needed due to the increased complexity on the
+// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
+// the MOVZX patterns keeps them together in DAGIsel tables.
+let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
(MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
@@ -1272,6 +1281,7 @@ def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
(MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
sub_32bit)>;
// r & (2^16-1) ==> movz
+let AddedComplexity = 1 in // Give priority over i64immZExt32.
def : Pat<(and GR64:$src, 0xffff),
(SUBREG_TO_REG (i64 0),
(MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
@@ -1290,6 +1300,7 @@ def : Pat<(and GR16:$src1, 0xff),
(EXTRACT_SUBREG (MOVZX32rr8 (i8
(EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
Requires<[In64BitMode]>;
+} // AddedComplexity = 1
// sext_inreg patterns
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 0bdabdf..b75a9f4 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -631,53 +631,53 @@ def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedLoadSDNode>(N))
- return cast<MaskedLoadSDNode>(N)->getAlignment() >= 16;
+ if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+ return Load->getAlignment() >= 16;
return false;
}]>;
def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedLoadSDNode>(N))
- return cast<MaskedLoadSDNode>(N)->getAlignment() >= 32;
+ if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+ return Load->getAlignment() >= 32;
return false;
}]>;
def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedLoadSDNode>(N))
- return cast<MaskedLoadSDNode>(N)->getAlignment() >= 64;
+ if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+ return Load->getAlignment() >= 64;
return false;
}]>;
def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- return (dyn_cast<MaskedLoadSDNode>(N) != 0);
+ return isa<MaskedLoadSDNode>(N);
}]>;
def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedStoreSDNode>(N))
- return cast<MaskedStoreSDNode>(N)->getAlignment() >= 16;
+ if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+ return Store->getAlignment() >= 16;
return false;
}]>;
def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedStoreSDNode>(N))
- return cast<MaskedStoreSDNode>(N)->getAlignment() >= 32;
+ if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+ return Store->getAlignment() >= 32;
return false;
}]>;
def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedStoreSDNode>(N))
- return cast<MaskedStoreSDNode>(N)->getAlignment() >= 64;
+ if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+ return Store->getAlignment() >= 64;
return false;
}]>;
def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- return (dyn_cast<MaskedStoreSDNode>(N) != 0);
+ return isa<MaskedStoreSDNode>(N);
}]>;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 538ec1c..fbfd868 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -559,6 +559,15 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::MMX_PABSWrr64, X86::MMX_PABSWrm64, 0 },
{ X86::MMX_PSHUFWri, X86::MMX_PSHUFWmi, 0 },
+ // 3DNow! version of foldable instructions
+ { X86::PF2IDrr, X86::PF2IDrm, 0 },
+ { X86::PF2IWrr, X86::PF2IWrm, 0 },
+ { X86::PFRCPrr, X86::PFRCPrm, 0 },
+ { X86::PFRSQRTrr, X86::PFRSQRTrm, 0 },
+ { X86::PI2FDrr, X86::PI2FDrm, 0 },
+ { X86::PI2FWrr, X86::PI2FWrm, 0 },
+ { X86::PSWAPDrr, X86::PSWAPDrm, 0 },
+
// AVX 128-bit versions of foldable instructions
{ X86::Int_VCOMISDrr, X86::Int_VCOMISDrm, 0 },
{ X86::Int_VCOMISSrr, X86::Int_VCOMISSrm, 0 },
@@ -943,6 +952,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::CMPPSrri, X86::CMPPSrmi, TB_ALIGN_16 },
{ X86::CMPSDrr, X86::CMPSDrm, 0 },
{ X86::CMPSSrr, X86::CMPSSrm, 0 },
+ { X86::CRC32r32r32, X86::CRC32r32m32, 0 },
+ { X86::CRC32r64r64, X86::CRC32r64m64, 0 },
{ X86::DIVPDrr, X86::DIVPDrm, TB_ALIGN_16 },
{ X86::DIVPSrr, X86::DIVPSrm, TB_ALIGN_16 },
{ X86::DIVSDrr, X86::DIVSDrm, 0 },
@@ -1201,6 +1212,25 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::MMX_PUNPCKLWDirr, X86::MMX_PUNPCKLWDirm, 0 },
{ X86::MMX_PXORirr, X86::MMX_PXORirm, 0 },
+ // 3DNow! version of foldable instructions
+ { X86::PAVGUSBrr, X86::PAVGUSBrm, 0 },
+ { X86::PFACCrr, X86::PFACCrm, 0 },
+ { X86::PFADDrr, X86::PFADDrm, 0 },
+ { X86::PFCMPEQrr, X86::PFCMPEQrm, 0 },
+ { X86::PFCMPGErr, X86::PFCMPGErm, 0 },
+ { X86::PFCMPGTrr, X86::PFCMPGTrm, 0 },
+ { X86::PFMAXrr, X86::PFMAXrm, 0 },
+ { X86::PFMINrr, X86::PFMINrm, 0 },
+ { X86::PFMULrr, X86::PFMULrm, 0 },
+ { X86::PFNACCrr, X86::PFNACCrm, 0 },
+ { X86::PFPNACCrr, X86::PFPNACCrm, 0 },
+ { X86::PFRCPIT1rr, X86::PFRCPIT1rm, 0 },
+ { X86::PFRCPIT2rr, X86::PFRCPIT2rm, 0 },
+ { X86::PFRSQIT1rr, X86::PFRSQIT1rm, 0 },
+ { X86::PFSUBrr, X86::PFSUBrm, 0 },
+ { X86::PFSUBRrr, X86::PFSUBRrm, 0 },
+ { X86::PMULHRWrr, X86::PMULHRWrm, 0 },
+
// AVX 128-bit versions of foldable instructions
{ X86::VCVTSD2SSrr, X86::VCVTSD2SSrm, 0 },
{ X86::Int_VCVTSD2SSrr, X86::Int_VCVTSD2SSrm, 0 },
@@ -5969,6 +5999,7 @@ static const uint16_t ReplaceableInstrs[][3] = {
{ X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
{ X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
{ X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
+ { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
{ X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
{ X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
{ X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
@@ -5984,6 +6015,7 @@ static const uint16_t ReplaceableInstrs[][3] = {
{ X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
{ X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
{ X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
+ // TODO: Add the AVX versions of MOVLPSmr
{ X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
{ X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
{ X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index ccdbf0e..65b155c 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -643,9 +643,6 @@ let Predicates = [UseAVX] in {
// Represent the same patterns above but in the form they appear for
// 256-bit types
- def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
- (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
@@ -653,9 +650,6 @@ let Predicates = [UseAVX] in {
(v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
}
- def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
- (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
@@ -793,7 +787,7 @@ let Predicates = [UseSSE2] in {
(MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
// FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
- // is during lowering, where it's not possible to recognize the fold cause
+ // is during lowering, where it's not possible to recognize the fold because
// it has two uses through a bitcast. One use disappears at isel time and the
// fold opportunity reappears.
def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
@@ -3678,13 +3672,30 @@ def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
PS, Requires<[HasSSE2]>;
} // SchedRW = [WriteStore]
+let Predicates = [HasAVX2, NoVLX] in {
+ def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
+ (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+ def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
+ (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+ def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
+ (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+}
+
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
- (VMOVNTPSmr addr:$dst, VR128:$src)>;
+ (VMOVNTDQmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
+ (VMOVNTDQmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
+ (VMOVNTDQmr addr:$dst, VR128:$src)>;
}
def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
- (MOVNTPSmr addr:$dst, VR128:$src)>;
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
} // AddedComplexity
@@ -4890,7 +4901,8 @@ let Predicates = [UseAVX] in {
def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
(VMOVDI2PDIrr GR32:$src)>;
- // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
+ // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
+ // These instructions also write zeros in the high part of a 256-bit register.
let AddedComplexity = 20 in {
def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
(VMOVDI2PDIrm addr:$src)>;
@@ -4898,6 +4910,9 @@ let Predicates = [UseAVX] in {
(VMOVDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
(VMOVDI2PDIrm addr:$src)>;
+ def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrm addr:$src), sub_xmm)>;
}
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
@@ -5016,6 +5031,9 @@ let Predicates = [UseAVX], AddedComplexity = 20 in {
(VMOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)),
(VMOVZQI2PQIrm addr:$src)>;
+ def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
}
let Predicates = [UseSSE2], AddedComplexity = 20 in {
@@ -7150,6 +7168,10 @@ let Predicates = [HasAVX2] in {
}
// Patterns
+// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
+// on targets where they have equal performance. These were changed to use
+// blends because blends have better throughput on SandyBridge and Haswell, but
+// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseAVX] in {
let AddedComplexity = 15 in {
// Move scalar to XMM zero-extended, zeroing a VR128 then do a
@@ -7166,8 +7188,10 @@ let Predicates = [UseAVX] in {
// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
(VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;
- def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
- (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
+
+ // Move low f64 and clear high bits.
+ def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
+ (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
}
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
@@ -7181,14 +7205,19 @@ let Predicates = [UseAVX] in {
(v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
sub_xmm)>;
- // Move low f64 and clear high bits.
- def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
- (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
-
+ // These will incur an FP/int domain crossing penalty, but it may be the only
+ // way without AVX2. Do not add any complexity because we may be able to match
+ // more optimal patterns defined earlier in this file.
+ def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
+ (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
(VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
}
+// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
+// on targets where they have equal performance. These were changed to use
+// blends because blends have better throughput on SandyBridge and Haswell, but
+// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseSSE41] in {
// With SSE41 we can use blends for these patterns.
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
@@ -8341,7 +8370,7 @@ let Predicates = [HasAVX2] in {
def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
(VBROADCASTSDYrr VR128:$src)>;
- // Provide aliases for broadcast from the same regitser class that
+ // Provide aliases for broadcast from the same register class that
// automatically does the extract.
def : Pat<(v32i8 (X86VBroadcast (v32i8 VR256:$src))),
(VPBROADCASTBYrr (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src),
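At the source level, the integer non-temporal stores these new patterns cover correspond to intrinsics code like the following (an illustration written for this note, not part of the patch); with the change, aligned non-temporal stores of v4i32/v8i16/v16i8 values select (V)MOVNTDQ instead of (V)MOVNTPS, keeping the store in the integer domain:

#include <immintrin.h>

// Stream-fill n 16-byte slots; dst must be 16-byte aligned.
void stream_fill(__m128i *dst, __m128i v, int n) {
  for (int i = 0; i < n; ++i)
    _mm_stream_si128(&dst[i], v);  // aligned non-temporal 128-bit integer store
  _mm_sfence();                    // order the non-temporal stores before later writes
}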
diff --git a/lib/Target/X86/X86IntrinsicsInfo.h b/lib/Target/X86/X86IntrinsicsInfo.h
index 42256b2..28a3b7b 100644
--- a/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/lib/Target/X86/X86IntrinsicsInfo.h
@@ -334,6 +334,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::FMUL_RND),
X86_INTRINSIC_DATA(avx512_mask_mul_ps_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
X86ISD::FMUL_RND),
+ X86_INTRINSIC_DATA(avx512_mask_padd_d_512, INTR_TYPE_2OP_MASK, ISD::ADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask_padd_q_512, INTR_TYPE_2OP_MASK, ISD::ADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pand_d_512, INTR_TYPE_2OP_MASK, ISD::AND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pand_q_512, INTR_TYPE_2OP_MASK, ISD::AND, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_128, CMP_MASK, X86ISD::PCMPEQM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_256, CMP_MASK, X86ISD::PCMPEQM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_512, CMP_MASK, X86ISD::PCMPEQM, 0),
@@ -358,6 +362,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_128, CMP_MASK, X86ISD::PCMPGTM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_256, CMP_MASK, X86ISD::PCMPGTM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_512, CMP_MASK, X86ISD::PCMPGTM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmul_dq_512, INTR_TYPE_2OP_MASK,
+ X86ISD::PMULDQ, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_512, INTR_TYPE_2OP_MASK,
+ X86ISD::PMULUDQ, 0),
+ X86_INTRINSIC_DATA(avx512_mask_por_d_512, INTR_TYPE_2OP_MASK, ISD::OR, 0),
+ X86_INTRINSIC_DATA(avx512_mask_por_q_512, INTR_TYPE_2OP_MASK, ISD::OR, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_pslli_d, VSHIFT_MASK, X86ISD::VSHLI, 0),
@@ -376,6 +386,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_psrli_q, VSHIFT_MASK, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv_d, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv_q, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psub_d_512, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psub_q_512, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pxor_d_512, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pxor_q_512, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
X86_INTRINSIC_DATA(avx512_mask_rndscale_sd, INTR_TYPE_SCALAR_MASK_RM,
X86ISD::RNDSCALE, 0),
X86_INTRINSIC_DATA(avx512_mask_rndscale_ss, INTR_TYPE_SCALAR_MASK_RM,
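The X86_INTRINSIC_DATA rows added above map each AVX-512 masked intrinsic to the ISD or X86ISD opcode it lowers to, so these arithmetic and logical cases can be handled by the generic table-driven lowering. A minimal sketch of how a sorted table like this can be queried by intrinsic ID; the struct fields and helper name are illustrative, not LLVM's actual API:

#include <algorithm>
#include <cstddef>

struct IntrinsicData { unsigned Id; unsigned Type; unsigned Opc0; unsigned Opc1; };

// Binary-search Table (assumed sorted by Id) for the entry describing IntNo.
const IntrinsicData *findIntrinsicData(const IntrinsicData *Table, size_t N,
                                       unsigned IntNo) {
  const IntrinsicData *End = Table + N;
  const IntrinsicData *I = std::lower_bound(
      Table, End, IntNo,
      [](const IntrinsicData &D, unsigned Id) { return D.Id < Id; });
  return (I != End && I->Id == IntNo) ? I : nullptr;
}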
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index ca8fc9c..4bfc7f9 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -193,7 +193,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
DAG.getConstant(Offset, AddrVT)),
Src,
DAG.getConstant(BytesLeft, SizeVT),
- Align, isVolatile, DstPtrInfo.getWithOffset(Offset));
+ Align, isVolatile, false,
+ DstPtrInfo.getWithOffset(Offset));
}
// TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
@@ -282,7 +283,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
DAG.getNode(ISD::ADD, dl, SrcVT, Src,
DAG.getConstant(Offset, SrcVT)),
DAG.getConstant(BytesLeft, SizeVT),
- Align, isVolatile, AlwaysInline,
+ Align, isVolatile, AlwaysInline, false,
DstPtrInfo.getWithOffset(Offset),
SrcPtrInfo.getWithOffset(Offset)));
}