Diffstat (limited to 'lib/Target/Mips')
-rw-r--r--  lib/Target/Mips/CMakeLists.txt                        1
-rw-r--r--  lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp       5
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp     147
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h          82
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp  213
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp     5
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h       5
-rw-r--r--  lib/Target/Mips/Makefile                              2
-rw-r--r--  lib/Target/Mips/Mips64InstrInfo.td                   82
-rw-r--r--  lib/Target/Mips/MipsAsmPrinter.cpp                    1
-rw-r--r--  lib/Target/Mips/MipsCallingConv.td                   24
-rw-r--r--  lib/Target/Mips/MipsCodeEmitter.cpp                  57
-rw-r--r--  lib/Target/Mips/MipsFrameLowering.cpp                24
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp                446
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.h                      80
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.td                    136
-rw-r--r--  lib/Target/Mips/MipsMCInstLower.cpp                   1
-rw-r--r--  lib/Target/Mips/MipsMachineFunction.h                11
-rw-r--r--  lib/Target/Mips/TargetInfo/LLVMBuild.txt              2
19 files changed, 1048 insertions, 276 deletions
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index e81ba6f..53656d4d 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -3,6 +3,7 @@ set(LLVM_TARGET_DEFINITIONS Mips.td)
tablegen(LLVM MipsGenRegisterInfo.inc -gen-register-info)
tablegen(LLVM MipsGenInstrInfo.inc -gen-instr-info)
tablegen(LLVM MipsGenCodeEmitter.inc -gen-emitter)
+tablegen(LLVM MipsGenMCCodeEmitter.inc -gen-emitter -mc-emitter)
tablegen(LLVM MipsGenAsmWriter.inc -gen-asm-writer)
tablegen(LLVM MipsGenDAGISel.inc -gen-dag-isel)
tablegen(LLVM MipsGenCallingConv.inc -gen-callingconv)
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index 4a815f3..f544d39 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -118,7 +118,10 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
OS << Offset;
}
- if (Kind != MCSymbolRefExpr::VK_None)
+ if ((Kind == MCSymbolRefExpr::VK_Mips_GPOFF_HI) ||
+ (Kind == MCSymbolRefExpr::VK_Mips_GPOFF_LO))
+ OS << ")))";
+ else if (Kind != MCSymbolRefExpr::VK_None)
OS << ')';
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index f190ec4..4f017d0 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -1,5 +1,21 @@
+//===-- MipsAsmBackend.cpp - Mips Asm Backend ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MipsAsmBackend and MipsELFObjectWriter classes.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#include "MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
@@ -8,7 +24,6 @@
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ELF.h"
@@ -16,7 +31,50 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+
+ // Add/subtract and shift
+ switch (Kind) {
+ default:
+ break;
+ case Mips::fixup_Mips_PC16:
+ // So far we are only using this type for branches.
+ // For branches we start 1 instruction after the branch
+ // so the displacement will be one instruction size less.
+ Value -= 4;
+ // The displacement is then divided by 4 to give us an 18 bit
+ // address range.
+ Value >>= 2;
+ break;
+ case Mips::fixup_Mips_26:
+ // So far we are only using this type for jumps.
+    // The displacement is divided by 4 to give us a 28 bit
+ // address range.
+ Value >>= 2;
+ break;
+ }
+
+ // Mask off value for placement as an operand
+ switch (Kind) {
+ default:
+ break;
+ case FK_Data_4:
+ Value &= 0xffffffff;
+ break;
+ case Mips::fixup_Mips_26:
+ Value &= 0x03ffffff;
+ break;
+ case Mips::fixup_Mips_LO16:
+ case Mips::fixup_Mips_PC16:
+ Value &= 0x0000ffff;
+ break;
+ }
+
+ return Value;
+}
+
namespace {
+
class MipsELFObjectWriter : public MCELFObjectTargetWriter {
public:
MipsELFObjectWriter(bool is64Bit, Triple::OSType OSType, uint16_t EMachine,
@@ -27,18 +85,75 @@ public:
class MipsAsmBackend : public MCAsmBackend {
public:
- MipsAsmBackend(const Target &T)
- : MCAsmBackend() {}
-
- unsigned getNumFixupKinds() const {
- return 1; //tbd
- }
+ MipsAsmBackend(const Target &T) : MCAsmBackend() {}
/// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const {
+ unsigned Kind = (unsigned)Fixup.getKind();
+ Value = adjustFixupValue(Kind, Value);
+
+ if (!Value)
+ return; // Doesn't change encoding.
+
+ unsigned Offset = Fixup.getOffset();
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case Mips::fixup_Mips_GOT16: // This will be fixed up at link time
+ break;
+ case FK_Data_4:
+ case Mips::fixup_Mips_26:
+ case Mips::fixup_Mips_LO16:
+ case Mips::fixup_Mips_PC16:
+      // For each byte of the fragment that the fixup touches, mask in
+      // the fixup value. The Value has been "split up" into the appropriate
+ // bitfields above.
+ for (unsigned i = 0; i != 4; ++i) // FIXME - Need to support 2 and 8 bytes
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+ break;
+ }
+ }
+
+ unsigned getNumFixupKinds() const { return Mips::NumTargetFixupKinds; }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[Mips::NumTargetFixupKinds] = {
+      // This table *must* be in the order that the fixup_* kinds are defined in
+ // MipsFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_Mips_NONE", 0, 0, 0 },
+ { "fixup_Mips_16", 0, 16, 0 },
+ { "fixup_Mips_32", 0, 32, 0 },
+ { "fixup_Mips_REL32", 0, 32, 0 },
+ { "fixup_Mips_26", 0, 26, 0 },
+ { "fixup_Mips_HI16", 0, 16, 0 },
+ { "fixup_Mips_LO16", 0, 16, 0 },
+ { "fixup_Mips_GPREL16", 0, 16, 0 },
+ { "fixup_Mips_LITERAL", 0, 16, 0 },
+ { "fixup_Mips_GOT16", 0, 16, 0 },
+ { "fixup_Mips_PC16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_CALL16", 0, 16, 0 },
+ { "fixup_Mips_GPREL32", 0, 32, 0 },
+ { "fixup_Mips_SHIFT5", 6, 5, 0 },
+ { "fixup_Mips_SHIFT6", 6, 5, 0 },
+ { "fixup_Mips_64", 0, 64, 0 },
+ { "fixup_Mips_TLSGD", 0, 16, 0 },
+ { "fixup_Mips_GOTTPREL", 0, 16, 0 },
+ { "fixup_Mips_TPREL_HI", 0, 16, 0 },
+ { "fixup_Mips_TPREL_LO", 0, 16, 0 },
+ { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
}
/// @name Target Relaxation Interfaces
@@ -52,24 +167,24 @@ public:
return false;
}
- /// RelaxInstruction - Relax the instruction in the given fragment to the next
- /// wider instruction.
+ /// RelaxInstruction - Relax the instruction in the given fragment
+ /// to the next wider instruction.
///
- /// \param Inst - The instruction to relax, which may be the same as the
- /// output.
+ /// \param Inst - The instruction to relax, which may be the same
+ /// as the output.
/// \param Res [output] - On return, the relaxed instruction.
void RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
}
/// @}
- /// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given
- /// output. If the target cannot generate such a sequence, it should return an
- /// error.
+ /// WriteNopData - Write an (optimal) nop sequence of Count bytes
+ /// to the given output. If the target cannot generate such a sequence,
+ /// it should return an error.
///
/// \return - True on success.
bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
- return false;
+ return true;
}
};
@@ -106,7 +221,7 @@ public:
return new MipsELFObjectWriter(false, OSType, ELF::EM_MIPS, false);
}
};
-}
+} // namespace
MCAsmBackend *llvm::createMipsAsmBackend(const Target &T, StringRef TT) {
Triple TheTriple(TT);
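
Note: the following is a minimal standalone C++ sketch (not the LLVM API; the 20-byte branch displacement is a made-up input) that mirrors the adjustFixupValue() arithmetic added above for a fixup_Mips_PC16, so the subtract/shift/mask steps can be checked in isolation.

#include <cstdint>
#include <cstdio>

// Sketch of the PC16 handling in adjustFixupValue() above.
static uint64_t adjustPC16(uint64_t Value) {
  Value -= 4;                 // branch offsets are relative to the next instruction
  Value >>= 2;                // instructions are 4-byte aligned; drop the low 2 bits
  return Value & 0x0000ffff;  // the branch immediate field is 16 bits wide
}

int main() {
  uint64_t Displacement = 20; // hypothetical raw displacement in bytes
  std::printf("encoded imm16 = %llu\n",
              (unsigned long long)adjustPC16(Displacement)); // prints 4
  return 0;
}
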
diff --git a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index f7a6fa9..cebfde0 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -19,6 +19,88 @@
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
+
+/// MipsII - This namespace holds all of the target specific flags that
+/// instruction info tracks.
+///
+namespace MipsII {
+ /// Target Operand Flag enum.
+ enum TOF {
+ //===------------------------------------------------------------------===//
+ // Mips Specific MachineOperand flags.
+
+ MO_NO_FLAG,
+
+    /// MO_GOT - Represents the offset into the global offset table at which
+    /// the address of the relocation entry symbol resides during execution.
+ MO_GOT,
+
+ /// MO_GOT_CALL - Represents the offset into the global offset table at
+ /// which the address of a call site relocation entry symbol resides
+ /// during execution. This is different from the above since this flag
+ /// can only be present in call instructions.
+ MO_GOT_CALL,
+
+ /// MO_GPREL - Represents the offset from the current gp value to be used
+ /// for the relocatable object file being produced.
+ MO_GPREL,
+
+ /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
+ /// address.
+ MO_ABS_HI,
+ MO_ABS_LO,
+
+ /// MO_TLSGD - Represents the offset into the global offset table at which
+    // the module ID and TLS block offset reside during execution (General
+ // Dynamic TLS).
+ MO_TLSGD,
+
+ /// MO_GOTTPREL - Represents the offset from the thread pointer (Initial
+ // Exec TLS).
+ MO_GOTTPREL,
+
+ /// MO_TPREL_HI/LO - Represents the hi and low part of the offset from
+ // the thread pointer (Local Exec TLS).
+ MO_TPREL_HI,
+ MO_TPREL_LO,
+
+ // N32/64 Flags.
+ MO_GPOFF_HI,
+ MO_GPOFF_LO,
+ MO_GOT_DISP,
+ MO_GOT_PAGE,
+ MO_GOT_OFST
+ };
+
+ enum {
+ //===------------------------------------------------------------------===//
+ // Instruction encodings. These are the standard/most common forms for
+ // Mips instructions.
+ //
+
+ // Pseudo - This represents an instruction that is a pseudo instruction
+ // or one that has not been implemented yet. It is illegal to code generate
+ // it, but tolerated for intermediate implementation stages.
+ Pseudo = 0,
+
+ /// FrmR - This form is for instructions of the format R.
+ FrmR = 1,
+ /// FrmI - This form is for instructions of the format I.
+ FrmI = 2,
+ /// FrmJ - This form is for instructions of the format J.
+ FrmJ = 3,
+ /// FrmFR - This form is for instructions of the format FR.
+ FrmFR = 4,
+ /// FrmFI - This form is for instructions of the format FI.
+ FrmFI = 5,
+ /// FrmOther - This form is for instructions that have no specific format.
+ FrmOther = 6,
+
+ FormMask = 15
+ };
+}
+
+
/// getMipsRegisterNumbering - Given the enum value for some register,
/// return the number that it corresponds to.
inline static unsigned getMipsRegisterNumbering(unsigned RegEnum)
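
Note: a small hedged sketch (self-contained C++, duplicating the enum values above rather than including MipsBaseInfo.h) of how the low four TSFlags bits selected by FormMask are meant to be tested, as EncodeInstruction() does later in this patch; the TSFlags value used here is hypothetical.

#include <cassert>
#include <cstdint>

// Sketch only: the enum is duplicated so the snippet is self-contained;
// in-tree code would use MipsII from MCTargetDesc/MipsBaseInfo.h.
namespace SketchMipsII {
  enum { Pseudo = 0, FrmR = 1, FrmI = 2, FrmJ = 3, FormMask = 15 };
}

int main() {
  uint64_t TSFlags = SketchMipsII::FrmI;             // hypothetical TSFlags word
  unsigned Form = TSFlags & SketchMipsII::FormMask;  // low 4 bits hold the format
  assert(Form != SketchMipsII::Pseudo &&
         "pseudo instructions are never encoded");
  (void)Form;
  return 0;
}
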
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index d66de23..1115fec 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -12,16 +12,18 @@
//===----------------------------------------------------------------------===//
//
#define DEBUG_TYPE "mccodeemitter"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsFixupKinds.h"
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
-#include "MCTargetDesc/MipsMCTargetDesc.h"
using namespace llvm;
@@ -31,22 +33,217 @@ class MipsMCCodeEmitter : public MCCodeEmitter {
void operator=(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT
const MCInstrInfo &MCII;
const MCSubtargetInfo &STI;
+ MCContext &Ctx;
public:
MipsMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx)
- : MCII(mcii), STI(sti) {}
+ MCContext &ctx) : MCII(mcii), STI(sti) , Ctx(ctx) {}
~MipsMCCodeEmitter() {}
- void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ void EmitByte(unsigned char C, raw_ostream &OS) const {
+ OS << (char)C;
+ }
+
+ void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const {
+ // Output the instruction encoding in little endian byte order.
+ for (unsigned i = 0; i != Size; ++i) {
+ EmitByte(Val & 255, OS);
+ Val >>= 8;
+ }
}
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ unsigned getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+  // getJumpTargetOpValue - Return binary encoding of the jump
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getBranchTargetOpValue - Return binary encoding of the branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+  // getMachineOpValue - Return binary encoding of operand. If the machine
+ // operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
}; // class MipsMCCodeEmitter
} // namespace
MCCodeEmitter *llvm::createMipsMCCodeEmitter(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
- MCContext &Ctx) {
+ MCContext &Ctx)
+{
return new MipsMCCodeEmitter(MCII, STI, Ctx);
}
+
+/// EncodeInstruction - Emit the instruction.
+/// Size the instruction (currently only 4 bytes).
+void MipsMCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const
+{
+ uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+
+ // Check for unimplemented opcodes.
+ // Unfortunately in MIPS both NOT and SLL will come in with Binary == 0
+ // so we have to special check for them.
+ unsigned Opcode = MI.getOpcode();
+ if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary)
+ llvm_unreachable("unimplemented opcode in EncodeInstruction()");
+
+ const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+ uint64_t TSFlags = Desc.TSFlags;
+
+ // Pseudo instructions don't get encoded and shouldn't be here
+ // in the first place!
+ if ((TSFlags & MipsII::FormMask) == MipsII::Pseudo)
+ llvm_unreachable("Pseudo opcode found in EncodeInstruction()");
+
+ // For now all instructions are 4 bytes
+ int Size = 4; // FIXME: Have Desc.getSize() return the correct value!
+
+ EmitInstruction(Binary, Size, OS);
+}
+
+/// getBranchTargetOpValue - Return binary encoding of the branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_Mips_PC16)));
+ return 0;
+}
+
+/// getJumpTargetOpValue - Return binary encoding of the jump
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_Mips_26)));
+ return 0;
+}
+
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MO.isReg()) {
+ unsigned Reg = MO.getReg();
+ unsigned RegNo = getMipsRegisterNumbering(Reg);
+ return RegNo;
+ } else if (MO.isImm()) {
+ return static_cast<unsigned>(MO.getImm());
+ } else if (MO.isFPImm()) {
+ return static_cast<unsigned>(APFloat(MO.getFPImm())
+ .bitcastToAPInt().getHiBits(32).getLimitedValue());
+ } else if (MO.isExpr()) {
+ const MCExpr *Expr = MO.getExpr();
+ MCExpr::ExprKind Kind = Expr->getKind();
+ if (Kind == MCExpr::SymbolRef) {
+ Mips::Fixups FixupKind = Mips::fixup_Mips_NONE;
+ MCSymbolRefExpr::VariantKind SymRefKind =
+ cast<MCSymbolRefExpr>(Expr)->getKind();
+ switch(SymRefKind) {
+ case MCSymbolRefExpr::VK_Mips_GPREL:
+ FixupKind = Mips::fixup_Mips_GPREL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_CALL:
+ FixupKind = Mips::fixup_Mips_CALL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT:
+ FixupKind = Mips::fixup_Mips_GOT16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ FixupKind = Mips::fixup_Mips_HI16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_LO:
+ FixupKind = Mips::fixup_Mips_LO16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TLSGD:
+ FixupKind = Mips::fixup_Mips_TLSGD;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOTTPREL:
+ FixupKind = Mips::fixup_Mips_GOTTPREL;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_HI:
+ FixupKind = Mips::fixup_Mips_TPREL_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_LO:
+ FixupKind = Mips::fixup_Mips_TPREL_LO;
+ break;
+ default:
+ return 0;
+ } // switch
+ Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(FixupKind)));
+ } // if SymbolRef
+ // All of the information is in the fixup.
+ return 0;
+ }
+ llvm_unreachable("Unable to encode MCOperand!");
+ // Not reached
+ return 0;
+}
+
+/// getMemEncoding - Return binary encoding of memory related operand.
+/// If the offset operand requires relocation, record the relocation.
+unsigned
+MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups);
+
+ return (OffBits & 0xFFFF) | RegBits;
+}
+
+unsigned
+MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // FIXME: implement
+ return 0;
+}
+
+unsigned
+MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // FIXME: implement
+ return 0;
+}
+
+#include "MipsGenMCCodeEmitter.inc"
+
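
Note: a brief standalone sketch (plain C++; the register number, offset, and instruction word are made-up values) reproducing the bit packing used by getMemEncoding() and the little-endian byte order used by EmitInstruction(), for a quick sanity check outside the build.

#include <cstdint>
#include <cstdio>

int main() {
  // Packing used by getMemEncoding(): base register in bits 20-16,
  // offset in bits 15-0 (register 29 and offset 16 are hypothetical).
  unsigned RegNo = 29, Offset = 16;
  unsigned MemOperand = (Offset & 0xFFFF) | (RegNo << 16);
  std::printf("mem operand = 0x%08x\n", MemOperand);    // 0x001d0010

  // Byte order used by EmitInstruction(): least significant byte first.
  uint32_t Binary = 0x8fbf0010;                         // hypothetical instruction word
  for (unsigned i = 0; i != 4; ++i) {
    std::printf("%02x ", (unsigned)(Binary & 0xff));
    Binary >>= 8;
  }
  std::printf("\n");                                    // prints: 10 00 bf 8f
  return 0;
}
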
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 1f9e3dd..e6040e4 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MipsMCTargetDesc.h"
#include "MipsMCAsmInfo.h"
+#include "MipsMCTargetDesc.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/MCCodeGenInfo.h"
@@ -140,6 +140,9 @@ extern "C" void LLVMInitializeMipsTargetMC() {
TargetRegistry::RegisterMCAsmBackend(TheMips64Target, createMipsAsmBackend);
TargetRegistry::RegisterMCAsmBackend(TheMips64elTarget, createMipsAsmBackend);
+ TargetRegistry::RegisterMCCodeEmitter(TheMipsTarget, createMipsMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheMipselTarget, createMipsMCCodeEmitter);
+
// Register the MC subtarget info.
TargetRegistry::RegisterMCSubtargetInfo(TheMipsTarget,
createMipsMCSubtargetInfo);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index 7a0042a..fc43d2d 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -16,12 +16,14 @@
namespace llvm {
class MCAsmBackend;
-class MCInstrInfo;
class MCCodeEmitter;
class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
class MCSubtargetInfo;
class StringRef;
class Target;
+class raw_ostream;
extern Target TheMipsTarget;
extern Target TheMipselTarget;
@@ -33,6 +35,7 @@ MCCodeEmitter *createMipsMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createMipsAsmBackend(const Target &T, StringRef TT);
+
} // End llvm namespace
// Defines symbolic names for Mips registers. This defines a mapping from
diff --git a/lib/Target/Mips/Makefile b/lib/Target/Mips/Makefile
index d72693c..94f7c18 100644
--- a/lib/Target/Mips/Makefile
+++ b/lib/Target/Mips/Makefile
@@ -15,7 +15,7 @@ TARGET = Mips
BUILT_SOURCES = MipsGenRegisterInfo.inc MipsGenInstrInfo.inc \
MipsGenAsmWriter.inc MipsGenCodeEmitter.inc \
MipsGenDAGISel.inc MipsGenCallingConv.inc \
- MipsGenSubtargetInfo.inc
+ MipsGenSubtargetInfo.inc MipsGenMCCodeEmitter.inc
DIRS = InstPrinter TargetInfo MCTargetDesc
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index 9a769e8..3c97241 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -51,12 +51,58 @@ class shift_rotate_imm64_32<bits<6> func, bits<5> isRotate, string instr_asm,
shift_rotate_imm<func, isRotate, instr_asm, OpNode, imm32_63, shamt,
CPU64Regs>;
+// Jump and Link (Call)
+let isCall=1, hasDelaySlot=1,
+ // All calls clobber the non-callee saved registers...
+ Defs = [AT, V0, V1, A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ K0, K1, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9], Uses = [GP] in {
+ class JumpLink64<bits<6> op, string instr_asm>:
+ FJ<op, (outs), (ins calltarget64:$target, variable_ops),
+ !strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)],
+ IIBranch>;
+
+ class JumpLinkReg64<bits<6> op, bits<6> func, string instr_asm>:
+ FR<op, func, (outs), (ins CPU64Regs:$rs, variable_ops),
+ !strconcat(instr_asm, "\t$rs"),
+ [(MipsJmpLink CPU64Regs:$rs)], IIBranch> {
+ let rt = 0;
+ let rd = 31;
+ let shamt = 0;
+ }
+
+ class BranchLink64<string instr_asm>:
+ FI<0x1, (outs), (ins CPU64Regs:$rs, brtarget:$imm16, variable_ops),
+ !strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch>;
+}
+
// Mul, Div
class Mult64<bits<6> func, string instr_asm, InstrItinClass itin>:
Mult<func, instr_asm, itin, CPU64Regs, [HI64, LO64]>;
class Div64<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
Div<op, func, instr_asm, itin, CPU64Regs, [HI64, LO64]>;
+multiclass Atomic2Ops64<PatFrag Op, string Opstr> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPU64Regs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>, Requires<[IsN64]>;
+}
+
+multiclass AtomicCmpSwap64<PatFrag Op, string Width> {
+ def #NAME# : AtomicCmpSwap<Op, Width, CPU64Regs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : AtomicCmpSwap<Op, Width, CPU64Regs, CPU64Regs>,
+ Requires<[IsN64]>;
+}
+
+let usesCustomInserter = 1, Predicates = [HasMips64] in {
+ defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64, "load_add_64">;
+ defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64, "load_sub_64">;
+ defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64, "load_and_64">;
+ defm ATOMIC_LOAD_OR_I64 : Atomic2Ops64<atomic_load_or_64, "load_or_64">;
+ defm ATOMIC_LOAD_XOR_I64 : Atomic2Ops64<atomic_load_xor_64, "load_xor_64">;
+ defm ATOMIC_LOAD_NAND_I64 : Atomic2Ops64<atomic_load_nand_64, "load_nand_64">;
+ defm ATOMIC_SWAP_I64 : Atomic2Ops64<atomic_swap_64, "swap_64">;
+ defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64, "64">;
+}
+
//===----------------------------------------------------------------------===//
// Instruction definition
//===----------------------------------------------------------------------===//
@@ -122,7 +168,15 @@ defm USW64 : StoreM64<0x2b, "usw", truncstorei32_u, 1>;
defm ULD : LoadM64<0x37, "uld", load_u, 1>;
defm USD : StoreM64<0x3f, "usd", store_u, 1>;
+/// Load-linked, Store-conditional
+def LLD : LLBase<0x34, "lld", CPU64Regs, mem>, Requires<[NotN64]>;
+def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>, Requires<[IsN64]>;
+def SCD : SCBase<0x3c, "scd", CPU64Regs, mem>, Requires<[NotN64]>;
+def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>, Requires<[IsN64]>;
+
/// Jump and Branch Instructions
+def JAL64 : JumpLink64<0x03, "jal">;
+def JALR64 : JumpLinkReg64<0x00, 0x09, "jalr">;
def BEQ64 : CBranch<0x04, "beq", seteq, CPU64Regs>;
def BNE64 : CBranch<0x05, "bne", setne, CPU64Regs>;
def BGEZ64 : CBranchZero<0x01, 1, "bgez", setge, CPU64Regs>;
@@ -145,6 +199,12 @@ def MFLO64 : MoveFromLOHI<0x12, "mflo", CPU64Regs, [LO64]>;
def DCLZ : CountLeading0<0x24, "dclz", CPU64Regs>;
def DCLO : CountLeading1<0x25, "dclo", CPU64Regs>;
+def LEA_ADDiu64 : EffectiveAddress<"addiu\t$rt, $addr", CPU64Regs, mem_ea_64>;
+
+let Uses = [SP_64] in
+def DynAlloc64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>,
+ Requires<[IsN64]>;
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
@@ -155,11 +215,20 @@ def : Pat<(i64 immSExt16:$in),
def : Pat<(i64 immZExt16:$in),
(ORi64 ZERO_64, imm:$in)>;
-// zextloadi32_u
-def : Pat<(zextloadi32_u addr:$a), (DSRL32 (DSLL32 (ULW64_P8 addr:$a), 0), 0)>,
- Requires<[IsN64]>;
-def : Pat<(zextloadi32_u addr:$a), (DSRL32 (DSLL32 (ULW64 addr:$a), 0), 0)>,
- Requires<[NotN64]>;
+// Arbitrary immediates
+def : Pat<(i64 imm:$imm),
+ (ORi64 (LUi64 (HI16 imm:$imm)), (LO16 imm:$imm))>;
+
+// extended loads
+let Predicates = [NotN64] in {
+ def : Pat<(extloadi32_a addr:$a), (DSRL32 (DSLL32 (LW64 addr:$a), 0), 0)>;
+ def : Pat<(zextloadi32_u addr:$a), (DSRL32 (DSLL32 (ULW64 addr:$a), 0), 0)>;
+}
+let Predicates = [IsN64] in {
+ def : Pat<(extloadi32_a addr:$a), (DSRL32 (DSLL32 (LW64_P8 addr:$a), 0), 0)>;
+ def : Pat<(zextloadi32_u addr:$a),
+ (DSRL32 (DSLL32 (ULW64_P8 addr:$a), 0), 0)>;
+}
// hi/lo relocs
def : Pat<(i64 (MipsLo tglobaladdr:$in)), (DADDiu ZERO_64, tglobaladdr:$in)>;
@@ -174,6 +243,9 @@ defm : SetgtPats<CPU64Regs, SLT64, SLTu64>;
defm : SetgePats<CPU64Regs, SLT64, SLTu64>;
defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>;
+// select MipsDynAlloc
+def : Pat<(MipsDynAlloc addr:$f), (DynAlloc64 addr:$f)>, Requires<[IsN64]>;
+
// truncate
def : Pat<(i32 (trunc CPU64Regs:$src)),
(SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>, Requires<[IsN64]>;
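
Note: the extended-load patterns above synthesize a zero-extension with a DSLL32/DSRL32 pair; a minimal C++ sketch of the equivalent arithmetic (the input value is hypothetical) shows why shifting left and then logically right by 32 clears the upper half.

#include <cstdint>
#include <cstdio>

int main() {
  // A 32-bit load result that was sign-extended to 64 bits (hypothetical value).
  uint64_t Loaded = 0xffffffff80000000ULL;
  // DSLL32 $d, $s, 0 shifts left by 32; DSRL32 $d, $s, 0 shifts logically
  // right by 32, so the pair keeps only the low 32 bits: a zero-extension.
  uint64_t ZeroExtended = (Loaded << 32) >> 32;
  std::printf("0x%016llx\n", (unsigned long long)ZeroExtended); // 0x0000000080000000
  return 0;
}
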
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index d7b7f06..186a5e3 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -20,6 +20,7 @@
#include "MipsMCInstLower.h"
#include "MipsMCSymbolRefExpr.h"
#include "InstPrinter/MipsInstPrinter.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index 0ae4ef6..3d973ce 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -35,8 +35,9 @@ def RetCC_MipsO32 : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_MipsN : CallingConv<[
- // FIXME: Handle byval, complex and float double parameters.
-
+ // Handles byval parameters.
+ CCIfByVal<CCCustom<"CC_Mips64Byval">>,
+
// Promote i8/i16/i32 arguments to i64.
CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
@@ -63,6 +64,25 @@ def CC_MipsN : CallingConv<[
CCIfType<[f32], CCAssignToStack<4, 8>>
]>;
+// N32/64 variable arguments.
+// All arguments are passed in integer registers.
+def CC_MipsN_VarArg : CallingConv<[
+ // Handles byval parameters.
+ CCIfByVal<CCCustom<"CC_Mips64Byval">>,
+
+ // Promote i8/i16/i32 arguments to i64.
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+
+ CCIfType<[i64, f64], CCAssignToReg<[A0_64, A1_64, A2_64, A3_64,
+ T0_64, T1_64, T2_64, T3_64]>>,
+
+ CCIfType<[f32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,
+
+ // All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
+ CCIfType<[i64, f64], CCAssignToStack<8, 8>>,
+ CCIfType<[f32], CCAssignToStack<4, 8>>
+]>;
+
def RetCC_MipsN : CallingConv<[
// FIXME: Handle complex and float double return values.
diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp
index dc4ecd6..a8f29ae 100644
--- a/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -18,18 +18,20 @@
#include "MipsRelocations.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/PassManager.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/ADT/Statistic.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -37,8 +39,6 @@
#include <iomanip>
#endif
-#include "llvm/CodeGen/MachineOperand.h"
-
using namespace llvm;
STATISTIC(NumEmitted, "Number of machine instructions emitted");
@@ -66,9 +66,9 @@ class MipsCodeEmitter : public MachineFunctionPass {
public:
MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) :
MachineFunctionPass(ID), JTI(0),
- II((const MipsInstrInfo *) tm.getInstrInfo()),
- TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
+ II((const MipsInstrInfo *) tm.getInstrInfo()),
+ TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
}
bool runOnMachineFunction(MachineFunction &MF);
@@ -91,7 +91,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
/// Routines that handle operands which add machine relocations which are
/// fixed up by the relocation stage.
void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub) const;
+ bool MayNeedFarStub) const;
void emitExternalSymbolAddress(const char *ES, unsigned Reloc) const;
void emitConstPoolAddress(unsigned CPI, unsigned Reloc) const;
void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const;
@@ -105,6 +105,9 @@ class MipsCodeEmitter : public MachineFunctionPass {
unsigned getRelocation(const MachineInstr &MI,
const MachineOperand &MO) const;
+ unsigned getJumpTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
+
+ unsigned getBranchTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeExtEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeInsEncoding(const MachineInstr &MI, unsigned OpNo) const;
@@ -165,23 +168,34 @@ unsigned MipsCodeEmitter::getRelocation(const MachineInstr &MI,
return Mips::reloc_mips_lo;
}
+unsigned MipsCodeEmitter::getJumpTargetOpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ // FIXME: implement
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ // FIXME: implement
+ return 0;
+}
+
unsigned MipsCodeEmitter::getMemEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
assert(MI.getOperand(OpNo).isReg());
unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo)) << 16;
- return
- (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
+ return (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
}
unsigned MipsCodeEmitter::getSizeExtEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// size is encoded as size-1.
return getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
}
unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// size is encoded as pos+size-1.
return getMachineOpValue(MI, MI.getOperand(OpNo-1)) +
getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
@@ -190,7 +204,7 @@ unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO) const {
+ const MachineOperand &MO) const {
if (MO.isReg())
return MipsRegisterInfo::getRegisterNumbering(MO.getReg());
else if (MO.isImm())
@@ -217,9 +231,10 @@ unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
}
void MipsCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub) const {
+ bool MayNeedFarStub) const {
MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- const_cast<GlobalValue *>(GV), 0, MayNeedFarStub));
+ const_cast<GlobalValue *>(GV), 0,
+ MayNeedFarStub));
}
void MipsCodeEmitter::emitGlobalAddressUnaligned(const GlobalValue *GV,
@@ -248,7 +263,7 @@ emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const {
}
void MipsCodeEmitter::emitMachineBasicBlock(MachineBasicBlock *BB,
- unsigned Reloc) const {
+ unsigned Reloc) const {
MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
Reloc, BB));
}
@@ -395,7 +410,7 @@ void MipsCodeEmitter::emitWordLE(unsigned Word) {
/// createMipsJITCodeEmitterPass - Return a pass that emits the collected Mips
/// code to the specified MCE object.
FunctionPass *llvm::createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
- JITCodeEmitter &JCE) {
+ JITCodeEmitter &JCE) {
return new MipsCodeEmitter(TM, JCE);
}
diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp
index 71f3116..19bb1a5 100644
--- a/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/lib/Target/Mips/MipsFrameLowering.cpp
@@ -14,6 +14,7 @@
#include "MipsFrameLowering.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -149,6 +150,11 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned NewReg = 0;
int NewImm = 0;
bool ATUsed;
+ unsigned GP = STI.isABI_N64() ? Mips::GP_64 : Mips::GP;
+ unsigned T9 = STI.isABI_N64() ? Mips::T9_64 : Mips::T9;
+ unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
+ unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
// First, compute final stack size.
unsigned RegSize = STI.isGP32bit() ? 4 : 8;
@@ -157,7 +163,6 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
(MFI->getObjectOffset(MipsFI->getGPFI()) + RegSize) :
MipsFI->getMaxCallFrameSize();
unsigned StackSize = AlignOffset(LocalVarAreaOffset, StackAlign) +
- AlignOffset(MipsFI->getRegSaveAreaSize(), StackAlign) +
AlignOffset(MFI->getStackSize(), StackAlign);
// Update stack size
@@ -165,10 +170,25 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(Mips::NOREORDER));
- // TODO: check need from GP here.
+  // Emit instructions that set $gp using the value of $t9.
+ // O32 uses the directive .cpload while N32/64 requires three instructions to
+ // do this.
+ // TODO: Do not emit these instructions if no instructions use $gp.
if (isPIC && STI.isABI_O32())
BuildMI(MBB, MBBI, dl, TII.get(Mips::CPLOAD))
.addReg(RegInfo->getPICCallReg());
+ else if (STI.isABI_N64() || (isPIC && STI.isABI_N32())) {
+ // lui $28,%hi(%neg(%gp_rel(fname)))
+ // addu $28,$28,$25
+ // addiu $28,$28,%lo(%neg(%gp_rel(fname)))
+ const GlobalValue *FName = MF.getFunction();
+ BuildMI(MBB, MBBI, dl, TII.get(LUi), GP)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
+ BuildMI(MBB, MBBI, dl, TII.get(ADDu), GP).addReg(GP).addReg(T9);
+ BuildMI(MBB, MBBI, dl, TII.get(ADDiu), GP).addReg(GP)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO);
+ }
+
BuildMI(MBB, MBBI, dl, TII.get(Mips::NOMACRO));
// No need to allocate space on the stack.
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 50aa78f..b595f03 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -24,6 +24,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "InstPrinter/MipsInstPrinter.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -395,7 +396,8 @@ static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG,
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && SelectMadd(N, &DAG))
+ if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
+ SelectMadd(N, &DAG))
return SDValue(N, 0);
return SDValue();
@@ -407,7 +409,8 @@ static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG,
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && SelectMsub(N, &DAG))
+ if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
+ SelectMsub(N, &DAG))
return SDValue(N, 0);
return SDValue();
@@ -794,60 +797,108 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
assert(false && "Unexpected instr type to insert");
return NULL;
case Mips::ATOMIC_LOAD_ADD_I8:
+ case Mips::ATOMIC_LOAD_ADD_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I16:
+ case Mips::ATOMIC_LOAD_ADD_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I32:
+ case Mips::ATOMIC_LOAD_ADD_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::ADDu);
+ case Mips::ATOMIC_LOAD_ADD_I64:
+ case Mips::ATOMIC_LOAD_ADD_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::DADDu);
case Mips::ATOMIC_LOAD_AND_I8:
+ case Mips::ATOMIC_LOAD_AND_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I16:
+ case Mips::ATOMIC_LOAD_AND_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I32:
+ case Mips::ATOMIC_LOAD_AND_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::AND);
+ case Mips::ATOMIC_LOAD_AND_I64:
+ case Mips::ATOMIC_LOAD_AND_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::AND64);
case Mips::ATOMIC_LOAD_OR_I8:
+ case Mips::ATOMIC_LOAD_OR_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I16:
+ case Mips::ATOMIC_LOAD_OR_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I32:
+ case Mips::ATOMIC_LOAD_OR_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::OR);
+ case Mips::ATOMIC_LOAD_OR_I64:
+ case Mips::ATOMIC_LOAD_OR_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::OR64);
case Mips::ATOMIC_LOAD_XOR_I8:
+ case Mips::ATOMIC_LOAD_XOR_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I16:
+ case Mips::ATOMIC_LOAD_XOR_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I32:
+ case Mips::ATOMIC_LOAD_XOR_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::XOR);
+ case Mips::ATOMIC_LOAD_XOR_I64:
+ case Mips::ATOMIC_LOAD_XOR_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::XOR64);
case Mips::ATOMIC_LOAD_NAND_I8:
+ case Mips::ATOMIC_LOAD_NAND_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, 0, true);
case Mips::ATOMIC_LOAD_NAND_I16:
+ case Mips::ATOMIC_LOAD_NAND_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, 0, true);
case Mips::ATOMIC_LOAD_NAND_I32:
+ case Mips::ATOMIC_LOAD_NAND_I32_P8:
return EmitAtomicBinary(MI, BB, 4, 0, true);
+ case Mips::ATOMIC_LOAD_NAND_I64:
+ case Mips::ATOMIC_LOAD_NAND_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, 0, true);
case Mips::ATOMIC_LOAD_SUB_I8:
+ case Mips::ATOMIC_LOAD_SUB_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I16:
+ case Mips::ATOMIC_LOAD_SUB_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I32:
+ case Mips::ATOMIC_LOAD_SUB_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::SUBu);
+ case Mips::ATOMIC_LOAD_SUB_I64:
+ case Mips::ATOMIC_LOAD_SUB_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::DSUBu);
case Mips::ATOMIC_SWAP_I8:
+ case Mips::ATOMIC_SWAP_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, 0);
case Mips::ATOMIC_SWAP_I16:
+ case Mips::ATOMIC_SWAP_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, 0);
case Mips::ATOMIC_SWAP_I32:
+ case Mips::ATOMIC_SWAP_I32_P8:
return EmitAtomicBinary(MI, BB, 4, 0);
+ case Mips::ATOMIC_SWAP_I64:
+ case Mips::ATOMIC_SWAP_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, 0);
case Mips::ATOMIC_CMP_SWAP_I8:
+ case Mips::ATOMIC_CMP_SWAP_I8_P8:
return EmitAtomicCmpSwapPartword(MI, BB, 1);
case Mips::ATOMIC_CMP_SWAP_I16:
+ case Mips::ATOMIC_CMP_SWAP_I16_P8:
return EmitAtomicCmpSwapPartword(MI, BB, 2);
case Mips::ATOMIC_CMP_SWAP_I32:
+ case Mips::ATOMIC_CMP_SWAP_I32_P8:
return EmitAtomicCmpSwap(MI, BB, 4);
+ case Mips::ATOMIC_CMP_SWAP_I64:
+ case Mips::ATOMIC_CMP_SWAP_I64_P8:
+ return EmitAtomicCmpSwap(MI, BB, 8);
}
}
@@ -857,13 +908,31 @@ MachineBasicBlock *
MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Size, unsigned BinOpcode,
bool Nand) const {
- assert(Size == 4 && "Unsupported size for EmitAtomicBinary.");
+ assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicBinary.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL, SC, AND, NOR, ZERO, BEQ;
+
+ if (Size == 4) {
+ LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ AND = Mips::AND;
+ NOR = Mips::NOR;
+ ZERO = Mips::ZERO;
+ BEQ = Mips::BEQ;
+ }
+ else {
+ LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
+ SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ AND = Mips::AND64;
+ NOR = Mips::NOR64;
+ ZERO = Mips::ZERO_64;
+ BEQ = Mips::BEQ64;
+ }
unsigned OldVal = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -901,23 +970,20 @@ MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// sc success, storeval, 0(ptr)
// beq success, $0, loopMBB
BB = loopMBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
if (Nand) {
// and andres, oldval, incr
// nor storeval, $0, andres
- BuildMI(BB, dl, TII->get(Mips::AND), AndRes).addReg(OldVal).addReg(Incr);
- BuildMI(BB, dl, TII->get(Mips::NOR), StoreVal)
- .addReg(Mips::ZERO).addReg(AndRes);
+ BuildMI(BB, dl, TII->get(AND), AndRes).addReg(OldVal).addReg(Incr);
+ BuildMI(BB, dl, TII->get(NOR), StoreVal).addReg(ZERO).addReg(AndRes);
} else if (BinOpcode) {
// <binop> storeval, oldval, incr
BuildMI(BB, dl, TII->get(BinOpcode), StoreVal).addReg(OldVal).addReg(Incr);
} else {
StoreVal = Incr;
}
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
- .addReg(StoreVal).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BEQ))
- .addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
+ BuildMI(BB, dl, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
MI->eraseFromParent(); // The instruction is gone now.
@@ -937,6 +1003,8 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1028,7 +1096,7 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
// beq success,$0,loopMBB
BB = loopMBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@@ -1051,7 +1119,7 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
@@ -1082,13 +1150,29 @@ MachineBasicBlock *
MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned Size) const {
- assert(Size == 4 && "Unsupported size for EmitAtomicCmpSwap.");
+ assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL, SC, ZERO, BNE, BEQ;
+
+ if (Size == 4) {
+ LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ ZERO = Mips::ZERO;
+ BNE = Mips::BNE;
+ BEQ = Mips::BEQ;
+ }
+ else {
+ LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
+ SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ ZERO = Mips::ZERO_64;
+ BNE = Mips::BNE64;
+ BEQ = Mips::BEQ64;
+ }
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1127,18 +1211,18 @@ MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
// ll dest, 0(ptr)
// bne dest, oldval, exitMBB
BB = loop1MBB;
- BuildMI(BB, dl, TII->get(Mips::LL), Dest).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BNE))
+ BuildMI(BB, dl, TII->get(LL), Dest).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(BNE))
.addReg(Dest).addReg(OldVal).addMBB(exitMBB);
// loop2MBB:
// sc success, newval, 0(ptr)
// beq success, $0, loop1MBB
BB = loop2MBB;
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(NewVal).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BEQ))
- .addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
+ BuildMI(BB, dl, TII->get(BEQ))
+ .addReg(Success).addReg(ZERO).addMBB(loop1MBB);
MI->eraseFromParent(); // The instruction is gone now.
@@ -1157,6 +1241,8 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1247,7 +1333,7 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, dl, TII->get(Mips::BNE))
@@ -1263,7 +1349,7 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
@@ -1295,6 +1381,7 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
{
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+ unsigned SP = IsN64 ? Mips::SP_64 : Mips::SP;
assert(getTargetMachine().getFrameLowering()->getStackAlignment() >=
cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue() &&
@@ -1306,20 +1393,19 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc();
// Get a reference from Mips stack pointer
- SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, MVT::i32);
+ SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SP, getPointerTy());
// Subtract the dynamic size from the actual stack size to
// obtain the new stack size.
- SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, getPointerTy(), StackPointer, Size);
// The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
- Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub,
- SDValue());
+ Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, SP, Sub, SDValue());
// This node always has two return values: a new stack pointer
// value and a chain
- SDVTList VTLs = DAG.getVTList(MVT::i32, MVT::Other);
+ SDVTList VTLs = DAG.getVTList(getPointerTy(), MVT::Other);
SDValue Ptr = DAG.getFrameIndex(MipsFI->getDynAllocFI(), getPointerTy());
SDValue Ops[] = { Chain, Ptr, Chain.getValue(1) };
@@ -1658,7 +1744,8 @@ LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
- SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Mips::FP, VT);
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ IsN64 ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
}
@@ -1685,8 +1772,6 @@ SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op,
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
-#include "MipsGenCallingConv.inc"
-
//===----------------------------------------------------------------------===//
// TODO: Implement a generic logic using tblgen that can support this.
// Mips O32 ABI rules:
@@ -1793,6 +1878,70 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
return false; // CC must always match
}
+static const unsigned Mips64IntRegs[8] =
+ {Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
+ Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
+static const unsigned Mips64DPRegs[8] =
+ {Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
+ Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64};
+
+static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ unsigned Align = std::max(ArgFlags.getByValAlign(), (unsigned)8);
+ unsigned Size = (ArgFlags.getByValSize() + 7) / 8 * 8;
+ unsigned FirstIdx = State.getFirstUnallocated(Mips64IntRegs, 8);
+
+ assert(Align <= 16 && "Cannot handle alignments larger than 16.");
+
+ // If byval is 16-byte aligned, the first arg register must be even.
+ if ((Align == 16) && (FirstIdx % 2)) {
+ State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]);
+ ++FirstIdx;
+ }
+
+ // Mark the registers allocated.
+ for (unsigned I = FirstIdx; Size && (I < 8); Size -= 8, ++I)
+ State.AllocateReg(Mips64IntRegs[I], Mips64DPRegs[I]);
+
+ // Allocate space on caller's stack.
+ unsigned Offset = State.AllocateStack(Size, Align);
+
+ if (FirstIdx < 8)
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx],
+ LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+
+ return true;
+}
+
+#include "MipsGenCallingConv.inc"
+
+static void
+AnalyzeMips64CallOperands(CCState &CCInfo,
+ const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ unsigned NumOps = Outs.size();
+ for (unsigned i = 0; i != NumOps; ++i) {
+ MVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ bool R;
+
+ if (Outs[i].IsFixed)
+ R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+ else
+ R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+
+ if (R) {
+#ifndef NDEBUG
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << EVT(ArgVT).getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -1901,6 +2050,90 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
MachinePointerInfo(0), MachinePointerInfo(0));
}
+// Copy Mips64 byVal arg to registers and stack.
+void static
+PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
+ SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass,
+ SmallVector<SDValue, 8>& MemOpChains, int& LastFI,
+ MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
+ const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ EVT PtrTy, bool isLittle) {
+ unsigned ByValSize = Flags.getByValSize();
+ unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8);
+ bool IsRegLoc = VA.isRegLoc();
+ unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
+ unsigned LocMemOffset = 0;
+
+ if (!IsRegLoc)
+ LocMemOffset = VA.getLocMemOffset();
+ else {
+ const unsigned *Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8,
+ VA.getLocReg());
+ const unsigned *RegEnd = Mips64IntRegs + 8;
+
+ // Copy double words to registers.
+ for (; (Reg != RegEnd) && (ByValSize >= Offset + 8); ++Reg, Offset += 8) {
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal = DAG.getLoad(MVT::i64, dl, Chain, LoadPtr,
+ MachinePointerInfo(), false, false, false,
+ Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+ RegsToPass.push_back(std::make_pair(*Reg, LoadVal));
+ }
+
+ // If there is an argument register available, copy the remainder of the
+ // byval argument with sub-doubleword loads and shifts.
+ if ((Reg != RegEnd) && (ByValSize != Offset)) {
+ assert((ByValSize < Offset + 8) &&
+ "Size of the remainder should be smaller than 8-byte.");
+ SDValue Val;
+ for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
+ unsigned RemSize = ByValSize - Offset;
+
+ if (RemSize < LoadSize)
+ continue;
+
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal =
+ DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr,
+ MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
+ false, false, Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+
+ // Offset in number of bits from double word boundary.
+ unsigned OffsetDW = (Offset % 8) * 8;
+ unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
+ SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal,
+ DAG.getConstant(Shamt, MVT::i32));
+
+ Val = Val.getNode() ? DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) :
+ Shift;
+ Offset += LoadSize;
+ Alignment = std::min(Alignment, LoadSize);
+ }
+
+ RegsToPass.push_back(std::make_pair(*Reg, Val));
+ return;
+ }
+ }
+
+ unsigned MemCpySize = ByValSize - Offset;
+ if (MemCpySize) {
+ // Create a fixed object on stack at offset LocMemOffset and copy
+ // remainder of byval arg to it with memcpy.
+ SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ LastFI = MFI->CreateFixedObject(MemCpySize, LocMemOffset, true);
+ SDValue Dst = DAG.getFrameIndex(LastFI, PtrTy);
+ ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src,
+ DAG.getConstant(MemCpySize, PtrTy), Alignment,
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(0), MachinePointerInfo(0));
+ }
+}
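
The shift/OR sequence above assembles the sub-doubleword tail of the aggregate into a single 64-bit register value. The following host-side emulation mirrors that packing; it is not code from the patch, and packTail plus the little-endian-host memcpy are illustrative assumptions only.

// Illustrative sketch only: emulates how PassByValArg64 packs the tail of a
// byval aggregate into one 64-bit value using zero-extended loads and shifts.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t packTail(const uint8_t *Struct, unsigned ByValSize,
                         unsigned Offset, bool IsLittle) {
  uint64_t Val = 0;
  for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
    unsigned RemSize = ByValSize - Offset;
    if (RemSize < LoadSize)
      continue;

    // Zero-extended load of LoadSize bytes (little-endian host assumed here).
    uint64_t LoadVal = 0;
    std::memcpy(&LoadVal, Struct + Offset, LoadSize);

    // Same shift-amount computation as the DAG code above.
    unsigned OffsetDW = (Offset % 8) * 8;
    unsigned Shamt = IsLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
    Val |= LoadVal << Shamt;
    Offset += LoadSize;
  }
  return Val;
}

int main() {
  // 13-byte struct: bytes 8..12 form the tail packed into the second register.
  uint8_t S[13];
  for (unsigned i = 0; i != 13; ++i) S[i] = uint8_t(i + 1);
  std::printf("%016llx\n", (unsigned long long)packTail(S, 13, 8, true));
}
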
+
/// LowerCall - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: isTailCall.
@@ -1929,6 +2162,8 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
if (IsO32)
CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
+ else if (HasMips64)
+ AnalyzeMips64CallOperands(CCInfo, Outs);
else
CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
@@ -1987,6 +2222,22 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
SDValue Arg = OutVals[i];
CCValAssign &VA = ArgLocs[i];
MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ // ByVal Arg.
+ if (Flags.isByVal()) {
+ assert(Flags.getByValSize() &&
+ "ByVal args of size 0 should have been ignored by front-end.");
+ if (IsO32)
+ WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ MFI, DAG, Arg, VA, Flags, getPointerTy(),
+ Subtarget->isLittle());
+ else
+ PassByValArg64(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ MFI, DAG, Arg, VA, Flags, getPointerTy(),
+ Subtarget->isLittle());
+ continue;
+ }
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -2032,18 +2283,6 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// Register can't get to this point...
assert(VA.isMemLoc());
- // ByVal Arg.
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- if (Flags.isByVal()) {
- assert(IsO32 &&
- "No support for ByVal args by ABIs other than O32 yet.");
- assert(Flags.getByValSize() &&
- "ByVal args of size 0 should have been ignored by front-end.");
- WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI, MFI,
- DAG, Arg, VA, Flags, getPointerTy(), Subtarget->isLittle());
- continue;
- }
-
// Create the frame index object for this incoming parameter
LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), true);
@@ -2232,6 +2471,45 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
}
}
+// Create a stack frame object and copy the byval argument registers to it.
+static unsigned
+CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
+ std::vector<SDValue>& OutChains, SelectionDAG &DAG,
+ const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ MachineFrameInfo *MFI, bool IsRegLoc,
+ SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI,
+ EVT PtrTy) {
+ const unsigned *Reg = Mips64IntRegs + 8;
+ int FOOffset; // Frame object offset from virtual frame pointer.
+
+ if (IsRegLoc) {
+ Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, VA.getLocReg());
+ FOOffset = (Reg - Mips64IntRegs) * 8 - 8 * 8;
+ }
+ else
+ FOOffset = VA.getLocMemOffset();
+
+ // Create frame object.
+ unsigned NumRegs = (Flags.getByValSize() + 7) / 8;
+ unsigned LastFI = MFI->CreateFixedObject(NumRegs * 8, FOOffset, true);
+ SDValue FIN = DAG.getFrameIndex(LastFI, PtrTy);
+ InVals.push_back(FIN);
+
+ // Copy arg registers.
+ for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs);
+ ++Reg, ++I) {
+ unsigned VReg = AddLiveIn(MF, *Reg, Mips::CPU64RegsRegisterClass);
+ SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN,
+ DAG.getConstant(I * 8, PtrTy));
+ SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64),
+ StorePtr, MachinePointerInfo(), false,
+ false, 0);
+ OutChains.push_back(Store);
+ }
+
+ return LastFI;
+}
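
When the byval argument starts in a register, the fixed object is created at a negative offset, so the 64-byte register save area sits directly below the virtual frame pointer and runs contiguously into the caller-allocated part of the argument. A small sketch of that offset computation follows (hypothetical helper, not code from the patch).

// Illustrative sketch only: the frame-object offset CopyMips64ByValRegs picks
// when a byval argument begins in one of the eight N64 argument registers.
#include <cassert>
#include <cstdio>

static int regSaveAreaOffset(unsigned RegIdx) {
  assert(RegIdx < 8 && "N64 passes at most 8 integer args in registers");
  return int(RegIdx) * 8 - 8 * 8; // $a0 maps to -64, $a7 to -8.
}

int main() {
  // Anything that spills past $a7 continues at offset 0 and above, in the
  // caller's frame, which is why the two areas line up contiguously.
  for (unsigned I = 0; I != 8; ++I)
    std::printf("arg reg %u -> frame offset %d\n", I, regSaveAreaOffset(I));
}
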
+
/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments places on the stack.
SDValue
@@ -2267,9 +2545,28 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
EVT ValVT = VA.getValVT();
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ bool IsRegLoc = VA.isRegLoc();
+
+ if (Flags.isByVal()) {
+ assert(Flags.getByValSize() &&
+ "ByVal args of size 0 should have been ignored by front-end.");
+ if (IsO32) {
+ unsigned NumWords = (Flags.getByValSize() + 3) / 4;
+ LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
+ true);
+ SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
+ InVals.push_back(FIN);
+ ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags);
+ } else // N32/64
+ LastFI = CopyMips64ByValRegs(MF, Chain, dl, OutChains, DAG, VA, Flags,
+ MFI, IsRegLoc, InVals, MipsFI,
+ getPointerTy());
+ continue;
+ }
// Arguments stored on registers
- if (VA.isRegLoc()) {
+ if (IsRegLoc) {
EVT RegVT = VA.getLocVT();
unsigned ArgReg = VA.getLocReg();
TargetRegisterClass *RC = 0;
@@ -2325,23 +2622,6 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// sanity check
assert(VA.isMemLoc());
- ISD::ArgFlagsTy Flags = Ins[i].Flags;
-
- if (Flags.isByVal()) {
- assert(IsO32 &&
- "No support for ByVal args by ABIs other than O32 yet.");
- assert(Flags.getByValSize() &&
- "ByVal args of size 0 should have been ignored by front-end.");
- unsigned NumWords = (Flags.getByValSize() + 3) / 4;
- LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
- true);
- SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
- InVals.push_back(FIN);
- ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags);
-
- continue;
- }
-
// The stack pointer offset is relative to the caller stack frame.
LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), true);
@@ -2367,24 +2647,40 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
- if (isVarArg && IsO32) {
+ if (isVarArg) {
+ unsigned NumOfRegs = IsO32 ? 4 : 8;
+ const unsigned *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
+ unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
+    int FirstRegSlotOffset = IsO32 ? 0 : -64; // Offset of $a0's slot.
+ TargetRegisterClass *RC
+ = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass;
+ unsigned RegSize = RC->getSize();
+ int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;
+
+ // Offset of the first variable argument from stack pointer.
+ int FirstVaArgOffset;
+
+ if (IsO32 || (Idx == NumOfRegs)) {
+ FirstVaArgOffset =
+ (CCInfo.getNextStackOffset() + RegSize - 1) / RegSize * RegSize;
+ } else
+ FirstVaArgOffset = RegSlotOffset;
+
// Record the frame index of the first variable argument
// which is a value necessary to VASTART.
- unsigned NextStackOffset = CCInfo.getNextStackOffset();
- assert(NextStackOffset % 4 == 0 &&
- "NextStackOffset must be aligned to 4-byte boundaries.");
- LastFI = MFI->CreateFixedObject(4, NextStackOffset, true);
+ LastFI = MFI->CreateFixedObject(RegSize, FirstVaArgOffset, true);
MipsFI->setVarArgsFrameIndex(LastFI);
- // If NextStackOffset is smaller than o32's 16-byte reserved argument area,
- // copy the integer registers that have not been used for argument passing
- // to the caller's stack frame.
- for (; NextStackOffset < 16; NextStackOffset += 4) {
- TargetRegisterClass *RC = Mips::CPURegsRegisterClass;
- unsigned Idx = NextStackOffset / 4;
- unsigned Reg = AddLiveIn(DAG.getMachineFunction(), O32IntRegs[Idx], RC);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, MVT::i32);
- LastFI = MFI->CreateFixedObject(4, NextStackOffset, true);
+ // Copy the integer registers that have not been used for argument passing
+ // to the argument register save area. For O32, the save area is allocated
+ // in the caller's stack frame, while for N32/64, it is allocated in the
+ // callee's stack frame.
+ for (int StackOffset = RegSlotOffset;
+ Idx < NumOfRegs; ++Idx, StackOffset += RegSize) {
+ unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegs[Idx], RC);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
+ MVT::getIntegerVT(RegSize * 8));
+ LastFI = MFI->CreateFixedObject(RegSize, StackOffset, true);
SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
MachinePointerInfo(),
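
The va_start offset now depends on the ABI: under O32 the first variable argument always lives at the rounded-up end of the fixed-argument area, while under N32/64 it is the home slot of the first unused argument register in the callee-side save area. A condensed sketch of that decision follows (hypothetical helper, not code from the patch; NextStackOffset and Idx stand in for the values CCState reports after analyzing the fixed formals).

// Illustrative sketch only: where the first variable argument lands relative
// to the incoming stack/frame pointer, per the vararg lowering above.
#include <cstdio>

static int firstVaArgOffset(bool IsO32, unsigned NextStackOffset,
                            unsigned Idx /* first unallocated arg reg */) {
  unsigned NumOfRegs = IsO32 ? 4 : 8;
  unsigned RegSize = IsO32 ? 4 : 8;
  int FirstRegSlotOffset = IsO32 ? 0 : -64; // Offset of $a0's home slot.

  // O32, or all N32/64 argument registers used: round the fixed-argument
  // area up to a register-sized boundary and start there.
  if (IsO32 || Idx == NumOfRegs)
    return int((NextStackOffset + RegSize - 1) / RegSize * RegSize);

  // Otherwise the first variable argument is the first unused register's
  // slot in the callee-side register save area.
  return FirstRegSlotOffset + int(Idx) * int(RegSize);
}

int main() {
  std::printf("O32, 12 bytes of fixed args: %d\n", firstVaArgOffset(true, 12, 3));
  std::printf("N64, 3 fixed register args:  %d\n", firstVaArgOffset(false, 24, 3));
}
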
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index 271d248..8fa3052 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -30,86 +30,6 @@ namespace Mips {
unsigned GetOppositeBranchOpc(unsigned Opc);
}
-/// MipsII - This namespace holds all of the target specific flags that
-/// instruction info tracks.
-///
-namespace MipsII {
- /// Target Operand Flag enum.
- enum TOF {
- //===------------------------------------------------------------------===//
- // Mips Specific MachineOperand flags.
-
- MO_NO_FLAG,
-
- /// MO_GOT - Represents the offset into the global offset table at which
- /// the address the relocation entry symbol resides during execution.
- MO_GOT,
-
- /// MO_GOT_CALL - Represents the offset into the global offset table at
- /// which the address of a call site relocation entry symbol resides
- /// during execution. This is different from the above since this flag
- /// can only be present in call instructions.
- MO_GOT_CALL,
-
- /// MO_GPREL - Represents the offset from the current gp value to be used
- /// for the relocatable object file being produced.
- MO_GPREL,
-
- /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
- /// address.
- MO_ABS_HI,
- MO_ABS_LO,
-
- /// MO_TLSGD - Represents the offset into the global offset table at which
- // the module ID and TSL block offset reside during execution (General
- // Dynamic TLS).
- MO_TLSGD,
-
- /// MO_GOTTPREL - Represents the offset from the thread pointer (Initial
- // Exec TLS).
- MO_GOTTPREL,
-
- /// MO_TPREL_HI/LO - Represents the hi and low part of the offset from
- // the thread pointer (Local Exec TLS).
- MO_TPREL_HI,
- MO_TPREL_LO,
-
- // N32/64 Flags.
- MO_GPOFF_HI,
- MO_GPOFF_LO,
- MO_GOT_DISP,
- MO_GOT_PAGE,
- MO_GOT_OFST
- };
-
- enum {
- //===------------------------------------------------------------------===//
- // Instruction encodings. These are the standard/most common forms for
- // Mips instructions.
- //
-
- // Pseudo - This represents an instruction that is a pseudo instruction
- // or one that has not been implemented yet. It is illegal to code generate
- // it, but tolerated for intermediate implementation stages.
- Pseudo = 0,
-
- /// FrmR - This form is for instructions of the format R.
- FrmR = 1,
- /// FrmI - This form is for instructions of the format I.
- FrmI = 2,
- /// FrmJ - This form is for instructions of the format J.
- FrmJ = 3,
- /// FrmFR - This form is for instructions of the format FR.
- FrmFR = 4,
- /// FrmFI - This form is for instructions of the format FI.
- FrmFI = 5,
- /// FrmOther - This form is for instructions that have no specific format.
- FrmOther = 6,
-
- FormMask = 15
- };
-}
-
class MipsInstrInfo : public MipsGenInstrInfo {
MipsTargetMachine &TM;
bool IsN64;
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index 1cc3841..5dca9b6 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -39,8 +39,8 @@ def SDT_MipsDivRem : SDTypeProfile<0, 2,
def SDT_MipsThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
-def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
- SDTCisVT<1, iPTR>]>;
+def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>,
+ SDTCisSameAs<0, 1>]>;
def SDT_Sync : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def SDT_Ext : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
@@ -138,8 +138,15 @@ def NotN64 : Predicate<"!Subtarget.isABI_N64()">;
//===----------------------------------------------------------------------===//
// Instruction operand types
-def brtarget : Operand<OtherVT>;
+def jmptarget : Operand<OtherVT> {
+ let EncoderMethod = "getJumpTargetOpValue";
+}
+def brtarget : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValue";
+ let OperandType = "OPERAND_PCREL";
+}
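
The new jmptarget and brtarget operands name encoder hooks that live in MipsMCCodeEmitter.cpp (see the diffstat). At the ISA level those hooks have to produce, respectively, the low 26 bits of the target's word index and a signed 16-bit word offset relative to the delay-slot instruction. The sketch below illustrates only that arithmetic (hypothetical helpers, not the patch's code); the real hooks additionally record relocation fixups when the operand is still a symbolic expression.

// Illustrative sketch only: ISA-level encodings of MIPS jump and branch
// targets, the values the named encoder methods ultimately have to emit.
#include <cstdint>
#include <cstdio>

static uint32_t encodeJumpTarget(uint64_t TargetAddr) {
  return uint32_t(TargetAddr >> 2) & 0x03FFFFFF; // low 26 bits of word index
}

static uint16_t encodeBranchTarget(uint64_t BranchAddr, uint64_t TargetAddr) {
  // Offset is measured in words from the instruction in the delay slot.
  int64_t WordOff = (int64_t(TargetAddr) - int64_t(BranchAddr + 4)) >> 2;
  return uint16_t(WordOff); // truncated to the signed 16-bit offset field
}

int main() {
  std::printf("j 0x400080    -> 0x%07x\n", (unsigned)encodeJumpTarget(0x400080));
  std::printf("beq back 16 B -> 0x%04x\n",
              (unsigned)encodeBranchTarget(0x400100, 0x4000F4));
}
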
def calltarget : Operand<i32>;
+def calltarget64: Operand<i64>;
def simm16 : Operand<i32>;
def simm16_64 : Operand<i64>;
def shamt : Operand<i32>;
@@ -167,6 +174,12 @@ def mem_ea : Operand<i32> {
let EncoderMethod = "getMemEncoding";
}
+def mem_ea_64 : Operand<i64> {
+ let PrintMethod = "printMemOperandEA";
+ let MIOperandInfo = (ops CPU64Regs, simm16_64);
+ let EncoderMethod = "getMemEncoding";
+}
+
// size operand of ext instruction
def size_ext : Operand<i32> {
let EncoderMethod = "getSizeExtEncoding";
@@ -442,7 +455,7 @@ class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op, Operand Od,
// Unconditional branch
let isBranch=1, isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
class JumpFJ<bits<6> op, string instr_asm>:
- FJ<op, (outs), (ins brtarget:$target),
+ FJ<op, (outs), (ins jmptarget:$target),
!strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch>;
let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1 in
@@ -525,9 +538,9 @@ class MoveToLOHI<bits<6> func, string instr_asm, RegisterClass RC,
let Defs = DefRegs;
}
-class EffectiveAddress<string instr_asm> :
- FMem<0x09, (outs CPURegs:$rt), (ins mem_ea:$addr),
- instr_asm, [(set CPURegs:$rt, addr:$addr)], IIAlu>;
+class EffectiveAddress<string instr_asm, RegisterClass RC, Operand Mem> :
+ FMem<0x09, (outs RC:$rt), (ins Mem:$addr),
+ instr_asm, [(set RC:$rt, addr:$addr)], IIAlu>;
// Count Leading Ones/Zeros in Word
class CountLeading0<bits<6> func, string instr_asm, RegisterClass RC>:
@@ -587,20 +600,41 @@ class ExtIns<bits<6> _funct, string instr_asm, dag outs, dag ins,
}
// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
-class Atomic2Ops<PatFrag Op, string Opstr> :
- MipsPseudo<(outs CPURegs:$dst), (ins CPURegs:$ptr, CPURegs:$incr),
+class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC,
+ RegisterClass PRC> :
+ MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
!strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"),
- [(set CPURegs:$dst,
- (Op CPURegs:$ptr, CPURegs:$incr))]>;
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
+
+multiclass Atomic2Ops32<PatFrag Op, string Opstr> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]>;
+}
// Atomic Compare & Swap.
-class AtomicCmpSwap<PatFrag Op, string Width> :
- MipsPseudo<(outs CPURegs:$dst),
- (ins CPURegs:$ptr, CPURegs:$cmp, CPURegs:$swap),
- !strconcat("atomic_cmp_swap_", Width,
- "\t$dst, $ptr, $cmp, $swap"),
- [(set CPURegs:$dst,
- (Op CPURegs:$ptr, CPURegs:$cmp, CPURegs:$swap))]>;
+class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC,
+ RegisterClass PRC> :
+ MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
+ !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"),
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
+
+multiclass AtomicCmpSwap32<PatFrag Op, string Width> {
+ def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]>;
+}
+
+class LLBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
+ FMem<Opc, (outs RC:$rt), (ins Mem:$addr),
+ !strconcat(opstring, "\t$rt, $addr"), [], IILoad> {
+ let mayLoad = 1;
+}
+
+class SCBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
+ FMem<Opc, (outs RC:$dst), (ins RC:$rt, Mem:$addr),
+ !strconcat(opstring, "\t$rt, $addr"), [], IIStore> {
+ let mayStore = 1;
+ let Constraints = "$rt = $dst";
+}
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -636,32 +670,32 @@ def CPLOAD : MipsPseudo<(outs), (ins CPURegs:$picreg), ".cpload\t$picreg", []>;
def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc), ".cprestore\t$loc", []>;
let usesCustomInserter = 1 in {
- def ATOMIC_LOAD_ADD_I8 : Atomic2Ops<atomic_load_add_8, "load_add_8">;
- def ATOMIC_LOAD_ADD_I16 : Atomic2Ops<atomic_load_add_16, "load_add_16">;
- def ATOMIC_LOAD_ADD_I32 : Atomic2Ops<atomic_load_add_32, "load_add_32">;
- def ATOMIC_LOAD_SUB_I8 : Atomic2Ops<atomic_load_sub_8, "load_sub_8">;
- def ATOMIC_LOAD_SUB_I16 : Atomic2Ops<atomic_load_sub_16, "load_sub_16">;
- def ATOMIC_LOAD_SUB_I32 : Atomic2Ops<atomic_load_sub_32, "load_sub_32">;
- def ATOMIC_LOAD_AND_I8 : Atomic2Ops<atomic_load_and_8, "load_and_8">;
- def ATOMIC_LOAD_AND_I16 : Atomic2Ops<atomic_load_and_16, "load_and_16">;
- def ATOMIC_LOAD_AND_I32 : Atomic2Ops<atomic_load_and_32, "load_and_32">;
- def ATOMIC_LOAD_OR_I8 : Atomic2Ops<atomic_load_or_8, "load_or_8">;
- def ATOMIC_LOAD_OR_I16 : Atomic2Ops<atomic_load_or_16, "load_or_16">;
- def ATOMIC_LOAD_OR_I32 : Atomic2Ops<atomic_load_or_32, "load_or_32">;
- def ATOMIC_LOAD_XOR_I8 : Atomic2Ops<atomic_load_xor_8, "load_xor_8">;
- def ATOMIC_LOAD_XOR_I16 : Atomic2Ops<atomic_load_xor_16, "load_xor_16">;
- def ATOMIC_LOAD_XOR_I32 : Atomic2Ops<atomic_load_xor_32, "load_xor_32">;
- def ATOMIC_LOAD_NAND_I8 : Atomic2Ops<atomic_load_nand_8, "load_nand_8">;
- def ATOMIC_LOAD_NAND_I16 : Atomic2Ops<atomic_load_nand_16, "load_nand_16">;
- def ATOMIC_LOAD_NAND_I32 : Atomic2Ops<atomic_load_nand_32, "load_nand_32">;
-
- def ATOMIC_SWAP_I8 : Atomic2Ops<atomic_swap_8, "swap_8">;
- def ATOMIC_SWAP_I16 : Atomic2Ops<atomic_swap_16, "swap_16">;
- def ATOMIC_SWAP_I32 : Atomic2Ops<atomic_swap_32, "swap_32">;
-
- def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<atomic_cmp_swap_8, "8">;
- def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<atomic_cmp_swap_16, "16">;
- def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, "32">;
+ defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8, "load_add_8">;
+ defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16, "load_add_16">;
+ defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32, "load_add_32">;
+ defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8, "load_sub_8">;
+ defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16, "load_sub_16">;
+ defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32, "load_sub_32">;
+ defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8, "load_and_8">;
+ defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16, "load_and_16">;
+ defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32, "load_and_32">;
+ defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8, "load_or_8">;
+ defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16, "load_or_16">;
+ defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32, "load_or_32">;
+ defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8, "load_xor_8">;
+ defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16, "load_xor_16">;
+ defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32, "load_xor_32">;
+ defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8, "load_nand_8">;
+ defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16, "load_nand_16">;
+ defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32, "load_nand_32">;
+
+ defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8, "swap_8">;
+ defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16, "swap_16">;
+ defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32, "swap_32">;
+
+ defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8, "8">;
+ defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16, "16">;
+ defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32, "32">;
}
//===----------------------------------------------------------------------===//
@@ -738,12 +772,10 @@ def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
}
/// Load-linked, Store-conditional
-let mayLoad = 1 in
- def LL : FMem<0x30, (outs CPURegs:$rt), (ins mem:$addr),
- "ll\t$rt, $addr", [], IILoad>;
-let mayStore = 1, Constraints = "$rt = $dst" in
- def SC : FMem<0x38, (outs CPURegs:$dst), (ins CPURegs:$rt, mem:$addr),
- "sc\t$rt, $addr", [], IIStore>;
+def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>;
+def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]>;
+def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>;
+def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]>;
/// Jump and Branch Instructions
def J : JumpFJ<0x02, "j">;
@@ -798,13 +830,13 @@ let addr=0 in
// instructions. The same not happens for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
-def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr">;
+def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
// DynAlloc node points to dynamically allocated stack space.
// $sp is added to the list of implicitly used registers to prevent dead code
// elimination from removing instructions that modify $sp.
let Uses = [SP] in
-def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr">;
+def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
// MADD*/MSUB*
def MADD : MArithR<0, "madd", MipsMAdd, 1>;
diff --git a/lib/Target/Mips/MipsMCInstLower.cpp b/lib/Target/Mips/MipsMCInstLower.cpp
index 6c0e4f6..1fab52c 100644
--- a/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/lib/Target/Mips/MipsMCInstLower.cpp
@@ -15,6 +15,7 @@
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
#include "MipsMCInstLower.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
diff --git a/lib/Target/Mips/MipsMachineFunction.h b/lib/Target/Mips/MipsMachineFunction.h
index be27606..bc30b6b 100644
--- a/lib/Target/Mips/MipsMachineFunction.h
+++ b/lib/Target/Mips/MipsMachineFunction.h
@@ -51,16 +51,12 @@ private:
mutable int DynAllocFI; // Frame index of dynamically allocated stack area.
unsigned MaxCallFrameSize;
- // Size of area on callee's stack frame which is used to save va_arg or
- // byval arguments passed in registers.
- unsigned RegSaveAreaSize;
-
public:
MipsFunctionInfo(MachineFunction& MF)
: MF(MF), SRetReturnReg(0), GlobalBaseReg(0),
VarArgsFrameIndex(0), InArgFIRange(std::make_pair(-1, 0)),
OutArgFIRange(std::make_pair(-1, 0)), GPFI(0), DynAllocFI(0),
- MaxCallFrameSize(0), RegSaveAreaSize(0)
+ MaxCallFrameSize(0)
{}
bool isInArgFI(int FI) const {
@@ -104,11 +100,6 @@ public:
unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
-
- unsigned getRegSaveAreaSize() const { return RegSaveAreaSize; }
- void setRegSaveAreaSize(unsigned S) {
- if (RegSaveAreaSize < S) RegSaveAreaSize = S;
- }
};
} // end of namespace llvm
diff --git a/lib/Target/Mips/TargetInfo/LLVMBuild.txt b/lib/Target/Mips/TargetInfo/LLVMBuild.txt
index e8035af..90ae260 100644
--- a/lib/Target/Mips/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/Mips/TargetInfo/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = MipsInfo
parent = Mips
-required_libraries = MC Support
+required_libraries = MC Support Target
add_to_library_groups = Mips