Diffstat (limited to 'lib/Target/Sparc')
-rw-r--r--  lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h       |  62
-rw-r--r--  lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp |  30
-rw-r--r--  lib/Target/Sparc/SparcAsmPrinter.cpp                |  50
-rw-r--r--  lib/Target/Sparc/SparcCallingConv.td                | 101
-rw-r--r--  lib/Target/Sparc/SparcFrameLowering.cpp             |  52
-rw-r--r--  lib/Target/Sparc/SparcFrameLowering.h               |   8
-rw-r--r--  lib/Target/Sparc/SparcISelDAGToDAG.cpp              |  10
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.cpp              | 813
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.h                |  38
-rw-r--r--  lib/Target/Sparc/SparcInstr64Bit.td                 | 333
-rw-r--r--  lib/Target/Sparc/SparcInstrFormats.td               |  37
-rw-r--r--  lib/Target/Sparc/SparcInstrInfo.td                  | 198
-rw-r--r--  lib/Target/Sparc/SparcRegisterInfo.cpp              |  11
-rw-r--r--  lib/Target/Sparc/SparcRegisterInfo.h                |   3
-rw-r--r--  lib/Target/Sparc/SparcRegisterInfo.td               |  14
-rw-r--r--  lib/Target/Sparc/SparcSubtarget.h                   |   6
16 files changed, 1496 insertions, 270 deletions
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
new file mode 100644
index 0000000..aac0e8d
--- /dev/null
+++ b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
@@ -0,0 +1,62 @@
+//===-- SparcBaseInfo.h - Top level definitions for Sparc ---- --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains small standalone helper functions and enum definitions
+// for the Sparc target useful for the compiler back-end and the MC libraries.
+// As such, it deliberately does not include references to LLVM core code gen
+// types, passes, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SPARCBASEINFO_H
+#define SPARCBASEINFO_H
+
+namespace llvm {
+
+/// SPII - This namespace holds target specific flags for instruction info.
+namespace SPII {
+
+/// Target Operand Flags. Sparc specific TargetFlags for MachineOperands and
+/// SDNodes.
+enum TOF {
+ MO_NO_FLAG,
+
+ // Extract the low 10 bits of an address.
+ // Assembler: %lo(addr)
+ MO_LO,
+
+ // Extract bits 31-10 of an address. Only for sethi.
+ // Assembler: %hi(addr) or %lm(addr)
+ MO_HI,
+
+ // Extract bits 43-22 of an address. Only for sethi.
+ // Assembler: %h44(addr)
+ MO_H44,
+
+ // Extract bits 21-12 of an address.
+ // Assembler: %m44(addr)
+ MO_M44,
+
+ // Extract bits 11-0 of an address.
+ // Assembler: %l44(addr)
+ MO_L44,
+
+ // Extract bits 63-42 of an address. Only for sethi.
+ // Assembler: %hh(addr)
+ MO_HH,
+
+ // Extract bits 41-32 of an address.
+ // Assembler: %hm(addr)
+ MO_HM
+};
+
+} // end namespace SPII
+} // end namespace llvm
+
+#endif
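
As a sanity check on the bit ranges above, here is a standalone sketch (plain C++, not part of the patch) showing how the %h44/%m44/%l44 fields recombine into a full address; the instruction sequence in the comments is the abs44 pattern that makeAddress() emits further down in this commit.

    // Sketch: recombining the %h44/%m44/%l44 fields defined above.
    #include <cassert>
    #include <cstdint>

    static uint64_t h44(uint64_t A) { return (A >> 22) & 0x3fffff; } // bits 43-22
    static uint64_t m44(uint64_t A) { return (A >> 12) & 0x3ff; }    // bits 21-12
    static uint64_t l44(uint64_t A) { return A & 0xfff; }            // bits 11-0

    int main() {
      uint64_t Addr = 0x0000012345678abcULL; // any address below 2^44
      // The abs44 sequence:
      //   sethi %h44(Addr), %rd   ; %rd = h44 << 10
      //   or    %rd, %m44(Addr), %rd
      //   sllx  %rd, 12, %rd
      //   add   %rd, %l44(Addr), %rd
      uint64_t Rd = (h44(Addr) << 10) | m44(Addr);
      Rd = (Rd << 12) + l44(Addr);
      assert(Rd == Addr);
      return 0;
    }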
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
index 7fdb0c3..1c64e1b 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
@@ -50,14 +50,42 @@ static MCSubtargetInfo *createSparcMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
+// Code models. Some only make sense for 64-bit code.
+//
+// SunCC Reloc CodeModel Constraints
+// abs32 Static Small text+data+bss linked below 2^32 bytes
+// abs44 Static Medium text+data+bss linked below 2^44 bytes
+// abs64 Static Large text smaller than 2^31 bytes
+// pic13 PIC_ Small GOT < 2^13 bytes
+// pic32 PIC_ Medium GOT < 2^32 bytes
+//
+// All code models require that the text segment is smaller than 2GB.
+
static MCCodeGenInfo *createSparcMCCodeGenInfo(StringRef TT, Reloc::Model RM,
CodeModel::Model CM,
CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
+
+ // The default 32-bit code model is abs32/pic32.
+ if (CM == CodeModel::Default)
+ CM = RM == Reloc::PIC_ ? CodeModel::Medium : CodeModel::Small;
+
X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
+static MCCodeGenInfo *createSparcV9MCCodeGenInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
+ MCCodeGenInfo *X = new MCCodeGenInfo();
+
+ // The default 64-bit code model is abs44/pic32.
+ if (CM == CodeModel::Default)
+ CM = CodeModel::Medium;
+
+ X->InitMCCodeGenInfo(RM, CM, OL);
+ return X;
+}
extern "C" void LLVMInitializeSparcTargetMC() {
// Register the MC asm info.
RegisterMCAsmInfo<SparcELFMCAsmInfo> X(TheSparcTarget);
@@ -67,7 +95,7 @@ extern "C" void LLVMInitializeSparcTargetMC() {
TargetRegistry::RegisterMCCodeGenInfo(TheSparcTarget,
createSparcMCCodeGenInfo);
TargetRegistry::RegisterMCCodeGenInfo(TheSparcV9Target,
- createSparcMCCodeGenInfo);
+ createSparcV9MCCodeGenInfo);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(TheSparcTarget, createSparcMCInstrInfo);
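
To summarize the defaults registered above, a small sketch; defaultModel() is a hypothetical helper for illustration, not an LLVM API, and maps each default back to the SunCC names from the code-model table.

    #include <string>

    // Hypothetical helper summarizing the defaults picked above.
    std::string defaultModel(bool Is64Bit, bool IsPIC) {
      if (IsPIC)
        return "pic32";                   // CodeModel::Medium in both cases
      return Is64Bit ? "abs44" : "abs32"; // Medium for v9, Small for v8
    }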
diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp
index e14b3cb..108eb90 100644
--- a/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -16,6 +16,7 @@
#include "Sparc.h"
#include "SparcInstrInfo.h"
#include "SparcTargetMachine.h"
+#include "MCTargetDesc/SparcBaseInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -72,15 +73,39 @@ namespace {
void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand (opNum);
- bool CloseParen = false;
- if (MI->getOpcode() == SP::SETHIi && !MO.isReg() && !MO.isImm()) {
- O << "%hi(";
- CloseParen = true;
- } else if ((MI->getOpcode() == SP::ORri || MI->getOpcode() == SP::ADDri) &&
- !MO.isReg() && !MO.isImm()) {
- O << "%lo(";
- CloseParen = true;
+ unsigned TF = MO.getTargetFlags();
+#ifndef NDEBUG
+ // Verify the target flags.
+ if (MO.isGlobal() || MO.isSymbol() || MO.isCPI()) {
+ if (MI->getOpcode() == SP::CALL)
+ assert(TF == SPII::MO_NO_FLAG &&
+ "Cannot handle target flags on call address");
+ else if (MI->getOpcode() == SP::SETHIi)
+ assert((TF == SPII::MO_HI || TF == SPII::MO_H44 || TF == SPII::MO_HH) &&
+ "Invalid target flags for address operand on sethi");
+ else
+ assert((TF == SPII::MO_LO || TF == SPII::MO_M44 || TF == SPII::MO_L44 ||
+ TF == SPII::MO_HM) &&
+ "Invalid target flags for small address operand");
}
+#endif
+
+ bool CloseParen = true;
+ switch (TF) {
+ default:
+ llvm_unreachable("Unknown target flags on operand");
+ case SPII::MO_NO_FLAG:
+ CloseParen = false;
+ break;
+ case SPII::MO_LO: O << "%lo("; break;
+ case SPII::MO_HI: O << "%hi("; break;
+ case SPII::MO_H44: O << "%h44("; break;
+ case SPII::MO_M44: O << "%m44("; break;
+ case SPII::MO_L44: O << "%l44("; break;
+ case SPII::MO_HH: O << "%hh("; break;
+ case SPII::MO_HM: O << "%hm("; break;
+ }
+
switch (MO.getType()) {
case MachineOperand::MO_Register:
O << "%" << StringRef(getRegisterName(MO.getReg())).lower();
@@ -127,14 +152,7 @@ void SparcAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
return; // don't print "+0"
O << "+";
- if (MI->getOperand(opNum+1).isGlobal() ||
- MI->getOperand(opNum+1).isCPI()) {
- O << "%lo(";
- printOperand(MI, opNum+1, O);
- O << ")";
- } else {
- printOperand(MI, opNum+1, O);
- }
+ printOperand(MI, opNum+1, O);
}
bool SparcAsmPrinter::printGetPCX(const MachineInstr *MI, unsigned opNum,
diff --git a/lib/Target/Sparc/SparcCallingConv.td b/lib/Target/Sparc/SparcCallingConv.td
index d471220..54784e0 100644
--- a/lib/Target/Sparc/SparcCallingConv.td
+++ b/lib/Target/Sparc/SparcCallingConv.td
@@ -12,17 +12,9 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Return Value Calling Conventions
+// SPARC v8 32-bit.
//===----------------------------------------------------------------------===//
-// Sparc 32-bit C return-value convention.
-def RetCC_Sparc32 : CallingConv<[
- CCIfType<[i32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>,
- CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3]>>,
- CCIfType<[f64], CCAssignToReg<[D0, D1]>>
-]>;
-
-// Sparc 32-bit C Calling convention.
def CC_Sparc32 : CallingConv<[
//Custom assign SRet to [sp+64].
CCIfSRet<CCCustom<"CC_Sparc_Assign_SRet">>,
@@ -34,3 +26,94 @@ def CC_Sparc32 : CallingConv<[
// Alternatively, they are assigned to the stack in 4-byte aligned units.
CCAssignToStack<4, 4>
]>;
+
+def RetCC_Sparc32 : CallingConv<[
+ CCIfType<[i32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>,
+ CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3]>>,
+ CCIfType<[f64], CCAssignToReg<[D0, D1]>>
+]>;
+
+
+//===----------------------------------------------------------------------===//
+// SPARC v9 64-bit.
+//===----------------------------------------------------------------------===//
+//
+// The 64-bit ABI conceptually assigns all function arguments to a parameter
+// array starting at [%fp+BIAS+128] in the callee's stack frame. All arguments
+// occupy a multiple of 8 bytes in the array. Integer arguments are extended to
+// 64 bits by the caller. Floats are right-aligned in their 8-byte slot; the
+// first 4 bytes in the slot are undefined.
+//
+// The integer registers %i0 to %i5 shadow the first 48 bytes of the parameter
+// array at fixed offsets. Integer arguments are promoted to registers when
+// possible.
+//
+// The floating point registers %f0 to %f31 shadow the first 128 bytes of the
+// parameter array at fixed offsets. Float and double parameters are promoted
+// to these registers when possible.
+//
+// Structs up to 16 bytes in size are passed by value. They are right-aligned
+// in one or two 8-byte slots in the parameter array. Struct members are
+// promoted to both floating point and integer registers when possible. A
+// struct containing two floats would thus be passed in %f0 and %f1, while two
+// float function arguments would occupy 8 bytes each, and be passed in %f1 and
+// %f3.
+//
+// When a struct { int, float } is passed by value, the int goes in the high
+// bits of an integer register while the float goes in a floating point
+// register.
+//
+// The difference is encoded in LLVM IR using the inreg attribute on function
+// arguments:
+//
+// C: void f(float, float);
+// IR: declare void f(float %f1, float %f3)
+//
+// C: void f(struct { float f0, f1; });
+// IR: declare void f(float inreg %f0, float inreg %f1)
+//
+// C: void f(int, float);
+// IR: declare void f(int signext %i0, float %f3)
+//
+// C: void f(struct { int i0high; float f1; });
+// IR: declare void f(i32 inreg %i0high, float inreg %f1)
+//
+// Two ints in a struct are simply coerced to i64:
+//
+// C: void f(struct { int i0high, i0low; });
+// IR: declare void f(i64 %i0.coerced)
+//
+// The frontend and backend divide the task of producing ABI compliant code for
+// C functions. The C frontend will:
+//
+// - Annotate integer arguments with zeroext or signext attributes.
+//
+// - Split structs into one or two 64-bit sized chunks, or 32-bit chunks with
+// inreg attributes.
+//
+// - Pass structs larger than 16 bytes indirectly with an explicit pointer
+// argument. The byval attribute is not used.
+//
+// The backend will:
+//
+// - Assign all arguments to 64-bit aligned stack slots, 32-bits for inreg.
+//
+// - Promote to integer or floating point registers depending on type.
+//
+// Function return values are passed exactly like function arguments, except a
+// struct up to 32 bytes in size can be returned in registers.
+
+// Function arguments AND return values.
+def CC_Sparc64 : CallingConv<[
+ // The frontend uses the inreg flag to indicate i32 and float arguments from
+ // structs. These arguments are not promoted to 64 bits, but they can still
+ // be assigned to integer and float registers.
+ CCIfInReg<CCIfType<[i32, f32], CCCustom<"CC_Sparc64_Half">>>,
+
+ // All integers are promoted to i64 by the caller.
+ CCIfType<[i32], CCPromoteToType<i64>>,
+
+ // Custom assignment is required because stack space is reserved for all
+ // arguments whether they are passed in registers or not.
+ CCCustom<"CC_Sparc64_Full">
+]>;
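
A standalone sketch of the float-register shadowing described above (illustrative helpers, not from the patch): a full 8-byte slot holds a right-aligned float and so shadows the odd registers, while an inreg half allocates a 4-byte slot.

    #include <cstdio>

    // Slot k (offset k*8) holds a right-aligned float: %f(2k+1).
    static unsigned fullSlotFloatReg(unsigned SlotIndex) { return 2*SlotIndex + 1; }
    // Half-slot k (offset k*4) from an inreg struct member: %f(k).
    static unsigned halfSlotFloatReg(unsigned HalfIndex) { return HalfIndex; }

    int main() {
      // void f(float, float)             -> %f1, %f3  (two full slots)
      std::printf("%%f%u %%f%u\n", fullSlotFloatReg(0), fullSlotFloatReg(1));
      // void f(struct { float f0, f1; }) -> %f0, %f1  (two inreg halves)
      std::printf("%%f%u %%f%u\n", halfSlotFloatReg(0), halfSlotFloatReg(1));
      return 0;
    }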
diff --git a/lib/Target/Sparc/SparcFrameLowering.cpp b/lib/Target/Sparc/SparcFrameLowering.cpp
index a0dae6e..7874240 100644
--- a/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -37,18 +37,27 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF) const {
// Get the number of bytes to allocate from the FrameInfo
int NumBytes = (int) MFI->getStackSize();
- // Emit the correct save instruction based on the number of bytes in
- // the frame. Minimum stack frame size according to V8 ABI is:
- // 16 words for register window spill
- // 1 word for address of returned aggregate-value
- // + 6 words for passing parameters on the stack
- // ----------
- // 23 words * 4 bytes per word = 92 bytes
- NumBytes += 92;
+ if (SubTarget.is64Bit()) {
+ // All 64-bit stack frames must be 16-byte aligned, and must reserve space
+ // for spilling the 16 window registers at %sp+BIAS..%sp+BIAS+128.
+ NumBytes += 128;
+ // Frames with calls must also reserve space for 6 outgoing arguments
+ // whether they are used or not. LowerCall_64 takes care of that.
+ assert(NumBytes % 16 == 0 && "Stack size not 16-byte aligned");
+ } else {
+ // Emit the correct save instruction based on the number of bytes in
+ // the frame. Minimum stack frame size according to V8 ABI is:
+ // 16 words for register window spill
+ // 1 word for address of returned aggregate-value
+ // + 6 words for passing parameters on the stack
+ // ----------
+ // 23 words * 4 bytes per word = 92 bytes
+ NumBytes += 92;
- // Round up to next doubleword boundary -- a double-word boundary
- // is required by the ABI.
- NumBytes = (NumBytes + 7) & ~7;
+ // Round up to next doubleword boundary -- a double-word boundary
+ // is required by the ABI.
+ NumBytes = RoundUpToAlignment(NumBytes, 8);
+ }
NumBytes = -NumBytes;
if (NumBytes >= -4096) {
@@ -70,15 +79,18 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF) const {
void SparcFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- MachineInstr &MI = *I;
- DebugLoc dl = MI.getDebugLoc();
- int Size = MI.getOperand(0).getImm();
- if (MI.getOpcode() == SP::ADJCALLSTACKDOWN)
- Size = -Size;
- const SparcInstrInfo &TII =
- *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
- if (Size)
- BuildMI(MBB, I, dl, TII.get(SP::ADDri), SP::O6).addReg(SP::O6).addImm(Size);
+ if (!hasReservedCallFrame(MF)) {
+ MachineInstr &MI = *I;
+ DebugLoc DL = MI.getDebugLoc();
+ int Size = MI.getOperand(0).getImm();
+ if (MI.getOpcode() == SP::ADJCALLSTACKDOWN)
+ Size = -Size;
+ const SparcInstrInfo &TII =
+ *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
+ if (Size)
+ BuildMI(MBB, I, DL, TII.get(SP::ADDri), SP::O6).addReg(SP::O6)
+ .addImm(Size);
+ }
MBB.erase(I);
}
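
A minimal sketch of the frame-size arithmetic above; note that the real 64-bit path asserts 16-byte alignment rather than rounding, so the rounding here is purely illustrative.

    // Sketch of the prologue sizing rules above (illustrative only).
    static int totalFrameBytes(int StackSize, bool Is64Bit) {
      if (Is64Bit)
        return (StackSize + 128 + 15) & ~15; // window save area, 16-byte aligned
      return (StackSize + 92 + 7) & ~7;      // v8 minimum frame, 8-byte aligned
    }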
diff --git a/lib/Target/Sparc/SparcFrameLowering.h b/lib/Target/Sparc/SparcFrameLowering.h
index 464233e..c375662 100644
--- a/lib/Target/Sparc/SparcFrameLowering.h
+++ b/lib/Target/Sparc/SparcFrameLowering.h
@@ -22,10 +22,12 @@ namespace llvm {
class SparcSubtarget;
class SparcFrameLowering : public TargetFrameLowering {
+ const SparcSubtarget &SubTarget;
public:
- explicit SparcFrameLowering(const SparcSubtarget &/*sti*/)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, 0) {
- }
+ explicit SparcFrameLowering(const SparcSubtarget &ST)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
+ ST.is64Bit() ? 16 : 8, 0, ST.is64Bit() ? 16 : 8),
+ SubTarget(ST) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index 5fa545d..a709685 100644
--- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -73,7 +73,7 @@ SDNode* SparcDAGToDAGISel::getGlobalBaseReg() {
bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
SDValue &Base, SDValue &Offset) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), TLI.getPointerTy());
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@@ -87,7 +87,8 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
if (FrameIndexSDNode *FIN =
dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
// Constant offset from frame ref.
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
+ TLI.getPointerTy());
} else {
Base = Addr.getOperand(0);
}
@@ -130,7 +131,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
}
R1 = Addr;
- R2 = CurDAG->getRegister(SP::G0, MVT::i32);
+ R2 = CurDAG->getRegister(SP::G0, TLI.getPointerTy());
return true;
}
@@ -146,6 +147,9 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
case ISD::SDIV:
case ISD::UDIV: {
+ // sdivx / udivx handle 64-bit divides.
+ if (N->getValueType(0) == MVT::i64)
+ break;
// FIXME: should use a custom expander to expose the SRA to the dag.
SDValue DivLHS = N->getOperand(0);
SDValue DivRHS = N->getOperand(1);
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 28ac02a..3863e2c 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -15,6 +15,7 @@
#include "SparcISelLowering.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcTargetMachine.h"
+#include "MCTargetDesc/SparcBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -74,25 +75,117 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
return true;
}
+// Allocate a full-sized argument for the 64-bit ABI.
+static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ assert((LocVT == MVT::f32 || LocVT.getSizeInBits() == 64) &&
+ "Can't handle non-64 bits locations");
+
+ // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
+ unsigned Offset = State.AllocateStack(8, 8);
+ unsigned Reg = 0;
+
+ if (LocVT == MVT::i64 && Offset < 6*8)
+ // Promote integers to %i0-%i5.
+ Reg = SP::I0 + Offset/8;
+ else if (LocVT == MVT::f64 && Offset < 16*8)
+ // Promote doubles to %d0-%d30 (which LLVM calls D0-D15).
+ Reg = SP::D0 + Offset/8;
+ else if (LocVT == MVT::f32 && Offset < 16*8)
+ // Promote floats to %f1, %f3, ...
+ Reg = SP::F1 + Offset/4;
+
+ // Promote to register when possible, otherwise use the stack slot.
+ if (Reg) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return true;
+ }
+
+ // This argument goes on the stack in an 8-byte slot.
+ // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
+ // the right-aligned float. The first 4 bytes of the stack slot are undefined.
+ if (LocVT == MVT::f32)
+ Offset += 4;
+
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return true;
+}
+
+// Allocate a half-sized argument for the 64-bit ABI.
+//
+// This is used when passing { float, int } structs by value in registers.
+static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
+ unsigned Offset = State.AllocateStack(4, 4);
+
+ if (LocVT == MVT::f32 && Offset < 16*8) {
+ // Promote floats to %f0-%f31.
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
+ LocVT, LocInfo));
+ return true;
+ }
+
+ if (LocVT == MVT::i32 && Offset < 6*8) {
+ // Promote integers to %i0-%i5, using half the register.
+ unsigned Reg = SP::I0 + Offset/8;
+ LocVT = MVT::i64;
+ LocInfo = CCValAssign::AExt;
+
+ // Set the Custom bit if this i32 goes in the high bits of a register.
+ if (Offset % 8 == 0)
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
+ LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return true;
+ }
+
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return true;
+}
+
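The Custom bit set above marks an i32 that travels in the high half of a 64-bit register. A small sketch of the packing (plain C++, not from the patch), mirroring the SHL/OR emitted by LowerReturn_64 and LowerCall_64 and the SRL in LowerFormalArguments_64 below:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t High = 0x11112222, Low = 0x33334444;
      uint64_t Reg = (uint64_t(High) << 32) | Low; // caller: SHL + OR
      assert(uint32_t(Reg >> 32) == High);         // callee: SRL recovers high half
      assert(uint32_t(Reg) == Low);                // low half read directly
      return 0;
    }
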
#include "SparcGenCallingConv.inc"
+// The calling conventions in SparcCallingConv.td are described in terms of the
+// callee's register window. This function translates registers to the
+// corresponding caller window %o register.
+static unsigned toCallerWindow(unsigned Reg) {
+ assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum");
+ if (Reg >= SP::I0 && Reg <= SP::I7)
+ return Reg - SP::I0 + SP::O0;
+ return Reg;
+}
+
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ DebugLoc DL, SelectionDAG &DAG) const {
+ if (Subtarget->is64Bit())
+ return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
+ return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
+}
+SDValue
+SparcTargetLowering::LowerReturn_32(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
// CCState - Info about the registers and stack slot.
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
DAG.getTarget(), RVLocs, *DAG.getContext());
- // Analize return values.
+ // Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
SDValue Flag;
@@ -105,7 +198,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(),
OutVals[i], Flag);
// Guarantee that all emitted copies are stuck together with flags.
@@ -120,8 +213,8 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
unsigned Reg = SFI->getSRetReturnReg();
if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
- SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
- Chain = DAG.getCopyToReg(Chain, dl, SP::I0, Val, Flag);
+ SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
+ Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(SP::I0, getPointerTy()));
RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
@@ -134,22 +227,114 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other,
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
&RetOps[0], RetOps.size());
}
-/// LowerFormalArguments - V8 uses a very simple ABI, where all values are
-/// passed in either one or two GPRs, including FP values. TODO: we should
-/// pass FP values in FP registers for fastcc functions.
+// Lower return values for the 64-bit ABI.
+// Return values are passed exactly the same way as function arguments.
SDValue
-SparcTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
+SparcTargetLowering::LowerReturn_64(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc DL, SelectionDAG &DAG) const {
+ // CCValAssign - represent the assignment of the return value to locations.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
+ DAG.getTarget(), RVLocs, *DAG.getContext());
+
+ // Analyze return values.
+ CCInfo.AnalyzeReturn(Outs, CC_Sparc64);
+
+ SDValue Flag;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+
+ // The second operand on the return instruction is the return address offset.
+ // The return address is always %i7+8 with the 64-bit ABI.
+ RetOps.push_back(DAG.getConstant(8, MVT::i32));
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+ SDValue OutVal = OutVals[i];
+
+ // Integer return values must be sign or zero extended by the callee.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::SExt:
+ OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ case CCValAssign::ZExt:
+ OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ case CCValAssign::AExt:
+ OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ default:
+ break;
+ }
+
+ // The custom bit on an i32 return value indicates that it should be passed
+ // in the high bits of the register.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
+ OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
+ DAG.getConstant(32, MVT::i32));
+
+ // The next value may go in the low bits of the same register.
+ // Handle both at once.
+ if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
+ SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
+ OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
+ // Skip the next value, it's already done.
+ ++i;
+ }
+ }
+
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
+
+ // Guarantee that all emitted copies are stuck together with flags.
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+
+ RetOps[0] = Chain; // Update chain.
+
+ // Add the flag if we have it.
+ if (Flag.getNode())
+ RetOps.push_back(Flag);
+
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
+ &RetOps[0], RetOps.size());
+}
+SDValue SparcTargetLowering::
+LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc DL,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ if (Subtarget->is64Bit())
+ return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
+ DL, DAG, InVals);
+ return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
+ DL, DAG, InVals);
+}
+
+/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
+/// passed in either one or two GPRs, including FP values. TODO: we should
+/// pass FP values in FP registers for fastcc functions.
+SDValue SparcTargetLowering::
+LowerFormalArguments_32(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
@@ -341,9 +526,132 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
return Chain;
}
+// Lower formal arguments for the 64 bit ABI.
+SDValue SparcTargetLowering::
+LowerFormalArguments_64(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc DL,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // Analyze arguments according to CC_Sparc64.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
+
+ // The argument array begins at %fp+BIAS+128, after the register save area.
+ const unsigned ArgArea = 128;
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (VA.isRegLoc()) {
+ // This argument is passed in a register.
+ // All integer register arguments are promoted by the caller to i64.
+
+ // Create a virtual register for the promoted live-in value.
+ unsigned VReg = MF.addLiveIn(VA.getLocReg(),
+ getRegClassFor(VA.getLocVT()));
+ SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
+
+ // Get the high bits for i32 struct elements.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom())
+ Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
+ DAG.getConstant(32, MVT::i32));
+
+ // The caller promoted the argument, so insert an Assert?ext SDNode so we
+ // won't promote the value again in this function.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ default:
+ break;
+ }
+
+ // Truncate the register down to the argument type.
+ if (VA.isExtInLoc())
+ Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
+
+ InVals.push_back(Arg);
+ continue;
+ }
+
+ // The registers are exhausted. This argument was passed on the stack.
+ assert(VA.isMemLoc());
+ // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
+ // beginning of the arguments area at %fp+BIAS+128.
+ unsigned Offset = VA.getLocMemOffset() + ArgArea;
+ unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
+ // Adjust the offset for extended arguments; SPARC is big-endian.
+ // The caller will have written the full slot with extended bytes, but we
+ // prefer our own extending loads.
+ if (VA.isExtInLoc())
+ Offset += 8 - ValSize;
+ int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true);
+ InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain,
+ DAG.getFrameIndex(FI, getPointerTy()),
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, false, 0));
+ }
+
+ if (!IsVarArg)
+ return Chain;
+
+ // This function takes variable arguments, some of which may have been passed
+ // in registers %i0-%i5. Variable floating point arguments are never passed
+ // in floating point registers. They go on %i0-%i5 or on the stack like
+ // integer arguments.
+ //
+ // The va_start intrinsic needs to know the offset to the first variable
+ // argument.
+ unsigned ArgOffset = CCInfo.getNextStackOffset();
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+ // Skip the 128 bytes of register save area.
+ FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
+ Subtarget->getStackPointerBias());
+
+ // Save the variable arguments that were passed in registers.
+ // The caller is required to reserve stack space for 6 arguments regardless
+ // of how many arguments were actually passed.
+ SmallVector<SDValue, 8> OutChains;
+ for (; ArgOffset < 6*8; ArgOffset += 8) {
+ unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
+ SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
+ int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
+ OutChains.push_back(DAG.getStore(Chain, DL, VArg,
+ DAG.getFrameIndex(FI, getPointerTy()),
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, 0));
+ }
+
+ if (!OutChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ &OutChains[0], OutChains.size());
+
+ return Chain;
+}
+
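A sketch of the va_start offset computed above, assuming the usual v9 stack bias of 2047 (the value getStackPointerBias() returns for 64-bit subtargets):

    // Where va_start points, as an offset from %fp (illustrative helper).
    static unsigned vaStartOffsetFromFP(unsigned FixedArgBytes) {
      const unsigned Bias = 2047, ArgArea = 128; // bias + register save area
      return FixedArgBytes + ArgArea + Bias;
    }
    // e.g. long f(long a, long b, ...): the fixed args use 2*8 bytes, so the
    // first variable argument lives at %fp + 2047 + 128 + 16.
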
SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ if (Subtarget->is64Bit())
+ return LowerCall_64(CLI, InVals);
+ return LowerCall_32(CLI, InVals);
+}
+
+// Lower a call for the 32-bit ABI.
+SDValue
+SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
DebugLoc &dl = CLI.DL;
SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
@@ -546,11 +854,7 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// stuck together.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- unsigned Reg = RegsToPass[i].first;
- // Remap I0->I7 -> O0->O7.
- if (Reg >= SP::I0 && Reg <= SP::I7)
- Reg = Reg-SP::I0+SP::O0;
-
+ unsigned Reg = toCallerWindow(RegsToPass[i].first);
Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
@@ -572,13 +876,9 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(Callee);
if (hasStructRetAttr)
Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32));
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- unsigned Reg = RegsToPass[i].first;
- if (Reg >= SP::I0 && Reg <= SP::I7)
- Reg = Reg-SP::I0+SP::O0;
-
- Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
- }
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
+ RegsToPass[i].second.getValueType()));
if (InFlag.getNode())
Ops.push_back(InFlag);
@@ -598,13 +898,7 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
- unsigned Reg = RVLocs[i].getLocReg();
-
- // Remap I0->I7 -> O0->O7.
- if (Reg >= SP::I0 && Reg <= SP::I7)
- Reg = Reg-SP::I0+SP::O0;
-
- Chain = DAG.getCopyFromReg(Chain, dl, Reg,
+ Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2);
InVals.push_back(Chain.getValue(0));
@@ -637,6 +931,259 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
return getDataLayout()->getTypeAllocSize(ElementTy);
}
+
+// Fixup floating point arguments in the ... part of a varargs call.
+//
+// The SPARC v9 ABI requires that floating point arguments are treated the same
+// as integers when calling a varargs function. This does not apply to the
+// fixed arguments that are part of the function's prototype.
+//
+// This function post-processes a CCValAssign array created by
+// AnalyzeCallOperands().
+static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
+ ArrayRef<ISD::OutputArg> Outs) {
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ const CCValAssign &VA = ArgLocs[i];
+ // FIXME: What about f32 arguments? C promotes them to f64 when calling
+ // varargs functions.
+ if (!VA.isRegLoc() || VA.getLocVT() != MVT::f64)
+ continue;
+ // The fixed arguments to a varargs function still go in FP registers.
+ if (Outs[VA.getValNo()].IsFixed)
+ continue;
+
+ // This floating point argument should be reassigned.
+ CCValAssign NewVA;
+
+ // Determine the offset into the argument array.
+ unsigned Offset = 8 * (VA.getLocReg() - SP::D0);
+ assert(Offset < 16*8 && "Offset out of range, bad register enum?");
+
+ if (Offset < 6*8) {
+ // This argument should go in %i0-%i5.
+ unsigned IReg = SP::I0 + Offset/8;
+ // Full register, just bitconvert into i64.
+ NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
+ IReg, MVT::i64, CCValAssign::BCvt);
+ } else {
+ // This needs to go to memory, we're out of integer registers.
+ NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
+ Offset, VA.getLocVT(), VA.getLocInfo());
+ }
+ ArgLocs[i] = NewVA;
+ }
+}
+
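A sketch of the varargs rule implemented above: an unprototyped double travels as its raw bits in an integer register (CCValAssign::BCvt), never in %d0-%d30.

    #include <cstdint>
    #include <cstring>

    // Illustrative helper, not from the patch.
    static uint64_t varArgDoubleBits(double D) {
      uint64_t Bits;
      std::memcpy(&Bits, &D, sizeof(Bits)); // the ISD::BITCAST the lowering emits
      return Bits;                          // assigned to %o0-%o5 or the stack
    }
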
+// Lower a call for the 64-bit ABI.
+SDValue
+SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc DL = CLI.DL;
+ SDValue Chain = CLI.Chain;
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
+ DAG.getTarget(), ArgLocs, *DAG.getContext());
+ CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
+
+ // Get the size of the outgoing arguments stack space requirement.
+ // The stack offset computed by CC_Sparc64 includes all arguments.
+ // Called functions expect 6 argument words to exist in the stack frame, used
+ // or not.
+ unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
+
+ // Keep stack frames 16-byte aligned.
+ ArgsSize = RoundUpToAlignment(ArgsSize, 16);
+
+ // Varargs calls require special treatment.
+ if (CLI.IsVarArg)
+ fixupVariableFloatArgs(ArgLocs, CLI.Outs);
+
+ // Adjust the stack pointer to make room for the arguments.
+ // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
+ // with more than 6 arguments.
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true));
+
+ // Collect the set of registers to pass to the function and their values.
+ // This will be emitted as a sequence of CopyToReg nodes glued to the call
+ // instruction.
+ SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+
+ // Collect chains from all the memory operations that copy arguments to the
+ // stack. They must follow the stack pointer adjustment above and precede the
+ // call instruction itself.
+ SmallVector<SDValue, 8> MemOpChains;
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ const CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = CLI.OutVals[i];
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown location info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+ break;
+ }
+
+ if (VA.isRegLoc()) {
+ // The custom bit on an i32 return value indicates that it should be
+ // passed in the high bits of the register.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
+ Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
+ DAG.getConstant(32, MVT::i32));
+
+ // The next value may go in the low bits of the same register.
+ // Handle both at once.
+ if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
+ ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
+ SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
+ CLI.OutVals[i+1]);
+ Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
+ // Skip the next value, it's already done.
+ ++i;
+ }
+ }
+ RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
+ continue;
+ }
+
+ assert(VA.isMemLoc());
+
+ // Create a store off the stack pointer for this argument.
+ SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
+ // The argument area starts at %fp+BIAS+128 in the callee frame,
+ // %sp+BIAS+128 in ours.
+ SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
+ Subtarget->getStackPointerBias() +
+ 128);
+ PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
+ MachinePointerInfo(),
+ false, false, 0));
+ }
+
+ // Emit all stores, make sure they occur before the call.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
+
+ // Build a sequence of CopyToReg nodes glued together with token chain and
+ // glue operands which copy the outgoing args into registers. The InGlue is
+ // necessary since all emitted instructions must be stuck together in order
+ // to pass the live physical registers.
+ SDValue InGlue;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, DL,
+ RegsToPass[i].first, RegsToPass[i].second, InGlue);
+ InGlue = Chain.getValue(1);
+ }
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ // Likewise ExternalSymbol -> TargetExternalSymbol.
+ SDValue Callee = CLI.Callee;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy());
+ else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
+ Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
+
+ // Build the operands for the call instruction itself.
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+
+ // Make sure the CopyToReg nodes are glued to the call instruction which
+ // consumes the registers.
+ if (InGlue.getNode())
+ Ops.push_back(InGlue);
+
+ // Now the call itself.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
+ InGlue = Chain.getValue(1);
+
+ // Revert the stack pointer immediately after the call.
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
+ DAG.getIntPtrConstant(0, true), InGlue);
+ InGlue = Chain.getValue(1);
+
+ // Now extract the return values. This is more or less the same as
+ // LowerFormalArguments_64.
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
+ DAG.getTarget(), RVLocs, *DAG.getContext());
+ RVInfo.AnalyzeCallResult(CLI.Ins, CC_Sparc64);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ unsigned Reg = toCallerWindow(VA.getLocReg());
+
+ // When returning 'inreg { i32, i32 }', two consecutive i32 arguments can
+ // reside in the same register in the high and low bits. Reuse the
+ // CopyFromReg previous node to avoid duplicate copies.
+ SDValue RV;
+ if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
+ if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
+ RV = Chain.getValue(0);
+
+ // But usually we'll create a new CopyFromReg for a different register.
+ if (!RV.getNode()) {
+ RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
+ Chain = RV.getValue(1);
+ InGlue = Chain.getValue(2);
+ }
+
+ // Get the high bits for i32 struct elements.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom())
+ RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
+ DAG.getConstant(32, MVT::i32));
+
+ // The callee promoted the return value, so insert an Assert?ext SDNode so
+ // we won't promote the value again in this function.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::SExt:
+ RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ case CCValAssign::ZExt:
+ RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ default:
+ break;
+ }
+
+ // Truncate the register down to the return value type.
+ if (VA.isExtInLoc())
+ RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
+
+ InVals.push_back(RV);
+ }
+
+ return Chain;
+}
+
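A sketch of the outgoing-argument bookkeeping in LowerCall_64 above, again assuming the v9 stack bias of 2047:

    #include <algorithm>

    // Bytes reserved below %sp for outgoing arguments (illustrative).
    static unsigned outgoingArgsBytes(unsigned NextStackOffset) {
      unsigned ArgsSize = std::max(6u * 8u, NextStackOffset); // 6 slots minimum
      return (ArgsSize + 15u) & ~15u;                         // keep %sp 16-aligned
    }
    // Store address for a stack argument in the caller's frame.
    static unsigned storeOffsetFromSP(unsigned LocMemOffset) {
      return 2047 + 128 + LocMemOffset; // %sp + BIAS + register save area
    }
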
//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
@@ -689,11 +1236,14 @@ static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
+ Subtarget = &TM.getSubtarget<SparcSubtarget>();
// Set up the register classes.
addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
+ if (Subtarget->is64Bit())
+ addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
// Turn FP extload into load/fextend
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
@@ -703,9 +1253,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// Custom legalize GlobalAddress nodes into LO/HI parts.
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
- setOperationAction(ISD::ConstantPool , MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, getPointerTy(), Custom);
+ setOperationAction(ISD::GlobalTLSAddress, getPointerTy(), Custom);
+ setOperationAction(ISD::ConstantPool, getPointerTy(), Custom);
// Sparc doesn't have sext_inreg, replace them with shl/sra
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
@@ -749,9 +1299,13 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::BR_CC, MVT::i64, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
+ }
+
// FIXME: There are instructions available for ATOMIC_FENCE
// on SparcV8 and later.
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
setOperationAction(ISD::FSIN , MVT::f64, Expand);
@@ -818,8 +1372,10 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
case SPISD::CMPICC: return "SPISD::CMPICC";
case SPISD::CMPFCC: return "SPISD::CMPFCC";
case SPISD::BRICC: return "SPISD::BRICC";
+ case SPISD::BRXCC: return "SPISD::BRXCC";
case SPISD::BRFCC: return "SPISD::BRFCC";
case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
+ case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
case SPISD::Hi: return "SPISD::Hi";
case SPISD::Lo: return "SPISD::Lo";
@@ -846,6 +1402,7 @@ void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
switch (Op.getOpcode()) {
default: break;
case SPISD::SELECT_ICC:
+ case SPISD::SELECT_XCC:
case SPISD::SELECT_FCC:
DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
@@ -866,7 +1423,8 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
if (isa<ConstantSDNode>(RHS) &&
cast<ConstantSDNode>(RHS)->isNullValue() &&
CC == ISD::SETNE &&
- ((LHS.getOpcode() == SPISD::SELECT_ICC &&
+ (((LHS.getOpcode() == SPISD::SELECT_ICC ||
+ LHS.getOpcode() == SPISD::SELECT_XCC) &&
LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
(LHS.getOpcode() == SPISD::SELECT_FCC &&
LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
@@ -881,46 +1439,89 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
}
}
+// Convert to a target node and set target flags.
+SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
+ SelectionDAG &DAG) const {
+ if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
+ return DAG.getTargetGlobalAddress(GA->getGlobal(),
+ GA->getDebugLoc(),
+ GA->getValueType(0),
+ GA->getOffset(), TF);
+
+ if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
+ return DAG.getTargetConstantPool(CP->getConstVal(),
+ CP->getValueType(0),
+ CP->getAlignment(),
+ CP->getOffset(), TF);
+
+ if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
+ return DAG.getTargetExternalSymbol(ES->getSymbol(),
+ ES->getValueType(0), TF);
+
+ llvm_unreachable("Unhandled address SDNode");
+}
+
+// Split Op into high and low parts according to HiTF and LoTF.
+// Return an ADD node combining the parts.
+SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
+ unsigned HiTF, unsigned LoTF,
+ SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ EVT VT = Op.getValueType();
+ SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
+ SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
+ return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
+}
+
+// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
+// or ExternalSymbol SDNode.
+SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ EVT VT = getPointerTy();
+
+ // Handle PIC mode first.
+ if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
+ // This is the pic32 code model, the GOT is known to be smaller than 4GB.
+ SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
+ SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
+ SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
+ return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
+ MachinePointerInfo::getGOT(), false, false, false, 0);
+ }
+
+ // This is one of the absolute code models.
+ switch(getTargetMachine().getCodeModel()) {
+ default:
+ llvm_unreachable("Unsupported absolute code model");
+ case CodeModel::Small:
+ // abs32.
+ return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
+ case CodeModel::Medium: {
+ // abs44.
+ SDValue H44 = makeHiLoPair(Op, SPII::MO_H44, SPII::MO_M44, DAG);
+ H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32));
+ SDValue L44 = withTargetFlags(Op, SPII::MO_L44, DAG);
+ L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
+ return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
+ }
+ case CodeModel::Large: {
+ // abs64.
+ SDValue Hi = makeHiLoPair(Op, SPII::MO_HH, SPII::MO_HM, DAG);
+ Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32));
+ SDValue Lo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
+ return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
+ }
+ }
+}
+
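As with the abs44 sketch earlier, a quick check (plain C++, not from the patch) that the abs64 pieces built by makeAddress() for CodeModel::Large recombine into the original address:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Addr = 0xdeadbeefcafebabeULL;
      uint64_t HH = (Addr >> 42) & 0x3fffff; // sethi %hh
      uint64_t HM = (Addr >> 32) & 0x3ff;    // or %hm
      uint64_t HI = (Addr >> 10) & 0x3fffff; // sethi %hi
      uint64_t LO = Addr & 0x3ff;            // or %lo
      uint64_t Hi = (HH << 10) | HM;         // makeHiLoPair(MO_HH, MO_HM)
      uint64_t Lo = (HI << 10) | LO;         // makeHiLoPair(MO_HI, MO_LO)
      assert(((Hi << 32) + Lo) == Addr);     // SHL by 32, then ADD
      return 0;
    }
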
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- // FIXME there isn't really any debug info here
- DebugLoc dl = Op.getDebugLoc();
- SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
- SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
- SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
-
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
- return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
-
- SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
- getPointerTy());
- SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
- SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
- GlobalBase, RelAddr);
- return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- AbsAddr, MachinePointerInfo(), false, false, false, 0);
+ return makeAddress(Op, DAG);
}
SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
SelectionDAG &DAG) const {
- ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
- // FIXME there isn't really any debug info here
- DebugLoc dl = Op.getDebugLoc();
- const Constant *C = N->getConstVal();
- SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
- SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
- SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
- return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
-
- SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
- getPointerTy());
- SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
- SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
- GlobalBase, RelAddr);
- return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- AbsAddr, MachinePointerInfo(), false, false, false, 0);
+ return makeAddress(Op, DAG);
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
@@ -954,12 +1555,13 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
// Get the condition flag.
SDValue CompareFlag;
- if (LHS.getValueType() == MVT::i32) {
- EVT VTs[] = { MVT::i32, MVT::Glue };
+ if (LHS.getValueType().isInteger()) {
+ EVT VTs[] = { LHS.getValueType(), MVT::Glue };
SDValue Ops[2] = { LHS, RHS };
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
- Opc = SPISD::BRICC;
+ // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
+ Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
} else {
CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
@@ -983,12 +1585,13 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
LookThroughSetCC(LHS, RHS, CC, SPCC);
SDValue CompareFlag;
- if (LHS.getValueType() == MVT::i32) {
+ if (LHS.getValueType().isInteger()) {
// subcc returns a value
EVT VTs[] = { LHS.getValueType(), MVT::Glue };
SDValue Ops[2] = { LHS, RHS };
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
- Opc = SPISD::SELECT_ICC;
+ Opc = LHS.getValueType() == MVT::i32 ?
+ SPISD::SELECT_ICC : SPISD::SELECT_XCC;
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
} else {
CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
@@ -1006,14 +1609,13 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
SDValue Offset =
- DAG.getNode(ISD::ADD, dl, MVT::i32,
- DAG.getRegister(SP::I6, MVT::i32),
- DAG.getConstant(FuncInfo->getVarArgsFrameOffset(),
- MVT::i32));
+ DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(),
+ DAG.getRegister(SP::I6, TLI.getPointerTy()),
+ DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset()));
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1),
+ return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
MachinePointerInfo(SV), false, false, 0);
}
@@ -1022,33 +1624,22 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
EVT VT = Node->getValueType(0);
SDValue InChain = Node->getOperand(0);
SDValue VAListPtr = Node->getOperand(1);
+ EVT PtrVT = VAListPtr.getValueType();
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
- DebugLoc dl = Node->getDebugLoc();
- SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr,
+ DebugLoc DL = Node->getDebugLoc();
+ SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr,
MachinePointerInfo(SV), false, false, false, 0);
- // Increment the pointer, VAList, to the next vaarg
- SDValue NextPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, VAList,
- DAG.getConstant(VT.getSizeInBits()/8,
- MVT::i32));
- // Store the incremented VAList to the legalized pointer
- InChain = DAG.getStore(VAList.getValue(1), dl, NextPtr,
+ // Increment the pointer, VAList, to the next vaarg.
+ SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
+ DAG.getIntPtrConstant(VT.getSizeInBits()/8));
+ // Store the incremented VAList to the legalized pointer.
+ InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr,
VAListPtr, MachinePointerInfo(SV), false, false, 0);
- // Load the actual argument out of the pointer VAList, unless this is an
- // f64 load.
- if (VT != MVT::f64)
- return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
- false, false, false, 0);
-
- // Otherwise, load it as i64, then do a bitconvert.
- SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, MachinePointerInfo(),
- false, false, false, 0);
-
- // Bit-Convert the value to f64.
- SDValue Ops[2] = {
- DAG.getNode(ISD::BITCAST, dl, MVT::f64, V),
- V.getValue(1)
- };
- return DAG.getMergeValues(Ops, 2, dl);
+ // Load the actual argument out of the pointer VAList.
+ // We can't count on greater alignment than the word size.
+ return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
+ false, false, false,
+ std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
}
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
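
A one-line sketch of the alignment cap introduced in LowerVAARG above: a va_list slot only guarantees word alignment, so an f64 read through a 32-bit va_list must assume 4-byte alignment.

    #include <algorithm>

    // Alignment the va_arg load may assume (illustrative helper).
    static unsigned vaArgLoadAlign(unsigned PtrBits, unsigned ValBits) {
      return std::min(PtrBits, ValBits) / 8; // e.g. (32, 64) -> 4 bytes
    }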
diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h
index 09148ea..fd706be 100644
--- a/lib/Target/Sparc/SparcISelLowering.h
+++ b/lib/Target/Sparc/SparcISelLowering.h
@@ -19,14 +19,18 @@
#include "llvm/Target/TargetLowering.h"
namespace llvm {
+ class SparcSubtarget;
+
namespace SPISD {
enum {
FIRST_NUMBER = ISD::BUILTIN_OP_END,
- CMPICC, // Compare two GPR operands, set icc.
+ CMPICC, // Compare two GPR operands, set icc+xcc.
CMPFCC, // Compare two FP operands, set fcc.
BRICC, // Branch to dest on icc condition
+ BRXCC, // Branch to dest on xcc condition (64-bit only).
BRFCC, // Branch to dest on fcc condition
SELECT_ICC, // Select between two values using the current ICC flags.
+ SELECT_XCC, // Select between two values using the current XCC flags.
SELECT_FCC, // Select between two values using the current FCC flags.
Hi, Lo, // Hi/Lo operations, typically on a global address.
@@ -42,6 +46,7 @@ namespace llvm {
}
class SparcTargetLowering : public TargetLowering {
+ const SparcSubtarget *Subtarget;
public:
SparcTargetLowering(TargetMachine &TM);
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@@ -66,6 +71,7 @@ namespace llvm {
getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+ virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
virtual SDValue
LowerFormalArguments(SDValue Chain,
@@ -74,10 +80,26 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerFormalArguments_32(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerFormalArguments_64(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerReturn(SDValue Chain,
@@ -85,11 +107,25 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
+ SDValue LowerReturn_32(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc DL, SelectionDAG &DAG) const;
+ SDValue LowerReturn_64(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc DL, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const;
+ SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const;
+ SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF,
+ SelectionDAG &DAG) const;
+ SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const;
};
} // end namespace llvm
diff --git a/lib/Target/Sparc/SparcInstr64Bit.td b/lib/Target/Sparc/SparcInstr64Bit.td
new file mode 100644
index 0000000..91805f9
--- /dev/null
+++ b/lib/Target/Sparc/SparcInstr64Bit.td
@@ -0,0 +1,333 @@
+//===-- SparcInstr64Bit.td - 64-bit instructions for Sparc Target ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains instruction definitions and patterns needed for 64-bit
+// code generation on SPARC v9.
+//
+// Some SPARC v9 instructions are defined in SparcInstrInfo.td because they can
+// also be used in 32-bit code running on a SPARC v9 CPU.
+//
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+// The same integer registers are used for i32 and i64 values.
+// When registers hold i32 values, the high bits are don't-cares.
+// This gives us free trunc and anyext.
+def : Pat<(i64 (anyext i32:$val)), (COPY_TO_REGCLASS $val, I64Regs)>;
+def : Pat<(i32 (trunc i64:$val)), (COPY_TO_REGCLASS $val, IntRegs)>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Shift Instructions.
+//===----------------------------------------------------------------------===//
+//
+// The 32-bit shift instructions are still available. The left-shift sll
+// instruction shifts all 64 bits, but it only accepts a 5-bit shift amount.
+//
+// The srl instruction only shifts the low 32 bits and clears the high 32 bits.
+// Finally, sra shifts the low 32 bits and sign-extends the result to 64 bits.
+
+let Predicates = [Is64Bit] in {
+
+def : Pat<(i64 (zext i32:$val)), (SRLri $val, 0)>;
+def : Pat<(i64 (sext i32:$val)), (SRAri $val, 0)>;
+
+def : Pat<(i64 (and i64:$val, 0xffffffff)), (SRLri $val, 0)>;
+def : Pat<(i64 (sext_inreg i64:$val, i32)), (SRAri $val, 0)>;
+
+defm SLLX : F3_S<"sllx", 0b100101, 1, shl, i64, I64Regs>;
+defm SRLX : F3_S<"srlx", 0b100110, 1, srl, i64, I64Regs>;
+defm SRAX : F3_S<"srax", 0b100111, 1, sra, i64, I64Regs>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Immediates.
+//===----------------------------------------------------------------------===//
+//
+// All 32-bit immediates can be materialized with sethi+or, but 64-bit
+// immediates may require more code. There may be a point where it is
+// preferable to use a constant pool load instead, depending on the
+// microarchitecture.
+
+// The %g0 register is constant 0.
+// This is useful for stx %g0, [...], for example.
+def : Pat<(i64 0), (i64 G0)>, Requires<[Is64Bit]>;
+
+// Single-instruction patterns.
+
+// The ALU instructions want their simm13 operands as i32 immediates.
+def as_i32imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i32);
+}]>;
+def : Pat<(i64 simm13:$val), (ORri (i64 G0), (as_i32imm $val))>;
+def : Pat<(i64 SETHIimm:$val), (SETHIi (HI22 $val))>;
+
+// Double-instruction patterns.
+
+// All unsigned i32 immediates can be handled by sethi+or.
+def uimm32 : PatLeaf<(imm), [{ return isUInt<32>(N->getZExtValue()); }]>;
+def : Pat<(i64 uimm32:$val), (ORri (SETHIi (HI22 $val)), (LO10 $val))>,
+ Requires<[Is64Bit]>;
+
+// All negative i33 immediates can be handled by sethi+xor.
+def nimm33 : PatLeaf<(imm), [{
+ int64_t Imm = N->getSExtValue();
+ return Imm < 0 && isInt<33>(Imm);
+}]>;
+// Bits 10-31 inverted. Same as assembler's %hix.
+def HIX22 : SDNodeXForm<imm, [{
+ uint64_t Val = (~N->getZExtValue() >> 10) & ((1u << 22) - 1);
+ return CurDAG->getTargetConstant(Val, MVT::i32);
+}]>;
+// Bits 0-9 with ones in bits 10-31. Same as assembler's %lox.
+def LOX10 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(~(~N->getZExtValue() & 0x3ff), MVT::i32);
+}]>;
+def : Pat<(i64 nimm33:$val), (XORri (SETHIi (HIX22 $val)), (LOX10 $val))>,
+ Requires<[Is64Bit]>;
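
A standalone check of the sethi+xor recipe (our sketch): sethi places HIX22 in bits 31-10 with everything else zeroed, and xor with the sign-extended LOX10 operand (as XORri applies it) flips the result back to the negative immediate, all-ones high half included.

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t Imm = -0x87654321LL;  // negative and fits in 33 bits
      uint64_t S = ((~(uint64_t)Imm >> 10) & ((1u << 22) - 1)) << 10; // sethi %hix
      uint64_t L = ~(~(uint64_t)Imm & 0x3ff); // %lox, sign-extended simm13
      assert((int64_t)(S ^ L) == Imm);        // the two instructions suffice
      return 0;
    }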
+
+// More possible patterns:
+//
+// (sllx sethi, n)
+// (sllx simm13, n)
+//
+// 3 instrs:
+//
+// (xor (sllx sethi), simm13)
+// (sllx (xor sethi, simm13))
+//
+// 4 instrs:
+//
+// (or sethi, (sllx sethi))
+// (xnor sethi, (sllx sethi))
+//
+// 5 instrs:
+//
+// (or (sllx sethi), (or sethi, simm13))
+// (xnor (sllx sethi), (or sethi, simm13))
+// (or (sllx sethi), (sllx sethi))
+// (xnor (sllx sethi), (sllx sethi))
+//
+// Worst case is 6 instrs:
+//
+// (or (sllx (or sethi, simm13)), (or sethi, simm13))
+
+// Bits 42-63, same as assembler's %hh.
+def HH22 : SDNodeXForm<imm, [{
+ uint64_t Val = (N->getZExtValue() >> 42) & ((1u << 22) - 1);
+ return CurDAG->getTargetConstant(Val, MVT::i32);
+}]>;
+// Bits 32-41, same as assembler's %hm.
+def HM10 : SDNodeXForm<imm, [{
+ uint64_t Val = (N->getZExtValue() >> 32) & ((1u << 10) - 1);
+ return CurDAG->getTargetConstant(Val, MVT::i32);
+}]>;
+def : Pat<(i64 imm:$val),
+ (ORrr (SLLXri (ORri (SETHIi (HH22 $val)), (HM10 $val)), (i32 32)),
+ (ORri (SETHIi (HI22 $val)), (LO10 $val)))>,
+ Requires<[Is64Bit]>;
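
And a check of the four-field split behind this generic worst-case pattern (our sketch): each 32-bit half is built with sethi+or, then the halves are merged with sllx+or.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Val = 0x123456789abcdef0ULL;          // arbitrary immediate
      uint64_t HH = (Val >> 42) & ((1u << 22) - 1);  // %hh
      uint64_t HM = (Val >> 32) & ((1u << 10) - 1);  // %hm
      uint64_t HI = (Val >> 10) & ((1u << 22) - 1);  // %hi
      uint64_t LO = Val & 0x3ff;                     // %lo
      uint64_t Hi32 = (HH << 10) | HM;               // sethi %hh; or %hm
      uint64_t Lo32 = (HI << 10) | LO;               // sethi %hi; or %lo
      assert(((Hi32 << 32) | Lo32) == Val);          // sllx 32; or
      return 0;
    }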
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Integer Arithmetic and Logic.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+
+// Register-register instructions.
+
+def : Pat<(and i64:$a, i64:$b), (ANDrr $a, $b)>;
+def : Pat<(or i64:$a, i64:$b), (ORrr $a, $b)>;
+def : Pat<(xor i64:$a, i64:$b), (XORrr $a, $b)>;
+
+def : Pat<(and i64:$a, (not i64:$b)), (ANDNrr $a, $b)>;
+def : Pat<(or i64:$a, (not i64:$b)), (ORNrr $a, $b)>;
+def : Pat<(xor i64:$a, (not i64:$b)), (XNORrr $a, $b)>;
+
+def : Pat<(add i64:$a, i64:$b), (ADDrr $a, $b)>;
+def : Pat<(sub i64:$a, i64:$b), (SUBrr $a, $b)>;
+
+// Add/sub with carry were renamed to addc/subc in SPARC v9.
+def : Pat<(adde i64:$a, i64:$b), (ADDXrr $a, $b)>;
+def : Pat<(sube i64:$a, i64:$b), (SUBXrr $a, $b)>;
+
+def : Pat<(addc i64:$a, i64:$b), (ADDCCrr $a, $b)>;
+def : Pat<(subc i64:$a, i64:$b), (SUBCCrr $a, $b)>;
+
+def : Pat<(SPcmpicc i64:$a, i64:$b), (SUBCCrr $a, $b)>;
+
+// Register-immediate instructions.
+
+def : Pat<(and i64:$a, (i64 simm13:$b)), (ANDri $a, (as_i32imm $b))>;
+def : Pat<(or i64:$a, (i64 simm13:$b)), (ORri $a, (as_i32imm $b))>;
+def : Pat<(xor i64:$a, (i64 simm13:$b)), (XORri $a, (as_i32imm $b))>;
+
+def : Pat<(add i64:$a, (i64 simm13:$b)), (ADDri $a, (as_i32imm $b))>;
+def : Pat<(sub i64:$a, (i64 simm13:$b)), (SUBri $a, (as_i32imm $b))>;
+
+def : Pat<(SPcmpicc i64:$a, (i64 simm13:$b)), (SUBCCri $a, (as_i32imm $b))>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Integer Multiply and Divide.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+
+def MULXrr : F3_1<2, 0b001001,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2),
+ "mulx $rs1, $rs2, $rd",
+ [(set i64:$rd, (mul i64:$rs1, i64:$rs2))]>;
+def MULXri : F3_2<2, 0b001001,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$i),
+ "mulx $rs1, $i, $rd",
+ [(set i64:$rd, (mul i64:$rs1, (i64 simm13:$i)))]>;
+
+// Division can trap.
+let hasSideEffects = 1 in {
+def SDIVXrr : F3_1<2, 0b101101,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2),
+ "sdivx $rs1, $rs2, $rd",
+ [(set i64:$rd, (sdiv i64:$rs1, i64:$rs2))]>;
+def SDIVXri : F3_2<2, 0b101101,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$i),
+ "sdivx $rs1, $i, $rd",
+ [(set i64:$rd, (sdiv i64:$rs1, (i64 simm13:$i)))]>;
+
+def UDIVXrr : F3_1<2, 0b001101,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2),
+ "udivx $rs1, $rs2, $rd",
+ [(set i64:$rd, (udiv i64:$rs1, i64:$rs2))]>;
+def UDIVXri : F3_2<2, 0b001101,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$i),
+ "udivx $rs1, $i, $rd",
+ [(set i64:$rd, (udiv i64:$rs1, (i64 simm13:$i)))]>;
+} // hasSideEffects = 1
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Loads and Stores.
+//===----------------------------------------------------------------------===//
+//
+// All the 32-bit loads and stores are available. The extending loads are sign
+// or zero-extending to 64 bits. The LDrr and LDri instructions load 32 bits
+// zero-extended to i64. Their mnemonic is lduw in SPARC v9 (Load Unsigned
+// Word).
+//
+// SPARC v9 adds 64-bit loads as well as a sign-extending ldsw i32 load.
+
+let Predicates = [Is64Bit] in {
+
+// 64-bit loads.
+def LDXrr : F3_1<3, 0b001011,
+ (outs I64Regs:$dst), (ins MEMrr:$addr),
+ "ldx [$addr], $dst",
+ [(set i64:$dst, (load ADDRrr:$addr))]>;
+def LDXri : F3_2<3, 0b001011,
+ (outs I64Regs:$dst), (ins MEMri:$addr),
+ "ldx [$addr], $dst",
+ [(set i64:$dst, (load ADDRri:$addr))]>;
+
+// Extending loads to i64.
+def : Pat<(i64 (zextloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+def : Pat<(i64 (sextloadi8 ADDRrr:$addr)), (LDSBrr ADDRrr:$addr)>;
+def : Pat<(i64 (sextloadi8 ADDRri:$addr)), (LDSBri ADDRri:$addr)>;
+
+def : Pat<(i64 (zextloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>;
+def : Pat<(i64 (sextloadi16 ADDRrr:$addr)), (LDSHrr ADDRrr:$addr)>;
+def : Pat<(i64 (sextloadi16 ADDRri:$addr)), (LDSHri ADDRri:$addr)>;
+
+def : Pat<(i64 (zextloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>;
+
+// Sign-extending load of i32 into i64 is a new SPARC v9 instruction.
+def LDSWrr : F3_1<3, 0b001011,
+ (outs I64Regs:$dst), (ins MEMrr:$addr),
+ "ldsw [$addr], $dst",
+ [(set i64:$dst, (sextloadi32 ADDRrr:$addr))]>;
+def LDSWri : F3_2<3, 0b001011,
+ (outs I64Regs:$dst), (ins MEMri:$addr),
+ "ldsw [$addr], $dst",
+ [(set i64:$dst, (sextloadi32 ADDRri:$addr))]>;
+
+// 64-bit stores.
+def STXrr : F3_1<3, 0b001110,
+ (outs), (ins MEMrr:$addr, I64Regs:$src),
+ "stx $src, [$addr]",
+ [(store i64:$src, ADDRrr:$addr)]>;
+def STXri : F3_2<3, 0b001110,
+ (outs), (ins MEMri:$addr, I64Regs:$src),
+ "stx $src, [$addr]",
+ [(store i64:$src, ADDRri:$addr)]>;
+
+// Truncating stores from i64 are identical to the i32 stores.
+def : Pat<(truncstorei8 i64:$src, ADDRrr:$addr), (STBrr ADDRrr:$addr, $src)>;
+def : Pat<(truncstorei8 i64:$src, ADDRri:$addr), (STBri ADDRri:$addr, $src)>;
+def : Pat<(truncstorei16 i64:$src, ADDRrr:$addr), (STHrr ADDRrr:$addr, $src)>;
+def : Pat<(truncstorei16 i64:$src, ADDRri:$addr), (STHri ADDRri:$addr, $src)>;
+def : Pat<(truncstorei32 i64:$src, ADDRrr:$addr), (STrr ADDRrr:$addr, $src)>;
+def : Pat<(truncstorei32 i64:$src, ADDRri:$addr), (STri ADDRri:$addr, $src)>;
+
+} // Predicates = [Is64Bit]
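
The lduw/ldsw distinction above, restated as a C++ sketch (ours): consuming a 32-bit memory word as an i64 either zero-extends (lduw, i.e. the LDrr/LDri patterns) or sign-extends (the new ldsw).

    #include <cstdint>

    uint64_t lduwSketch(const uint32_t *P) { return *P; } // zero-extends
    int64_t  ldswSketch(const int32_t  *P) { return *P; } // sign-extends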
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Conditionals.
+//===----------------------------------------------------------------------===//
+//
+// Flag-setting instructions like subcc and addcc set both icc and xcc flags.
+// The icc flags correspond to the 32-bit result, and the xcc flags to the
+// full 64-bit result.
+//
+// We reuse CMPICC SDNodes for compares, but use new BRXCC branch nodes for
+// 64-bit compares. See LowerBR_CC.
+
+let Predicates = [Is64Bit] in {
+
+let Uses = [ICC] in
+def BPXCC : BranchSP<0, (ins brtarget:$dst, CCOp:$cc),
+ "bp$cc %xcc, $dst",
+ [(SPbrxcc bb:$dst, imm:$cc)]>;
+
+// Conditional moves on %xcc.
+let Uses = [ICC], Constraints = "$f = $rd" in {
+def MOVXCCrr : Pseudo<(outs IntRegs:$rd),
+ (ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond %xcc, $rs2, $rd",
+ [(set i32:$rd,
+ (SPselectxcc i32:$rs2, i32:$f, imm:$cond))]>;
+def MOVXCCri : Pseudo<(outs IntRegs:$rd),
+ (ins i32imm:$i, IntRegs:$f, CCOp:$cond),
+ "mov$cond %xcc, $i, $rd",
+ [(set i32:$rd,
+                                   (SPselectxcc simm11:$i, i32:$f, imm:$cond))]>;
+} // Uses, Constraints
+
+def : Pat<(SPselectxcc i64:$t, i64:$f, imm:$cond),
+ (MOVXCCrr $t, $f, imm:$cond)>;
+def : Pat<(SPselectxcc (i64 simm11:$t), i64:$f, imm:$cond),
+ (MOVXCCri (as_i32imm $t), $f, imm:$cond)>;
+
+} // Predicates = [Is64Bit]
diff --git a/lib/Target/Sparc/SparcInstrFormats.td b/lib/Target/Sparc/SparcInstrFormats.td
index dce3312..e7fde08 100644
--- a/lib/Target/Sparc/SparcInstrFormats.td
+++ b/lib/Target/Sparc/SparcInstrFormats.td
@@ -111,4 +111,41 @@ class F3_3<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
let Inst{4-0} = rs2;
}
+// Shift by register rs2.
+class F3_Sr<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
+ string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ bit x = xVal; // 1 for 64-bit shifts.
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13} = 0; // i field = 0
+ let Inst{12} = x; // extended registers.
+ let Inst{4-0} = rs2;
+}
+// Shift by immediate.
+class F3_Si<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
+ string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ bit x = xVal; // 1 for 64-bit shifts.
+ bits<6> shcnt; // shcnt32 / shcnt64.
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13} = 1; // i field = 1
+ let Inst{12} = x; // extended registers.
+ let Inst{5-0} = shcnt;
+}
+
+// Define rr and ri shift instructions with patterns.
+multiclass F3_S<string OpcStr, bits<6> Op3Val, bit XVal, SDNode OpNode,
+ ValueType VT, RegisterClass RC> {
+ def rr : F3_Sr<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs, IntRegs:$rs2),
+ !strconcat(OpcStr, " $rs, $rs2, $rd"),
+ [(set VT:$rd, (OpNode VT:$rs, i32:$rs2))]>;
+ def ri : F3_Si<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs, i32imm:$shcnt),
+ !strconcat(OpcStr, " $rs, $shcnt, $rd"),
+ [(set VT:$rd, (OpNode VT:$rs, (i32 imm:$shcnt)))]>;
+}
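
For concreteness, a sketch (ours) of the instruction word an immediate sllx encodes to. The i, x, and shcnt fields follow F3_Si above; the op, rd, op3, and rs1 positions come from the base F3 format, which is not shown in this hunk, so treat them as assumed.

    #include <cstdint>

    uint32_t encodeSLLXri(uint32_t Rd, uint32_t Rs1, uint32_t ShCnt) {
      const uint32_t Op = 2, Op3 = 0x25;  // 0b100101, from the SLLX defm
      return (Op << 30) | (Rd << 25) | (Op3 << 19) | (Rs1 << 14) |
             (1u << 13) |                 // i = 1: immediate shift count
             (1u << 12) |                 // x = 1: 64-bit (extended) shift
             (ShCnt & 0x3f);              // 6-bit shcnt64
    }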
diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td
index 90b698d..baefb06 100644
--- a/lib/Target/Sparc/SparcInstrInfo.td
+++ b/lib/Target/Sparc/SparcInstrInfo.td
@@ -21,6 +21,12 @@ include "SparcInstrFormats.td"
// Feature predicates.
//===----------------------------------------------------------------------===//
+// True when generating 32-bit code.
+def Is32Bit : Predicate<"!Subtarget.is64Bit()">;
+
+// True when generating 64-bit code. This also implies HasV9.
+def Is64Bit : Predicate<"Subtarget.is64Bit()">;
+
// HasV9 - This predicate is true when the target processor supports V9
// instructions. Note that the machine may be running in 32-bit mode.
def HasV9 : Predicate<"Subtarget.isV9()">;
@@ -58,22 +64,21 @@ def HI22 : SDNodeXForm<imm, [{
}]>;
def SETHIimm : PatLeaf<(imm), [{
- return (((unsigned)N->getZExtValue() >> 10) << 10) ==
- (unsigned)N->getZExtValue();
+ return isShiftedUInt<22, 10>(N->getZExtValue());
}], HI22>;
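
Equivalently (our sketch): isShiftedUInt<22, 10> accepts exactly the values whose set bits all lie in positions 31..10, i.e. what a single sethi can materialize.

    #include <cstdint>

    bool isSethiImmSketch(uint64_t V) {
      return (V & ~0xFFFFFC00ULL) == 0; // no bits below 10 or above 31
    }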
// Addressing modes.
-def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
-def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex], []>;
+def ADDRrr : ComplexPattern<iPTR, 2, "SelectADDRrr", [], []>;
+def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
// Address operands
-def MEMrr : Operand<i32> {
+def MEMrr : Operand<iPTR> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops IntRegs, IntRegs);
+ let MIOperandInfo = (ops ptr_rc, ptr_rc);
}
-def MEMri : Operand<i32> {
+def MEMri : Operand<iPTR> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops IntRegs, i32imm);
+ let MIOperandInfo = (ops ptr_rc, i32imm);
}
// Branch targets have OtherVT type.
@@ -98,6 +103,7 @@ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
def SPcmpicc : SDNode<"SPISD::CMPICC", SDTIntBinOp, [SDNPOutGlue]>;
def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutGlue]>;
def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+def SPbrxcc : SDNode<"SPISD::BRXCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
def SPbrfcc : SDNode<"SPISD::BRFCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
def SPhi : SDNode<"SPISD::Hi", SDTIntUnaryOp>;
@@ -107,6 +113,7 @@ def SPftoi : SDNode<"SPISD::FTOI", SDTSPFTOI>;
def SPitof : SDNode<"SPISD::ITOF", SDTSPITOF>;
def SPselecticc : SDNode<"SPISD::SELECT_ICC", SDTSPselectcc, [SDNPInGlue]>;
+def SPselectxcc : SDNode<"SPISD::SELECT_XCC", SDTSPselectcc, [SDNPInGlue]>;
def SPselectfcc : SDNode<"SPISD::SELECT_FCC", SDTSPselectcc, [SDNPInGlue]>;
// These are target-independent nodes, but have target-specific formats.
@@ -182,11 +189,11 @@ multiclass F3_12<string OpcStr, bits<6> Op3Val, SDNode OpNode> {
def rr : F3_1<2, Op3Val,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat(OpcStr, " $b, $c, $dst"),
- [(set IntRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ [(set i32:$dst, (OpNode i32:$b, i32:$c))]>;
def ri : F3_2<2, Op3Val,
(outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c),
!strconcat(OpcStr, " $b, $c, $dst"),
- [(set IntRegs:$dst, (OpNode IntRegs:$b, simm13:$c))]>;
+ [(set i32:$dst, (OpNode i32:$b, (i32 simm13:$c)))]>;
}
/// F3_12np multiclass - Define a normal F3_1/F3_2 pattern in one shot, with no
@@ -243,10 +250,10 @@ let Predicates = [HasNoV9] in { // Only emit these in V8 mode.
"!FpMOVD $src, $dst", []>;
def FpNEGD : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$src),
"!FpNEGD $src, $dst",
- [(set DFPRegs:$dst, (fneg DFPRegs:$src))]>;
+ [(set f64:$dst, (fneg f64:$src))]>;
def FpABSD : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$src),
"!FpABSD $src, $dst",
- [(set DFPRegs:$dst, (fabs DFPRegs:$src))]>;
+ [(set f64:$dst, (fabs f64:$src))]>;
}
// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
@@ -257,19 +264,16 @@ let Uses = [ICC], usesCustomInserter = 1 in {
def SELECT_CC_Int_ICC
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
"; SELECT_CC_Int_ICC PSEUDO!",
- [(set IntRegs:$dst, (SPselecticc IntRegs:$T, IntRegs:$F,
- imm:$Cond))]>;
+ [(set i32:$dst, (SPselecticc i32:$T, i32:$F, imm:$Cond))]>;
def SELECT_CC_FP_ICC
: Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond),
"; SELECT_CC_FP_ICC PSEUDO!",
- [(set FPRegs:$dst, (SPselecticc FPRegs:$T, FPRegs:$F,
- imm:$Cond))]>;
+ [(set f32:$dst, (SPselecticc f32:$T, f32:$F, imm:$Cond))]>;
def SELECT_CC_DFP_ICC
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
"; SELECT_CC_DFP_ICC PSEUDO!",
- [(set DFPRegs:$dst, (SPselecticc DFPRegs:$T, DFPRegs:$F,
- imm:$Cond))]>;
+ [(set f64:$dst, (SPselecticc f64:$T, f64:$F, imm:$Cond))]>;
}
let usesCustomInserter = 1, Uses = [FCC] in {
@@ -277,19 +281,16 @@ let usesCustomInserter = 1, Uses = [FCC] in {
def SELECT_CC_Int_FCC
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
"; SELECT_CC_Int_FCC PSEUDO!",
- [(set IntRegs:$dst, (SPselectfcc IntRegs:$T, IntRegs:$F,
- imm:$Cond))]>;
+ [(set i32:$dst, (SPselectfcc i32:$T, i32:$F, imm:$Cond))]>;
def SELECT_CC_FP_FCC
: Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond),
"; SELECT_CC_FP_FCC PSEUDO!",
- [(set FPRegs:$dst, (SPselectfcc FPRegs:$T, FPRegs:$F,
- imm:$Cond))]>;
+ [(set f32:$dst, (SPselectfcc f32:$T, f32:$F, imm:$Cond))]>;
def SELECT_CC_DFP_FCC
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
"; SELECT_CC_DFP_FCC PSEUDO!",
- [(set DFPRegs:$dst, (SPselectfcc DFPRegs:$T, DFPRegs:$F,
- imm:$Cond))]>;
+ [(set f64:$dst, (SPselectfcc f64:$T, f64:$F, imm:$Cond))]>;
}
@@ -309,111 +310,111 @@ let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1 in {
def LDSBrr : F3_1<3, 0b001001,
(outs IntRegs:$dst), (ins MEMrr:$addr),
"ldsb [$addr], $dst",
- [(set IntRegs:$dst, (sextloadi8 ADDRrr:$addr))]>;
+ [(set i32:$dst, (sextloadi8 ADDRrr:$addr))]>;
def LDSBri : F3_2<3, 0b001001,
(outs IntRegs:$dst), (ins MEMri:$addr),
"ldsb [$addr], $dst",
- [(set IntRegs:$dst, (sextloadi8 ADDRri:$addr))]>;
+ [(set i32:$dst, (sextloadi8 ADDRri:$addr))]>;
def LDSHrr : F3_1<3, 0b001010,
(outs IntRegs:$dst), (ins MEMrr:$addr),
"ldsh [$addr], $dst",
- [(set IntRegs:$dst, (sextloadi16 ADDRrr:$addr))]>;
+ [(set i32:$dst, (sextloadi16 ADDRrr:$addr))]>;
def LDSHri : F3_2<3, 0b001010,
(outs IntRegs:$dst), (ins MEMri:$addr),
"ldsh [$addr], $dst",
- [(set IntRegs:$dst, (sextloadi16 ADDRri:$addr))]>;
+ [(set i32:$dst, (sextloadi16 ADDRri:$addr))]>;
def LDUBrr : F3_1<3, 0b000001,
(outs IntRegs:$dst), (ins MEMrr:$addr),
"ldub [$addr], $dst",
- [(set IntRegs:$dst, (zextloadi8 ADDRrr:$addr))]>;
+ [(set i32:$dst, (zextloadi8 ADDRrr:$addr))]>;
def LDUBri : F3_2<3, 0b000001,
(outs IntRegs:$dst), (ins MEMri:$addr),
"ldub [$addr], $dst",
- [(set IntRegs:$dst, (zextloadi8 ADDRri:$addr))]>;
+ [(set i32:$dst, (zextloadi8 ADDRri:$addr))]>;
def LDUHrr : F3_1<3, 0b000010,
(outs IntRegs:$dst), (ins MEMrr:$addr),
"lduh [$addr], $dst",
- [(set IntRegs:$dst, (zextloadi16 ADDRrr:$addr))]>;
+ [(set i32:$dst, (zextloadi16 ADDRrr:$addr))]>;
def LDUHri : F3_2<3, 0b000010,
(outs IntRegs:$dst), (ins MEMri:$addr),
"lduh [$addr], $dst",
- [(set IntRegs:$dst, (zextloadi16 ADDRri:$addr))]>;
+ [(set i32:$dst, (zextloadi16 ADDRri:$addr))]>;
def LDrr : F3_1<3, 0b000000,
(outs IntRegs:$dst), (ins MEMrr:$addr),
"ld [$addr], $dst",
- [(set IntRegs:$dst, (load ADDRrr:$addr))]>;
+ [(set i32:$dst, (load ADDRrr:$addr))]>;
def LDri : F3_2<3, 0b000000,
(outs IntRegs:$dst), (ins MEMri:$addr),
"ld [$addr], $dst",
- [(set IntRegs:$dst, (load ADDRri:$addr))]>;
+ [(set i32:$dst, (load ADDRri:$addr))]>;
// Section B.2 - Load Floating-point Instructions, p. 92
def LDFrr : F3_1<3, 0b100000,
(outs FPRegs:$dst), (ins MEMrr:$addr),
"ld [$addr], $dst",
- [(set FPRegs:$dst, (load ADDRrr:$addr))]>;
+ [(set f32:$dst, (load ADDRrr:$addr))]>;
def LDFri : F3_2<3, 0b100000,
(outs FPRegs:$dst), (ins MEMri:$addr),
"ld [$addr], $dst",
- [(set FPRegs:$dst, (load ADDRri:$addr))]>;
+ [(set f32:$dst, (load ADDRri:$addr))]>;
def LDDFrr : F3_1<3, 0b100011,
(outs DFPRegs:$dst), (ins MEMrr:$addr),
"ldd [$addr], $dst",
- [(set DFPRegs:$dst, (load ADDRrr:$addr))]>;
+ [(set f64:$dst, (load ADDRrr:$addr))]>;
def LDDFri : F3_2<3, 0b100011,
(outs DFPRegs:$dst), (ins MEMri:$addr),
"ldd [$addr], $dst",
- [(set DFPRegs:$dst, (load ADDRri:$addr))]>;
+ [(set f64:$dst, (load ADDRri:$addr))]>;
// Section B.4 - Store Integer Instructions, p. 95
def STBrr : F3_1<3, 0b000101,
(outs), (ins MEMrr:$addr, IntRegs:$src),
"stb $src, [$addr]",
- [(truncstorei8 IntRegs:$src, ADDRrr:$addr)]>;
+ [(truncstorei8 i32:$src, ADDRrr:$addr)]>;
def STBri : F3_2<3, 0b000101,
(outs), (ins MEMri:$addr, IntRegs:$src),
"stb $src, [$addr]",
- [(truncstorei8 IntRegs:$src, ADDRri:$addr)]>;
+ [(truncstorei8 i32:$src, ADDRri:$addr)]>;
def STHrr : F3_1<3, 0b000110,
(outs), (ins MEMrr:$addr, IntRegs:$src),
"sth $src, [$addr]",
- [(truncstorei16 IntRegs:$src, ADDRrr:$addr)]>;
+ [(truncstorei16 i32:$src, ADDRrr:$addr)]>;
def STHri : F3_2<3, 0b000110,
(outs), (ins MEMri:$addr, IntRegs:$src),
"sth $src, [$addr]",
- [(truncstorei16 IntRegs:$src, ADDRri:$addr)]>;
+ [(truncstorei16 i32:$src, ADDRri:$addr)]>;
def STrr : F3_1<3, 0b000100,
(outs), (ins MEMrr:$addr, IntRegs:$src),
"st $src, [$addr]",
- [(store IntRegs:$src, ADDRrr:$addr)]>;
+ [(store i32:$src, ADDRrr:$addr)]>;
def STri : F3_2<3, 0b000100,
(outs), (ins MEMri:$addr, IntRegs:$src),
"st $src, [$addr]",
- [(store IntRegs:$src, ADDRri:$addr)]>;
+ [(store i32:$src, ADDRri:$addr)]>;
// Section B.5 - Store Floating-point Instructions, p. 97
def STFrr : F3_1<3, 0b100100,
(outs), (ins MEMrr:$addr, FPRegs:$src),
"st $src, [$addr]",
- [(store FPRegs:$src, ADDRrr:$addr)]>;
+ [(store f32:$src, ADDRrr:$addr)]>;
def STFri : F3_2<3, 0b100100,
(outs), (ins MEMri:$addr, FPRegs:$src),
"st $src, [$addr]",
- [(store FPRegs:$src, ADDRri:$addr)]>;
+ [(store f32:$src, ADDRri:$addr)]>;
def STDFrr : F3_1<3, 0b100111,
(outs), (ins MEMrr:$addr, DFPRegs:$src),
"std $src, [$addr]",
- [(store DFPRegs:$src, ADDRrr:$addr)]>;
+ [(store f64:$src, ADDRrr:$addr)]>;
def STDFri : F3_2<3, 0b100111,
(outs), (ins MEMri:$addr, DFPRegs:$src),
"std $src, [$addr]",
- [(store DFPRegs:$src, ADDRri:$addr)]>;
+ [(store f64:$src, ADDRri:$addr)]>;
// Section B.9 - SETHI Instruction, p. 104
def SETHIi: F2_1<0b100,
(outs IntRegs:$dst), (ins i32imm:$src),
"sethi $src, $dst",
- [(set IntRegs:$dst, SETHIimm:$src)]>;
+ [(set i32:$dst, SETHIimm:$src)]>;
// Section B.10 - NOP Instruction, p. 105
// (It's a special case of SETHI)
@@ -426,7 +427,7 @@ defm AND : F3_12<"and", 0b000001, and>;
def ANDNrr : F3_1<2, 0b000101,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
"andn $b, $c, $dst",
- [(set IntRegs:$dst, (and IntRegs:$b, (not IntRegs:$c)))]>;
+ [(set i32:$dst, (and i32:$b, (not i32:$c)))]>;
def ANDNri : F3_2<2, 0b000101,
(outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c),
"andn $b, $c, $dst", []>;
@@ -436,7 +437,7 @@ defm OR : F3_12<"or", 0b000010, or>;
def ORNrr : F3_1<2, 0b000110,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
"orn $b, $c, $dst",
- [(set IntRegs:$dst, (or IntRegs:$b, (not IntRegs:$c)))]>;
+ [(set i32:$dst, (or i32:$b, (not i32:$c)))]>;
def ORNri : F3_2<2, 0b000110,
(outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c),
"orn $b, $c, $dst", []>;
@@ -445,7 +446,7 @@ defm XOR : F3_12<"xor", 0b000011, xor>;
def XNORrr : F3_1<2, 0b000111,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
"xnor $b, $c, $dst",
- [(set IntRegs:$dst, (not (xor IntRegs:$b, IntRegs:$c)))]>;
+ [(set i32:$dst, (not (xor i32:$b, i32:$c)))]>;
def XNORri : F3_2<2, 0b000111,
(outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c),
"xnor $b, $c, $dst", []>;
@@ -462,7 +463,7 @@ defm ADD : F3_12<"add", 0b000000, add>;
def LEA_ADDri : F3_2<2, 0b000000,
(outs IntRegs:$dst), (ins MEMri:$addr),
"add ${addr:arith}, $dst",
- [(set IntRegs:$dst, ADDRri:$addr)]>;
+ [(set i32:$dst, ADDRri:$addr)]>;
let Defs = [ICC] in
defm ADDCC : F3_12<"addcc", 0b010000, addc>;
@@ -603,11 +604,11 @@ def FDTOI : F3_3<2, 0b110100, 0b011010010,
def FSTOD : F3_3<2, 0b110100, 0b011001001,
(outs DFPRegs:$dst), (ins FPRegs:$src),
"fstod $src, $dst",
- [(set DFPRegs:$dst, (fextend FPRegs:$src))]>;
+ [(set f64:$dst, (fextend f32:$src))]>;
def FDTOS : F3_3<2, 0b110100, 0b011000110,
(outs FPRegs:$dst), (ins DFPRegs:$src),
"fdtos $src, $dst",
- [(set FPRegs:$dst, (fround DFPRegs:$src))]>;
+ [(set f32:$dst, (fround f64:$src))]>;
// Floating-point Move Instructions, p. 144
def FMOVS : F3_3<2, 0b110100, 0b000000001,
@@ -616,22 +617,22 @@ def FMOVS : F3_3<2, 0b110100, 0b000000001,
def FNEGS : F3_3<2, 0b110100, 0b000000101,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fnegs $src, $dst",
- [(set FPRegs:$dst, (fneg FPRegs:$src))]>;
+ [(set f32:$dst, (fneg f32:$src))]>;
def FABSS : F3_3<2, 0b110100, 0b000001001,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fabss $src, $dst",
- [(set FPRegs:$dst, (fabs FPRegs:$src))]>;
+ [(set f32:$dst, (fabs f32:$src))]>;
// Floating-point Square Root Instructions, p.145
def FSQRTS : F3_3<2, 0b110100, 0b000101001,
(outs FPRegs:$dst), (ins FPRegs:$src),
"fsqrts $src, $dst",
- [(set FPRegs:$dst, (fsqrt FPRegs:$src))]>;
+ [(set f32:$dst, (fsqrt f32:$src))]>;
def FSQRTD : F3_3<2, 0b110100, 0b000101010,
(outs DFPRegs:$dst), (ins DFPRegs:$src),
"fsqrtd $src, $dst",
- [(set DFPRegs:$dst, (fsqrt DFPRegs:$src))]>;
+ [(set f64:$dst, (fsqrt f64:$src))]>;
@@ -639,42 +640,42 @@ def FSQRTD : F3_3<2, 0b110100, 0b000101010,
def FADDS : F3_3<2, 0b110100, 0b001000001,
(outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fadds $src1, $src2, $dst",
- [(set FPRegs:$dst, (fadd FPRegs:$src1, FPRegs:$src2))]>;
+ [(set f32:$dst, (fadd f32:$src1, f32:$src2))]>;
def FADDD : F3_3<2, 0b110100, 0b001000010,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"faddd $src1, $src2, $dst",
- [(set DFPRegs:$dst, (fadd DFPRegs:$src1, DFPRegs:$src2))]>;
+ [(set f64:$dst, (fadd f64:$src1, f64:$src2))]>;
def FSUBS : F3_3<2, 0b110100, 0b001000101,
(outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fsubs $src1, $src2, $dst",
- [(set FPRegs:$dst, (fsub FPRegs:$src1, FPRegs:$src2))]>;
+ [(set f32:$dst, (fsub f32:$src1, f32:$src2))]>;
def FSUBD : F3_3<2, 0b110100, 0b001000110,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"fsubd $src1, $src2, $dst",
- [(set DFPRegs:$dst, (fsub DFPRegs:$src1, DFPRegs:$src2))]>;
+ [(set f64:$dst, (fsub f64:$src1, f64:$src2))]>;
// Floating-point Multiply and Divide Instructions, p. 147
def FMULS : F3_3<2, 0b110100, 0b001001001,
(outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fmuls $src1, $src2, $dst",
- [(set FPRegs:$dst, (fmul FPRegs:$src1, FPRegs:$src2))]>;
+ [(set f32:$dst, (fmul f32:$src1, f32:$src2))]>;
def FMULD : F3_3<2, 0b110100, 0b001001010,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"fmuld $src1, $src2, $dst",
- [(set DFPRegs:$dst, (fmul DFPRegs:$src1, DFPRegs:$src2))]>;
+ [(set f64:$dst, (fmul f64:$src1, f64:$src2))]>;
def FSMULD : F3_3<2, 0b110100, 0b001101001,
(outs DFPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fsmuld $src1, $src2, $dst",
- [(set DFPRegs:$dst, (fmul (fextend FPRegs:$src1),
- (fextend FPRegs:$src2)))]>;
+ [(set f64:$dst, (fmul (fextend f32:$src1),
+ (fextend f32:$src2)))]>;
def FDIVS : F3_3<2, 0b110100, 0b001001101,
(outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2),
"fdivs $src1, $src2, $dst",
- [(set FPRegs:$dst, (fdiv FPRegs:$src1, FPRegs:$src2))]>;
+ [(set f32:$dst, (fdiv f32:$src1, f32:$src2))]>;
def FDIVD : F3_3<2, 0b110100, 0b001001110,
(outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2),
"fdivd $src1, $src2, $dst",
- [(set DFPRegs:$dst, (fdiv DFPRegs:$src1, DFPRegs:$src2))]>;
+ [(set f64:$dst, (fdiv f64:$src1, f64:$src2))]>;
// Floating-point Compare Instructions, p. 148
// Note: the 2nd template arg is different for these guys.
@@ -685,11 +686,11 @@ let Defs = [FCC] in {
def FCMPS : F3_3<2, 0b110101, 0b001010001,
(outs), (ins FPRegs:$src1, FPRegs:$src2),
"fcmps $src1, $src2\n\tnop",
- [(SPcmpfcc FPRegs:$src1, FPRegs:$src2)]>;
+ [(SPcmpfcc f32:$src1, f32:$src2)]>;
def FCMPD : F3_3<2, 0b110101, 0b001010010,
(outs), (ins DFPRegs:$src1, DFPRegs:$src2),
"fcmpd $src1, $src2\n\tnop",
- [(SPcmpfcc DFPRegs:$src1, DFPRegs:$src2)]>;
+ [(SPcmpfcc f64:$src1, f64:$src2)]>;
}
//===----------------------------------------------------------------------===//
@@ -704,52 +705,45 @@ let Predicates = [HasV9], Constraints = "$T = $dst" in {
def MOVICCrr
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
"mov$cc %icc, $F, $dst",
- [(set IntRegs:$dst,
- (SPselecticc IntRegs:$F, IntRegs:$T, imm:$cc))]>;
+ [(set i32:$dst, (SPselecticc i32:$F, i32:$T, imm:$cc))]>;
def MOVICCri
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
"mov$cc %icc, $F, $dst",
- [(set IntRegs:$dst,
- (SPselecticc simm11:$F, IntRegs:$T, imm:$cc))]>;
+ [(set i32:$dst, (SPselecticc simm11:$F, i32:$T, imm:$cc))]>;
}
let Uses = [FCC] in {
def MOVFCCrr
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
"mov$cc %fcc0, $F, $dst",
- [(set IntRegs:$dst,
- (SPselectfcc IntRegs:$F, IntRegs:$T, imm:$cc))]>;
+ [(set i32:$dst, (SPselectfcc i32:$F, i32:$T, imm:$cc))]>;
def MOVFCCri
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
"mov$cc %fcc0, $F, $dst",
- [(set IntRegs:$dst,
- (SPselectfcc simm11:$F, IntRegs:$T, imm:$cc))]>;
+ [(set i32:$dst, (SPselectfcc simm11:$F, i32:$T, imm:$cc))]>;
}
let Uses = [ICC] in {
def FMOVS_ICC
: Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
"fmovs$cc %icc, $F, $dst",
- [(set FPRegs:$dst,
- (SPselecticc FPRegs:$F, FPRegs:$T, imm:$cc))]>;
+ [(set f32:$dst,
+ (SPselecticc f32:$F, f32:$T, imm:$cc))]>;
def FMOVD_ICC
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
"fmovd$cc %icc, $F, $dst",
- [(set DFPRegs:$dst,
- (SPselecticc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>;
+ [(set f64:$dst, (SPselecticc f64:$F, f64:$T, imm:$cc))]>;
}
let Uses = [FCC] in {
def FMOVS_FCC
: Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
"fmovs$cc %fcc0, $F, $dst",
- [(set FPRegs:$dst,
- (SPselectfcc FPRegs:$F, FPRegs:$T, imm:$cc))]>;
+ [(set f32:$dst, (SPselectfcc f32:$F, f32:$T, imm:$cc))]>;
def FMOVD_FCC
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
"fmovd$cc %fcc0, $F, $dst",
- [(set DFPRegs:$dst,
- (SPselectfcc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>;
+ [(set f64:$dst, (SPselectfcc f64:$F, f64:$T, imm:$cc))]>;
}
}
@@ -762,11 +756,11 @@ let Predicates = [HasV9] in {
def FNEGD : F3_3<2, 0b110100, 0b000000110,
(outs DFPRegs:$dst), (ins DFPRegs:$src),
"fnegd $src, $dst",
- [(set DFPRegs:$dst, (fneg DFPRegs:$src))]>;
+ [(set f64:$dst, (fneg f64:$src))]>;
def FABSD : F3_3<2, 0b110100, 0b000001010,
(outs DFPRegs:$dst), (ins DFPRegs:$src),
"fabsd $src, $dst",
- [(set DFPRegs:$dst, (fabs DFPRegs:$src))]>;
+ [(set f64:$dst, (fabs f64:$src))]>;
}
// POPCrr - This does a ctpop of a 64-bit register. As such, we have to clear
@@ -774,8 +768,8 @@ let Predicates = [HasV9] in {
def POPCrr : F3_1<2, 0b101110,
(outs IntRegs:$dst), (ins IntRegs:$src),
"popc $src, $dst", []>, Requires<[HasV9]>;
-def : Pat<(ctpop IntRegs:$src),
- (POPCrr (SLLri IntRegs:$src, 0))>;
+def : Pat<(ctpop i32:$src),
+ (POPCrr (SLLri $src, 0))>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -783,28 +777,26 @@ def : Pat<(ctpop IntRegs:$src),
// Small immediates.
def : Pat<(i32 simm13:$val),
- (ORri G0, imm:$val)>;
+ (ORri (i32 G0), imm:$val)>;
// Arbitrary immediates.
def : Pat<(i32 imm:$val),
(ORri (SETHIi (HI22 imm:$val)), (LO10 imm:$val))>;
// subc
-def : Pat<(subc IntRegs:$b, IntRegs:$c),
- (SUBCCrr IntRegs:$b, IntRegs:$c)>;
-def : Pat<(subc IntRegs:$b, simm13:$val),
- (SUBCCri IntRegs:$b, imm:$val)>;
+def : Pat<(subc i32:$b, i32:$c),
+ (SUBCCrr $b, $c)>;
+def : Pat<(subc i32:$b, simm13:$val),
+ (SUBCCri $b, imm:$val)>;
// Global addresses, constant pool entries
def : Pat<(SPhi tglobaladdr:$in), (SETHIi tglobaladdr:$in)>;
-def : Pat<(SPlo tglobaladdr:$in), (ORri G0, tglobaladdr:$in)>;
+def : Pat<(SPlo tglobaladdr:$in), (ORri (i32 G0), tglobaladdr:$in)>;
def : Pat<(SPhi tconstpool:$in), (SETHIi tconstpool:$in)>;
-def : Pat<(SPlo tconstpool:$in), (ORri G0, tconstpool:$in)>;
+def : Pat<(SPlo tconstpool:$in), (ORri (i32 G0), tconstpool:$in)>;
// Add reg, lo. This is used when taking the addr of a global/constpool entry.
-def : Pat<(add IntRegs:$r, (SPlo tglobaladdr:$in)),
- (ADDri IntRegs:$r, tglobaladdr:$in)>;
-def : Pat<(add IntRegs:$r, (SPlo tconstpool:$in)),
- (ADDri IntRegs:$r, tconstpool:$in)>;
+def : Pat<(add iPTR:$r, (SPlo tglobaladdr:$in)), (ADDri $r, tglobaladdr:$in)>;
+def : Pat<(add iPTR:$r, (SPlo tconstpool:$in)), (ADDri $r, tconstpool:$in)>;
// Calls:
def : Pat<(call tglobaladdr:$dst),
@@ -823,3 +815,5 @@ def : Pat<(i32 (extloadi16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
// zextload bool -> zextload byte
def : Pat<(i32 (zextloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+
+include "SparcInstr64Bit.td"
diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp
index 25e90b7..3af4c61 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -56,6 +56,12 @@ BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
+const TargetRegisterClass*
+SparcRegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ return Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
+}
+
void
SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
@@ -68,8 +74,9 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Addressable stack objects are accessed using neg. offsets from %fp
MachineFunction &MF = *MI.getParent()->getParent();
- int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
- MI.getOperand(FIOperandNum + 1).getImm();
+ int64_t Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
+ MI.getOperand(FIOperandNum + 1).getImm() +
+ Subtarget.getStackPointerBias();
// Replace frame index with a frame pointer reference.
if (Offset >= -4096 && Offset <= 4095) {
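
The effect of the added bias term, sketched (ours): the 64-bit ABI keeps %sp and %fp 2047 bytes below the area they address, so every frame-index offset folded into a memory operand must include the bias.

    #include <cstdint>

    int64_t effectiveFPOffset(int64_t ObjectOffset, bool Is64Bit) {
      const int64_t Bias = Is64Bit ? 2047 : 0; // getStackPointerBias()
      return ObjectOffset + Bias;              // e.g. -8 -> [%fp + 2039]
    }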
diff --git a/lib/Target/Sparc/SparcRegisterInfo.h b/lib/Target/Sparc/SparcRegisterInfo.h
index b53a1ed..f91df53 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/lib/Target/Sparc/SparcRegisterInfo.h
@@ -36,6 +36,9 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
BitVector getReservedRegs(const MachineFunction &MF) const;
+ const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const;
+
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = NULL) const;
diff --git a/lib/Target/Sparc/SparcRegisterInfo.td b/lib/Target/Sparc/SparcRegisterInfo.td
index 81bff6c..497e7c5 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.td
+++ b/lib/Target/Sparc/SparcRegisterInfo.td
@@ -43,7 +43,7 @@ class Rd<bits<5> num, string n, list<Register> subregs> : SparcReg<n> {
}
// Control Registers
-def ICC : SparcCtrlReg<"ICC">;
+def ICC : SparcCtrlReg<"ICC">; // This represents icc and xcc in 64-bit code.
def FCC : SparcCtrlReg<"FCC">;
// Y register
@@ -140,7 +140,10 @@ def D15 : Rd<30, "F30", [F30, F31]>, DwarfRegNum<[87]>;
// FIXME: the register order should be defined in terms of the preferred
// allocation order...
//
-def IntRegs : RegisterClass<"SP", [i32], 32,
+// This register class should not be used to hold i64 values, use the I64Regs
+// register class for that. The i64 type is included here to allow i64 patterns
+// using the integer instructions.
+def IntRegs : RegisterClass<"SP", [i32, i64], 32,
(add L0, L1, L2, L3, L4, L5, L6,
L7, I0, I1, I2, I3, I4, I5,
O0, O1, O2, O3, O4, O5, O7,
@@ -155,6 +158,13 @@ def IntRegs : RegisterClass<"SP", [i32], 32,
G5, G6, G7 // reserved for kernel
)>;
+// Register class for 64-bit mode, with a 64-bit spill slot size.
+// These are the same as the 32-bit registers, so TableGen will consider this
+// to be a sub-class of IntRegs. That works out because requiring a 64-bit
+// spill slot is a stricter constraint than only requiring a 32-bit spill slot.
+def I64Regs : RegisterClass<"SP", [i64], 64, (add IntRegs)>;
+
+// Floating point register classes.
def FPRegs : RegisterClass<"SP", [f32], 32, (sequence "F%u", 0, 31)>;
def DFPRegs : RegisterClass<"SP", [f64], 64, (sequence "D%u", 0, 15)>;
diff --git a/lib/Target/Sparc/SparcSubtarget.h b/lib/Target/Sparc/SparcSubtarget.h
index a81931b..b94dd11 100644
--- a/lib/Target/Sparc/SparcSubtarget.h
+++ b/lib/Target/Sparc/SparcSubtarget.h
@@ -52,6 +52,12 @@ public:
}
return std::string(p);
}
+
+ /// The 64-bit ABI uses biased stack and frame pointers, so the stack frame
+ /// of the current function is the area from [%sp+BIAS] to [%fp+BIAS].
+ int64_t getStackPointerBias() const {
+ return is64Bit() ? 2047 : 0;
+ }
};
} // end namespace llvm