| author | Juergen Ributzka <juergen@apple.com> | 2013-11-08 23:28:16 +0000 |
|---|---|---|
| committer | Juergen Ributzka <juergen@apple.com> | 2013-11-08 23:28:16 +0000 |
| commit | 623d2e618f4e672c47edff9ec63ed6d733ac81d3 (patch) | |
| tree | b979de9c381f0ca66085b02e248b2fe3b9c50966 /lib/Target/X86 | |
| parent | d900b1179535298510490030a5d2ecce93f79eb0 (diff) | |
[Stackmap] Add AnyReg calling convention support for patchpoint intrinsic.
The idea of the AnyReg Calling Convention is to provide the call arguments in
registers, but not to force them to be placed in a particular order into a
specified set of registers. Instead it is up to the register allocator to
assign any register as it sees fit. The same applies to the return value (if
applicable).
Differential Revision: http://llvm-reviews.chandlerc.com/D2009
Reviewed by Andy
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194293 91177308-0d34-0410-b5e6-96231b3b80d8
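As an illustration of the convention (not part of the commit), here is a minimal sketch of an anyregcc patchpoint call in LLVM IR of this era. The intrinsic signature follows the contemporaneous StackMaps documentation; the function, the value names, the ID 42, and the 15 reserved bytes are made-up examples.

```llvm
; Hypothetical example: a 15-byte patchpoint that calls %target with two i64
; arguments under the AnyReg calling convention. The register allocator may
; place %obj, %idx, and the i64 result in any free registers; their actual
; locations are recorded in the stack map entry for ID 42.
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)

define i64 @anyreg_demo(i8* %target, i64 %obj, i64 %idx) {
entry:
  %result = call anyregcc i64 (i64, i32, i8*, i32, ...)*
      @llvm.experimental.patchpoint.i64(i64 42, i32 15, i8* %target,
                                        i32 2, i64 %obj, i64 %idx)
  ret i64 %result
}
```

In contrast to the WebKit_JS or C conventions, nothing here pins the arguments or the result to specific registers; the allocator's choices are communicated to the runtime through the generated stack map.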
Diffstat (limited to 'lib/Target/X86')
-rw-r--r-- | lib/Target/X86/X86CallingConv.h | 35
-rw-r--r-- | lib/Target/X86/X86CallingConv.td | 23
-rw-r--r-- | lib/Target/X86/X86FastISel.cpp | 1
-rw-r--r-- | lib/Target/X86/X86ISelLowering.cpp | 1
-rw-r--r-- | lib/Target/X86/X86InstrInfo.cpp | 10
-rw-r--r-- | lib/Target/X86/X86MCInstLower.cpp | 35
-rw-r--r-- | lib/Target/X86/X86RegisterInfo.cpp | 6
7 files changed, 101 insertions, 10 deletions
diff --git a/lib/Target/X86/X86CallingConv.h b/lib/Target/X86/X86CallingConv.h
new file mode 100644
index 0000000..e76f9fd
--- /dev/null
+++ b/lib/Target/X86/X86CallingConv.h
@@ -0,0 +1,35 @@
+//=== X86CallingConv.h - X86 Custom Calling Convention Routines -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the custom routines for the X86 Calling Convention that
+// aren't done by tablegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86CALLINGCONV_H
+#define X86CALLINGCONV_H
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/IR/CallingConv.h"
+
+namespace llvm {
+
+inline bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
+                                CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
+                                CCState &) {
+  llvm_unreachable("The AnyReg calling convention is only supported by the " \
+                   "stackmap and patchpoint intrinsics.");
+  // gracefully fallback to X86 C calling convention on Release builds.
+  return false;
+}
+
+} // End llvm namespace
+
+#endif
+
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index f5c8d9f..a78b5c0 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -160,6 +160,17 @@ def RetCC_X86_64_WebKit_JS : CallingConv<[
   CCIfType<[i64], CCAssignToReg<[RAX]>>
 ]>;
 
+// X86-64 AnyReg return-value convention. No explicit register is specified for
+// the return-value. The register allocator is allowed and expected to choose
+// any free register.
+//
+// This calling convention is currently only supported by the stackmap and
+// patchpoint intrinsics. All other uses will result in an assert on Debug
+// builds. On Release builds we fallback to the X86 C calling convention.
+def RetCC_X86_64_AnyReg : CallingConv<[
+  CCCustom<"CC_X86_AnyReg_Error">
+]>;
+
 // This is the root return-value convention for the X86-32 backend.
 def RetCC_X86_32 : CallingConv<[
   // If FastCC, use RetCC_X86_32_Fast.
@@ -178,6 +189,7 @@ def RetCC_X86_64 : CallingConv<[
 
   // Handle JavaScript calls.
   CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
+  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
 
   // Handle explicit CC selection
   CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
@@ -350,6 +362,16 @@ def CC_X86_64_WebKit_JS : CallingConv<[
   CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
 ]>;
 
+// No explicit register is specified for the AnyReg calling convention. The
+// register allocator may assign the arguments to any free register.
+//
+// This calling convention is currently only supported by the stackmap and
+// patchpoint intrinsics. All other uses will result in an assert on Debug
+// builds. On Release builds we fallback to the X86 C calling convention.
+def CC_X86_64_AnyReg : CallingConv<[
+  CCCustom<"CC_X86_AnyReg_Error">
+]>;
+
 //===----------------------------------------------------------------------===//
 // X86 C Calling Convention
 //===----------------------------------------------------------------------===//
@@ -542,6 +564,7 @@ def CC_X86_64 : CallingConv<[
   CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
   CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
   CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
+  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
   CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
   CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 7984e76..928dea9 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -14,6 +14,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "X86.h"
+#include "X86CallingConv.h"
 #include "X86ISelLowering.h"
 #include "X86InstrBuilder.h"
 #include "X86RegisterInfo.h"
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 86ad262..55bfab4 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -16,6 +16,7 @@
 #include "X86ISelLowering.h"
 #include "Utils/X86ShuffleDecode.h"
 #include "X86.h"
+#include "X86CallingConv.h"
 #include "X86InstrBuilder.h"
 #include "X86TargetMachine.h"
 #include "X86TargetObjectFile.h"
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 369b031..b81b244 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -4198,14 +4198,20 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
                                     const SmallVectorImpl<unsigned> &Ops,
                                     int FrameIndex,
                                     const TargetInstrInfo &TII) {
+  bool hasDef = MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
+                !MI->getOperand(0).isImplicit();
+  unsigned StartIdx = hasDef ? 1 : 0;
+
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(MI->getOpcode()),
                                               MI->getDebugLoc(), true);
   MachineInstrBuilder MIB(MF, NewMI);
 
   bool isPatchPoint = MI->getOpcode() == TargetOpcode::PATCHPOINT;
-  unsigned StartIdx = isPatchPoint ? MI->getOperand(3).getImm() + 4 : 2;
+  StartIdx = isPatchPoint ?
+    StartIdx + MI->getOperand(StartIdx+3).getImm() + 5 :
+    StartIdx + 2;
 
-  // No need to fold the meta data and function arguments
+  // No need to fold return, the meta data, and function arguments
   for (unsigned i = 0; i < StartIdx; ++i)
     MIB.addOperand(MI->getOperand(i));
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index 92a1118..fa15114 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -780,26 +780,45 @@ static void LowerSTACKMAP(MCStreamer &OutStreamer,
 static void LowerPATCHPOINT(MCStreamer &OutStreamer,
                             X86MCInstLower &MCInstLowering,
                             StackMaps &SM,
-                            const MachineInstr &MI)
-{
-  int64_t ID = MI.getOperand(0).getImm();
+                            const MachineInstr &MI) {
+  bool hasDef = MI.getOperand(0).isReg() && MI.getOperand(0).isDef() &&
+                !MI.getOperand(0).isImplicit();
+  unsigned StartIdx = hasDef ? 1 : 0;
+#ifndef NDEBUG
+  unsigned StartIdx2 = 0, e = MI.getNumOperands();
+  while (StartIdx2 < e && MI.getOperand(StartIdx2).isReg() &&
+         MI.getOperand(StartIdx2).isDef() &&
+         !MI.getOperand(StartIdx2).isImplicit())
+    ++StartIdx2;
+
+  assert(StartIdx == StartIdx2 &&
+         "Unexpected additonal definition in Patchpoint intrinsic.");
+#endif
+
+  int64_t ID = MI.getOperand(StartIdx).getImm();
   assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
 
   // Get the number of arguments participating in the call. This number was
   // adjusted during call lowering by subtracting stack args.
-  int64_t StackMapIdx = MI.getOperand(3).getImm() + 4;
-  assert(StackMapIdx <= MI.getNumOperands() && "Patchpoint dropped args.");
+  bool isAnyRegCC = MI.getOperand(StartIdx + 4).getImm() == CallingConv::AnyReg;
+  assert(((hasDef && isAnyRegCC) || !hasDef) &&
+         "Only Patchpoints with AnyReg calling convention may have a result");
+  int64_t StackMapIdx = isAnyRegCC ? StartIdx + 5 :
+    StartIdx + 5 + MI.getOperand(StartIdx + 3).getImm();
+  assert(StackMapIdx <= MI.getNumOperands() &&
+         "Patchpoint intrinsic dropped arguments.");
 
   SM.recordStackMap(MI, ID, llvm::next(MI.operands_begin(), StackMapIdx),
-                    getStackMapEndMOP(MI.operands_begin(), MI.operands_end()));
+                    getStackMapEndMOP(MI.operands_begin(), MI.operands_end()),
+                    isAnyRegCC && hasDef);
   // Emit call. We need to know how many bytes we encoded here.
   unsigned EncodedBytes = 2;
   OutStreamer.EmitInstruction(MCInstBuilder(X86::CALL64r)
-                              .addReg(MI.getOperand(2).getReg()));
+                              .addReg(MI.getOperand(StartIdx + 2).getReg()));
 
   // Emit padding.
-  unsigned NumNOPBytes = MI.getOperand(1).getImm();
+  unsigned NumNOPBytes = MI.getOperand(StartIdx + 1).getImm();
   assert(NumNOPBytes >= EncodedBytes &&
          "Patchpoint can't request size less than the length of a call.");
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 0cb9ac3..7598715 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -239,6 +239,10 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   case CallingConv::HiPE:
     return CSR_NoRegs_SaveList;
 
+  case CallingConv::WebKit_JS:
+  case CallingConv::AnyReg:
+    return CSR_MostRegs_64_SaveList;
+
   case CallingConv::Intel_OCL_BI: {
     bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
     bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
@@ -296,6 +300,8 @@ X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
   }
   if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
     return CSR_NoRegs_RegMask;
+  if (CC == CallingConv::WebKit_JS || CC == CallingConv::AnyReg)
+    return CSR_MostRegs_64_RegMask;
   if (!Is64Bit)
     return CSR_32_RegMask;
   if (CC == CallingConv::Cold)
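To make the StackMapIdx arithmetic in LowerPATCHPOINT above concrete, here is a second hedged IR-level sketch: per that computation, an anyregcc patchpoint records its call arguments in the stack map in addition to any trailing live values, while other conventions record only the operands that follow the call arguments. The names and constants below are hypothetical.

```llvm
; Hypothetical void patchpoint: %a and %b are the two call arguments (i32 2),
; and %live1/%live2 are extra live values appended after them. With anyregcc,
; %a, %b, %live1, and %live2 all appear in the stack map; with the default
; convention only %live1 and %live2 would.
declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)

define void @anyreg_void_demo(i8* %target, i64 %a, i64 %b,
                              i64 %live1, i64 %live2) {
entry:
  call anyregcc void (i64, i32, i8*, i32, ...)*
      @llvm.experimental.patchpoint.void(i64 43, i32 15, i8* %target,
                                         i32 2, i64 %a, i64 %b,
                                         i64 %live1, i64 %live2)
  ret void
}
```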