author     Andrew Trick <atrick@apple.com>    2013-10-31 22:11:56 +0000
committer  Andrew Trick <atrick@apple.com>    2013-10-31 22:11:56 +0000
commit     3d74dea4bddc84d1881efc21eb5eefbddbfa9aed (patch)
tree       76963fde5a977fb0fa931940c4db9351d916d0e4 /lib/Target/X86/X86MCInstLower.cpp
parent     53446e50a08451df106f76205c6081bf5a103c9f (diff)
Add support for stack map generation in the X86 backend.
Originally implemented by Lang Hames.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193811 91177308-0d34-0410-b5e6-96231b3b80d8
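
The patch routes the STACKMAP and PATCHPOINT pseudo-instructions through new LowerSTACKMAP and LowerPATCHPOINT helpers (see the diff below), which read their metadata from a fixed operand prefix by raw index. A minimal sketch of that layout; the constant names here are invented for illustration, only the positions come from the patch:

```cpp
// Operand positions read by the lowering code in the diff below.
// Names are illustrative; the patch itself uses raw getOperand() indices.

// STACKMAP: <id>, <numNOPBytes>, <live values...>
constexpr unsigned StackMapID       = 0; // 64-bit ID, asserted to fit in 32 bits
constexpr unsigned StackMapNumBytes = 1; // bytes of NOP padding to emit
constexpr unsigned StackMapLiveOps  = 2; // first live-value operand to record

// PATCHPOINT: <id>, <numBytes>, <target reg>, <numCallArgs>,
//             <call args...>, <live values...>
constexpr unsigned PatchPointID       = 0; // 64-bit ID, asserted to fit in 32 bits
constexpr unsigned PatchPointNumBytes = 1; // total bytes reserved for call + padding
constexpr unsigned PatchPointTarget   = 2; // register holding the call target
constexpr unsigned PatchPointNumArgs  = 3; // call-argument count; the live values
                                           // start at getOperand(3).getImm() + 4
```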
Diffstat (limited to 'lib/Target/X86/X86MCInstLower.cpp')
-rw-r--r--  lib/Target/X86/X86MCInstLower.cpp | 124
1 file changed, 124 insertions, 0 deletions
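
Every operand that the new parser records ends up as a StackMaps::Location, a (kind, register, offset-or-constant) triple. A simplified stand-in for that record, paraphrased from how the diff below constructs it (the real class lives in llvm/CodeGen/StackMaps.h and may carry more detail):

```cpp
#include <cstdint>

// Simplified model of StackMaps::Location as the parser below uses it;
// not the real definition from llvm/CodeGen/StackMaps.h.
struct LocationModel {
  enum Kind {
    Register, // live value sits in a physical register (Offset unused)
    Direct,   // Reg + Offset is the value's address (e.g. a frame slot)
    Indirect, // value must be loaded from memory at [Reg + Offset]
    Constant  // value is a constant recorded directly in the map
  };
  Kind LocKind;
  unsigned Reg;   // physical register number, 0 when unused
  int64_t Offset; // displacement, or the constant itself for Constant
};
```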
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index bd2be3e..04d1ad5 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -17,6 +17,7 @@
 #include "X86COFFMachineModuleInfo.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/StackMaps.h"
 #include "llvm/IR/Type.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCContext.h"
@@ -686,6 +687,123 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
                                 .addExpr(tlsRef));
 }
 
+static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+parseMemoryOperand(StackMaps::Location::LocationType LocTy,
+                   MachineInstr::const_mop_iterator MOI,
+                   MachineInstr::const_mop_iterator MOE) {
+
+  typedef StackMaps::Location Location;
+
+  assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");
+
+  const MachineOperand &Base = *MOI;
+  const MachineOperand &Scale = *(++MOI);
+  const MachineOperand &Index = *(++MOI);
+  const MachineOperand &Disp = *(++MOI);
+  const MachineOperand &ZeroReg = *(++MOI);
+
+  // Sanity check for supported operand format.
+  assert(Base.isReg() &&
+         Scale.isImm() && Scale.getImm() == 1 &&
+         Index.isReg() && Index.getReg() == 0 &&
+         Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
+         "Unsupported x86 memory operand sequence.");
+
+  return std::make_pair(
+    Location(LocTy, Base.getReg(), Disp.getImm()), ++MOI);
+}
+
+std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
+                                     MachineInstr::const_mop_iterator MOE) {
+
+  typedef StackMaps::Location Location;
+
+  const MachineOperand &MOP = *MOI;
+  assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
+         "Register mask and implicit operands should not be processed.");
+
+  if (MOP.isImm()) {
+    switch (MOP.getImm()) {
+    default: llvm_unreachable("Unrecognized operand type.");
+    case StackMaps::DirectMemRefOp:
+      return parseMemoryOperand(StackMaps::Location::Direct,
+                                llvm::next(MOI), MOE);
+    case StackMaps::IndirectMemRefOp:
+      return parseMemoryOperand(StackMaps::Location::Indirect,
+                                llvm::next(MOI), MOE);
+    case StackMaps::ConstantOp: {
+      ++MOI;
+      assert(MOI->isImm() && "Expected constant operand.");
+      int64_t Imm = MOI->getImm();
+      return std::make_pair(Location(Location::Constant, 0, Imm), ++MOI);
+    }
+    }
+  }
+
+  // Otherwise this is a reg operand.
+  assert(MOP.isReg() && "Expected register operand here.");
+  assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
+         "Virtreg operands should have been rewritten before now.");
+  return std::make_pair(Location(Location::Register, MOP.getReg(), 0), ++MOI);
+}
+
+static MachineInstr::const_mop_iterator
+getStackMapEndMOP(MachineInstr::const_mop_iterator MOI,
+                  MachineInstr::const_mop_iterator MOE) {
+  for (; MOI != MOE; ++MOI)
+    if (MOI->isRegMask() || (MOI->isReg() && MOI->isImplicit()))
+      break;
+
+  return MOI;
+}
+
+static void LowerSTACKMAP(MCStreamer &OutStreamer,
+                          X86MCInstLower &MCInstLowering,
+                          StackMaps &SM,
+                          const MachineInstr &MI)
+{
+  int64_t ID = MI.getOperand(0).getImm();
+  unsigned NumNOPBytes = MI.getOperand(1).getImm();
+
+  assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
+  SM.recordStackMap(MI, ID, llvm::next(MI.operands_begin(), 2),
+                    getStackMapEndMOP(MI.operands_begin(), MI.operands_end()));
+  // Emit padding.
+  for (unsigned i = 0; i < NumNOPBytes; ++i)
+    OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+}
+
+static void LowerPATCHPOINT(MCStreamer &OutStreamer,
+                            X86MCInstLower &MCInstLowering,
+                            StackMaps &SM,
+                            const MachineInstr &MI)
+{
+  int64_t ID = MI.getOperand(0).getImm();
+  assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
+
+  // Get the number of arguments participating in the call. This number was
+  // adjusted during call lowering by subtracting stack args.
+  int64_t StackMapIdx = MI.getOperand(3).getImm() + 4;
+  assert(StackMapIdx <= MI.getNumOperands() && "Patchpoint dropped args.");
+
+  SM.recordStackMap(MI, ID, llvm::next(MI.operands_begin(), StackMapIdx),
+                    getStackMapEndMOP(MI.operands_begin(), MI.operands_end()));
+
+  // Emit call. We need to know how many bytes we encoded here.
+  unsigned EncodedBytes = 2;
+  OutStreamer.EmitInstruction(MCInstBuilder(X86::CALL64r)
+                              .addReg(MI.getOperand(2).getReg()));
+
+  // Emit padding.
+  unsigned NumNOPBytes = MI.getOperand(1).getImm();
+  assert(NumNOPBytes >= EncodedBytes &&
+         "Patchpoint can't request size less than the length of a call.");
+
+  for (unsigned i = EncodedBytes; i < NumNOPBytes; ++i)
+    OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+}
+
 void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
   X86MCInstLower MCInstLowering(*MF, *this);
   switch (MI->getOpcode()) {
@@ -775,6 +893,12 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
                            .addExpr(DotExpr));
     return;
   }
+
+  case TargetOpcode::STACKMAP:
+    return LowerSTACKMAP(OutStreamer, MCInstLowering, SM, *MI);
+
+  case TargetOpcode::PATCHPOINT:
+    return LowerPATCHPOINT(OutStreamer, MCInstLowering, SM, *MI);
   }
 
   MCInst TmpInst;
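
Both lowerings end by filling the reserved encoding space with single-byte NOPs: a STACKMAP emits a NOP for every reserved byte, while a PATCHPOINT first emits the register-indirect call (hard-coded as EncodedBytes = 2 in this patch) and pads only the remainder, asserting the reservation is at least that large. A standalone sketch of that arithmetic, with the function and names chosen here for illustration:

```cpp
#include <cassert>
#include <cstdio>

// Illustrative only: mirrors the padding arithmetic in LowerSTACKMAP and
// LowerPATCHPOINT above. EncodedBytes is 0 for a stackmap (nothing is emitted
// before the padding) and 2 for a patchpoint's register-indirect call, the
// value hard-coded in the patch.
unsigned nopPaddingBytes(unsigned ReservedBytes, unsigned EncodedBytes) {
  assert(ReservedBytes >= EncodedBytes &&
         "reservation can't be smaller than the bytes already emitted");
  return ReservedBytes - EncodedBytes;
}

int main() {
  // STACKMAP with a 16-byte shadow: all 16 bytes become NOPs.
  std::printf("stackmap padding:   %u\n", nopPaddingBytes(16, 0));
  // PATCHPOINT reserving 16 bytes: 2 bytes of call, 14 bytes of NOPs.
  std::printf("patchpoint padding: %u\n", nopPaddingBytes(16, 2));
  return 0;
}
```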