author:    Shih-wei Liao <sliao@google.com>  2010-02-10 11:10:31 -0800
committer: Shih-wei Liao <sliao@google.com>  2010-02-10 11:10:31 -0800
commit:    e264f62ca09a8f65c87a46d562a4d0f9ec5d457e (patch)
tree:      59e3d57ef656cef79afa708ae0a3daf25cd91fcf /lib/ExecutionEngine
Check in LLVM r95781.
Diffstat (limited to 'lib/ExecutionEngine')
-rw-r--r--  lib/ExecutionEngine/CMakeLists.txt                    |    4
-rw-r--r--  lib/ExecutionEngine/ExecutionEngine.cpp               | 1079
-rw-r--r--  lib/ExecutionEngine/ExecutionEngineBindings.cpp       |  209
-rw-r--r--  lib/ExecutionEngine/Interpreter/CMakeLists.txt        |    5
-rw-r--r--  lib/ExecutionEngine/Interpreter/Execution.cpp         | 1352
-rw-r--r--  lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp |  490
-rw-r--r--  lib/ExecutionEngine/Interpreter/Interpreter.cpp       |   98
-rw-r--r--  lib/ExecutionEngine/Interpreter/Interpreter.h         |  244
-rw-r--r--  lib/ExecutionEngine/Interpreter/Makefile              |   13
-rw-r--r--  lib/ExecutionEngine/JIT/CMakeLists.txt                |   13
-rw-r--r--  lib/ExecutionEngine/JIT/Intercept.cpp                 |  149
-rw-r--r--  lib/ExecutionEngine/JIT/JIT.cpp                       |  741
-rw-r--r--  lib/ExecutionEngine/JIT/JIT.h                         |  211
-rw-r--r--  lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp        |  209
-rw-r--r--  lib/ExecutionEngine/JIT/JITDebugRegisterer.h          |  116
-rw-r--r--  lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp           | 1048
-rw-r--r--  lib/ExecutionEngine/JIT/JITDwarfEmitter.h             |   87
-rw-r--r--  lib/ExecutionEngine/JIT/JITEmitter.cpp                | 1609
-rw-r--r--  lib/ExecutionEngine/JIT/JITMemoryManager.cpp          |  729
-rw-r--r--  lib/ExecutionEngine/JIT/Makefile                      |   38
-rw-r--r--  lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp  |  174
-rw-r--r--  lib/ExecutionEngine/JIT/TargetSelect.cpp              |   91
-rw-r--r--  lib/ExecutionEngine/Makefile                          |   13
23 files changed, 8722 insertions(+), 0 deletions(-)
diff --git a/lib/ExecutionEngine/CMakeLists.txt b/lib/ExecutionEngine/CMakeLists.txt
new file mode 100644
index 0000000..0e118cc
--- /dev/null
+++ b/lib/ExecutionEngine/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_llvm_library(LLVMExecutionEngine
+ ExecutionEngine.cpp
+ ExecutionEngineBindings.cpp
+ )
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
new file mode 100644
index 0000000..3e684e1
--- /dev/null
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -0,0 +1,1079 @@
+//===-- ExecutionEngine.cpp - Common Implementation shared by EEs ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common interface used by the various execution engine
+// subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jit"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/System/Host.h"
+#include "llvm/Target/TargetData.h"
+#include <cmath>
+#include <cstring>
+using namespace llvm;
+
+STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
+STATISTIC(NumGlobals , "Number of global vars initialized");
+
+ExecutionEngine *(*ExecutionEngine::JITCtor)(
+ Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs) = 0;
+ExecutionEngine *(*ExecutionEngine::InterpCtor)(Module *M,
+ std::string *ErrorStr) = 0;
+ExecutionEngine::EERegisterFn ExecutionEngine::ExceptionTableRegister = 0;
+
+
+ExecutionEngine::ExecutionEngine(Module *M)
+ : EEState(*this),
+ LazyFunctionCreator(0) {
+ CompilingLazily = false;
+ GVCompilationDisabled = false;
+ SymbolSearchingDisabled = false;
+ Modules.push_back(M);
+ assert(M && "Module is null?");
+}
+
+ExecutionEngine::~ExecutionEngine() {
+ clearAllGlobalMappings();
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i)
+ delete Modules[i];
+}
+
+char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) {
+ const Type *ElTy = GV->getType()->getElementType();
+ size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
+ return new char[GVSize];
+}
+
+/// removeModule - Remove a Module from the list of modules.
+bool ExecutionEngine::removeModule(Module *M) {
+ for(SmallVector<Module *, 1>::iterator I = Modules.begin(),
+ E = Modules.end(); I != E; ++I) {
+ Module *Found = *I;
+ if (Found == M) {
+ Modules.erase(I);
+ clearGlobalMappingsFromModule(M);
+ return true;
+ }
+ }
+ return false;
+}
+
+/// FindFunctionNamed - Search all of the active modules to find the one that
+/// defines FnName. This is a very slow operation and shouldn't be used for
+/// general code.
+Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
+ if (Function *F = Modules[i]->getFunction(FnName))
+ return F;
+ }
+ return 0;
+}
+
+
+void *ExecutionEngineState::RemoveMapping(
+ const MutexGuard &, const GlobalValue *ToUnmap) {
+ GlobalAddressMapTy::iterator I = GlobalAddressMap.find(ToUnmap);
+ void *OldVal;
+ if (I == GlobalAddressMap.end())
+ OldVal = 0;
+ else {
+ OldVal = I->second;
+ GlobalAddressMap.erase(I);
+ }
+
+ GlobalAddressReverseMap.erase(OldVal);
+ return OldVal;
+}
+
+/// addGlobalMapping - Tell the execution engine that the specified global is
+/// at the specified location. This is used internally as functions are JIT'd
+/// and as global variables are laid out in memory. It can and should also be
+/// used by clients of the EE that want an LLVM global to overlay
+/// existing data in memory.
+void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
+ MutexGuard locked(lock);
+
+ DEBUG(dbgs() << "JIT: Map \'" << GV->getName()
+ << "\' to [" << Addr << "]\n";);
+ void *&CurVal = EEState.getGlobalAddressMap(locked)[GV];
+ assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!");
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too
+ if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
+ AssertingVH<const GlobalValue> &V =
+ EEState.getGlobalAddressReverseMap(locked)[Addr];
+ assert((V == 0 || GV == 0) && "GlobalMapping already established!");
+ V = GV;
+ }
+}
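+
+// A minimal usage sketch (names are illustrative, not defined here): given
+// an ExecutionEngine *EE and a Module *M declaring
+// "@counter = external global i32", a client can overlay host data so that
+// JIT'd loads and stores of @counter touch the host variable directly:
+//
+//   static int32_t HostCounter;
+//   GlobalVariable *Counter = M->getNamedGlobal("counter");
+//   EE->addGlobalMapping(Counter, &HostCounter);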
+
+/// clearAllGlobalMappings - Clear all global mappings and start over again.
+/// Use this in dynamic compilation scenarios when you want to move globals.
+void ExecutionEngine::clearAllGlobalMappings() {
+ MutexGuard locked(lock);
+
+ EEState.getGlobalAddressMap(locked).clear();
+ EEState.getGlobalAddressReverseMap(locked).clear();
+}
+
+/// clearGlobalMappingsFromModule - Clear all global mappings that came from a
+/// particular module, because it has been removed from the JIT.
+void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
+ MutexGuard locked(lock);
+
+ for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI) {
+ EEState.RemoveMapping(locked, FI);
+ }
+ for (Module::global_iterator GI = M->global_begin(), GE = M->global_end();
+ GI != GE; ++GI) {
+ EEState.RemoveMapping(locked, GI);
+ }
+}
+
+/// updateGlobalMapping - Replace an existing mapping for GV with a new
+/// address. This updates both maps as required. If "Addr" is null, the
+/// entry for the global is removed from the mappings.
+void *ExecutionEngine::updateGlobalMapping(const GlobalValue *GV, void *Addr) {
+ MutexGuard locked(lock);
+
+ ExecutionEngineState::GlobalAddressMapTy &Map =
+ EEState.getGlobalAddressMap(locked);
+
+ // Deleting from the mapping?
+ if (Addr == 0) {
+ return EEState.RemoveMapping(locked, GV);
+ }
+
+ void *&CurVal = Map[GV];
+ void *OldVal = CurVal;
+
+ if (CurVal && !EEState.getGlobalAddressReverseMap(locked).empty())
+ EEState.getGlobalAddressReverseMap(locked).erase(CurVal);
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too
+ if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
+ AssertingVH<const GlobalValue> &V =
+ EEState.getGlobalAddressReverseMap(locked)[Addr];
+ assert((V == 0 || GV == 0) && "GlobalMapping already established!");
+ V = GV;
+ }
+ return OldVal;
+}
+
+/// getPointerToGlobalIfAvailable - This returns the address of the specified
+/// global value if it has already been codegen'd, otherwise it returns null.
+///
+void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
+ MutexGuard locked(lock);
+
+ ExecutionEngineState::GlobalAddressMapTy::iterator I =
+ EEState.getGlobalAddressMap(locked).find(GV);
+ return I != EEState.getGlobalAddressMap(locked).end() ? I->second : 0;
+}
+
+/// getGlobalValueAtAddress - Return the LLVM global value object that starts
+/// at the specified address.
+///
+const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
+ MutexGuard locked(lock);
+
+ // If we haven't computed the reverse mapping yet, do so first.
+ if (EEState.getGlobalAddressReverseMap(locked).empty()) {
+ for (ExecutionEngineState::GlobalAddressMapTy::iterator
+ I = EEState.getGlobalAddressMap(locked).begin(),
+ E = EEState.getGlobalAddressMap(locked).end(); I != E; ++I)
+ EEState.getGlobalAddressReverseMap(locked).insert(std::make_pair(I->second,
+ I->first));
+ }
+
+ std::map<void *, AssertingVH<const GlobalValue> >::iterator I =
+ EEState.getGlobalAddressReverseMap(locked).find(Addr);
+ return I != EEState.getGlobalAddressReverseMap(locked).end() ? I->second : 0;
+}
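+
+// A round-trip sketch of the two maps (EE and Counter as in the sketch
+// above):
+//
+//   void *Addr = EE->getPointerToGlobal(Counter);
+//   assert(EE->getGlobalValueAtAddress(Addr) == Counter);
+//
+// The first query builds the reverse map from the forward map; subsequent
+// addGlobalMapping/updateGlobalMapping calls keep it in sync.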
+
+// CreateArgv - Turn a vector of strings into a nice argv-style array of
+// pointers to null-terminated strings.
+//
+static void *CreateArgv(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv) {
+ unsigned PtrSize = EE->getTargetData()->getPointerSize();
+ char *Result = new char[(InputArgv.size()+1)*PtrSize];
+
+ DEBUG(dbgs() << "JIT: ARGV = " << (void*)Result << "\n");
+ const Type *SBytePtr = Type::getInt8PtrTy(C);
+
+ for (unsigned i = 0; i != InputArgv.size(); ++i) {
+ unsigned Size = InputArgv[i].size()+1;
+ char *Dest = new char[Size];
+ DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void*)Dest << "\n");
+
+ std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest);
+ Dest[Size-1] = 0;
+
+ // Endian safe: Result[i] = (PointerTy)Dest;
+ EE->StoreValueToMemory(PTOGV(Dest), (GenericValue*)(Result+i*PtrSize),
+ SBytePtr);
+ }
+
+ // Null terminate it
+ EE->StoreValueToMemory(PTOGV(0),
+ (GenericValue*)(Result+InputArgv.size()*PtrSize),
+ SBytePtr);
+ return Result;
+}
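+
+// Worked example of the layout CreateArgv builds for InputArgv = {"prog",
+// "-v"} on a target with 8-byte pointers (addresses illustrative):
+//
+//   Result+0  -> heap copy of "prog\0"
+//   Result+8  -> heap copy of "-v\0"
+//   Result+16 =  null pointer terminating the array
+//
+// The pointers are written with StoreValueToMemory so their size and byte
+// order match the target, not the host.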
+
+
+/// runStaticConstructorsDestructors - This method is used to execute all of
+/// the static constructors or destructors for a module, depending on the
+/// value of isDtors.
+void ExecutionEngine::runStaticConstructorsDestructors(Module *module,
+ bool isDtors) {
+ const char *Name = isDtors ? "llvm.global_dtors" : "llvm.global_ctors";
+
+ // Execute global ctors/dtors for each module in the program.
+
+ GlobalVariable *GV = module->getNamedGlobal(Name);
+
+ // If this global has internal linkage, or if it has a use, then it must be
+ // an old-style (llvmgcc3) static ctor with __main linked in and in use. If
+ // this is the case, don't execute any of the global ctors, __main will do
+ // it.
+ if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return;
+
+ // Should be an array of '{ int, void ()* }' structs. The first value is
+ // the init priority, which we ignore.
+ ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList) return;
+ for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
+ if (ConstantStruct *CS =
+ dyn_cast<ConstantStruct>(InitList->getOperand(i))) {
+ if (CS->getNumOperands() != 2) return; // Not array of 2-element structs.
+
+ Constant *FP = CS->getOperand(1);
+ if (FP->isNullValue())
+ break; // Found a null terminator, exit.
+
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
+ if (CE->isCast())
+ FP = CE->getOperand(0);
+ if (Function *F = dyn_cast<Function>(FP)) {
+ // Execute the ctor/dtor function!
+ runFunction(F, std::vector<GenericValue>());
+ }
+ }
+}
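+
+// For reference, the initializer walked above looks like this in IR (a
+// sketch; the 65535 priority is conventional and is ignored here):
+//
+//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
+//                        [{ i32, void ()* } { i32 65535, void ()* @init }]
+//
+// Every non-null function operand is executed via runFunction; a null
+// function pointer terminates the list.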
+
+/// runStaticConstructorsDestructors - This method is used to execute all of
+/// the static constructors or destructors for a program, depending on the
+/// value of isDtors.
+void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
+ // Execute global ctors/dtors for each module in the program.
+ for (unsigned m = 0, e = Modules.size(); m != e; ++m)
+ runStaticConstructorsDestructors(Modules[m], isDtors);
+}
+
+#ifndef NDEBUG
+/// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
+static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
+ unsigned PtrSize = EE->getTargetData()->getPointerSize();
+ for (unsigned i = 0; i < PtrSize; ++i)
+ if (*(i + (uint8_t*)Loc))
+ return false;
+ return true;
+}
+#endif
+
+/// runFunctionAsMain - This is a helper function which wraps runFunction to
+/// handle the common task of starting up main with the specified argc, argv,
+/// and envp parameters.
+int ExecutionEngine::runFunctionAsMain(Function *Fn,
+ const std::vector<std::string> &argv,
+ const char * const * envp) {
+ std::vector<GenericValue> GVArgs;
+ GenericValue GVArgc;
+ GVArgc.IntVal = APInt(32, argv.size());
+
+ // Check main() type
+ unsigned NumArgs = Fn->getFunctionType()->getNumParams();
+ const FunctionType *FTy = Fn->getFunctionType();
+ const Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
+ switch (NumArgs) {
+ case 3:
+ if (FTy->getParamType(2) != PPInt8Ty) {
+ llvm_report_error("Invalid type for third argument of main() supplied");
+ }
+ // FALLS THROUGH
+ case 2:
+ if (FTy->getParamType(1) != PPInt8Ty) {
+ llvm_report_error("Invalid type for second argument of main() supplied");
+ }
+ // FALLS THROUGH
+ case 1:
+ if (!FTy->getParamType(0)->isInteger(32)) {
+ llvm_report_error("Invalid type for first argument of main() supplied");
+ }
+ // FALLS THROUGH
+ case 0:
+ if (!isa<IntegerType>(FTy->getReturnType()) &&
+ !FTy->getReturnType()->isVoidTy()) {
+ llvm_report_error("Invalid return type of main() supplied");
+ }
+ break;
+ default:
+ llvm_report_error("Invalid number of arguments of main() supplied");
+ }
+
+ if (NumArgs) {
+ GVArgs.push_back(GVArgc); // Arg #0 = argc.
+ if (NumArgs > 1) {
+ // Arg #1 = argv.
+ GVArgs.push_back(PTOGV(CreateArgv(Fn->getContext(), this, argv)));
+ assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) &&
+ "argv[0] was null after CreateArgv");
+ if (NumArgs > 2) {
+ std::vector<std::string> EnvVars;
+ for (unsigned i = 0; envp[i]; ++i)
+ EnvVars.push_back(envp[i]);
+ // Arg #2 = envp.
+ GVArgs.push_back(PTOGV(CreateArgv(Fn->getContext(), this, EnvVars)));
+ }
+ }
+ }
+ return runFunction(Fn, GVArgs).IntVal.getZExtValue();
+}
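+
+// A minimal usage sketch, assuming an ExecutionEngine *EE and a Module *M
+// defining a conforming "int main(int, char**)" (names illustrative):
+//
+//   std::vector<std::string> Args;
+//   Args.push_back("prog");                 // becomes argv[0]
+//   Args.push_back("-v");
+//   const char *Env[] = { 0 };
+//   int rc = EE->runFunctionAsMain(M->getFunction("main"), Args, Env);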
+
+/// If possible, create a JIT, unless the caller specifically requests an
+/// Interpreter or there's an error. If even an Interpreter cannot be created,
+/// NULL is returned.
+///
+ExecutionEngine *ExecutionEngine::create(Module *M,
+ bool ForceInterpreter,
+ std::string *ErrorStr,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode) {
+ return EngineBuilder(M)
+ .setEngineKind(ForceInterpreter
+ ? EngineKind::Interpreter
+ : EngineKind::JIT)
+ .setErrorStr(ErrorStr)
+ .setOptLevel(OptLevel)
+ .setAllocateGVsWithCode(GVsWithCode)
+ .create();
+}
+
+ExecutionEngine *EngineBuilder::create() {
+ // Make sure we can resolve symbols in the program as well. The zero arg
+ // to the function tells DynamicLibrary to load the program, not a library.
+ if (sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr))
+ return 0;
+
+ // If the user specified a memory manager but didn't specify which engine to
+ // create, we assume they only want the JIT, and we fail if they only want
+ // the interpreter.
+ if (JMM) {
+ if (WhichEngine & EngineKind::JIT)
+ WhichEngine = EngineKind::JIT;
+ else {
+ if (ErrorStr)
+ *ErrorStr = "Cannot create an interpreter with a memory manager.";
+ return 0;
+ }
+ }
+
+ // Unless the interpreter was explicitly selected or the JIT is not linked,
+ // try making a JIT.
+ if (WhichEngine & EngineKind::JIT) {
+ if (ExecutionEngine::JITCtor) {
+ ExecutionEngine *EE =
+ ExecutionEngine::JITCtor(M, ErrorStr, JMM, OptLevel,
+ AllocateGVsWithCode, CMModel,
+ MArch, MCPU, MAttrs);
+ if (EE) return EE;
+ }
+ }
+
+ // If we can't make a JIT and we didn't request one specifically, try making
+ // an interpreter instead.
+ if (WhichEngine & EngineKind::Interpreter) {
+ if (ExecutionEngine::InterpCtor)
+ return ExecutionEngine::InterpCtor(M, ErrorStr);
+ if (ErrorStr)
+ *ErrorStr = "Interpreter has not been linked in.";
+ return 0;
+ }
+
+ if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0) {
+ if (ErrorStr)
+ *ErrorStr = "JIT has not been linked in.";
+ }
+ return 0;
+}
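+
+// A typical construction sequence, sketched (error handling elided; M is an
+// assumed Module*):
+//
+//   std::string Err;
+//   ExecutionEngine *EE = EngineBuilder(M)
+//                           .setEngineKind(EngineKind::JIT)
+//                           .setErrorStr(&Err)
+//                           .setOptLevel(CodeGenOpt::Default)
+//                           .create();
+//   if (!EE)
+//     errs() << "engine creation failed: " << Err << "\n";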
+
+/// getPointerToGlobal - This returns the address of the specified global
+/// value. This may involve code generation if it's a function.
+///
+void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
+ if (Function *F = const_cast<Function*>(dyn_cast<Function>(GV)))
+ return getPointerToFunction(F);
+
+ MutexGuard locked(lock);
+ void *p = EEState.getGlobalAddressMap(locked)[GV];
+ if (p)
+ return p;
+
+ // Global variable might have been added since interpreter started.
+ if (GlobalVariable *GVar =
+ const_cast<GlobalVariable *>(dyn_cast<GlobalVariable>(GV)))
+ EmitGlobalVariable(GVar);
+ else
+ llvm_unreachable("Global hasn't had an address allocated yet!");
+ return EEState.getGlobalAddressMap(locked)[GV];
+}
+
+/// This function converts a Constant* into a GenericValue. The interesting
+/// part is if C is a ConstantExpr.
+/// @brief Get a GenericValue for a Constant*
+GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
+ // If it's undefined, return the garbage.
+ if (isa<UndefValue>(C)) {
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ case Type::IntegerTyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ // Although the value is undefined, we still have to construct an APInt
+ // with the correct bit width.
+ Result.IntVal = APInt(C->getType()->getPrimitiveSizeInBits(), 0);
+ break;
+ default:
+ break;
+ }
+ return Result;
+ }
+
+ // If the value is a ConstantExpr
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ Constant *Op0 = CE->getOperand(0);
+ switch (CE->getOpcode()) {
+ case Instruction::GetElementPtr: {
+ // Compute the index
+ GenericValue Result = getConstantValue(Op0);
+ SmallVector<Value*, 8> Indices(CE->op_begin()+1, CE->op_end());
+ uint64_t Offset =
+ TD->getIndexedOffset(Op0->getType(), &Indices[0], Indices.size());
+
+ char* tmp = (char*) Result.PointerVal;
+ Result = PTOGV(tmp + Offset);
+ return Result;
+ }
+ case Instruction::Trunc: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.trunc(BitWidth);
+ return GV;
+ }
+ case Instruction::ZExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.zext(BitWidth);
+ return GV;
+ }
+ case Instruction::SExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.sext(BitWidth);
+ return GV;
+ }
+ case Instruction::FPTrunc: {
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.FloatVal = float(GV.DoubleVal);
+ return GV;
+ }
+ case Instruction::FPExt:{
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.DoubleVal = double(GV.FloatVal);
+ return GV;
+ }
+ case Instruction::UIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.roundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.roundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ const uint64_t zero[] = {0, 0};
+ APFloat apf = APFloat(APInt(80, 2, zero));
+ (void)apf.convertFromAPInt(GV.IntVal,
+ false,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::SIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.signedRoundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.signedRoundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ const uint64_t zero[] = { 0, 0};
+ APFloat apf = APFloat(APInt(80, 2, zero));
+ (void)apf.convertFromAPInt(GV.IntVal,
+ true,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::FPToUI: // double->APInt conversion handles sign
+ case Instruction::FPToSI: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ if (Op0->getType()->isFloatTy())
+ GV.IntVal = APIntOps::RoundFloatToAPInt(GV.FloatVal, BitWidth);
+ else if (Op0->getType()->isDoubleTy())
+ GV.IntVal = APIntOps::RoundDoubleToAPInt(GV.DoubleVal, BitWidth);
+ else if (Op0->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat(GV.IntVal);
+ uint64_t v;
+ bool ignored;
+ (void)apf.convertToInteger(&v, BitWidth,
+ CE->getOpcode()==Instruction::FPToSI,
+ APFloat::rmTowardZero, &ignored);
+ GV.IntVal = v; // endian?
+ }
+ return GV;
+ }
+ case Instruction::PtrToInt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = TD->getPointerSizeInBits();
+ GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
+ return GV;
+ }
+ case Instruction::IntToPtr: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = TD->getPointerSizeInBits();
+ if (PtrWidth != GV.IntVal.getBitWidth())
+ GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
+ assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
+ GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
+ return GV;
+ }
+ case Instruction::BitCast: {
+ GenericValue GV = getConstantValue(Op0);
+ const Type* DestTy = CE->getType();
+ switch (Op0->getType()->getTypeID()) {
+ default: llvm_unreachable("Invalid bitcast operand");
+ case Type::IntegerTyID:
+ assert(DestTy->isFloatingPoint() && "invalid bitcast");
+ if (DestTy->isFloatTy())
+ GV.FloatVal = GV.IntVal.bitsToFloat();
+ else if (DestTy->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.bitsToDouble();
+ break;
+ case Type::FloatTyID:
+ assert(DestTy->isInteger(32) && "Invalid bitcast");
+ GV.IntVal.floatToBits(GV.FloatVal);
+ break;
+ case Type::DoubleTyID:
+ assert(DestTy->isInteger(64) && "Invalid bitcast");
+ GV.IntVal.doubleToBits(GV.DoubleVal);
+ break;
+ case Type::PointerTyID:
+ assert(isa<PointerType>(DestTy) && "Invalid bitcast");
+ break; // getConstantValue(Op0) above already converted it
+ }
+ return GV;
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ GenericValue LHS = getConstantValue(Op0);
+ GenericValue RHS = getConstantValue(CE->getOperand(1));
+ GenericValue GV;
+ switch (CE->getOperand(0)->getType()->getTypeID()) {
+ default: llvm_unreachable("Bad add type!");
+ case Type::IntegerTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid integer opcode");
+ case Instruction::Add: GV.IntVal = LHS.IntVal + RHS.IntVal; break;
+ case Instruction::Sub: GV.IntVal = LHS.IntVal - RHS.IntVal; break;
+ case Instruction::Mul: GV.IntVal = LHS.IntVal * RHS.IntVal; break;
+ case Instruction::UDiv:GV.IntVal = LHS.IntVal.udiv(RHS.IntVal); break;
+ case Instruction::SDiv:GV.IntVal = LHS.IntVal.sdiv(RHS.IntVal); break;
+ case Instruction::URem:GV.IntVal = LHS.IntVal.urem(RHS.IntVal); break;
+ case Instruction::SRem:GV.IntVal = LHS.IntVal.srem(RHS.IntVal); break;
+ case Instruction::And: GV.IntVal = LHS.IntVal & RHS.IntVal; break;
+ case Instruction::Or: GV.IntVal = LHS.IntVal | RHS.IntVal; break;
+ case Instruction::Xor: GV.IntVal = LHS.IntVal ^ RHS.IntVal; break;
+ }
+ break;
+ case Type::FloatTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid float opcode");
+ case Instruction::FAdd:
+ GV.FloatVal = LHS.FloatVal + RHS.FloatVal; break;
+ case Instruction::FSub:
+ GV.FloatVal = LHS.FloatVal - RHS.FloatVal; break;
+ case Instruction::FMul:
+ GV.FloatVal = LHS.FloatVal * RHS.FloatVal; break;
+ case Instruction::FDiv:
+ GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break;
+ case Instruction::FRem:
+ GV.FloatVal = ::fmodf(LHS.FloatVal,RHS.FloatVal); break;
+ }
+ break;
+ case Type::DoubleTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid double opcode");
+ case Instruction::FAdd:
+ GV.DoubleVal = LHS.DoubleVal + RHS.DoubleVal; break;
+ case Instruction::FSub:
+ GV.DoubleVal = LHS.DoubleVal - RHS.DoubleVal; break;
+ case Instruction::FMul:
+ GV.DoubleVal = LHS.DoubleVal * RHS.DoubleVal; break;
+ case Instruction::FDiv:
+ GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break;
+ case Instruction::FRem:
+ GV.DoubleVal = ::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
+ }
+ break;
+ case Type::X86_FP80TyID:
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID: {
+ APFloat apfLHS = APFloat(LHS.IntVal);
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid long double opcode");llvm_unreachable(0);
+ case Instruction::FAdd:
+ apfLHS.add(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FSub:
+ apfLHS.subtract(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FMul:
+ apfLHS.multiply(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FDiv:
+ apfLHS.divide(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FRem:
+ apfLHS.mod(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ }
+ }
+ break;
+ }
+ return GV;
+ }
+ default:
+ break;
+ }
+ std::string msg;
+ raw_string_ostream Msg(msg);
+ Msg << "ConstantExpr not handled: " << *CE;
+ llvm_report_error(Msg.str());
+ }
+
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ case Type::FloatTyID:
+ Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat();
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = cast<ConstantFP>(C)->getValueAPF().convertToDouble();
+ break;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ Result.IntVal = cast <ConstantFP>(C)->getValueAPF().bitcastToAPInt();
+ break;
+ case Type::IntegerTyID:
+ Result.IntVal = cast<ConstantInt>(C)->getValue();
+ break;
+ case Type::PointerTyID:
+ if (isa<ConstantPointerNull>(C))
+ Result.PointerVal = 0;
+ else if (const Function *F = dyn_cast<Function>(C))
+ Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
+ else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
+ Result = PTOGV(getOrEmitGlobalVariable(const_cast<GlobalVariable*>(GV)));
+ else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
+ Result = PTOGV(getPointerToBasicBlock(const_cast<BasicBlock*>(
+ BA->getBasicBlock())));
+ else
+ llvm_unreachable("Unknown constant pointer type!");
+ break;
+ default:
+ std::string msg;
+ raw_string_ostream Msg(msg);
+ Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
+ llvm_report_error(Msg.str());
+ }
+ return Result;
+}
+
+/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
+/// with the integer held in IntVal.
+static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
+ unsigned StoreBytes) {
+ assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
+ uint8_t *Src = (uint8_t *)IntVal.getRawData();
+
+ if (sys::isLittleEndianHost())
+ // Little-endian host - the source is ordered from LSB to MSB. Order the
+ // destination from LSB to MSB: Do a straight copy.
+ memcpy(Dst, Src, StoreBytes);
+ else {
+ // Big-endian host - the source is an array of 64 bit words ordered from
+ // LSW to MSW. Each word is ordered from MSB to LSB. Order the destination
+ // from MSB to LSB: Reverse the word order, but not the bytes in a word.
+ while (StoreBytes > sizeof(uint64_t)) {
+ StoreBytes -= sizeof(uint64_t);
+ // May not be aligned so use memcpy.
+ memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
+ Src += sizeof(uint64_t);
+ }
+
+ memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
+ }
+}
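+
+// Worked example for the big-endian branch: storing an 80-bit APInt whose
+// 64-bit words are {0x1122334455667788, 0x99AA} with StoreBytes == 10. The
+// loop copies word 0 to Dst[2..9]; the final memcpy copies the low two
+// bytes of word 1 to Dst[0..1], yielding the byte sequence
+//
+//   99 AA 11 22 33 44 55 66 77 88
+//
+// i.e. 0x99AA1122334455667788 laid out MSB-first.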
+
+/// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr. Ptr
+/// is the address of the memory at which to store Val, cast to GenericValue *.
+/// It is not a pointer to a GenericValue containing the address at which to
+/// store Val.
+void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
+ GenericValue *Ptr, const Type *Ty) {
+ const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ StoreIntToMemory(Val.IntVal, (uint8_t*)Ptr, StoreBytes);
+ break;
+ case Type::FloatTyID:
+ *((float*)Ptr) = Val.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ *((double*)Ptr) = Val.DoubleVal;
+ break;
+ case Type::X86_FP80TyID:
+ memcpy(Ptr, Val.IntVal.getRawData(), 10);
+ break;
+ case Type::PointerTyID:
+ // Ensure 64 bit target pointers are fully initialized on 32 bit hosts.
+ if (StoreBytes != sizeof(PointerTy))
+ memset(Ptr, 0, StoreBytes);
+
+ *((PointerTy*)Ptr) = Val.PointerVal;
+ break;
+ default:
+ dbgs() << "Cannot store value of type " << *Ty << "!\n";
+ }
+
+ if (sys::isLittleEndianHost() != getTargetData()->isLittleEndian())
+ // Host and target are different endian - reverse the stored bytes.
+ std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
+}
+
+/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
+/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
+static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
+ assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
+ uint8_t *Dst = (uint8_t *)IntVal.getRawData();
+
+ if (sys::isLittleEndianHost())
+ // Little-endian host - the destination must be ordered from LSB to MSB.
+ // The source is ordered from LSB to MSB: Do a straight copy.
+ memcpy(Dst, Src, LoadBytes);
+ else {
+ // Big-endian - the destination is an array of 64 bit words ordered from
+ // LSW to MSW. Each word must be ordered from MSB to LSB. The source is
+ // ordered from MSB to LSB: Reverse the word order, but not the bytes in
+ // a word.
+ while (LoadBytes > sizeof(uint64_t)) {
+ LoadBytes -= sizeof(uint64_t);
+ // May not be aligned so use memcpy.
+ memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
+ Dst += sizeof(uint64_t);
+ }
+
+ memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
+ }
+}
+
+/// LoadValueFromMemory - Loads a value of type Ty from the memory at Ptr
+/// into Result. This is the counterpart to StoreValueToMemory.
+///
+void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
+ GenericValue *Ptr,
+ const Type *Ty) {
+ const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ // An APInt with all words initially zero.
+ Result.IntVal = APInt(cast<IntegerType>(Ty)->getBitWidth(), 0);
+ LoadIntFromMemory(Result.IntVal, (uint8_t*)Ptr, LoadBytes);
+ break;
+ case Type::FloatTyID:
+ Result.FloatVal = *((float*)Ptr);
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = *((double*)Ptr);
+ break;
+ case Type::PointerTyID:
+ Result.PointerVal = *((PointerTy*)Ptr);
+ break;
+ case Type::X86_FP80TyID: {
+ // This is endian dependent, but it will only work on x86 anyway.
+ // FIXME: Will not trap if loading a signaling NaN.
+ uint64_t y[2];
+ memcpy(y, Ptr, 10);
+ Result.IntVal = APInt(80, 2, y);
+ break;
+ }
+ default:
+ std::string msg;
+ raw_string_ostream Msg(msg);
+ Msg << "Cannot load value of type " << *Ty << "!";
+ llvm_report_error(Msg.str());
+ }
+}
+
+// InitializeMemory - Recursive function to store a Constant value into the
+// specified memory location.
+//
+void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
+ DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
+ DEBUG(Init->dump());
+ if (isa<UndefValue>(Init)) {
+ return;
+ } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
+ unsigned ElementSize =
+ getTargetData()->getTypeAllocSize(CP->getType()->getElementType());
+ for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
+ InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ } else if (isa<ConstantAggregateZero>(Init)) {
+ memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType()));
+ return;
+ } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
+ unsigned ElementSize =
+ getTargetData()->getTypeAllocSize(CPA->getType()->getElementType());
+ for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
+ const StructLayout *SL =
+ getTargetData()->getStructLayout(cast<StructType>(CPS->getType()));
+ for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
+ return;
+ } else if (Init->getType()->isFirstClassType()) {
+ GenericValue Val = getConstantValue(Init);
+ StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
+ return;
+ }
+
+ dbgs() << "Bad Type: " << *Init->getType() << "\n";
+ llvm_unreachable("Unknown constant type to initialize memory with!");
+}
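+
+// Example of the recursion, as a sketch: a global initialized with
+//
+//   { i32 1, [2 x i16] [i16 2, i16 3] }
+//
+// hits the ConstantStruct case, which recurses at each StructLayout element
+// offset; the nested array case then recurses per element, and each leaf
+// i32/i16 is a first-class value stored via StoreValueToMemory.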
+
+/// emitGlobals - Emit all of the global variables to memory, storing their
+/// addresses into GlobalAddress. This must make sure to copy the contents of
+/// their initializers into the memory.
+///
+void ExecutionEngine::emitGlobals() {
+
+ // Loop over all of the global variables in the program, allocating the memory
+ // to hold them. If there is more than one module, do a prepass over globals
+ // to figure out how the different modules should link together.
+ //
+ std::map<std::pair<std::string, const Type*>,
+ const GlobalValue*> LinkedGlobalsMap;
+
+ if (Modules.size() != 1) {
+ for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
+ Module &M = *Modules[m];
+ for (Module::const_global_iterator I = M.global_begin(),
+ E = M.global_end(); I != E; ++I) {
+ const GlobalValue *GV = I;
+ if (GV->hasLocalLinkage() || GV->isDeclaration() ||
+ GV->hasAppendingLinkage() || !GV->hasName())
+ continue;// Ignore external globals and globals with internal linkage.
+
+ const GlobalValue *&GVEntry =
+ LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())];
+
+ // If this is the first time we've seen this global, it is the canonical
+ // version.
+ if (!GVEntry) {
+ GVEntry = GV;
+ continue;
+ }
+
+ // If the existing global is strong, never replace it.
+ if (GVEntry->hasExternalLinkage() ||
+ GVEntry->hasDLLImportLinkage() ||
+ GVEntry->hasDLLExportLinkage())
+ continue;
+
+ // Otherwise, we know it's linkonce/weak, replace it if this is a strong
+ // symbol. FIXME is this right for common?
+ if (GV->hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
+ GVEntry = GV;
+ }
+ }
+ }
+
+ std::vector<const GlobalValue*> NonCanonicalGlobals;
+ for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
+ Module &M = *Modules[m];
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I) {
+ // In the multi-module case, see what this global maps to.
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry =
+ LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())]) {
+ // If something else is the canonical global, ignore this one.
+ if (GVEntry != &*I) {
+ NonCanonicalGlobals.push_back(I);
+ continue;
+ }
+ }
+ }
+
+ if (!I->isDeclaration()) {
+ addGlobalMapping(I, getMemoryForGV(I));
+ } else {
+ // External variable reference. Try to use the dynamic loader to
+ // get a pointer to it.
+ if (void *SymAddr =
+ sys::DynamicLibrary::SearchForAddressOfSymbol(I->getName()))
+ addGlobalMapping(I, SymAddr);
+ else {
+ llvm_report_error("Could not resolve external global address: "
+ +I->getName());
+ }
+ }
+ }
+
+ // If there are multiple modules, map the non-canonical globals to their
+ // canonical location.
+ if (!NonCanonicalGlobals.empty()) {
+ for (unsigned i = 0, e = NonCanonicalGlobals.size(); i != e; ++i) {
+ const GlobalValue *GV = NonCanonicalGlobals[i];
+ const GlobalValue *CGV =
+ LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())];
+ void *Ptr = getPointerToGlobalIfAvailable(CGV);
+ assert(Ptr && "Canonical global wasn't codegen'd!");
+ addGlobalMapping(GV, Ptr);
+ }
+ }
+
+ // Now that all of the globals are set up in memory, loop through them all
+ // and initialize their contents.
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I) {
+ if (!I->isDeclaration()) {
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry =
+ LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())])
+ if (GVEntry != &*I) // Not the canonical variable.
+ continue;
+ }
+ EmitGlobalVariable(I);
+ }
+ }
+ }
+}
+
+// EmitGlobalVariable - This method emits the specified global variable to the
+// address specified in GlobalAddresses, or allocates new memory if it's not
+// already in the map.
+void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
+ void *GA = getPointerToGlobalIfAvailable(GV);
+
+ if (GA == 0) {
+ // If it's not already specified, allocate memory for the global.
+ GA = getMemoryForGV(GV);
+ addGlobalMapping(GV, GA);
+ }
+
+ // Don't initialize if it's thread local; let the client do it.
+ if (!GV->isThreadLocal())
+ InitializeMemory(GV->getInitializer(), GA);
+
+ const Type *ElTy = GV->getType()->getElementType();
+ size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
+ NumInitBytes += (unsigned)GVSize;
+ ++NumGlobals;
+}
+
+ExecutionEngineState::ExecutionEngineState(ExecutionEngine &EE)
+ : EE(EE), GlobalAddressMap(this) {
+}
+
+sys::Mutex *ExecutionEngineState::AddressMapConfig::getMutex(
+ ExecutionEngineState *EES) {
+ return &EES->EE.lock;
+}
+void ExecutionEngineState::AddressMapConfig::onDelete(
+ ExecutionEngineState *EES, const GlobalValue *Old) {
+ void *OldVal = EES->GlobalAddressMap.lookup(Old);
+ EES->GlobalAddressReverseMap.erase(OldVal);
+}
+
+void ExecutionEngineState::AddressMapConfig::onRAUW(
+ ExecutionEngineState *, const GlobalValue *, const GlobalValue *) {
+ assert(false && "The ExecutionEngine doesn't know how to handle a"
+ " RAUW on a value it has a global mapping for.");
+}
diff --git a/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/lib/ExecutionEngine/ExecutionEngineBindings.cpp
new file mode 100644
index 0000000..141cb27
--- /dev/null
+++ b/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -0,0 +1,209 @@
+//===-- ExecutionEngineBindings.cpp - C bindings for EEs ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C bindings for the ExecutionEngine library.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jit"
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstring>
+
+using namespace llvm;
+
+/*===-- Operations on generic values --------------------------------------===*/
+
+LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
+ unsigned long long N,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->IntVal = APInt(unwrap<IntegerType>(Ty)->getBitWidth(), N, IsSigned);
+ return wrap(GenVal);
+}
+
+LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->PointerVal = P;
+ return wrap(GenVal);
+}
+
+LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef TyRef, double N) {
+ GenericValue *GenVal = new GenericValue();
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ GenVal->FloatVal = N;
+ break;
+ case Type::DoubleTyID:
+ GenVal->DoubleVal = N;
+ break;
+ default:
+ llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
+ }
+ return wrap(GenVal);
+}
+
+unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef) {
+ return unwrap(GenValRef)->IntVal.getBitWidth();
+}
+
+unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenValRef,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = unwrap(GenValRef);
+ if (IsSigned)
+ return GenVal->IntVal.getSExtValue();
+ else
+ return GenVal->IntVal.getZExtValue();
+}
+
+void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal) {
+ return unwrap(GenVal)->PointerVal;
+}
+
+double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal) {
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ return unwrap(GenVal)->FloatVal;
+ case Type::DoubleTyID:
+ return unwrap(GenVal)->DoubleVal;
+ default:
+ llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
+ break;
+ }
+ return 0; // Not reached
+}
+
+void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal) {
+ delete unwrap(GenVal);
+}
+
+/*===-- Operations on execution engines -----------------------------------===*/
+
+LLVMBool LLVMCreateExecutionEngine(LLVMExecutionEngineRef *OutEE,
+ LLVMModuleProviderRef MP,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(unwrap(MP));
+ builder.setEngineKind(EngineKind::Either)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *EE = builder.create()){
+ *OutEE = wrap(EE);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+LLVMBool LLVMCreateInterpreter(LLVMExecutionEngineRef *OutInterp,
+ LLVMModuleProviderRef MP,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(unwrap(MP));
+ builder.setEngineKind(EngineKind::Interpreter)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *Interp = builder.create()) {
+ *OutInterp = wrap(Interp);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+LLVMBool LLVMCreateJITCompiler(LLVMExecutionEngineRef *OutJIT,
+ LLVMModuleProviderRef MP,
+ unsigned OptLevel,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(unwrap(MP));
+ builder.setEngineKind(EngineKind::JIT)
+ .setErrorStr(&Error)
+ .setOptLevel((CodeGenOpt::Level)OptLevel);
+ if (ExecutionEngine *JIT = builder.create()) {
+ *OutJIT = wrap(JIT);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE) {
+ delete unwrap(EE);
+}
+
+void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE) {
+ unwrap(EE)->runStaticConstructorsDestructors(false);
+}
+
+void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) {
+ unwrap(EE)->runStaticConstructorsDestructors(true);
+}
+
+int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned ArgC, const char * const *ArgV,
+ const char * const *EnvP) {
+ std::vector<std::string> ArgVec;
+ for (unsigned I = 0; I != ArgC; ++I)
+ ArgVec.push_back(ArgV[I]);
+
+ return unwrap(EE)->runFunctionAsMain(unwrap<Function>(F), ArgVec, EnvP);
+}
+
+LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned NumArgs,
+ LLVMGenericValueRef *Args) {
+ std::vector<GenericValue> ArgVec;
+ ArgVec.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ ArgVec.push_back(*unwrap(Args[I]));
+
+ GenericValue *Result = new GenericValue();
+ *Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec);
+ return wrap(Result);
+}
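+
+// End-to-end sketch of the C API (assumes MP's module defines
+// "i32 @add(i32, i32)"; error handling elided, names illustrative):
+//
+//   LLVMExecutionEngineRef EE;
+//   char *Err = 0;
+//   LLVMCreateJITCompiler(&EE, MP, 2, &Err);
+//   LLVMGenericValueRef Args[2] = {
+//     LLVMCreateGenericValueOfInt(LLVMInt32Type(), 20, 0),
+//     LLVMCreateGenericValueOfInt(LLVMInt32Type(), 22, 0) };
+//   LLVMValueRef Add;
+//   LLVMFindFunction(EE, "add", &Add);
+//   LLVMGenericValueRef Res = LLVMRunFunction(EE, Add, 2, Args);
+//   unsigned long long Sum = LLVMGenericValueToInt(Res, 0);  // 42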
+
+void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F) {
+ unwrap(EE)->freeMachineCodeForFunction(unwrap<Function>(F));
+}
+
+void LLVMAddModuleProvider(LLVMExecutionEngineRef EE, LLVMModuleProviderRef MP){
+ unwrap(EE)->addModule(unwrap(MP));
+}
+
+LLVMBool LLVMRemoveModuleProvider(LLVMExecutionEngineRef EE,
+ LLVMModuleProviderRef MP,
+ LLVMModuleRef *OutMod, char **OutError) {
+ Module *M = unwrap(MP);
+ unwrap(EE)->removeModule(M);
+ *OutMod = wrap(M);
+ return 0;
+}
+
+LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
+ LLVMValueRef *OutFn) {
+ if (Function *F = unwrap(EE)->FindFunctionNamed(Name)) {
+ *OutFn = wrap(F);
+ return 0;
+ }
+ return 1;
+}
+
+LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
+ return wrap(unwrap(EE)->getTargetData());
+}
+
+void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
+ void* Addr) {
+ unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
+}
+
+void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
+ return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
+}
diff --git a/lib/ExecutionEngine/Interpreter/CMakeLists.txt b/lib/ExecutionEngine/Interpreter/CMakeLists.txt
new file mode 100644
index 0000000..dff97fa
--- /dev/null
+++ b/lib/ExecutionEngine/Interpreter/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_llvm_library(LLVMInterpreter
+ Execution.cpp
+ ExternalFunctions.cpp
+ Interpreter.cpp
+ )
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
new file mode 100644
index 0000000..73f5558
--- /dev/null
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -0,0 +1,1352 @@
+//===-- Execution.cpp - Implement code to simulate the program ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the actual instruction interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "interpreter"
+#include "Interpreter.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cmath>
+using namespace llvm;
+
+STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
+
+static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
+ cl::desc("make the interpreter print every volatile load and store"));
+
+//===----------------------------------------------------------------------===//
+// Various Helper Functions
+//===----------------------------------------------------------------------===//
+
+static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
+ SF.Values[V] = Val;
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
+ break
+
+static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(+, Float);
+ IMPLEMENT_BINARY_OPERATOR(+, Double);
+ default:
+ dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+}
+
+static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(-, Float);
+ IMPLEMENT_BINARY_OPERATOR(-, Double);
+ default:
+ dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+}
+
+static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(*, Float);
+ IMPLEMENT_BINARY_OPERATOR(*, Double);
+ default:
+ dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+}
+
+static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(/, Float);
+ IMPLEMENT_BINARY_OPERATOR(/, Double);
+ default:
+ dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+}
+
+static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::FloatTyID:
+ Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
+ break;
+ default:
+ dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+}
+
+#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
+ case Type::IntegerTyID: \
+ Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
+ break;
+
+// Handle pointers specially because they must be compared with only as much
+// width as the host has. We _do not_ want to be comparing 64 bit values when
+// running on a 32-bit host, otherwise the upper 32 bits might mess up
+// comparisons if they contain garbage.
+#define IMPLEMENT_POINTER_ICMP(OP) \
+ case Type::PointerTyID: \
+ Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
+ (void*)(intptr_t)Src2.PointerVal); \
+ break;
+
+static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(eq,Ty);
+ IMPLEMENT_POINTER_ICMP(==);
+ default:
+ dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ne,Ty);
+ IMPLEMENT_POINTER_ICMP(!=);
+ default:
+ dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ult,Ty);
+ IMPLEMENT_POINTER_ICMP(<);
+ default:
+ dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(slt,Ty);
+ IMPLEMENT_POINTER_ICMP(<);
+ default:
+ dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ugt,Ty);
+ IMPLEMENT_POINTER_ICMP(>);
+ default:
+ dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sgt,Ty);
+ IMPLEMENT_POINTER_ICMP(>);
+ default:
+ dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ule,Ty);
+ IMPLEMENT_POINTER_ICMP(<=);
+ default:
+ dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sle,Ty);
+ IMPLEMENT_POINTER_ICMP(<=);
+ default:
+ dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(uge,Ty);
+ IMPLEMENT_POINTER_ICMP(>=);
+ default:
+ dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sge,Ty);
+ IMPLEMENT_POINTER_ICMP(>=);
+ default:
+ dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+void Interpreter::visitICmpInst(ICmpInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ const Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ switch (I.getPredicate()) {
+ case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
+ default:
+ dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
+ llvm_unreachable(0);
+ }
+
+ SetValue(&I, R, SF);
+}
+
+#define IMPLEMENT_FCMP(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
+ break
+
+static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(==, Float);
+ IMPLEMENT_FCMP(==, Double);
+ default:
+ dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(!=, Float);
+ IMPLEMENT_FCMP(!=, Double);
+
+ default:
+ dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(<=, Float);
+ IMPLEMENT_FCMP(<=, Double);
+ default:
+ dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(>=, Float);
+ IMPLEMENT_FCMP(>=, Double);
+ default:
+ dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(<, Float);
+ IMPLEMENT_FCMP(<, Double);
+ default:
+ dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(>, Float);
+ IMPLEMENT_FCMP(>, Double);
+ default:
+ dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+ return Dest;
+}
+
+#define IMPLEMENT_UNORDERED(TY, X,Y) \
+ if (TY->isFloatTy()) { \
+ if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ } \
+ } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ }
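+// The X != X comparison above is the standard self-inequality test for NaN.
+// Unordered predicates are satisfied whenever either operand is NaN, so, for
+// example, executeFCMP_ULT(NaN, 1.0) below yields true while the ordered
+// executeFCMP_OLT(NaN, 1.0) yields false.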
+
+
+static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ return executeFCMP_OEQ(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ return executeFCMP_ONE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ return executeFCMP_OLE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ return executeFCMP_OGE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ return executeFCMP_OLT(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ return executeFCMP_OGT(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ if (Ty->isFloatTy())
+ Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
+ Src2.FloatVal == Src2.FloatVal));
+ else
+ Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
+ Src2.DoubleVal == Src2.DoubleVal));
+ return Dest;
+}
+
+static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
+ const Type *Ty) {
+ GenericValue Dest;
+ if (Ty->isFloatTy())
+ Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
+ Src2.FloatVal != Src2.FloatVal));
+ else
+ Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
+ Src2.DoubleVal != Src2.DoubleVal));
+ return Dest;
+}
+
+void Interpreter::visitFCmpInst(FCmpInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ const Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ switch (I.getPredicate()) {
+ case FCmpInst::FCMP_FALSE: R.IntVal = APInt(1,false); break;
+ case FCmpInst::FCMP_TRUE: R.IntVal = APInt(1,true); break;
+ case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
+ default:
+ dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
+ llvm_unreachable(0);
+ }
+
+ SetValue(&I, R, SF);
+}
+
+static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
+ GenericValue Src2, const Type *Ty) {
+ GenericValue Result;
+ switch (predicate) {
+ case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
+ case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_FALSE: {
+ GenericValue Result;
+ Result.IntVal = APInt(1, false);
+ return Result;
+ }
+ case FCmpInst::FCMP_TRUE: {
+ GenericValue Result;
+ Result.IntVal = APInt(1, true);
+ return Result;
+ }
+ default:
+ dbgs() << "Unhandled Cmp predicate\n";
+ llvm_unreachable(0);
+ }
+}
+
+void Interpreter::visitBinaryOperator(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ const Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ switch (I.getOpcode()) {
+ case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
+ case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
+ case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
+ case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
+ case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
+ case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
+ case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
+ case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
+ case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
+ case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
+ case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
+ case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
+ case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
+ case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
+ default:
+ dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+ llvm_unreachable(0);
+ }
+
+ SetValue(&I, R, SF);
+}
+
+static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
+ GenericValue Src3) {
+ return Src1.IntVal == 0 ? Src3 : Src2;
+}
+
+void Interpreter::visitSelectInst(SelectInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue R = executeSelectInst(Src1, Src2, Src3);
+ SetValue(&I, R, SF);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Terminator Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::exitCalled(GenericValue GV) {
+ // runAtExitHandlers() assumes there are no stack frames, but
+ // if exit() was called, then it had a stack frame. Blow away
+ // the stack before interpreting atexit handlers.
+ ECStack.clear();
+ runAtExitHandlers();
+ exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
+}
+
+/// Pop the last stack frame off of ECStack and then copy the result
+/// back into the result variable if we are not returning void. The
+/// result variable may be the ExitValue, or the Value of the calling
+/// CallInst if there was a previous stack frame. This method may
+/// invalidate any ECStack iterators you have. This method also takes
+/// care of switching to the normal destination BB, if we are returning
+/// from an invoke.
+///
+void Interpreter::popStackAndReturnValueToCaller(const Type *RetTy,
+ GenericValue Result) {
+ // Pop the current stack frame.
+ ECStack.pop_back();
+
+ if (ECStack.empty()) { // Finished main. Put result into exit code...
+    if (RetTy && RetTy->isInteger()) {  // Integer return type?
+ ExitValue = Result; // Capture the exit value of the program
+ } else {
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ }
+ } else {
+ // If we have a previous stack frame, and we have a previous call,
+ // fill in the return value...
+ ExecutionContext &CallingSF = ECStack.back();
+ if (Instruction *I = CallingSF.Caller.getInstruction()) {
+ // Save result...
+ if (!CallingSF.Caller.getType()->isVoidTy())
+ SetValue(I, Result, CallingSF);
+ if (InvokeInst *II = dyn_cast<InvokeInst> (I))
+ SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
+ CallingSF.Caller = CallSite(); // We returned from the call...
+ }
+ }
+}
+
+void Interpreter::visitReturnInst(ReturnInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ const Type *RetTy = Type::getVoidTy(I.getContext());
+ GenericValue Result;
+
+ // Save away the return value... (if we are not 'ret void')
+ if (I.getNumOperands()) {
+ RetTy = I.getReturnValue()->getType();
+ Result = getOperandValue(I.getReturnValue(), SF);
+ }
+
+ popStackAndReturnValueToCaller(RetTy, Result);
+}
+
+void Interpreter::visitUnwindInst(UnwindInst &I) {
+ // Unwind stack
+ Instruction *Inst;
+ do {
+ ECStack.pop_back();
+ if (ECStack.empty())
+ llvm_report_error("Empty stack during unwind!");
+ Inst = ECStack.back().Caller.getInstruction();
+ } while (!(Inst && isa<InvokeInst>(Inst)));
+
+ // Return from invoke
+ ExecutionContext &InvokingSF = ECStack.back();
+ InvokingSF.Caller = CallSite();
+
+ // Go to exceptional destination BB of invoke instruction
+ SwitchToNewBasicBlock(cast<InvokeInst>(Inst)->getUnwindDest(), InvokingSF);
+}
+
+void Interpreter::visitUnreachableInst(UnreachableInst &I) {
+ llvm_report_error("Program executed an 'unreachable' instruction!");
+}
+
+void Interpreter::visitBranchInst(BranchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ BasicBlock *Dest;
+
+ Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
+ if (!I.isUnconditional()) {
+ Value *Cond = I.getCondition();
+ if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
+ Dest = I.getSuccessor(1);
+ }
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+void Interpreter::visitSwitchInst(SwitchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue CondVal = getOperandValue(I.getOperand(0), SF);
+ const Type *ElTy = I.getOperand(0)->getType();
+
+ // Check to see if any of the cases match...
+ BasicBlock *Dest = 0;
+ for (unsigned i = 2, e = I.getNumOperands(); i != e; i += 2)
+ if (executeICMP_EQ(CondVal, getOperandValue(I.getOperand(i), SF), ElTy)
+ .IntVal != 0) {
+ Dest = cast<BasicBlock>(I.getOperand(i+1));
+ break;
+ }
+
+ if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
+ SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
+}
+
+
+// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
+// This function handles the actual updating of block and instruction iterators
+// as well as execution of all of the PHI nodes in the destination block.
+//
+// This method does this because all of the PHI nodes must be executed
+// atomically, reading their inputs before any of the results are updated. Not
+// doing this can cause problems if the PHI nodes depend on other PHI nodes for
+// their inputs. If the input PHI node is updated before it is read, incorrect
+// results can happen. Thus we use a two phase approach.
+//
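+// A sketch of the hazard, using hypothetical IR: if a destination block swaps
+// two values,
+//   %a = phi i32 [ %b, %pred ], ...
+//   %b = phi i32 [ %a, %pred ], ...
+// then updating %a before reading the incoming value of %b would feed %b the
+// new value of %a rather than the old one. Reading all inputs first (phase
+// one) and only then writing the results (phase two) preserves the swap.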
+void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
+ BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
+ SF.CurBB = Dest; // Update CurBB to branch destination
+ SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
+
+ if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
+
+ // Loop over all of the PHI nodes in the current block, reading their inputs.
+ std::vector<GenericValue> ResultValues;
+
+ for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
+ // Search for the value corresponding to this previous bb...
+ int i = PN->getBasicBlockIndex(PrevBB);
+ assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
+ Value *IncomingValue = PN->getIncomingValue(i);
+
+ // Save the incoming value for this PHI node...
+ ResultValues.push_back(getOperandValue(IncomingValue, SF));
+ }
+
+ // Now loop over all of the PHI nodes setting their values...
+ SF.CurInst = SF.CurBB->begin();
+ for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
+ PHINode *PN = cast<PHINode>(SF.CurInst);
+ SetValue(PN, ResultValues[i], SF);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Memory Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::visitAllocaInst(AllocaInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ const Type *Ty = I.getType()->getElementType(); // Type to be allocated
+
+ // Get the number of elements being allocated by the array...
+ unsigned NumElements =
+ getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
+
+ unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty);
+
+ // Avoid malloc-ing zero bytes, use max()...
+ unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
+
+ // Allocate enough memory to hold the type...
+ void *Memory = malloc(MemToAlloc);
+
+ DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
+ << NumElements << " (Total: " << MemToAlloc << ") at "
+ << uintptr_t(Memory) << '\n');
+
+ GenericValue Result = PTOGV(Memory);
+ assert(Result.PointerVal != 0 && "Null pointer returned by malloc!");
+ SetValue(&I, Result, SF);
+
+ if (I.getOpcode() == Instruction::Alloca)
+ ECStack.back().Allocas.add(Memory);
+}
+
+// executeGEPOperation - The workhorse for getelementptr.
+//
+GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E,
+ ExecutionContext &SF) {
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "Cannot getElementOffset of a nonpointer type!");
+
+ uint64_t Total = 0;
+
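+  // An illustrative sketch (hypothetical IR): for %p of type {i32, double}*,
+  //   getelementptr {i32, double}* %p, i64 1, i32 1
+  // the first (sequential) index adds 1 * getTypeAllocSize({i32, double}) and
+  // the second (struct) index adds the StructLayout offset of field 1; Total
+  // accumulates the resulting byte offset.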
+ for (; I != E; ++I) {
+ if (const StructType *STy = dyn_cast<StructType>(*I)) {
+ const StructLayout *SLO = TD.getStructLayout(STy);
+
+ const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
+ unsigned Index = unsigned(CPU->getZExtValue());
+
+ Total += SLO->getElementOffset(Index);
+ } else {
+ const SequentialType *ST = cast<SequentialType>(*I);
+ // Get the index number for the array... which must be long type...
+ GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
+
+ int64_t Idx;
+ unsigned BitWidth =
+ cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
+ if (BitWidth == 32)
+ Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
+ else {
+ assert(BitWidth == 64 && "Invalid index type for getelementptr");
+ Idx = (int64_t)IdxGV.IntVal.getZExtValue();
+ }
+ Total += TD.getTypeAllocSize(ST->getElementType())*Idx;
+ }
+ }
+
+ GenericValue Result;
+ Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
+ DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
+ return Result;
+}
+
+void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeGEPOperation(I.getPointerOperand(),
+ gep_type_begin(I), gep_type_end(I), SF), SF);
+}
+
+void Interpreter::visitLoadInst(LoadInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
+ GenericValue Result;
+ LoadValueFromMemory(Result, Ptr, I.getType());
+ SetValue(&I, Result, SF);
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile load " << I;
+}
+
+void Interpreter::visitStoreInst(StoreInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Val = getOperandValue(I.getOperand(0), SF);
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
+ I.getOperand(0)->getType());
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile store: " << I;
+}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::visitCallSite(CallSite CS) {
+ ExecutionContext &SF = ECStack.back();
+
+ // Check to see if this is an intrinsic function call...
+ Function *F = CS.getCalledFunction();
+ if (F && F->isDeclaration())
+ switch (F->getIntrinsicID()) {
+ case Intrinsic::not_intrinsic:
+ break;
+ case Intrinsic::vastart: { // va_start
+ GenericValue ArgIndex;
+ ArgIndex.UIntPairVal.first = ECStack.size() - 1;
+ ArgIndex.UIntPairVal.second = 0;
+ SetValue(CS.getInstruction(), ArgIndex, SF);
+ return;
+ }
+ case Intrinsic::vaend: // va_end is a noop for the interpreter
+ return;
+ case Intrinsic::vacopy: // va_copy: dest = src
+ SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
+ return;
+ default:
+ // If it is an unknown intrinsic function, use the intrinsic lowering
+ // class to transform it into hopefully tasty LLVM code.
+ //
+ BasicBlock::iterator me(CS.getInstruction());
+ BasicBlock *Parent = CS.getInstruction()->getParent();
+ bool atBegin(Parent->begin() == me);
+ if (!atBegin)
+ --me;
+ IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
+
+ // Restore the CurInst pointer to the first instruction newly inserted, if
+ // any.
+ if (atBegin) {
+ SF.CurInst = Parent->begin();
+ } else {
+ SF.CurInst = me;
+ ++SF.CurInst;
+ }
+ return;
+ }
+
+
+ SF.Caller = CS;
+ std::vector<GenericValue> ArgVals;
+ const unsigned NumArgs = SF.Caller.arg_size();
+ ArgVals.reserve(NumArgs);
+ uint16_t pNum = 1;
+ for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
+ e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
+ Value *V = *i;
+ ArgVals.push_back(getOperandValue(V, SF));
+ }
+
+ // To handle indirect calls, we must get the pointer value from the argument
+ // and treat it as a function pointer.
+ GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
+ callFunction((Function*)GVTOP(SRC), ArgVals);
+}
+
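+// Shift amounts greater than or equal to the operand's bit width give an
+// undefined result in LLVM IR; the three shift visitors below all make the
+// arbitrary but safe choice of returning the first operand unchanged in that
+// case, rather than performing an out-of-range host shift.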
+void Interpreter::visitShl(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth())
+ Dest.IntVal = Src1.IntVal.shl(Src2.IntVal.getZExtValue());
+ else
+ Dest.IntVal = Src1.IntVal;
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitLShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth())
+ Dest.IntVal = Src1.IntVal.lshr(Src2.IntVal.getZExtValue());
+ else
+ Dest.IntVal = Src1.IntVal;
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitAShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth())
+ Dest.IntVal = Src1.IntVal.ashr(Src2.IntVal.getZExtValue());
+ else
+ Dest.IntVal = Src1.IntVal;
+
+ SetValue(&I, Dest, SF);
+}
+
+GenericValue Interpreter::executeTruncInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ const IntegerType *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.trunc(DBitWidth);
+ return Dest;
+}
+
+GenericValue Interpreter::executeSExtInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ const IntegerType *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.sext(DBitWidth);
+ return Dest;
+}
+
+GenericValue Interpreter::executeZExtInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ const IntegerType *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.zext(DBitWidth);
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
+ "Invalid FPTrunc instruction");
+ Dest.FloatVal = (float) Src.DoubleVal;
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
+ "Invalid FPTrunc instruction");
+ Dest.DoubleVal = (double) Src.FloatVal;
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ const Type *SrcTy = SrcVal->getType();
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(SrcTy->isFloatingPoint() && "Invalid FPToUI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ const Type *SrcTy = SrcVal->getType();
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(SrcTy->isFloatingPoint() && "Invalid FPToSI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ return Dest;
+}
+
+GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(DstTy->isFloatingPoint() && "Invalid UIToFP instruction");
+
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
+ else
+ Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
+ return Dest;
+}
+
+GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(DstTy->isFloatingPoint() && "Invalid SIToFP instruction");
+
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
+ else
+ Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
+ return Dest;
+}
+
+GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(isa<PointerType>(SrcVal->getType()) && "Invalid PtrToInt instruction");
+
+ Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
+ return Dest;
+}
+
+GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(isa<PointerType>(DstTy) && "Invalid PtrToInt instruction");
+
+ uint32_t PtrSize = TD.getPointerSizeInBits();
+ if (PtrSize != Src.IntVal.getBitWidth())
+ Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
+
+ Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
+ return Dest;
+}
+
+GenericValue Interpreter::executeBitCastInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF) {
+
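+  // BitCast reinterprets bits without numeric conversion: for example,
+  // bitcasting the float 1.0f to i32 yields 0x3F800000, whereas an FPToUI
+  // conversion of 1.0f would yield 1.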
+ const Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ if (isa<PointerType>(DstTy)) {
+ assert(isa<PointerType>(SrcTy) && "Invalid BitCast");
+ Dest.PointerVal = Src.PointerVal;
+ } else if (DstTy->isInteger()) {
+ if (SrcTy->isFloatTy()) {
+ Dest.IntVal.zext(sizeof(Src.FloatVal) * CHAR_BIT);
+ Dest.IntVal.floatToBits(Src.FloatVal);
+ } else if (SrcTy->isDoubleTy()) {
+ Dest.IntVal.zext(sizeof(Src.DoubleVal) * CHAR_BIT);
+ Dest.IntVal.doubleToBits(Src.DoubleVal);
+ } else if (SrcTy->isInteger()) {
+ Dest.IntVal = Src.IntVal;
+ } else
+ llvm_unreachable("Invalid BitCast");
+ } else if (DstTy->isFloatTy()) {
+ if (SrcTy->isInteger())
+ Dest.FloatVal = Src.IntVal.bitsToFloat();
+ else
+ Dest.FloatVal = Src.FloatVal;
+ } else if (DstTy->isDoubleTy()) {
+ if (SrcTy->isInteger())
+ Dest.DoubleVal = Src.IntVal.bitsToDouble();
+ else
+ Dest.DoubleVal = Src.DoubleVal;
+ } else
+ llvm_unreachable("Invalid Bitcast");
+
+ return Dest;
+}
+
+void Interpreter::visitTruncInst(TruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSExtInst(SExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitZExtInst(ZExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPTruncInst(FPTruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPExtInst(FPExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitUIToFPInst(UIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSIToFPInst(SIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToUIInst(FPToUIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToSIInst(FPToSIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitBitCastInst(BitCastInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+#define IMPLEMENT_VAARG(TY) \
+ case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
+
+void Interpreter::visitVAArgInst(VAArgInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ // Get the incoming valist parameter. LLI treats the valist as a
+  // (ec-stack-depth, var-arg-index) pair.
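+  // For example, a valist of (0, 2) denotes ECStack[0].VarArgs[2], i.e. the
+  // third variadic argument passed to the bottom-most stack frame.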
+ GenericValue VAList = getOperandValue(I.getOperand(0), SF);
+ GenericValue Dest;
+ GenericValue Src = ECStack[VAList.UIntPairVal.first]
+ .VarArgs[VAList.UIntPairVal.second];
+ const Type *Ty = I.getType();
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID: Dest.IntVal = Src.IntVal;
+ IMPLEMENT_VAARG(Pointer);
+ IMPLEMENT_VAARG(Float);
+ IMPLEMENT_VAARG(Double);
+ default:
+ dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
+ llvm_unreachable(0);
+ }
+
+ // Set the Value of this Instruction.
+ SetValue(&I, Dest, SF);
+
+ // Move the pointer to the next vararg.
+ ++VAList.UIntPairVal.second;
+}
+
+GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
+ ExecutionContext &SF) {
+ switch (CE->getOpcode()) {
+ case Instruction::Trunc:
+ return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::ZExt:
+ return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SExt:
+ return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPTrunc:
+ return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPExt:
+ return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::UIToFP:
+ return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SIToFP:
+ return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToUI:
+ return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToSI:
+ return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::PtrToInt:
+ return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::IntToPtr:
+ return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::BitCast:
+ return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::GetElementPtr:
+ return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
+ gep_type_end(CE), SF);
+ case Instruction::FCmp:
+ case Instruction::ICmp:
+ return executeCmpInst(CE->getPredicate(),
+ getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ CE->getOperand(0)->getType());
+ case Instruction::Select:
+ return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ getOperandValue(CE->getOperand(2), SF));
+ default :
+ break;
+ }
+
+ // The cases below here require a GenericValue parameter for the result
+ // so we initialize one, compute it and then return it.
+ GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
+ GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
+ GenericValue Dest;
+  const Type *Ty = CE->getOperand(0)->getType();
+ switch (CE->getOpcode()) {
+ case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
+ case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
+ case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
+ case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
+ case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
+ case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
+ case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
+ case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
+ case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
+ case Instruction::Shl:
+ Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::LShr:
+ Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::AShr:
+ Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
+ break;
+ default:
+ dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
+ llvm_unreachable(0);
+ return GenericValue();
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ return getConstantExprValue(CE, SF);
+ } else if (Constant *CPV = dyn_cast<Constant>(V)) {
+ return getConstantValue(CPV);
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ return PTOGV(getPointerToGlobal(GV));
+ } else {
+ return SF.Values[V];
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Dispatch and Execution Code
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// callFunction - Execute the specified function...
+//
+void Interpreter::callFunction(Function *F,
+ const std::vector<GenericValue> &ArgVals) {
+ assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 ||
+ ECStack.back().Caller.arg_size() == ArgVals.size()) &&
+ "Incorrect number of arguments passed into function call!");
+ // Make a new stack frame... and fill it in.
+ ECStack.push_back(ExecutionContext());
+ ExecutionContext &StackFrame = ECStack.back();
+ StackFrame.CurFunction = F;
+
+ // Special handling for external functions.
+ if (F->isDeclaration()) {
+ GenericValue Result = callExternalFunction (F, ArgVals);
+ // Simulate a 'ret' instruction of the appropriate type.
+ popStackAndReturnValueToCaller (F->getReturnType (), Result);
+ return;
+ }
+
+ // Get pointers to first LLVM BB & Instruction in function.
+ StackFrame.CurBB = F->begin();
+ StackFrame.CurInst = StackFrame.CurBB->begin();
+
+ // Run through the function arguments and initialize their values...
+ assert((ArgVals.size() == F->arg_size() ||
+ (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
+ "Invalid number of values passed to function invocation!");
+
+ // Handle non-varargs arguments...
+ unsigned i = 0;
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ AI != E; ++AI, ++i)
+ SetValue(AI, ArgVals[i], StackFrame);
+
+ // Handle varargs arguments...
+ StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
+}
+
+
+void Interpreter::run() {
+ while (!ECStack.empty()) {
+ // Interpret a single instruction & increment the "PC".
+ ExecutionContext &SF = ECStack.back(); // Current stack frame
+ Instruction &I = *SF.CurInst++; // Increment before execute
+
+ // Track the number of dynamic instructions executed.
+ ++NumDynamicInsts;
+
+ DEBUG(dbgs() << "About to interpret: " << I);
+ visit(I); // Dispatch to one of the visit* methods...
+#if 0
+ // This is not safe, as visiting the instruction could lower it and free I.
+DEBUG(
+ if (!isa<CallInst>(I) && !isa<InvokeInst>(I) &&
+ I.getType() != Type::VoidTy) {
+ dbgs() << " --> ";
+ const GenericValue &Val = SF.Values[&I];
+ switch (I.getType()->getTypeID()) {
+ default: llvm_unreachable("Invalid GenericValue Type");
+ case Type::VoidTyID: dbgs() << "void"; break;
+ case Type::FloatTyID: dbgs() << "float " << Val.FloatVal; break;
+ case Type::DoubleTyID: dbgs() << "double " << Val.DoubleVal; break;
+ case Type::PointerTyID: dbgs() << "void* " << intptr_t(Val.PointerVal);
+ break;
+ case Type::IntegerTyID:
+ dbgs() << "i" << Val.IntVal.getBitWidth() << " "
+ << Val.IntVal.toStringUnsigned(10)
+ << " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n";
+ break;
+ }
+ });
+#endif
+ }
+}
diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
new file mode 100644
index 0000000..7b061d3
--- /dev/null
+++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -0,0 +1,490 @@
+//===-- ExternalFunctions.cpp - Implement External Functions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains both the code for invoking "external" functions and the
+// code that implements "exported" external functions.
+//
+// There are currently two mechanisms for handling external functions in the
+// Interpreter. The first is to implement lle_* wrapper functions that are
+// specific to well-known library functions which manually translate the
+// arguments from GenericValues and make the call. If such a wrapper does
+// not exist, and libffi is available, then the Interpreter will attempt to
+// invoke the function using libffi, after finding its address.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Config/config.h" // Detect libffi
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/System/Mutex.h"
+#include <csignal>
+#include <cstdio>
+#include <map>
+#include <cmath>
+#include <cstring>
+
+#ifdef HAVE_FFI_CALL
+#ifdef HAVE_FFI_H
+#include <ffi.h>
+#define USE_LIBFFI
+#elif HAVE_FFI_FFI_H
+#include <ffi/ffi.h>
+#define USE_LIBFFI
+#endif
+#endif
+
+using namespace llvm;
+
+static ManagedStatic<sys::Mutex> FunctionsLock;
+
+typedef GenericValue (*ExFunc)(const FunctionType *,
+ const std::vector<GenericValue> &);
+static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
+static std::map<std::string, ExFunc> FuncNames;
+
+#ifdef USE_LIBFFI
+typedef void (*RawFunc)();
+static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;
+#endif
+
+static Interpreter *TheInterpreter;
+
+static char getTypeID(const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return 'V';
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 1: return 'o';
+ case 8: return 'B';
+ case 16: return 'S';
+ case 32: return 'I';
+ case 64: return 'L';
+ default: return 'N';
+ }
+ case Type::FloatTyID: return 'F';
+ case Type::DoubleTyID: return 'D';
+ case Type::PointerTyID: return 'P';
+ case Type::FunctionTyID:return 'M';
+ case Type::StructTyID: return 'T';
+ case Type::ArrayTyID: return 'A';
+ case Type::OpaqueTyID: return 'O';
+ default: return 'U';
+ }
+}
+
+// Try to find the address of an external function, given a Function object.
+// Note that the interpreter does not know how to assemble a real call in the
+// general case (that is the JIT's job), so it assumes that all external
+// functions have the same, fairly generic signature. The typical examples of
+// such functions are the "lle_X_" ones.
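+// As a sketch (assuming a declaration of int putchar(int)): the composite
+// name tried first would be "lle_II_putchar" -- 'I' for the i32 return type
+// followed by 'I' for the i32 parameter -- with "lle_X_putchar" as the
+// generic fallback.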
+static ExFunc lookupFunction(const Function *F) {
+ // Function not found, look it up... start by figuring out what the
+ // composite function name should be.
+ std::string ExtName = "lle_";
+ const FunctionType *FT = F->getFunctionType();
+ for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i)
+ ExtName += getTypeID(FT->getContainedType(i));
+ ExtName + "_" + F->getNameStr();
+
+ sys::ScopedLock Writer(*FunctionsLock);
+ ExFunc FnPtr = FuncNames[ExtName];
+ if (FnPtr == 0)
+ FnPtr = FuncNames["lle_X_" + F->getNameStr()];
+ if (FnPtr == 0) // Try calling a generic function... if it exists...
+ FnPtr = (ExFunc)(intptr_t)
+ sys::DynamicLibrary::SearchForAddressOfSymbol("lle_X_"+F->getNameStr());
+ if (FnPtr != 0)
+ ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
+ return FnPtr;
+}
+
+#ifdef USE_LIBFFI
+static ffi_type *ffiTypeFor(const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return &ffi_type_void;
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 8: return &ffi_type_sint8;
+ case 16: return &ffi_type_sint16;
+ case 32: return &ffi_type_sint32;
+ case 64: return &ffi_type_sint64;
+    }
+    break; // Unsupported integer widths fall to the unsupported-type error.
+ case Type::FloatTyID: return &ffi_type_float;
+ case Type::DoubleTyID: return &ffi_type_double;
+ case Type::PointerTyID: return &ffi_type_pointer;
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ llvm_report_error("Type could not be mapped for use with libffi.");
+ return NULL;
+}
+
+static void *ffiValueFor(const Type *Ty, const GenericValue &AV,
+ void *ArgDataPtr) {
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 8: {
+ int8_t *I8Ptr = (int8_t *) ArgDataPtr;
+ *I8Ptr = (int8_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 16: {
+ int16_t *I16Ptr = (int16_t *) ArgDataPtr;
+ *I16Ptr = (int16_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 32: {
+ int32_t *I32Ptr = (int32_t *) ArgDataPtr;
+ *I32Ptr = (int32_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 64: {
+ int64_t *I64Ptr = (int64_t *) ArgDataPtr;
+ *I64Ptr = (int64_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+    }
+    break; // Unsupported integer widths fall to the unsupported-type error.
+ case Type::FloatTyID: {
+ float *FloatPtr = (float *) ArgDataPtr;
+ *FloatPtr = AV.FloatVal;
+ return ArgDataPtr;
+ }
+ case Type::DoubleTyID: {
+ double *DoublePtr = (double *) ArgDataPtr;
+ *DoublePtr = AV.DoubleVal;
+ return ArgDataPtr;
+ }
+ case Type::PointerTyID: {
+ void **PtrPtr = (void **) ArgDataPtr;
+ *PtrPtr = GVTOP(AV);
+ return ArgDataPtr;
+ }
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ llvm_report_error("Type value could not be mapped for use with libffi.");
+ return NULL;
+}
+
+static bool ffiInvoke(RawFunc Fn, Function *F,
+ const std::vector<GenericValue> &ArgVals,
+ const TargetData *TD, GenericValue &Result) {
+ ffi_cif cif;
+ const FunctionType *FTy = F->getFunctionType();
+ const unsigned NumArgs = F->arg_size();
+
+ // TODO: We don't have type information about the remaining arguments, because
+ // this information is never passed into ExecutionEngine::runFunction().
+ if (ArgVals.size() > NumArgs && F->isVarArg()) {
+ llvm_report_error("Calling external var arg function '" + F->getName()
+ + "' is not supported by the Interpreter.");
+ }
+
+ unsigned ArgBytes = 0;
+
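+  // Marshalling happens in two passes: the first collects an ffi_type per
+  // parameter and totals the raw storage needed; the second copies each
+  // GenericValue into that storage and records a pointer to it for ffi_call.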
+ std::vector<ffi_type*> args(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ const Type *ArgTy = FTy->getParamType(ArgNo);
+ args[ArgNo] = ffiTypeFor(ArgTy);
+ ArgBytes += TD->getTypeStoreSize(ArgTy);
+ }
+
+ SmallVector<uint8_t, 128> ArgData;
+ ArgData.resize(ArgBytes);
+ uint8_t *ArgDataPtr = ArgData.data();
+ SmallVector<void*, 16> values(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ const Type *ArgTy = FTy->getParamType(ArgNo);
+ values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
+ ArgDataPtr += TD->getTypeStoreSize(ArgTy);
+ }
+
+ const Type *RetTy = FTy->getReturnType();
+ ffi_type *rtype = ffiTypeFor(RetTy);
+
+ if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, &args[0]) == FFI_OK) {
+ SmallVector<uint8_t, 128> ret;
+ if (RetTy->getTypeID() != Type::VoidTyID)
+ ret.resize(TD->getTypeStoreSize(RetTy));
+ ffi_call(&cif, Fn, ret.data(), values.data());
+ switch (RetTy->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(RetTy)->getBitWidth()) {
+ case 8: Result.IntVal = APInt(8 , *(int8_t *) ret.data()); break;
+ case 16: Result.IntVal = APInt(16, *(int16_t*) ret.data()); break;
+ case 32: Result.IntVal = APInt(32, *(int32_t*) ret.data()); break;
+ case 64: Result.IntVal = APInt(64, *(int64_t*) ret.data()); break;
+ }
+ break;
+ case Type::FloatTyID: Result.FloatVal = *(float *) ret.data(); break;
+ case Type::DoubleTyID: Result.DoubleVal = *(double*) ret.data(); break;
+ case Type::PointerTyID: Result.PointerVal = *(void **) ret.data(); break;
+ default: break;
+ }
+ return true;
+ }
+
+ return false;
+}
+#endif // USE_LIBFFI
+
+GenericValue Interpreter::callExternalFunction(Function *F,
+ const std::vector<GenericValue> &ArgVals) {
+ TheInterpreter = this;
+
+ FunctionsLock->acquire();
+
+ // Do a lookup to see if the function is in our cache... this should just be a
+ // deferred annotation!
+ std::map<const Function *, ExFunc>::iterator FI = ExportedFunctions->find(F);
+ if (ExFunc Fn = (FI == ExportedFunctions->end()) ? lookupFunction(F)
+ : FI->second) {
+ FunctionsLock->release();
+ return Fn(F->getFunctionType(), ArgVals);
+ }
+
+#ifdef USE_LIBFFI
+ std::map<const Function *, RawFunc>::iterator RF = RawFunctions->find(F);
+ RawFunc RawFn;
+ if (RF == RawFunctions->end()) {
+ RawFn = (RawFunc)(intptr_t)
+ sys::DynamicLibrary::SearchForAddressOfSymbol(F->getName());
+ if (RawFn != 0)
+ RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later
+ } else {
+ RawFn = RF->second;
+ }
+
+ FunctionsLock->release();
+
+ GenericValue Result;
+ if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getTargetData(), Result))
+ return Result;
+#endif // USE_LIBFFI
+
+ if (F->getName() == "__main")
+ errs() << "Tried to execute an unknown external function: "
+ << F->getType()->getDescription() << " __main\n";
+ else
+ llvm_report_error("Tried to execute an unknown external function: " +
+ F->getType()->getDescription() + " " +F->getName());
+#ifndef USE_LIBFFI
+ errs() << "Recompiling LLVM with --enable-libffi might help.\n";
+#endif
+ return GenericValue();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Functions "exported" to the running application...
+//
+
+// Visual Studio warns about returning GenericValue in extern "C" linkage
+#ifdef _MSC_VER
+ #pragma warning(disable : 4190)
+#endif
+
+extern "C" { // Don't add C++ manglings to llvm mangling :)
+
+// void atexit(Function*)
+GenericValue lle_X_atexit(const FunctionType *FT,
+ const std::vector<GenericValue> &Args) {
+ assert(Args.size() == 1);
+ TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+// void exit(int)
+GenericValue lle_X_exit(const FunctionType *FT,
+ const std::vector<GenericValue> &Args) {
+ TheInterpreter->exitCalled(Args[0]);
+ return GenericValue();
+}
+
+// void abort(void)
+GenericValue lle_X_abort(const FunctionType *FT,
+ const std::vector<GenericValue> &Args) {
+ //FIXME: should we report or raise here?
+ //llvm_report_error("Interpreted program raised SIGABRT");
+ raise (SIGABRT);
+ return GenericValue();
+}
+
+// int sprintf(char *, const char *, ...) - a very rough implementation to make
+// output useful.
+GenericValue lle_X_sprintf(const FunctionType *FT,
+ const std::vector<GenericValue> &Args) {
+ char *OutputBuffer = (char *)GVTOP(Args[0]);
+ const char *FmtStr = (const char *)GVTOP(Args[1]);
+ unsigned ArgNo = 2;
+
+  // sprintf should return # chars printed.  This is completely incorrect, but
+ // close enough for now.
+ GenericValue GV;
+ GV.IntVal = APInt(32, strlen(FmtStr));
+ while (1) {
+ switch (*FmtStr) {
+ case 0: return GV; // Null terminator...
+ default: // Normal nonspecial character
+ sprintf(OutputBuffer++, "%c", *FmtStr++);
+ break;
+ case '\\': { // Handle escape codes
+ sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr+1));
+ FmtStr += 2; OutputBuffer += 2;
+ break;
+ }
+ case '%': { // Handle format specifiers
+ char FmtBuf[100] = "", Buffer[1000] = "";
+ char *FB = FmtBuf;
+ *FB++ = *FmtStr++;
+ char Last = *FB++ = *FmtStr++;
+ unsigned HowLong = 0;
+ while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' &&
+ Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' &&
+ Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' &&
+ Last != 'p' && Last != 's' && Last != '%') {
+ if (Last == 'l' || Last == 'L') HowLong++; // Keep track of l's
+ Last = *FB++ = *FmtStr++;
+ }
+ *FB = 0;
+
+ switch (Last) {
+ case '%':
+ memcpy(Buffer, "%", 2); break;
+ case 'c':
+ sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'd': case 'i':
+ case 'u': case 'o':
+ case 'x': case 'X':
+ if (HowLong >= 1) {
+ if (HowLong == 1 &&
+ TheInterpreter->getTargetData()->getPointerSizeInBits() == 64 &&
+ sizeof(long) < sizeof(int64_t)) {
+ // Make sure we use %lld with a 64 bit argument because we might be
+ // compiling LLI on a 32 bit compiler.
+ unsigned Size = strlen(FmtBuf);
+ FmtBuf[Size] = FmtBuf[Size-1];
+ FmtBuf[Size+1] = 0;
+ FmtBuf[Size-1] = 'l';
+ }
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
+ } else
+ sprintf(Buffer, FmtBuf,uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'e': case 'E': case 'g': case 'G': case 'f':
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break;
+ case 'p':
+ sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
+ case 's':
+ sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
+ default:
+ errs() << "<unknown printf code '" << *FmtStr << "'!>";
+ ArgNo++; break;
+ }
+ size_t Len = strlen(Buffer);
+ memcpy(OutputBuffer, Buffer, Len + 1);
+ OutputBuffer += Len;
+ }
+ break;
+ }
+ }
+ return GV;
+}
+
+// int printf(const char *, ...) - a very rough implementation to make output
+// useful.
+GenericValue lle_X_printf(const FunctionType *FT,
+ const std::vector<GenericValue> &Args) {
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV((void*)&Buffer[0]));
+ NewArgs.insert(NewArgs.end(), Args.begin(), Args.end());
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+ outs() << Buffer;
+ return GV;
+}
+
+// int sscanf(const char *str, const char *format, ...);
+GenericValue lle_X_sscanf(const FunctionType *FT,
+ const std::vector<GenericValue> &args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ GenericValue GV;
+ GV.IntVal = APInt(32, sscanf(Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int scanf(const char *format, ...);
+GenericValue lle_X_scanf(const FunctionType *FT,
+ const std::vector<GenericValue> &args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ GenericValue GV;
+ GV.IntVal = APInt(32, scanf( Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
+// output useful.
+GenericValue lle_X_fprintf(const FunctionType *FT,
+ const std::vector<GenericValue> &Args) {
+ assert(Args.size() >= 2);
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV(Buffer));
+ NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end());
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+
+ fputs(Buffer, (FILE *) GVTOP(Args[0]));
+ return GV;
+}
+
+} // End extern "C"
+
+// Done with externals; turn the warning back on
+#ifdef _MSC_VER
+ #pragma warning(default: 4190)
+#endif
+
+
+void Interpreter::initializeExternalFunctions() {
+ sys::ScopedLock Writer(*FunctionsLock);
+ FuncNames["lle_X_atexit"] = lle_X_atexit;
+ FuncNames["lle_X_exit"] = lle_X_exit;
+ FuncNames["lle_X_abort"] = lle_X_abort;
+
+ FuncNames["lle_X_printf"] = lle_X_printf;
+ FuncNames["lle_X_sprintf"] = lle_X_sprintf;
+ FuncNames["lle_X_sscanf"] = lle_X_sscanf;
+ FuncNames["lle_X_scanf"] = lle_X_scanf;
+ FuncNames["lle_X_fprintf"] = lle_X_fprintf;
+}
diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/lib/ExecutionEngine/Interpreter/Interpreter.cpp
new file mode 100644
index 0000000..43e3453
--- /dev/null
+++ b/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -0,0 +1,98 @@
+//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top-level functionality for the LLVM interpreter.
+// This interpreter is designed to be a very simple, portable, inefficient
+// interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include <cstring>
+using namespace llvm;
+
+namespace {
+
+static struct RegisterInterp {
+ RegisterInterp() { Interpreter::Register(); }
+} InterpRegistrator;
+
+}
+
+extern "C" void LLVMLinkInInterpreter() { }
+
+/// create - Create a new interpreter object. This can never fail.
+///
+ExecutionEngine *Interpreter::create(Module *M, std::string* ErrStr) {
+ // Tell this Module to materialize everything and release the GVMaterializer.
+ if (M->MaterializeAllPermanently(ErrStr))
+ // We got an error, just return 0
+ return 0;
+
+ return new Interpreter(M);
+}
+
+//===----------------------------------------------------------------------===//
+// Interpreter ctor - Initialize stuff
+//
+Interpreter::Interpreter(Module *M)
+ : ExecutionEngine(M), TD(M) {
+
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ setTargetData(&TD);
+ // Initialize the "backend"
+ initializeExecutionEngine();
+ initializeExternalFunctions();
+ emitGlobals();
+
+ IL = new IntrinsicLowering(TD);
+}
+
+Interpreter::~Interpreter() {
+ delete IL;
+}
+
+void Interpreter::runAtExitHandlers () {
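+  // callFunction only pushes a stack frame; run() is what actually executes
+  // it, so each handler is pushed and run to completion before the next one
+  // is popped off the list.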
+ while (!AtExitHandlers.empty()) {
+ callFunction(AtExitHandlers.back(), std::vector<GenericValue>());
+ AtExitHandlers.pop_back();
+ run();
+ }
+}
+
+/// run - Start execution with the specified function and arguments.
+///
+GenericValue
+Interpreter::runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) {
+  assert(F && "Function *F was null at entry to runFunction()");
+
+ // Try extra hard not to pass extra args to a function that isn't
+ // expecting them. C programmers frequently bend the rules and
+ // declare main() with fewer parameters than it actually gets
+ // passed, and the interpreter barfs if you pass a function more
+ // parameters than it is declared to take. This does not attempt to
+ // take into account gratuitous differences in declared types,
+ // though.
+ std::vector<GenericValue> ActualArgs;
+ const unsigned ArgCount = F->getFunctionType()->getNumParams();
+ for (unsigned i = 0; i < ArgCount; ++i)
+ ActualArgs.push_back(ArgValues[i]);
+
+ // Set up the function call.
+ callFunction(F, ActualArgs);
+
+ // Start executing the function.
+ run();
+
+ return ExitValue;
+}
diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.h b/lib/ExecutionEngine/Interpreter/Interpreter.h
new file mode 100644
index 0000000..bc4200b
--- /dev/null
+++ b/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -0,0 +1,244 @@
+//===-- Interpreter.h ------------------------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines the interpreter structure
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLI_INTERPRETER_H
+#define LLI_INTERPRETER_H
+
+#include "llvm/Function.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/System/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/InstVisitor.h"
+#include "llvm/Support/raw_ostream.h"
+namespace llvm {
+
+class IntrinsicLowering;
+struct FunctionInfo;
+template<typename T> class generic_gep_type_iterator;
+class ConstantExpr;
+typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator;
+
+
+// AllocaHolder - Object to track all of the blocks of memory allocated by
+// alloca. When the function returns, this object is popped off the execution
+// stack, which causes the dtor to be run, which frees all the alloca'd memory.
+//
+class AllocaHolder {
+ friend class AllocaHolderHandle;
+ std::vector<void*> Allocations;
+ unsigned RefCnt;
+public:
+ AllocaHolder() : RefCnt(0) {}
+ void add(void *mem) { Allocations.push_back(mem); }
+ ~AllocaHolder() {
+ for (unsigned i = 0; i < Allocations.size(); ++i)
+ free(Allocations[i]);
+ }
+};
+
+// AllocaHolderHandle gives AllocaHolder value semantics so we can stick it into
+// a vector...
+//
+class AllocaHolderHandle {
+ AllocaHolder *H;
+public:
+ AllocaHolderHandle() : H(new AllocaHolder()) { H->RefCnt++; }
+ AllocaHolderHandle(const AllocaHolderHandle &AH) : H(AH.H) { H->RefCnt++; }
+ ~AllocaHolderHandle() { if (--H->RefCnt == 0) delete H; }
+
+ void add(void *mem) { H->add(mem); }
+};
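+
+// For example (illustrative): copies of a handle share one AllocaHolder, so
+// the alloca'd memory is freed exactly once, when the last copy dies:
+//
+//   AllocaHolderHandle A;          // RefCnt == 1
+//   {
+//     AllocaHolderHandle B = A;    // RefCnt == 2
+//     B.add(malloc(16));           // tracked by the shared holder
+//   }                              // RefCnt back to 1; memory still live
+//   // when A goes away, ~AllocaHolder frees the 16 bytes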
+
+typedef std::vector<GenericValue> ValuePlaneTy;
+
+// ExecutionContext struct - This struct represents one stack frame currently
+// executing.
+//
+struct ExecutionContext {
+ Function *CurFunction;// The currently executing function
+ BasicBlock *CurBB; // The currently executing BB
+ BasicBlock::iterator CurInst; // The next instruction to execute
+ std::map<Value *, GenericValue> Values; // LLVM values used in this invocation
+ std::vector<GenericValue> VarArgs; // Values passed through an ellipsis
+ CallSite Caller; // Holds the call that called subframes.
+ // NULL if main func or debugger invoked fn
+ AllocaHolderHandle Allocas; // Track memory allocated by alloca
+};
+
+// Interpreter - This class represents the entirety of the interpreter.
+//
+class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
+ GenericValue ExitValue; // The return value of the called function
+ TargetData TD;
+ IntrinsicLowering *IL;
+
+ // The runtime stack of executing code. The top of the stack is the current
+ // function record.
+ std::vector<ExecutionContext> ECStack;
+
+ // AtExitHandlers - List of functions to call when the program exits,
+ // registered with the atexit() library function.
+ std::vector<Function*> AtExitHandlers;
+
+public:
+ explicit Interpreter(Module *M);
+ ~Interpreter();
+
+ /// runAtExitHandlers - Run any functions registered by the program's calls to
+ /// atexit(3), which we intercept and store in AtExitHandlers.
+ ///
+ void runAtExitHandlers();
+
+ static void Register() {
+ InterpCtor = create;
+ }
+
+  /// create - Create an interpreter ExecutionEngine. Returns null if the
+  /// module cannot be materialized.
+  ///
+ static ExecutionEngine *create(Module *M, std::string *ErrorStr = 0);
+
+ /// run - Start execution with the specified function and arguments.
+ ///
+ virtual GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues);
+
+ /// recompileAndRelinkFunction - For the interpreter, functions are always
+ /// up-to-date.
+ ///
+ virtual void *recompileAndRelinkFunction(Function *F) {
+ return getPointerToFunction(F);
+ }
+
+ /// freeMachineCodeForFunction - The interpreter does not generate any code.
+ ///
+ void freeMachineCodeForFunction(Function *F) { }
+
+ // Methods used to execute code:
+ // Place a call on the stack
+ void callFunction(Function *F, const std::vector<GenericValue> &ArgVals);
+ void run(); // Execute instructions until nothing left to do
+
+ // Opcode Implementations
+ void visitReturnInst(ReturnInst &I);
+ void visitBranchInst(BranchInst &I);
+ void visitSwitchInst(SwitchInst &I);
+ void visitIndirectBrInst(IndirectBrInst &I);
+
+ void visitBinaryOperator(BinaryOperator &I);
+ void visitICmpInst(ICmpInst &I);
+ void visitFCmpInst(FCmpInst &I);
+ void visitAllocaInst(AllocaInst &I);
+ void visitLoadInst(LoadInst &I);
+ void visitStoreInst(StoreInst &I);
+ void visitGetElementPtrInst(GetElementPtrInst &I);
+ void visitPHINode(PHINode &PN) {
+ llvm_unreachable("PHI nodes already handled!");
+ }
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitSelectInst(SelectInst &I);
+
+
+ void visitCallSite(CallSite CS);
+ void visitCallInst(CallInst &I) { visitCallSite (CallSite (&I)); }
+ void visitInvokeInst(InvokeInst &I) { visitCallSite (CallSite (&I)); }
+ void visitUnwindInst(UnwindInst &I);
+ void visitUnreachableInst(UnreachableInst &I);
+
+ void visitShl(BinaryOperator &I);
+ void visitLShr(BinaryOperator &I);
+ void visitAShr(BinaryOperator &I);
+
+ void visitVAArgInst(VAArgInst &I);
+ void visitInstruction(Instruction &I) {
+ errs() << I;
+ llvm_unreachable("Instruction not interpretable yet!");
+ }
+
+ GenericValue callExternalFunction(Function *F,
+ const std::vector<GenericValue> &ArgVals);
+ void exitCalled(GenericValue GV);
+
+ void addAtExitHandler(Function *F) {
+ AtExitHandlers.push_back(F);
+ }
+
+ GenericValue *getFirstVarArg () {
+ return &(ECStack.back ().VarArgs[0]);
+ }
+
+ //FIXME: private:
+public:
+ GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E, ExecutionContext &SF);
+
+private: // Helper functions
+ // SwitchToNewBasicBlock - Start execution in a new basic block and run any
+ // PHI nodes in the top of the block. This is used for intraprocedural
+ // control flow.
+ //
+ void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
+
+ void *getPointerToFunction(Function *F) { return (void*)F; }
+ void *getPointerToBasicBlock(BasicBlock *BB) { return (void*)BB; }
+
+ void initializeExecutionEngine() { }
+ void initializeExternalFunctions();
+ GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
+ GenericValue getOperandValue(Value *V, ExecutionContext &SF);
+ GenericValue executeTruncInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSExtInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeZExtInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPTruncInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPExtInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToUIInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToSIInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeUIToFPInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSIToFPInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executePtrToIntInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeBitCastInst(Value *SrcVal, const Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
+ const Type *Ty, ExecutionContext &SF);
+ void popStackAndReturnValueToCaller(const Type *RetTy, GenericValue Result);
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/lib/ExecutionEngine/Interpreter/Makefile b/lib/ExecutionEngine/Interpreter/Makefile
new file mode 100644
index 0000000..5def136
--- /dev/null
+++ b/lib/ExecutionEngine/Interpreter/Makefile
@@ -0,0 +1,13 @@
+##===- lib/ExecutionEngine/Interpreter/Makefile ------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMInterpreter
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/ExecutionEngine/JIT/CMakeLists.txt b/lib/ExecutionEngine/JIT/CMakeLists.txt
new file mode 100644
index 0000000..42020d6
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/CMakeLists.txt
@@ -0,0 +1,13 @@
+# TODO: Support other architectures. See Makefile.
+add_definitions(-DENABLE_X86_JIT)
+
+add_llvm_library(LLVMJIT
+ Intercept.cpp
+ JIT.cpp
+ JITDebugRegisterer.cpp
+ JITDwarfEmitter.cpp
+ JITEmitter.cpp
+ JITMemoryManager.cpp
+ OProfileJITEventListener.cpp
+ TargetSelect.cpp
+ )
diff --git a/lib/ExecutionEngine/JIT/Intercept.cpp b/lib/ExecutionEngine/JIT/Intercept.cpp
new file mode 100644
index 0000000..c00b60a
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/Intercept.cpp
@@ -0,0 +1,149 @@
+//===-- Intercept.cpp - System function interception routines -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// If a function call occurs to an external function, the JIT is designed to use
+// the dynamic loader interface to find a function to call. This is useful for
+// calling system calls and library functions that are not available in LLVM.
+// Some system calls, however, need to be handled specially. For this reason,
+// we intercept some of them here and use our own stubs to handle them.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Config/config.h"
+using namespace llvm;
+
+// AtExitHandlers - List of functions to call when the program exits,
+// registered with the atexit() library function.
+static std::vector<void (*)()> AtExitHandlers;
+
+/// runAtExitHandlers - Run any functions registered by the program's
+/// calls to atexit(3), which we intercept and store in
+/// AtExitHandlers.
+///
+static void runAtExitHandlers() {
+ while (!AtExitHandlers.empty()) {
+ void (*Fn)() = AtExitHandlers.back();
+ AtExitHandlers.pop_back();
+ Fn();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Function stubs that are invoked instead of certain library calls
+//===----------------------------------------------------------------------===//
+
+// Force the following functions to be linked in to anything that uses the
+// JIT. This is a hack designed to work around the all-too-clever Glibc
+// strategy of making these functions work differently when inlined vs. when
+// not inlined, and hiding their real definitions in a separate archive file
+// that the dynamic linker can't see. For more info, search for
+// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+#if defined(__linux__)
+#if defined(HAVE_SYS_STAT_H)
+#include <sys/stat.h>
+#endif
+#include <fcntl.h>
+/* stat functions are redirected to __xstat with a version number. On x86-64
+ * linking with libc_nonshared.a and -Wl,--export-dynamic doesn't make 'stat'
+ * available as an exported symbol, so we have to add it explicitly.
+ */
+namespace {
+class StatSymbols {
+public:
+ StatSymbols() {
+ sys::DynamicLibrary::AddSymbol("stat", (void*)(intptr_t)stat);
+ sys::DynamicLibrary::AddSymbol("fstat", (void*)(intptr_t)fstat);
+ sys::DynamicLibrary::AddSymbol("lstat", (void*)(intptr_t)lstat);
+ sys::DynamicLibrary::AddSymbol("stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1open64", (void*)(intptr_t)open64);
+ sys::DynamicLibrary::AddSymbol("\x1lseek64", (void*)(intptr_t)lseek64);
+ sys::DynamicLibrary::AddSymbol("fstat64", (void*)(intptr_t)fstat64);
+ sys::DynamicLibrary::AddSymbol("lstat64", (void*)(intptr_t)lstat64);
+ sys::DynamicLibrary::AddSymbol("atexit", (void*)(intptr_t)atexit);
+ sys::DynamicLibrary::AddSymbol("mknod", (void*)(intptr_t)mknod);
+ }
+};
+}
+static StatSymbols initStatSymbols;
+#endif // __linux__
+
+// jit_exit - Used to intercept the "exit" library call.
+static void jit_exit(int Status) {
+ runAtExitHandlers(); // Run atexit handlers...
+ exit(Status);
+}
+
+// jit_atexit - Used to intercept the "atexit" library call.
+static int jit_atexit(void (*Fn)()) {
+ AtExitHandlers.push_back(Fn); // Take note of atexit handler...
+ return 0; // Always successful
+}
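+
+// Taken together, these stubs give JITed code the usual atexit semantics; for
+// example (illustrative), a program compiled to LLVM IR and run under the JIT
+// that does
+//
+//   extern "C" void byebye() { /* ... */ }
+//   int main() { atexit(byebye); exit(0); }
+//
+// has its atexit call resolved to jit_atexit (recording byebye) and its exit
+// call resolved to jit_exit, which runs byebye before terminating.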
+
+//===----------------------------------------------------------------------===//
+//
+/// getPointerToNamedFunction - This method returns the address of the specified
+/// function by using the dynamic loader interface. As such it is only useful
+/// for resolving library symbols, not code generated symbols.
+///
+void *JIT::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure) {
+ if (!isSymbolSearchingDisabled()) {
+ // Check to see if this is one of the functions we want to intercept. Note,
+ // we cast to intptr_t here to silence a -pedantic warning that complains
+ // about casting a function pointer to a normal pointer.
+ if (Name == "exit") return (void*)(intptr_t)&jit_exit;
+ if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
+
+ const char *NameStr = Name.c_str();
+    // If this is an asm specifier, skip the sentinel.
+ if (NameStr[0] == 1) ++NameStr;
+
+ // If it's an external function, look it up in the process image...
+ void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+ if (Ptr) return Ptr;
+
+    // If it wasn't found, and the name has an asm specifier and begins with
+    // an underscore ('_') character, try again without the underscore.
+ if (Name[0] == 1 && NameStr[0] == '_') {
+ Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
+ if (Ptr) return Ptr;
+ }
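+    // For example (illustrative): a name emitted as "\x01_fread" carries an
+    // asm specifier: the sentinel is skipped and "_fread" is looked up; if
+    // that fails, the leading underscore is stripped and "fread" is tried.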
+
+ // Darwin/PPC adds $LDBLStub suffixes to various symbols like printf. These
+ // are references to hidden visibility symbols that dlsym cannot resolve.
+ // If we have one of these, strip off $LDBLStub and try again.
+#if defined(__APPLE__) && defined(__ppc__)
+ if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
+ memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
+ // First try turning $LDBLStub into $LDBL128. If that fails, strip it off.
+ // This mirrors logic in libSystemStubs.a.
+ std::string Prefix = std::string(Name.begin(), Name.end()-9);
+ if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128", false))
+ return Ptr;
+ if (void *Ptr = getPointerToNamedFunction(Prefix, false))
+ return Ptr;
+ }
+#endif
+ }
+
+ /// If a LazyFunctionCreator is installed, use it to get/create the function.
+ if (LazyFunctionCreator)
+ if (void *RP = LazyFunctionCreator(Name))
+ return RP;
+
+ if (AbortOnFailure) {
+ llvm_report_error("Program used external function '"+Name+
+ "' which could not be resolved!");
+ }
+ return 0;
+}
diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp
new file mode 100644
index 0000000..616a66e
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JIT.cpp
@@ -0,0 +1,741 @@
+//===-- JIT.cpp - LLVM Just in Time Compiler ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tool implements a just-in-time compiler for LLVM, allowing direct
+// execution of LLVM bitcode in an efficient manner.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Instructions.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/CodeGen/MachineCodeInfo.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetJITInfo.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Config/config.h"
+
+using namespace llvm;
+
+#ifdef __APPLE__
+// Apple gcc defaults to -fuse-cxa-atexit (i.e. calls __cxa_atexit instead
+// of atexit). It passes the address of linker generated symbol __dso_handle
+// to the function.
+// This configuration change happened at version 5330.
+# include <AvailabilityMacros.h>
+# if defined(MAC_OS_X_VERSION_10_4) && \
+ ((MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_4) || \
+ (MAC_OS_X_VERSION_MIN_REQUIRED == MAC_OS_X_VERSION_10_4 && \
+ __APPLE_CC__ >= 5330))
+# ifndef HAVE___DSO_HANDLE
+# define HAVE___DSO_HANDLE 1
+# endif
+# endif
+#endif
+
+#if HAVE___DSO_HANDLE
+extern void *__dso_handle __attribute__ ((__visibility__ ("hidden")));
+#endif
+
+namespace {
+
+static struct RegisterJIT {
+ RegisterJIT() { JIT::Register(); }
+} JITRegistrator;
+
+}
+
+extern "C" void LLVMLinkInJIT() {
+}
+
+
+#if defined(__GNUC__) && !defined(__ARM_EABI__)
+
+// libgcc defines the __register_frame function to dynamically register new
+// dwarf frames for exception handling. This functionality is not portable
+// across compilers and is only provided by GCC. We use the __register_frame
+// function here so that code generated by the JIT cooperates with the unwinding
+// runtime of libgcc. When JITting with exception handling enabled, LLVM
+// generates dwarf frames and registers them with libgcc via __register_frame.
+//
+// The __register_frame function works with Linux.
+//
+// Unfortunately, this functionality seems to have been added to libgcc after
+// darwin's unwinding library was written. The darwin code overwrites the
+// value updated by __register_frame with a value fetched through "keymgr".
+// "keymgr" is an obsolete facility, which should be rewritten some day.
+// In the meantime, since "keymgr" is in every libgcc shipped with apple-gcc,
+// we need a workaround in LLVM that uses "keymgr" to dynamically modify the
+// values of an opaque key, used by libgcc to find dwarf tables.
+
+extern "C" void __register_frame(void*);
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED <= 1050
+# define USE_KEYMGR 1
+#else
+# define USE_KEYMGR 0
+#endif
+
+#if USE_KEYMGR
+
+namespace {
+
+// LibgccObject - This is the structure defined in libgcc. There is no #include
+// provided for this structure, so we also define it here. libgcc calls it
+// "struct object". The structure is undocumented in libgcc.
+struct LibgccObject {
+ void *unused1;
+ void *unused2;
+ void *unused3;
+
+ /// frame - Pointer to the exception table.
+ void *frame;
+
+ /// encoding - The encoding of the object?
+ union {
+ struct {
+ unsigned long sorted : 1;
+ unsigned long from_array : 1;
+ unsigned long mixed_encoding : 1;
+ unsigned long encoding : 8;
+ unsigned long count : 21;
+ } b;
+ size_t i;
+ } encoding;
+
+ /// fde_end - libgcc defines this field only if some macro is defined. We
+  /// include this field even if it may not be there, to make libgcc happy.
+ char *fde_end;
+
+ /// next - At least we know it's a chained list!
+ struct LibgccObject *next;
+};
+
+// "kemgr" stuff. Apparently, all frame tables are stored there.
+extern "C" void _keymgr_set_and_unlock_processwide_ptr(int, void *);
+extern "C" void *_keymgr_get_and_lock_processwide_ptr(int);
+#define KEYMGR_GCC3_DW2_OBJ_LIST 302 /* Dwarf2 object list */
+
+/// LibgccObjectInfo - libgcc defines this struct as km_object_info. It
+/// probably contains all dwarf tables that are loaded.
+struct LibgccObjectInfo {
+
+ /// seenObjects - LibgccObjects already parsed by the unwinding runtime.
+ ///
+ struct LibgccObject* seenObjects;
+
+ /// unseenObjects - LibgccObjects not parsed yet by the unwinding runtime.
+ ///
+ struct LibgccObject* unseenObjects;
+
+ unsigned unused[2];
+};
+
+/// DarwinRegisterFrame - Since __register_frame does not work with darwin's
+/// libgcc, we provide our own function, which "tricks" libgcc by modifying the
+/// "Dwarf2 object list" key.
+void DarwinRegisterFrame(void* FrameBegin) {
+ // Get the key.
+ LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
+ _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
+ assert(LOI && "This should be preallocated by the runtime");
+
+ // Allocate a new LibgccObject to represent this frame. Deallocation of this
+ // object may be impossible: since darwin code in libgcc was written after
+ // the ability to dynamically register frames, things may crash if we
+ // deallocate it.
+ struct LibgccObject* ob = (struct LibgccObject*)
+ malloc(sizeof(struct LibgccObject));
+
+  // Mimic libgcc's choice of values for these fields.
+ ob->unused1 = (void *)-1;
+ ob->unused2 = 0;
+ ob->unused3 = 0;
+ ob->frame = FrameBegin;
+ ob->encoding.i = 0;
+ ob->encoding.b.encoding = llvm::dwarf::DW_EH_PE_omit;
+
+ // Put the info on both places, as libgcc uses the first or the second
+ // field. Note that we rely on having two pointers here. If fde_end was a
+ // char, things would get complicated.
+ ob->fde_end = (char*)LOI->unseenObjects;
+ ob->next = LOI->unseenObjects;
+
+ // Update the key's unseenObjects list.
+ LOI->unseenObjects = ob;
+
+ // Finally update the "key". Apparently, libgcc requires it.
+ _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST,
+ LOI);
+
+}
+
+}
+#endif // USE_KEYMGR
+#endif // __GNUC__
+
+/// createJIT - This is the factory method for creating a JIT for the current
+/// machine, it does not fall back to the interpreter. This takes ownership
+/// of the module.
+ExecutionEngine *ExecutionEngine::createJIT(Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM) {
+ // Use the defaults for extra parameters. Users can use EngineBuilder to
+ // set them.
+ StringRef MArch = "";
+ StringRef MCPU = "";
+ SmallVector<std::string, 1> MAttrs;
+ return JIT::createJIT(M, ErrorStr, JMM, OptLevel, GVsWithCode, CMM,
+ MArch, MCPU, MAttrs);
+}
+
+ExecutionEngine *JIT::createJIT(Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs) {
+ // Make sure we can resolve symbols in the program as well. The zero arg
+ // to the function tells DynamicLibrary to load the program, not a library.
+ if (sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr))
+ return 0;
+
+ // Pick a target either via -march or by guessing the native arch.
+ TargetMachine *TM = JIT::selectTarget(M, MArch, MCPU, MAttrs, ErrorStr);
+ if (!TM || (ErrorStr && ErrorStr->length() > 0)) return 0;
+ TM->setCodeModel(CMM);
+
+  // If the target supports JIT code generation, create the JIT.
+ if (TargetJITInfo *TJ = TM->getJITInfo()) {
+ return new JIT(M, *TM, *TJ, JMM, OptLevel, GVsWithCode);
+ } else {
+ if (ErrorStr)
+ *ErrorStr = "target does not support JIT code generation";
+ return 0;
+ }
+}
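+
+// Illustrative client-side usage (hypothetical driver code, assuming the
+// EngineBuilder interface declared in ExecutionEngine.h):
+//
+//   std::string Err;
+//   ExecutionEngine *EE = EngineBuilder(M).setEngineKind(EngineKind::JIT)
+//                                         .setErrorStr(&Err)
+//                                         .create();
+//   if (!EE) errs() << "failed to create JIT: " << Err << "\n";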
+
+JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
+ JITMemoryManager *JMM, CodeGenOpt::Level OptLevel, bool GVsWithCode)
+ : ExecutionEngine(M), TM(tm), TJI(tji), AllocateGVsWithCode(GVsWithCode) {
+ setTargetData(TM.getTargetData());
+
+ jitstate = new JITState(M);
+
+ // Initialize JCE
+ JCE = createEmitter(*this, JMM, TM);
+
+ // Add target data
+ MutexGuard locked(lock);
+ FunctionPassManager &PM = jitstate->getPM(locked);
+ PM.add(new TargetData(*TM.getTargetData()));
+
+ // Turn the machine code intermediate representation into bytes in memory that
+ // may be executed.
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, OptLevel)) {
+ llvm_report_error("Target does not support machine code emission!");
+ }
+
+ // Register routine for informing unwinding runtime about new EH frames
+#if defined(__GNUC__) && !defined(__ARM_EABI__)
+#if USE_KEYMGR
+ struct LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
+ _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
+
+ // The key is created on demand, and libgcc creates it the first time an
+ // exception occurs. Since we need the key to register frames, we create
+ // it now.
+ if (!LOI)
+ LOI = (LibgccObjectInfo*)calloc(sizeof(struct LibgccObjectInfo), 1);
+ _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, LOI);
+ InstallExceptionTableRegister(DarwinRegisterFrame);
+#else
+ InstallExceptionTableRegister(__register_frame);
+#endif // USE_KEYMGR
+#endif // __GNUC__
+
+ // Initialize passes.
+ PM.doInitialization();
+}
+
+JIT::~JIT() {
+ delete jitstate;
+ delete JCE;
+ delete &TM;
+}
+
+/// addModule - Add a new Module to the JIT. If we previously removed the last
+/// Module, we need to re-initialize jitstate with a valid Module.
+void JIT::addModule(Module *M) {
+ MutexGuard locked(lock);
+
+ if (Modules.empty()) {
+ assert(!jitstate && "jitstate should be NULL if Modules vector is empty!");
+
+ jitstate = new JITState(M);
+
+ FunctionPassManager &PM = jitstate->getPM(locked);
+ PM.add(new TargetData(*TM.getTargetData()));
+
+ // Turn the machine code intermediate representation into bytes in memory
+ // that may be executed.
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+ llvm_report_error("Target does not support machine code emission!");
+ }
+
+ // Initialize passes.
+ PM.doInitialization();
+ }
+
+ ExecutionEngine::addModule(M);
+}
+
+/// removeModule - If we are removing the last Module, invalidate the jitstate
+/// since the PassManager it contains references a released Module.
+bool JIT::removeModule(Module *M) {
+ bool result = ExecutionEngine::removeModule(M);
+
+ MutexGuard locked(lock);
+
+ if (jitstate->getModule() == M) {
+ delete jitstate;
+ jitstate = 0;
+ }
+
+ if (!jitstate && !Modules.empty()) {
+ jitstate = new JITState(Modules[0]);
+
+ FunctionPassManager &PM = jitstate->getPM(locked);
+ PM.add(new TargetData(*TM.getTargetData()));
+
+ // Turn the machine code intermediate representation into bytes in memory
+ // that may be executed.
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+ llvm_report_error("Target does not support machine code emission!");
+ }
+
+ // Initialize passes.
+ PM.doInitialization();
+ }
+ return result;
+}
+
+/// run - Start execution with the specified function and arguments.
+///
+GenericValue JIT::runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) {
+ assert(F && "Function *F was null at entry to run()");
+
+ void *FPtr = getPointerToFunction(F);
+ assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
+ const FunctionType *FTy = F->getFunctionType();
+ const Type *RetTy = FTy->getReturnType();
+
+ assert((FTy->getNumParams() == ArgValues.size() ||
+ (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
+ "Wrong number of arguments passed into function!");
+ assert(FTy->getNumParams() == ArgValues.size() &&
+ "This doesn't support passing arguments through varargs (yet)!");
+
+ // Handle some common cases first. These cases correspond to common `main'
+ // prototypes.
+ if (RetTy->isInteger(32) || RetTy->isVoidTy()) {
+ switch (ArgValues.size()) {
+ case 3:
+ if (FTy->getParamType(0)->isInteger(32) &&
+ isa<PointerType>(FTy->getParamType(1)) &&
+ isa<PointerType>(FTy->getParamType(2))) {
+ int (*PF)(int, char **, const char **) =
+ (int(*)(int, char **, const char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1]),
+ (const char **)GVTOP(ArgValues[2])));
+ return rv;
+ }
+ break;
+ case 2:
+ if (FTy->getParamType(0)->isInteger(32) &&
+ isa<PointerType>(FTy->getParamType(1))) {
+ int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1])));
+ return rv;
+ }
+ break;
+ case 1:
+ if (FTy->getNumParams() == 1 &&
+ FTy->getParamType(0)->isInteger(32)) {
+ GenericValue rv;
+ int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
+ return rv;
+ }
+ break;
+ }
+ }
+
+ // Handle cases where no arguments are passed first.
+ if (ArgValues.empty()) {
+ GenericValue rv;
+ switch (RetTy->getTypeID()) {
+ default: llvm_unreachable("Unknown return type for function call!");
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
+ if (BitWidth == 1)
+ rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 8)
+ rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 16)
+ rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 32)
+ rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 64)
+ rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
+ else
+ llvm_unreachable("Integer types > 64 bits not supported");
+ return rv;
+ }
+ case Type::VoidTyID:
+ rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+ return rv;
+ case Type::FloatTyID:
+ rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::DoubleTyID:
+ rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ llvm_unreachable("long double not supported yet");
+ return rv;
+ case Type::PointerTyID:
+ return PTOGV(((void*(*)())(intptr_t)FPtr)());
+ }
+ }
+
+ // Okay, this is not one of our quick and easy cases. Because we don't have a
+ // full FFI, we have to codegen a nullary stub function that just calls the
+ // function we are interested in, passing in constants for all of the
+ // arguments. Make this function and return.
+
+ // First, create the function.
+  FunctionType *STy = FunctionType::get(RetTy, false);
+ Function *Stub = Function::Create(STy, Function::InternalLinkage, "",
+ F->getParent());
+
+ // Insert a basic block.
+ BasicBlock *StubBB = BasicBlock::Create(F->getContext(), "", Stub);
+
+ // Convert all of the GenericValue arguments over to constants. Note that we
+ // currently don't support varargs.
+ SmallVector<Value*, 8> Args;
+ for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
+ Constant *C = 0;
+ const Type *ArgTy = FTy->getParamType(i);
+ const GenericValue &AV = ArgValues[i];
+ switch (ArgTy->getTypeID()) {
+ default: llvm_unreachable("Unknown argument type for function call!");
+ case Type::IntegerTyID:
+ C = ConstantInt::get(F->getContext(), AV.IntVal);
+ break;
+ case Type::FloatTyID:
+ C = ConstantFP::get(F->getContext(), APFloat(AV.FloatVal));
+ break;
+ case Type::DoubleTyID:
+ C = ConstantFP::get(F->getContext(), APFloat(AV.DoubleVal));
+ break;
+ case Type::PPC_FP128TyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ C = ConstantFP::get(F->getContext(), APFloat(AV.IntVal));
+ break;
+ case Type::PointerTyID:
+ void *ArgPtr = GVTOP(AV);
+ if (sizeof(void*) == 4)
+ C = ConstantInt::get(Type::getInt32Ty(F->getContext()),
+ (int)(intptr_t)ArgPtr);
+ else
+ C = ConstantInt::get(Type::getInt64Ty(F->getContext()),
+ (intptr_t)ArgPtr);
+ // Cast the integer to pointer
+ C = ConstantExpr::getIntToPtr(C, ArgTy);
+ break;
+ }
+ Args.push_back(C);
+ }
+
+ CallInst *TheCall = CallInst::Create(F, Args.begin(), Args.end(),
+ "", StubBB);
+ TheCall->setCallingConv(F->getCallingConv());
+ TheCall->setTailCall();
+ if (!TheCall->getType()->isVoidTy())
+ // Return result of the call.
+ ReturnInst::Create(F->getContext(), TheCall, StubBB);
+ else
+ ReturnInst::Create(F->getContext(), StubBB); // Just return void.
+
+ // Finally, return the value returned by our nullary stub function.
+ return runFunction(Stub, std::vector<GenericValue>());
+}
+
+void JIT::RegisterJITEventListener(JITEventListener *L) {
+ if (L == NULL)
+ return;
+ MutexGuard locked(lock);
+ EventListeners.push_back(L);
+}
+void JIT::UnregisterJITEventListener(JITEventListener *L) {
+ if (L == NULL)
+ return;
+ MutexGuard locked(lock);
+ std::vector<JITEventListener*>::reverse_iterator I=
+ std::find(EventListeners.rbegin(), EventListeners.rend(), L);
+ if (I != EventListeners.rend()) {
+ std::swap(*I, EventListeners.back());
+ EventListeners.pop_back();
+ }
+}
+void JIT::NotifyFunctionEmitted(
+ const Function &F,
+ void *Code, size_t Size,
+ const JITEvent_EmittedFunctionDetails &Details) {
+ MutexGuard locked(lock);
+ for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) {
+ EventListeners[I]->NotifyFunctionEmitted(F, Code, Size, Details);
+ }
+}
+
+void JIT::NotifyFreeingMachineCode(void *OldPtr) {
+ MutexGuard locked(lock);
+ for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) {
+ EventListeners[I]->NotifyFreeingMachineCode(OldPtr);
+ }
+}
+
+/// runJITOnFunction - Run the FunctionPassManager full of
+/// just-in-time compilation passes on F, hopefully filling in
+/// GlobalAddress[F] with the address of F's machine code.
+///
+void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
+ MutexGuard locked(lock);
+
+ class MCIListener : public JITEventListener {
+ MachineCodeInfo *const MCI;
+ public:
+ MCIListener(MachineCodeInfo *mci) : MCI(mci) {}
+ virtual void NotifyFunctionEmitted(const Function &,
+ void *Code, size_t Size,
+ const EmittedFunctionDetails &) {
+ MCI->setAddress(Code);
+ MCI->setSize(Size);
+ }
+ };
+ MCIListener MCIL(MCI);
+ if (MCI)
+ RegisterJITEventListener(&MCIL);
+
+ runJITOnFunctionUnlocked(F, locked);
+
+ if (MCI)
+ UnregisterJITEventListener(&MCIL);
+}
+
+void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
+ static bool isAlreadyCodeGenerating = false;
+ assert(!isAlreadyCodeGenerating && "Error: Recursive compilation detected!");
+
+ // JIT the function
+ isAlreadyCodeGenerating = true;
+ jitstate->getPM(locked).run(*F);
+ isAlreadyCodeGenerating = false;
+
+ // If the function referred to another function that had not yet been
+ // read from bitcode, and we are jitting non-lazily, emit it now.
+ while (!jitstate->getPendingFunctions(locked).empty()) {
+ Function *PF = jitstate->getPendingFunctions(locked).back();
+ jitstate->getPendingFunctions(locked).pop_back();
+
+ assert(!PF->hasAvailableExternallyLinkage() &&
+ "Externally-defined function should not be in pending list.");
+
+ // JIT the function
+ isAlreadyCodeGenerating = true;
+ jitstate->getPM(locked).run(*PF);
+ isAlreadyCodeGenerating = false;
+
+ // Now that the function has been jitted, ask the JITEmitter to rewrite
+ // the stub with real address of the function.
+ updateFunctionStub(PF);
+ }
+}
+
+/// getPointerToFunction - This method is used to get the address of the
+/// specified function, compiling it if necessary.
+///
+void *JIT::getPointerToFunction(Function *F) {
+
+ if (void *Addr = getPointerToGlobalIfAvailable(F))
+ return Addr; // Check if function already code gen'd
+
+ MutexGuard locked(lock);
+
+ // Now that this thread owns the lock, make sure we read in the function if it
+ // exists in this Module.
+ std::string ErrorMsg;
+ if (F->Materialize(&ErrorMsg)) {
+ llvm_report_error("Error reading function '" + F->getName()+
+ "' from bitcode file: " + ErrorMsg);
+ }
+
+ // ... and check if another thread has already code gen'd the function.
+ if (void *Addr = getPointerToGlobalIfAvailable(F))
+ return Addr;
+
+ if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
+ bool AbortOnFailure = !F->hasExternalWeakLinkage();
+ void *Addr = getPointerToNamedFunction(F->getName(), AbortOnFailure);
+ addGlobalMapping(F, Addr);
+ return Addr;
+ }
+
+ runJITOnFunctionUnlocked(F, locked);
+
+ void *Addr = getPointerToGlobalIfAvailable(F);
+ assert(Addr && "Code generation didn't add function to GlobalAddress table!");
+ return Addr;
+}
+
+/// getOrEmitGlobalVariable - Return the address of the specified global
+/// variable, possibly emitting it to memory if needed. This is used by the
+/// Emitter.
+void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
+ MutexGuard locked(lock);
+
+ void *Ptr = getPointerToGlobalIfAvailable(GV);
+ if (Ptr) return Ptr;
+
+ // If the global is external, just remember the address.
+ if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
+#if HAVE___DSO_HANDLE
+ if (GV->getName() == "__dso_handle")
+ return (void*)&__dso_handle;
+#endif
+ Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(GV->getName());
+ if (Ptr == 0) {
+ llvm_report_error("Could not resolve external global address: "
+ +GV->getName());
+ }
+ addGlobalMapping(GV, Ptr);
+ } else {
+ // If the global hasn't been emitted to memory yet, allocate space and
+ // emit it into memory.
+ Ptr = getMemoryForGV(GV);
+ addGlobalMapping(GV, Ptr);
+ EmitGlobalVariable(GV); // Initialize the variable.
+ }
+ return Ptr;
+}
+
+/// recompileAndRelinkFunction - This method is used to force a function
+/// which has already been compiled, to be compiled again, possibly
+/// after it has been modified. Then the entry to the old copy is overwritten
+/// with a branch to the new copy. If there was no old copy, this acts
+/// just like JIT::getPointerToFunction().
+///
+void *JIT::recompileAndRelinkFunction(Function *F) {
+ void *OldAddr = getPointerToGlobalIfAvailable(F);
+
+ // If it's not already compiled there is no reason to patch it up.
+ if (OldAddr == 0) { return getPointerToFunction(F); }
+
+ // Delete the old function mapping.
+ addGlobalMapping(F, 0);
+
+ // Recodegen the function
+ runJITOnFunction(F);
+
+ // Update state, forward the old function to the new function.
+ void *Addr = getPointerToGlobalIfAvailable(F);
+ assert(Addr && "Code generation didn't add function to GlobalAddress table!");
+ TJI.replaceMachineCodeForFunction(OldAddr, Addr);
+ return Addr;
+}
+
+/// getMemoryForGV - This method abstracts memory allocation of global
+/// variable so that the JIT can allocate thread local variables depending
+/// on the target.
+///
+char* JIT::getMemoryForGV(const GlobalVariable* GV) {
+ char *Ptr;
+
+  // GlobalVariables which are not "constant" will cause trouble in a server
+  // situation. They are returned in the same block of memory as code, which
+  // may not be writable.
+ if (isGVCompilationDisabled() && !GV->isConstant()) {
+ llvm_report_error("Compilation of non-internal GlobalValue is disabled!");
+ }
+
+ // Some applications require globals and code to live together, so they may
+ // be allocated into the same buffer, but in general globals are allocated
+ // through the memory manager which puts them near the code but not in the
+ // same buffer.
+ const Type *GlobalType = GV->getType()->getElementType();
+ size_t S = getTargetData()->getTypeAllocSize(GlobalType);
+ size_t A = getTargetData()->getPreferredAlignment(GV);
+ if (GV->isThreadLocal()) {
+ MutexGuard locked(lock);
+ Ptr = TJI.allocateThreadLocalMemory(S);
+ } else if (TJI.allocateSeparateGVMemory()) {
+ if (A <= 8) {
+ Ptr = (char*)malloc(S);
+ } else {
+ // Allocate S+A bytes of memory, then use an aligned pointer within that
+ // space.
+ Ptr = (char*)malloc(S+A);
+ unsigned MisAligned = ((intptr_t)Ptr & (A-1));
+ Ptr = Ptr + (MisAligned ? (A-MisAligned) : 0);
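+
+      // Worked example (illustrative numbers): with S == 24 and A == 32, a
+      // malloc result of 0x1008 gives MisAligned == 8, so Ptr advances by
+      // 32 - 8 == 24 bytes to 0x1020, which is 32-byte aligned and still
+      // leaves the requested 24 bytes within the 56-byte allocation.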
+ }
+ } else if (AllocateGVsWithCode) {
+ Ptr = (char*)JCE->allocateSpace(S, A);
+ } else {
+ Ptr = (char*)JCE->allocateGlobal(S, A);
+ }
+ return Ptr;
+}
+
+void JIT::addPendingFunction(Function *F) {
+ MutexGuard locked(lock);
+ jitstate->getPendingFunctions(locked).push_back(F);
+}
+
+
+JITEventListener::~JITEventListener() {}
diff --git a/lib/ExecutionEngine/JIT/JIT.h b/lib/ExecutionEngine/JIT/JIT.h
new file mode 100644
index 0000000..bb8f851
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JIT.h
@@ -0,0 +1,211 @@
+//===-- JIT.h - Class definition for the JIT --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the top-level JIT data structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef JIT_H
+#define JIT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/PassManager.h"
+#include "llvm/Support/ValueHandle.h"
+
+namespace llvm {
+
+class Function;
+struct JITEvent_EmittedFunctionDetails;
+class MachineCodeEmitter;
+class MachineCodeInfo;
+class TargetJITInfo;
+class TargetMachine;
+
+class JITState {
+private:
+ FunctionPassManager PM; // Passes to compile a function
+ Module *M; // Module used to create the PM
+
+ /// PendingFunctions - Functions which have not been code generated yet, but
+ /// were called from a function being code generated.
+ std::vector<AssertingVH<Function> > PendingFunctions;
+
+public:
+ explicit JITState(Module *M) : PM(M), M(M) {}
+
+ FunctionPassManager &getPM(const MutexGuard &L) {
+ return PM;
+ }
+
+ Module *getModule() const { return M; }
+ std::vector<AssertingVH<Function> > &getPendingFunctions(const MutexGuard &L){
+ return PendingFunctions;
+ }
+};
+
+
+class JIT : public ExecutionEngine {
+ TargetMachine &TM; // The current target we are compiling to
+ TargetJITInfo &TJI; // The JITInfo for the target we are compiling to
+ JITCodeEmitter *JCE; // JCE object
+ std::vector<JITEventListener*> EventListeners;
+
+ /// AllocateGVsWithCode - Some applications require that global variables and
+ /// code be allocated into the same region of memory, in which case this flag
+ /// should be set to true. Doing so breaks freeMachineCodeForFunction.
+ bool AllocateGVsWithCode;
+
+ JITState *jitstate;
+
+ JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
+ JITMemoryManager *JMM, CodeGenOpt::Level OptLevel,
+ bool AllocateGVsWithCode);
+public:
+ ~JIT();
+
+ static void Register() {
+ JITCtor = createJIT;
+ }
+
+ /// getJITInfo - Return the target JIT information structure.
+ ///
+ TargetJITInfo &getJITInfo() const { return TJI; }
+
+  /// create - Create and return a new JIT compiler if there is one available
+ /// for the current target. Otherwise, return null.
+ ///
+ static ExecutionEngine *create(Module *M,
+ std::string *Err,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel =
+ CodeGenOpt::Default,
+ bool GVsWithCode = true,
+ CodeModel::Model CMM = CodeModel::Default) {
+ return ExecutionEngine::createJIT(M, Err, JMM, OptLevel, GVsWithCode,
+ CMM);
+ }
+
+ virtual void addModule(Module *M);
+
+ /// removeModule - Remove a Module from the list of modules. Returns true if
+ /// M is found.
+ virtual bool removeModule(Module *M);
+
+ /// runFunction - Start execution with the specified function and arguments.
+ ///
+ virtual GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues);
+
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+
+ // CompilationCallback - Invoked the first time that a call site is found,
+ // which causes lazy compilation of the target function.
+ //
+ static void CompilationCallback();
+
+ /// getPointerToFunction - This returns the address of the specified function,
+ /// compiling it if necessary.
+ ///
+ void *getPointerToFunction(Function *F);
+
+ void *getPointerToBasicBlock(BasicBlock *BB) {
+ assert(0 && "JIT does not support address-of-label yet!");
+ return 0;
+ }
+
+ /// getOrEmitGlobalVariable - Return the address of the specified global
+ /// variable, possibly emitting it to memory if needed. This is used by the
+ /// Emitter.
+ void *getOrEmitGlobalVariable(const GlobalVariable *GV);
+
+ /// getPointerToFunctionOrStub - If the specified function has been
+ /// code-gen'd, return a pointer to the function. If not, compile it, or use
+ /// a stub to implement lazy compilation if available.
+ ///
+ void *getPointerToFunctionOrStub(Function *F);
+
+ /// recompileAndRelinkFunction - This method is used to force a function
+ /// which has already been compiled, to be compiled again, possibly
+ /// after it has been modified. Then the entry to the old copy is overwritten
+ /// with a branch to the new copy. If there was no old copy, this acts
+ /// just like JIT::getPointerToFunction().
+ ///
+ void *recompileAndRelinkFunction(Function *F);
+
+ /// freeMachineCodeForFunction - deallocate memory used to code-generate this
+ /// Function.
+ ///
+ void freeMachineCodeForFunction(Function *F);
+
+ /// addPendingFunction - while jitting non-lazily, a called but non-codegen'd
+ /// function was encountered. Add it to a pending list to be processed after
+ /// the current function.
+ ///
+ void addPendingFunction(Function *F);
+
+ /// getCodeEmitter - Return the code emitter this JIT is emitting into.
+ ///
+ JITCodeEmitter *getCodeEmitter() const { return JCE; }
+
+ /// selectTarget - Pick a target either via -march or by guessing the native
+ /// arch. Add any CPU features specified via -mcpu or -mattr.
+ static TargetMachine *selectTarget(Module *M,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs,
+ std::string *Err);
+
+ static ExecutionEngine *createJIT(Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs);
+
+ // Run the JIT on F and return information about the generated code
+ void runJITOnFunction(Function *F, MachineCodeInfo *MCI = 0);
+
+ virtual void RegisterJITEventListener(JITEventListener *L);
+ virtual void UnregisterJITEventListener(JITEventListener *L);
+ /// These functions correspond to the methods on JITEventListener. They
+ /// iterate over the registered listeners and call the corresponding method on
+ /// each.
+ void NotifyFunctionEmitted(
+ const Function &F, void *Code, size_t Size,
+ const JITEvent_EmittedFunctionDetails &Details);
+ void NotifyFreeingMachineCode(void *OldPtr);
+
+private:
+ static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM,
+ TargetMachine &tm);
+ void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
+ void updateFunctionStub(Function *F);
+
+protected:
+
+  /// getMemoryForGV - Allocate memory for a global variable.
+ virtual char* getMemoryForGV(const GlobalVariable* GV);
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp b/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
new file mode 100644
index 0000000..565509c
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
@@ -0,0 +1,209 @@
+//===-- JITDebugRegisterer.cpp - Register debug symbols for JIT -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITDebugRegisterer object that is used by the JIT to
+// register debug info with debuggers like GDB.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JITDebugRegisterer.h"
+#include "../../CodeGen/ELF.h"
+#include "../../CodeGen/ELFWriter.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Mutex.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+// This must be kept in sync with gdb/gdb/jit.h.
+extern "C" {
+
+  // Debuggers put a breakpoint in this function.
+ DISABLE_INLINE void __jit_debug_register_code() { }
+
+ // We put information about the JITed function in this global, which the
+ // debugger reads. Make sure to specify the version statically, because the
+ // debugger checks the version before we can set it during runtime.
+ struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+
+}
+
+namespace {
+
+ /// JITDebugLock - Used to serialize all code registration events, since they
+ /// modify global variables.
+ sys::Mutex JITDebugLock;
+
+}
+
+JITDebugRegisterer::JITDebugRegisterer(TargetMachine &tm) : TM(tm), FnMap() { }
+
+JITDebugRegisterer::~JITDebugRegisterer() {
+ // Free all ELF memory.
+ for (RegisteredFunctionsMap::iterator I = FnMap.begin(), E = FnMap.end();
+ I != E; ++I) {
+ // Call the private method that doesn't update the map so our iterator
+ // doesn't break.
+ UnregisterFunctionInternal(I);
+ }
+ FnMap.clear();
+}
+
+std::string JITDebugRegisterer::MakeELF(const Function *F, DebugInfo &I) {
+ // Stack allocate an empty module with an empty LLVMContext for the ELFWriter
+ // API. We don't use the real module because then the ELFWriter would write
+ // out unnecessary GlobalValues during finalization.
+ LLVMContext Context;
+ Module M("", Context);
+
+ // Make a buffer for the ELF in memory.
+ std::string Buffer;
+ raw_string_ostream O(Buffer);
+ ELFWriter EW(O, TM);
+ EW.doInitialization(M);
+
+ // Copy the binary into the .text section. This isn't necessary, but it's
+ // useful to be able to disassemble the ELF by hand.
+ ELFSection &Text = EW.getTextSection((Function *)F);
+ Text.Addr = (uint64_t)I.FnStart;
+ // TODO: We could eliminate this copy if we somehow used a pointer/size pair
+ // instead of a vector.
+ Text.getData().assign(I.FnStart, I.FnEnd);
+
+ // Copy the exception handling call frame information into the .eh_frame
+ // section. This allows GDB to get a good stack trace, particularly on
+ // linux x86_64. Mark this as a PROGBITS section that needs to be loaded
+ // into memory at runtime.
+ ELFSection &EH = EW.getSection(".eh_frame", ELFSection::SHT_PROGBITS,
+ ELFSection::SHF_ALLOC);
+ // Pointers in the DWARF EH info are all relative to the EH frame start,
+ // which is stored here.
+ EH.Addr = (uint64_t)I.EhStart;
+ // TODO: We could eliminate this copy if we somehow used a pointer/size pair
+ // instead of a vector.
+ EH.getData().assign(I.EhStart, I.EhEnd);
+
+ // Add this single function to the symbol table, so the debugger prints the
+ // name instead of '???'. We give the symbol default global visibility.
+ ELFSym *FnSym = ELFSym::getGV(F,
+ ELFSym::STB_GLOBAL,
+ ELFSym::STT_FUNC,
+ ELFSym::STV_DEFAULT);
+ FnSym->SectionIdx = Text.SectionIdx;
+ FnSym->Size = I.FnEnd - I.FnStart;
+ FnSym->Value = 0; // Offset from start of section.
+ EW.SymbolList.push_back(FnSym);
+
+ EW.doFinalization(M);
+ O.flush();
+
+ // When trying to debug why GDB isn't getting the debug info right, it's
+ // awfully helpful to write the object file to disk so that it can be
+ // inspected with readelf and objdump.
+ if (JITEmitDebugInfoToDisk) {
+ std::string Filename;
+ raw_string_ostream O2(Filename);
+ O2 << "/tmp/llvm_function_" << I.FnStart << "_" << F->getNameStr() << ".o";
+ O2.flush();
+ std::string Errors;
+ raw_fd_ostream O3(Filename.c_str(), Errors);
+ O3 << Buffer;
+ O3.close();
+ }
+
+ return Buffer;
+}
+
+void JITDebugRegisterer::RegisterFunction(const Function *F, DebugInfo &I) {
+ // TODO: Support non-ELF platforms.
+ if (!TM.getELFWriterInfo())
+ return;
+
+ std::string Buffer = MakeELF(F, I);
+
+  jit_code_entry *JITCodeEntry = new jit_code_entry();
+
+  // Add a mapping from F to the entry and buffer, so we can delete this
+  // info later. Point the entry at the long-lived copy of the buffer held in
+  // the map, not at the local temporary, which dies when this function
+  // returns.
+  FnMap[F] = std::make_pair(Buffer, JITCodeEntry);
+  const std::string &SymFile = FnMap[F].first;
+  JITCodeEntry->symfile_addr = SymFile.c_str();
+  JITCodeEntry->symfile_size = SymFile.size();
+
+ // Acquire the lock and do the registration.
+ {
+ MutexGuard locked(JITDebugLock);
+ __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
+
+ // Insert this entry at the head of the list.
+ JITCodeEntry->prev_entry = NULL;
+ jit_code_entry *NextEntry = __jit_debug_descriptor.first_entry;
+ JITCodeEntry->next_entry = NextEntry;
+ if (NextEntry != NULL) {
+ NextEntry->prev_entry = JITCodeEntry;
+ }
+ __jit_debug_descriptor.first_entry = JITCodeEntry;
+ __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+ __jit_debug_register_code();
+ }
+}
+
+void JITDebugRegisterer::UnregisterFunctionInternal(
+ RegisteredFunctionsMap::iterator I) {
+ jit_code_entry *JITCodeEntry = I->second.second;
+
+ // Acquire the lock and do the unregistration.
+ {
+ MutexGuard locked(JITDebugLock);
+ __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
+
+ // Remove the jit_code_entry from the linked list.
+ jit_code_entry *PrevEntry = JITCodeEntry->prev_entry;
+ jit_code_entry *NextEntry = JITCodeEntry->next_entry;
+ if (NextEntry) {
+ NextEntry->prev_entry = PrevEntry;
+ }
+ if (PrevEntry) {
+ PrevEntry->next_entry = NextEntry;
+ } else {
+ assert(__jit_debug_descriptor.first_entry == JITCodeEntry);
+ __jit_debug_descriptor.first_entry = NextEntry;
+ }
+
+ // Tell GDB which entry we removed, and unregister the code.
+ __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+ __jit_debug_register_code();
+ }
+
+  // The entry has been unlinked and reported to the debugger; reclaim it.
+  delete JITCodeEntry;
+  I->second.second = NULL;
+
+  // Free the ELF file in memory.
+  std::string &Buffer = I->second.first;
+  Buffer.clear();
+}
+
+void JITDebugRegisterer::UnregisterFunction(const Function *F) {
+ // TODO: Support non-ELF platforms.
+ if (!TM.getELFWriterInfo())
+ return;
+
+ RegisteredFunctionsMap::iterator I = FnMap.find(F);
+ if (I == FnMap.end()) return;
+ UnregisterFunctionInternal(I);
+ FnMap.erase(I);
+}
+
+} // end namespace llvm
diff --git a/lib/ExecutionEngine/JIT/JITDebugRegisterer.h b/lib/ExecutionEngine/JIT/JITDebugRegisterer.h
new file mode 100644
index 0000000..7e53d78
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITDebugRegisterer.h
@@ -0,0 +1,116 @@
+//===-- JITDebugRegisterer.h - Register debug symbols for JIT -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITDebugRegisterer object that is used by the JIT to
+// register debug info with debuggers like GDB.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTION_ENGINE_JIT_DEBUGREGISTERER_H
+#define LLVM_EXECUTION_ENGINE_JIT_DEBUGREGISTERER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/System/DataTypes.h"
+#include <string>
+
+// This must be kept in sync with gdb/gdb/jit.h.
+extern "C" {
+
+ typedef enum {
+ JIT_NOACTION = 0,
+ JIT_REGISTER_FN,
+ JIT_UNREGISTER_FN
+ } jit_actions_t;
+
+ struct jit_code_entry {
+ struct jit_code_entry *next_entry;
+ struct jit_code_entry *prev_entry;
+ const char *symfile_addr;
+ uint64_t symfile_size;
+ };
+
+ struct jit_descriptor {
+ uint32_t version;
+ // This should be jit_actions_t, but we want to be specific about the
+ // bit-width.
+ uint32_t action_flag;
+ struct jit_code_entry *relevant_entry;
+ struct jit_code_entry *first_entry;
+ };
+
+}
+
+namespace llvm {
+
+class ELFSection;
+class Function;
+class TargetMachine;
+
+
+/// This class encapsulates information we want to send to the debugger.
+///
+struct DebugInfo {
+ uint8_t *FnStart;
+ uint8_t *FnEnd;
+ uint8_t *EhStart;
+ uint8_t *EhEnd;
+
+ DebugInfo() : FnStart(0), FnEnd(0), EhStart(0), EhEnd(0) {}
+};
+
+typedef DenseMap< const Function*, std::pair<std::string, jit_code_entry*> >
+ RegisteredFunctionsMap;
+
+/// This class registers debug info for JITed code with an attached debugger.
+/// Without proper debug info, GDB can't do things like source level debugging
+/// or even produce a proper stack trace on linux-x86_64. To use this class,
+/// whenever a function is JITed, create a DebugInfo struct and pass it to the
+/// RegisterFunction method. The method will then do whatever is necessary to
+/// inform the debugger about the JITed function.
+class JITDebugRegisterer {
+
+ TargetMachine &TM;
+
+ /// FnMap - A map of functions that have been registered to the associated
+ /// temporary files. Used for cleanup.
+ RegisteredFunctionsMap FnMap;
+
+ /// MakeELF - Builds the ELF file in memory and returns a std::string that
+ /// contains the ELF.
+ std::string MakeELF(const Function *F, DebugInfo &I);
+
+public:
+ JITDebugRegisterer(TargetMachine &tm);
+
+ /// ~JITDebugRegisterer - Unregisters all code and frees symbol files.
+ ///
+ ~JITDebugRegisterer();
+
+ /// RegisterFunction - Register debug info for the given function with an
+ /// attached debugger. Clients must call UnregisterFunction on all
+ /// registered functions before deleting them to free the associated symbol
+ /// file and unregister it from the debugger.
+ void RegisterFunction(const Function *F, DebugInfo &I);
+
+ /// UnregisterFunction - Unregister the debug info for the given function
+ /// from the debugger and free associated memory.
+ void UnregisterFunction(const Function *F);
+
+private:
+ /// UnregisterFunctionInternal - Unregister the debug info for the given
+ /// function from the debugger and delete any temporary files. The private
+ /// version of this method does not remove the function from FnMap so that it
+ /// can be called while iterating over FnMap.
+ void UnregisterFunctionInternal(RegisteredFunctionsMap::iterator I);
+
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTION_ENGINE_JIT_DEBUGREGISTERER_H
diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
new file mode 100644
index 0000000..c1051a9
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -0,0 +1,1048 @@
+//===----- JITDwarfEmitter.cpp - Write dwarf tables into memory -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITDwarfEmitter object that is used by the JIT to
+// write dwarf tables to memory.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "JITDwarfEmitter.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+using namespace llvm;
+
+JITDwarfEmitter::JITDwarfEmitter(JIT& theJit) : MMI(0), Jit(theJit) {}
+
+
+unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
+ JITCodeEmitter& jce,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction,
+ unsigned char* &EHFramePtr) {
+ assert(MMI && "MachineModuleInfo not registered!");
+
+ const TargetMachine& TM = F.getTarget();
+ TD = TM.getTargetData();
+ stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
+ RI = TM.getRegisterInfo();
+ JCE = &jce;
+
+ unsigned char* ExceptionTable = EmitExceptionTable(&F, StartFunction,
+ EndFunction);
+
+ unsigned char* Result = 0;
+
+ const std::vector<Function *> Personalities = MMI->getPersonalities();
+ EHFramePtr = EmitCommonEHFrame(Personalities[MMI->getPersonalityIndex()]);
+
+ Result = EmitEHFrame(Personalities[MMI->getPersonalityIndex()], EHFramePtr,
+ StartFunction, EndFunction, ExceptionTable);
+
+ return Result;
+}
+
+
+void
+JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const {
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+ bool IsLocal = false;
+ unsigned BaseLabelID = 0;
+
+ for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
+ const MachineMove &Move = Moves[i];
+ unsigned LabelID = Move.getLabelID();
+
+ if (LabelID) {
+ LabelID = MMI->MappedLabel(LabelID);
+
+ // Throw out move if the label is invalid.
+ if (!LabelID) continue;
+ }
+
+ intptr_t LabelPtr = 0;
+ if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
+
+ const MachineLocation &Dst = Move.getDestination();
+ const MachineLocation &Src = Move.getSource();
+
+ // Advance row if new location.
+ if (BaseLabelPtr && LabelID && (BaseLabelID != LabelID || !IsLocal)) {
+ JCE->emitByte(dwarf::DW_CFA_advance_loc4);
+ JCE->emitInt32(LabelPtr - BaseLabelPtr);
+
+ BaseLabelID = LabelID;
+ BaseLabelPtr = LabelPtr;
+ IsLocal = true;
+ }
+
+ // If advancing cfa.
+ if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+ if (!Src.isReg()) {
+ if (Src.getReg() == MachineLocation::VirtualFP) {
+ JCE->emitByte(dwarf::DW_CFA_def_cfa_offset);
+ } else {
+ JCE->emitByte(dwarf::DW_CFA_def_cfa);
+ JCE->emitULEB128Bytes(RI->getDwarfRegNum(Src.getReg(), true));
+ }
+
+ JCE->emitULEB128Bytes(-Src.getOffset());
+ } else {
+ llvm_unreachable("Machine move not supported yet.");
+ }
+ } else if (Src.isReg() &&
+ Src.getReg() == MachineLocation::VirtualFP) {
+ if (Dst.isReg()) {
+ JCE->emitByte(dwarf::DW_CFA_def_cfa_register);
+ JCE->emitULEB128Bytes(RI->getDwarfRegNum(Dst.getReg(), true));
+ } else {
+ llvm_unreachable("Machine move not supported yet.");
+ }
+ } else {
+ unsigned Reg = RI->getDwarfRegNum(Src.getReg(), true);
+ int Offset = Dst.getOffset() / stackGrowth;
+
+ if (Offset < 0) {
+ JCE->emitByte(dwarf::DW_CFA_offset_extended_sf);
+ JCE->emitULEB128Bytes(Reg);
+ JCE->emitSLEB128Bytes(Offset);
+ } else if (Reg < 64) {
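+ // DW_CFA_offset packs the register number into the low six bits of the
+ // opcode byte, so this compact form only covers registers 0-63.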
+ JCE->emitByte(dwarf::DW_CFA_offset + Reg);
+ JCE->emitULEB128Bytes(Offset);
+ } else {
+ JCE->emitByte(dwarf::DW_CFA_offset_extended);
+ JCE->emitULEB128Bytes(Reg);
+ JCE->emitULEB128Bytes(Offset);
+ }
+ }
+ }
+}
+
+/// SharedTypeIds - How many leading type ids two landing pads have in common.
+static unsigned SharedTypeIds(const LandingPadInfo *L,
+ const LandingPadInfo *R) {
+ const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
+ unsigned LSize = LIds.size(), RSize = RIds.size();
+ unsigned MinSize = LSize < RSize ? LSize : RSize;
+ unsigned Count = 0;
+
+ for (; Count != MinSize; ++Count)
+ if (LIds[Count] != RIds[Count])
+ return Count;
+
+ return Count;
+}
+
+
+/// PadLT - Order landing pads lexicographically by type id.
+static bool PadLT(const LandingPadInfo *L, const LandingPadInfo *R) {
+ const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
+ unsigned LSize = LIds.size(), RSize = RIds.size();
+ unsigned MinSize = LSize < RSize ? LSize : RSize;
+
+ for (unsigned i = 0; i != MinSize; ++i)
+ if (LIds[i] != RIds[i])
+ return LIds[i] < RIds[i];
+
+ return LSize < RSize;
+}
+
+namespace {
+
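+/// KeyInfo - DenseMap traits for unsigned label ids; -1U and -2U are
+/// reserved as the empty and tombstone keys and must never be real ids.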
+struct KeyInfo {
+ static inline unsigned getEmptyKey() { return -1U; }
+ static inline unsigned getTombstoneKey() { return -2U; }
+ static unsigned getHashValue(const unsigned &Key) { return Key; }
+ static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; }
+};
+
+/// ActionEntry - Structure describing an entry in the actions table.
+struct ActionEntry {
+ int ValueForTypeID; // The value to write - may not be equal to the type id.
+ int NextAction;
+ struct ActionEntry *Previous;
+};
+
+/// PadRange - Structure holding a try-range and the associated landing pad.
+struct PadRange {
+ // The index of the landing pad.
+ unsigned PadIndex;
+ // The index of the begin and end labels in the landing pad's label lists.
+ unsigned RangeIndex;
+};
+
+typedef DenseMap<unsigned, PadRange, KeyInfo> RangeMapType;
+
+/// CallSiteEntry - Structure describing an entry in the call-site table.
+struct CallSiteEntry {
+ unsigned BeginLabel; // zero indicates the start of the function.
+ unsigned EndLabel; // zero indicates the end of the function.
+ unsigned PadLabel; // zero indicates that there is no landing pad.
+ unsigned Action;
+};
+
+}
+
+unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction) const {
+ assert(MMI && "MachineModuleInfo not registered!");
+
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
+
+ const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
+ const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
+ const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+ if (PadInfos.empty()) return 0;
+
+ // Sort the landing pads in order of their type ids. This is used to fold
+ // duplicate actions.
+ SmallVector<const LandingPadInfo *, 64> LandingPads;
+ LandingPads.reserve(PadInfos.size());
+ for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
+ LandingPads.push_back(&PadInfos[i]);
+ std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
+
+ // Negative type ids index into FilterIds, positive type ids index into
+ // TypeInfos. The value written for a positive type id is just the type
+ // id itself. For a negative type id, however, the value written is the
+ // (negative) byte offset of the corresponding FilterIds entry. The byte
+ // offset is usually equal to the type id, because the FilterIds entries
+ // are written using a variable width encoding which outputs one byte per
+ // entry as long as the value written is not too large, but can differ.
+ // This kind of complication does not occur for positive type ids because
+ // type infos are output using a fixed width encoding.
+ // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
+ SmallVector<int, 16> FilterOffsets;
+ FilterOffsets.reserve(FilterIds.size());
+ int Offset = -1;
+ for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
+ E = FilterIds.end(); I != E; ++I) {
+ FilterOffsets.push_back(Offset);
+ Offset -= MCAsmInfo::getULEB128Size(*I);
+ }
+
+ // Compute the actions table and gather the first action index for each
+ // landing pad site.
+ SmallVector<ActionEntry, 32> Actions;
+ SmallVector<unsigned, 64> FirstActions;
+ FirstActions.reserve(LandingPads.size());
+
+ int FirstAction = 0;
+ unsigned SizeActions = 0;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LP = LandingPads[i];
+ const std::vector<int> &TypeIds = LP->TypeIds;
+ const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
+ unsigned SizeSiteActions = 0;
+
+ if (NumShared < TypeIds.size()) {
+ unsigned SizeAction = 0;
+ ActionEntry *PrevAction = 0;
+
+ if (NumShared) {
+ const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
+ assert(Actions.size());
+ PrevAction = &Actions.back();
+ SizeAction = MCAsmInfo::getSLEB128Size(PrevAction->NextAction) +
+ MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ for (unsigned j = NumShared; j != SizePrevIds; ++j) {
+ SizeAction -= MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ SizeAction += -PrevAction->NextAction;
+ PrevAction = PrevAction->Previous;
+ }
+ }
+
+ // Compute the actions.
+ for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
+ int TypeID = TypeIds[I];
+ assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
+ int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
+ unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);
+
+ int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
+ SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
+ SizeSiteActions += SizeAction;
+
+ ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
+ Actions.push_back(Action);
+
+ PrevAction = &Actions.back();
+ }
+
+ // Record the first action of the landing pad site.
+ FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
+ } // else identical - re-use previous FirstAction
+
+ FirstActions.push_back(FirstAction);
+
+ // Compute this site's contribution to the action table size.
+ SizeActions += SizeSiteActions;
+ }
+
+ // Compute the call-site table. Entries must be ordered by address.
+ SmallVector<CallSiteEntry, 64> CallSites;
+
+ RangeMapType PadMap;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LandingPad = LandingPads[i];
+ for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
+ unsigned BeginLabel = LandingPad->BeginLabels[j];
+ assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
+ PadRange P = { i, j };
+ PadMap[BeginLabel] = P;
+ }
+ }
+
+ bool MayThrow = false;
+ unsigned LastLabel = 0;
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+ I != E; ++I) {
+ for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
+ MI != E; ++MI) {
+ if (!MI->isLabel()) {
+ MayThrow |= MI->getDesc().isCall();
+ continue;
+ }
+
+ unsigned BeginLabel = MI->getOperand(0).getImm();
+ assert(BeginLabel && "Invalid label!");
+
+ if (BeginLabel == LastLabel)
+ MayThrow = false;
+
+ RangeMapType::iterator L = PadMap.find(BeginLabel);
+
+ if (L == PadMap.end())
+ continue;
+
+ PadRange P = L->second;
+ const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
+
+ assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
+ "Inconsistent landing pad map!");
+
+ // If some instruction between the previous try-range and this one may
+ // throw, create a call-site entry with no landing pad for the region
+ // between the try-ranges.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ LastLabel = LandingPad->EndLabels[P.RangeIndex];
+ CallSiteEntry Site = {BeginLabel, LastLabel,
+ LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
+
+ assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
+ "Invalid landing pad!");
+
+ // Try to merge with the previous call-site.
+ if (CallSites.size()) {
+ CallSiteEntry &Prev = CallSites.back();
+ if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
+ // Extend the range of the previous entry.
+ Prev.EndLabel = Site.EndLabel;
+ continue;
+ }
+ }
+
+ // Otherwise, create a new call-site.
+ CallSites.push_back(Site);
+ }
+ }
+ // If some instruction between the previous try-range and the end of the
+ // function may throw, create a call-site entry with no landing pad for the
+ // region following the try-range.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, 0, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ // Final tallies.
+ unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
+ sizeof(int32_t) + // Site length.
+ sizeof(int32_t)); // Landing pad.
+ for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
+ SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action);
+
+ unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
+
+ unsigned TypeOffset = sizeof(int8_t) + // Call site format
+ // Call-site table length
+ MCAsmInfo::getULEB128Size(SizeSites) +
+ SizeSites + SizeActions + SizeTypes;
+
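+ // What follows is a GCC-style LSDA: a header giving the encodings, the
+ // call-site table, the action table, the type infos, and the filter ids.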
+ // Begin the exception table.
+ JCE->emitAlignmentWithFill(4, 0);
+ // Asm->EOL("Padding");
+
+ unsigned char* DwarfExceptionTable = (unsigned char*)JCE->getCurrentPCValue();
+
+ // Emit the header.
+ JCE->emitByte(dwarf::DW_EH_PE_omit);
+ // Asm->EOL("LPStart format (DW_EH_PE_omit)");
+ JCE->emitByte(dwarf::DW_EH_PE_absptr);
+ // Asm->EOL("TType format (DW_EH_PE_absptr)");
+ JCE->emitULEB128Bytes(TypeOffset);
+ // Asm->EOL("TType base offset");
+ JCE->emitByte(dwarf::DW_EH_PE_udata4);
+ // Asm->EOL("Call site format (DW_EH_PE_udata4)");
+ JCE->emitULEB128Bytes(SizeSites);
+ // Asm->EOL("Call-site table length");
+
+ // Emit the landing pad site information.
+ for (unsigned i = 0; i < CallSites.size(); ++i) {
+ CallSiteEntry &S = CallSites[i];
+ intptr_t BeginLabelPtr = 0;
+ intptr_t EndLabelPtr = 0;
+
+ if (!S.BeginLabel) {
+ BeginLabelPtr = (intptr_t)StartFunction;
+ JCE->emitInt32(0);
+ } else {
+ BeginLabelPtr = JCE->getLabelAddress(S.BeginLabel);
+ JCE->emitInt32(BeginLabelPtr - (intptr_t)StartFunction);
+ }
+
+ // Asm->EOL("Region start");
+
+ if (!S.EndLabel)
+ EndLabelPtr = (intptr_t)EndFunction;
+ else
+ EndLabelPtr = JCE->getLabelAddress(S.EndLabel);
+
+ JCE->emitInt32(EndLabelPtr - BeginLabelPtr);
+ //Asm->EOL("Region length");
+
+ if (!S.PadLabel) {
+ JCE->emitInt32(0);
+ } else {
+ unsigned PadLabelPtr = JCE->getLabelAddress(S.PadLabel);
+ JCE->emitInt32(PadLabelPtr - (intptr_t)StartFunction);
+ }
+ // Asm->EOL("Landing pad");
+
+ JCE->emitULEB128Bytes(S.Action);
+ // Asm->EOL("Action");
+ }
+
+ // Emit the actions.
+ for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
+ ActionEntry &Action = Actions[I];
+
+ JCE->emitSLEB128Bytes(Action.ValueForTypeID);
+ //Asm->EOL("TypeInfo index");
+ JCE->emitSLEB128Bytes(Action.NextAction);
+ //Asm->EOL("Next action");
+ }
+
+ // Emit the type ids.
+ for (unsigned M = TypeInfos.size(); M; --M) {
+ GlobalVariable *GV = TypeInfos[M - 1];
+
+ if (GV) {
+ if (TD->getPointerSize() == sizeof(int32_t))
+ JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
+ else
+ JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
+ } else {
+ if (TD->getPointerSize() == sizeof(int32_t))
+ JCE->emitInt32(0);
+ else
+ JCE->emitInt64(0);
+ }
+ // Asm->EOL("TypeInfo");
+ }
+
+ // Emit the filter typeids.
+ for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
+ unsigned TypeID = FilterIds[j];
+ JCE->emitULEB128Bytes(TypeID);
+ //Asm->EOL("Filter TypeInfo index");
+ }
+
+ JCE->emitAlignmentWithFill(4, 0);
+
+ return DwarfExceptionTable;
+}
+
+unsigned char*
+JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+
+ unsigned char* StartCommonPtr = (unsigned char*)JCE->getCurrentPCValue();
+ // EH Common Frame header
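+ // A CIE consists of a length word (patched in at the end of this
+ // function), a zero CIE id, a version byte, and a NUL-terminated
+ // augmentation string: "z" advertises an augmentation-size field, "P" a
+ // personality routine, "L" an LSDA encoding, and "R" an FDE pointer
+ // encoding.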
+ JCE->allocateSpace(4, 0);
+ unsigned char* FrameCommonBeginPtr = (unsigned char*)JCE->getCurrentPCValue();
+ JCE->emitInt32((int)0);
+ JCE->emitByte(dwarf::DW_CIE_VERSION);
+ JCE->emitString(Personality ? "zPLR" : "zR");
+ JCE->emitULEB128Bytes(1);
+ JCE->emitSLEB128Bytes(stackGrowth);
+ JCE->emitByte(RI->getDwarfRegNum(RI->getRARegister(), true));
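+ // The three fields above are the code alignment factor, the data
+ // alignment factor, and the DWARF number of the return address register.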
+
+ if (Personality) {
+ // Augmentation size: three one-byte ULEB128 fields plus the personality
+ // function pointer, whose size is PointerSize.
+ JCE->emitULEB128Bytes(3 + PointerSize);
+
+ // Encode the personality as a direct (absolute) pointer, since we emit
+ // the raw function pointer. A PC-relative encoding is not used because
+ // the current PC value may be above the personality function's address.
+ if (PointerSize == 4) {
+ JCE->emitByte(dwarf::DW_EH_PE_sdata4);
+ JCE->emitInt32(((intptr_t)Jit.getPointerToGlobal(Personality)));
+ } else {
+ JCE->emitByte(dwarf::DW_EH_PE_sdata8);
+ JCE->emitInt64(((intptr_t)Jit.getPointerToGlobal(Personality)));
+ }
+
+ JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+ JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+ } else {
+ JCE->emitULEB128Bytes(1);
+ JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+ }
+
+ std::vector<MachineMove> Moves;
+ RI->getInitialFrameState(Moves);
+ EmitFrameMoves(0, Moves);
+
+ JCE->emitAlignmentWithFill(PointerSize, dwarf::DW_CFA_nop);
+
+ JCE->emitInt32At((uintptr_t*)StartCommonPtr,
+ (uintptr_t)((unsigned char*)JCE->getCurrentPCValue() -
+ FrameCommonBeginPtr));
+
+ return StartCommonPtr;
+}
+
+
+unsigned char*
+JITDwarfEmitter::EmitEHFrame(const Function* Personality,
+ unsigned char* StartCommonPtr,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction,
+ unsigned char* ExceptionTable) const {
+ unsigned PointerSize = TD->getPointerSize();
+
+ // EH frame header.
+ unsigned char* StartEHPtr = (unsigned char*)JCE->getCurrentPCValue();
+ JCE->allocateSpace(4, 0);
+ unsigned char* FrameBeginPtr = (unsigned char*)JCE->getCurrentPCValue();
+ // FDE CIE Offset
+ JCE->emitInt32(FrameBeginPtr - StartCommonPtr);
+ JCE->emitInt32(StartFunction - (unsigned char*)JCE->getCurrentPCValue());
+ JCE->emitInt32(EndFunction - StartFunction);
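+ // Together these form the FDE prologue: the offset back to the owning
+ // CIE, the PC-relative address where the function starts, and its length.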
+
+ // If there is a personality and landing pads then point to the language
+ // specific data area in the exception table.
+ if (Personality) {
+ JCE->emitULEB128Bytes(PointerSize == 4 ? 4 : 8);
+
+ if (PointerSize == 4) {
+ if (!MMI->getLandingPads().empty())
+ JCE->emitInt32(ExceptionTable-(unsigned char*)JCE->getCurrentPCValue());
+ else
+ JCE->emitInt32((int)0);
+ } else {
+ if (!MMI->getLandingPads().empty())
+ JCE->emitInt64(ExceptionTable-(unsigned char*)JCE->getCurrentPCValue());
+ else
+ JCE->emitInt64((int)0);
+ }
+ } else {
+ JCE->emitULEB128Bytes(0);
+ }
+
+ // Indicate locations of function specific callee saved registers in
+ // frame.
+ EmitFrameMoves((intptr_t)StartFunction, MMI->getFrameMoves());
+
+ JCE->emitAlignmentWithFill(PointerSize, dwarf::DW_CFA_nop);
+
+ // Indicate the size of the table
+ JCE->emitInt32At((uintptr_t*)StartEHPtr,
+ (uintptr_t)((unsigned char*)JCE->getCurrentPCValue() -
+ StartEHPtr));
+
+ // Double zeroes for the unwind runtime
+ if (PointerSize == 8) {
+ JCE->emitInt64(0);
+ JCE->emitInt64(0);
+ } else {
+ JCE->emitInt32(0);
+ JCE->emitInt32(0);
+ }
+
+ return StartEHPtr;
+}
+
+unsigned JITDwarfEmitter::GetDwarfTableSizeInBytes(MachineFunction& F,
+ JITCodeEmitter& jce,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction) {
+ const TargetMachine& TM = F.getTarget();
+ TD = TM.getTargetData();
+ stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
+ RI = TM.getRegisterInfo();
+ JCE = &jce;
+ unsigned FinalSize = 0;
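+ // Conservative estimate: sum the sizes the exception table, the common
+ // EH frame (CIE) and the per-function EH frame (FDE) would occupy,
+ // rounding up pessimistically wherever the final alignment is unknown.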
+
+ FinalSize += GetExceptionTableSizeInBytes(&F);
+
+ const std::vector<Function *> Personalities = MMI->getPersonalities();
+ FinalSize +=
+ GetCommonEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()]);
+
+ FinalSize += GetEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()],
+ StartFunction);
+
+ return FinalSize;
+}
+
+/// RoundUpToAlign - Add the specified alignment to FinalSize and return
+/// the new value.
+static unsigned RoundUpToAlign(unsigned FinalSize, unsigned Alignment) {
+ if (Alignment == 0) Alignment = 1;
+ // Since we do not know where the buffer will be allocated, be pessimistic.
+ return FinalSize + Alignment;
+}
+
+unsigned
+JITDwarfEmitter::GetEHFrameSizeInBytes(const Function* Personality,
+ unsigned char* StartFunction) const {
+ unsigned PointerSize = TD->getPointerSize();
+ unsigned FinalSize = 0;
+ // EH frame header.
+ FinalSize += PointerSize;
+ // FDE CIE Offset
+ FinalSize += 3 * PointerSize;
+ // If there is a personality and landing pads then point to the language
+ // specific data area in the exception table.
+ if (Personality) {
+ FinalSize += MCAsmInfo::getULEB128Size(4);
+ FinalSize += PointerSize;
+ } else {
+ FinalSize += MCAsmInfo::getULEB128Size(0);
+ }
+
+ // Indicate locations of function specific callee saved registers in
+ // frame.
+ FinalSize += GetFrameMovesSizeInBytes((intptr_t)StartFunction,
+ MMI->getFrameMoves());
+
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+
+ // Double zeroes for the unwind runtime
+ FinalSize += 2 * PointerSize;
+
+ return FinalSize;
+}
+
+unsigned JITDwarfEmitter::GetCommonEHFrameSizeInBytes(const Function* Personality)
+ const {
+
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+ unsigned FinalSize = 0;
+ // EH Common Frame header
+ FinalSize += PointerSize;
+ FinalSize += 4;
+ FinalSize += 1;
+ FinalSize += Personality ? 5 : 3; // "zPLR" or "zR"
+ FinalSize += MCAsmInfo::getULEB128Size(1);
+ FinalSize += MCAsmInfo::getSLEB128Size(stackGrowth);
+ FinalSize += 1;
+
+ if (Personality) {
+ FinalSize += MCAsmInfo::getULEB128Size(7);
+
+ // Encoding
+ FinalSize+= 1;
+ //Personality
+ FinalSize += PointerSize;
+
+ FinalSize += MCAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+ FinalSize += MCAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+
+ } else {
+ FinalSize += MCAsmInfo::getULEB128Size(1);
+ FinalSize += MCAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+ }
+
+ std::vector<MachineMove> Moves;
+ RI->getInitialFrameState(Moves);
+ FinalSize += GetFrameMovesSizeInBytes(0, Moves);
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+ return FinalSize;
+}
+
+unsigned
+JITDwarfEmitter::GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const {
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+ bool IsLocal = BaseLabelPtr;
+ unsigned FinalSize = 0;
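+ // Mirrors EmitFrameMoves, but accumulates the byte size of each CFA
+ // instruction instead of emitting it.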
+
+ for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
+ const MachineMove &Move = Moves[i];
+ unsigned LabelID = Move.getLabelID();
+
+ if (LabelID) {
+ LabelID = MMI->MappedLabel(LabelID);
+
+ // Throw out move if the label is invalid.
+ if (!LabelID) continue;
+ }
+
+ intptr_t LabelPtr = 0;
+ if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
+
+ const MachineLocation &Dst = Move.getDestination();
+ const MachineLocation &Src = Move.getSource();
+
+ // Advance row if new location.
+ if (BaseLabelPtr && LabelID && (BaseLabelPtr != LabelPtr || !IsLocal)) {
+ FinalSize++;
+ FinalSize += PointerSize;
+ BaseLabelPtr = LabelPtr;
+ IsLocal = true;
+ }
+
+ // If advancing cfa.
+ if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+ if (!Src.isReg()) {
+ if (Src.getReg() == MachineLocation::VirtualFP) {
+ ++FinalSize;
+ } else {
+ ++FinalSize;
+ unsigned RegNum = RI->getDwarfRegNum(Src.getReg(), true);
+ FinalSize += MCAsmInfo::getULEB128Size(RegNum);
+ }
+
+ int Offset = -Src.getOffset();
+
+ FinalSize += MCAsmInfo::getULEB128Size(Offset);
+ } else {
+ llvm_unreachable("Machine move no supported yet.");
+ }
+ } else if (Src.isReg() &&
+ Src.getReg() == MachineLocation::VirtualFP) {
+ if (Dst.isReg()) {
+ ++FinalSize;
+ unsigned RegNum = RI->getDwarfRegNum(Dst.getReg(), true);
+ FinalSize += MCAsmInfo::getULEB128Size(RegNum);
+ } else {
+ llvm_unreachable("Machine move no supported yet.");
+ }
+ } else {
+ unsigned Reg = RI->getDwarfRegNum(Src.getReg(), true);
+ int Offset = Dst.getOffset() / stackGrowth;
+
+ if (Offset < 0) {
+ ++FinalSize;
+ FinalSize += MCAsmInfo::getULEB128Size(Reg);
+ FinalSize += MCAsmInfo::getSLEB128Size(Offset);
+ } else if (Reg < 64) {
+ ++FinalSize;
+ FinalSize += MCAsmInfo::getULEB128Size(Offset);
+ } else {
+ ++FinalSize;
+ FinalSize += MCAsmInfo::getULEB128Size(Reg);
+ FinalSize += MCAsmInfo::getULEB128Size(Offset);
+ }
+ }
+ }
+ return FinalSize;
+}
+
+unsigned
+JITDwarfEmitter::GetExceptionTableSizeInBytes(MachineFunction* MF) const {
+ unsigned FinalSize = 0;
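+ // Mirrors EmitExceptionTable: the action and call-site tables are rebuilt
+ // purely to measure how many bytes the emitted LSDA would occupy.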
+
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
+
+ const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
+ const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
+ const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+ if (PadInfos.empty()) return 0;
+
+ // Sort the landing pads in order of their type ids. This is used to fold
+ // duplicate actions.
+ SmallVector<const LandingPadInfo *, 64> LandingPads;
+ LandingPads.reserve(PadInfos.size());
+ for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
+ LandingPads.push_back(&PadInfos[i]);
+ std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
+
+ // Negative type ids index into FilterIds, positive type ids index into
+ // TypeInfos. The value written for a positive type id is just the type
+ // id itself. For a negative type id, however, the value written is the
+ // (negative) byte offset of the corresponding FilterIds entry. The byte
+ // offset is usually equal to the type id, because the FilterIds entries
+ // are written using a variable width encoding which outputs one byte per
+ // entry as long as the value written is not too large, but can differ.
+ // This kind of complication does not occur for positive type ids because
+ // type infos are output using a fixed width encoding.
+ // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
+ SmallVector<int, 16> FilterOffsets;
+ FilterOffsets.reserve(FilterIds.size());
+ int Offset = -1;
+ for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
+ E = FilterIds.end(); I != E; ++I) {
+ FilterOffsets.push_back(Offset);
+ Offset -= MCAsmInfo::getULEB128Size(*I);
+ }
+
+ // Compute the actions table and gather the first action index for each
+ // landing pad site.
+ SmallVector<ActionEntry, 32> Actions;
+ SmallVector<unsigned, 64> FirstActions;
+ FirstActions.reserve(LandingPads.size());
+
+ int FirstAction = 0;
+ unsigned SizeActions = 0;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LP = LandingPads[i];
+ const std::vector<int> &TypeIds = LP->TypeIds;
+ const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
+ unsigned SizeSiteActions = 0;
+
+ if (NumShared < TypeIds.size()) {
+ unsigned SizeAction = 0;
+ ActionEntry *PrevAction = 0;
+
+ if (NumShared) {
+ const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
+ assert(Actions.size());
+ PrevAction = &Actions.back();
+ SizeAction = MCAsmInfo::getSLEB128Size(PrevAction->NextAction) +
+ MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ for (unsigned j = NumShared; j != SizePrevIds; ++j) {
+ SizeAction -= MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ SizeAction += -PrevAction->NextAction;
+ PrevAction = PrevAction->Previous;
+ }
+ }
+
+ // Compute the actions.
+ for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
+ int TypeID = TypeIds[I];
+ assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
+ int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
+ unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);
+
+ int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
+ SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
+ SizeSiteActions += SizeAction;
+
+ ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
+ Actions.push_back(Action);
+
+ PrevAction = &Actions.back();
+ }
+
+ // Record the first action of the landing pad site.
+ FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
+ } // else identical - re-use previous FirstAction
+
+ FirstActions.push_back(FirstAction);
+
+ // Compute this site's contribution to the action table size.
+ SizeActions += SizeSiteActions;
+ }
+
+ // Compute the call-site table. Entries must be ordered by address.
+ SmallVector<CallSiteEntry, 64> CallSites;
+
+ RangeMapType PadMap;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LandingPad = LandingPads[i];
+ for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
+ unsigned BeginLabel = LandingPad->BeginLabels[j];
+ assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
+ PadRange P = { i, j };
+ PadMap[BeginLabel] = P;
+ }
+ }
+
+ bool MayThrow = false;
+ unsigned LastLabel = 0;
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+ I != E; ++I) {
+ for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
+ MI != E; ++MI) {
+ if (!MI->isLabel()) {
+ MayThrow |= MI->getDesc().isCall();
+ continue;
+ }
+
+ unsigned BeginLabel = MI->getOperand(0).getImm();
+ assert(BeginLabel && "Invalid label!");
+
+ if (BeginLabel == LastLabel)
+ MayThrow = false;
+
+ RangeMapType::iterator L = PadMap.find(BeginLabel);
+
+ if (L == PadMap.end())
+ continue;
+
+ PadRange P = L->second;
+ const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
+
+ assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
+ "Inconsistent landing pad map!");
+
+ // If some instruction between the previous try-range and this one may
+ // throw, create a call-site entry with no landing pad for the region
+ // between the try-ranges.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ LastLabel = LandingPad->EndLabels[P.RangeIndex];
+ CallSiteEntry Site = {BeginLabel, LastLabel,
+ LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
+
+ assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
+ "Invalid landing pad!");
+
+ // Try to merge with the previous call-site.
+ if (CallSites.size()) {
+ CallSiteEntry &Prev = CallSites.back();
+ if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
+ // Extend the range of the previous entry.
+ Prev.EndLabel = Site.EndLabel;
+ continue;
+ }
+ }
+
+ // Otherwise, create a new call-site.
+ CallSites.push_back(Site);
+ }
+ }
+ // If some instruction between the previous try-range and the end of the
+ // function may throw, create a call-site entry with no landing pad for the
+ // region following the try-range.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, 0, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ // Final tallies.
+ unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
+ sizeof(int32_t) + // Site length.
+ sizeof(int32_t)); // Landing pad.
+ for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
+ SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action);
+
+ unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
+
+ unsigned TypeOffset = sizeof(int8_t) + // Call site format
+ // Call-site table length
+ MCAsmInfo::getULEB128Size(SizeSites) +
+ SizeSites + SizeActions + SizeTypes;
+
+ unsigned TotalSize = sizeof(int8_t) + // LPStart format
+ sizeof(int8_t) + // TType format
+ MCAsmInfo::getULEB128Size(TypeOffset) + // TType base offset
+ TypeOffset;
+
+ unsigned SizeAlign = (4 - TotalSize) & 3;
+
+ // Begin the exception table.
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+ FinalSize += SizeAlign;
+
+ unsigned PointerSize = TD->getPointerSize();
+
+ // Emit the header.
+ ++FinalSize;
+ // Asm->EOL("LPStart format (DW_EH_PE_omit)");
+ ++FinalSize;
+ // Asm->EOL("TType format (DW_EH_PE_absptr)");
+ ++FinalSize;
+ // Asm->EOL("TType base offset");
+ ++FinalSize;
+ // Asm->EOL("Call site format (DW_EH_PE_udata4)");
+ ++FinalSize;
+ // Asm->EOL("Call-site table length");
+
+ // Emit the landing pad site information.
+ for (unsigned i = 0; i < CallSites.size(); ++i) {
+ CallSiteEntry &S = CallSites[i];
+
+ // Asm->EOL("Region start");
+ FinalSize += PointerSize;
+
+ //Asm->EOL("Region length");
+ FinalSize += PointerSize;
+
+ // Asm->EOL("Landing pad");
+ FinalSize += PointerSize;
+
+ FinalSize += MCAsmInfo::getULEB128Size(S.Action);
+ // Asm->EOL("Action");
+ }
+
+ // Emit the actions.
+ for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
+ ActionEntry &Action = Actions[I];
+
+ //Asm->EOL("TypeInfo index");
+ FinalSize += MCAsmInfo::getSLEB128Size(Action.ValueForTypeID);
+ //Asm->EOL("Next action");
+ FinalSize += MCAsmInfo::getSLEB128Size(Action.NextAction);
+ }
+
+ // Emit the type ids.
+ for (unsigned M = TypeInfos.size(); M; --M) {
+ // Asm->EOL("TypeInfo");
+ FinalSize += PointerSize;
+ }
+
+ // Emit the filter typeids.
+ for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
+ unsigned TypeID = FilterIds[j];
+ FinalSize += MCAsmInfo::getULEB128Size(TypeID);
+ //Asm->EOL("Filter TypeInfo index");
+ }
+
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+
+ return FinalSize;
+}
diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.h b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
new file mode 100644
index 0000000..e627550
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
@@ -0,0 +1,87 @@
+//===------ JITDwarfEmitter.h - Write dwarf tables into memory ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITDwarfEmitter object that is used by the JIT to
+// write dwarf tables to memory.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H
+#define LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H
+
+#include "llvm/System/DataTypes.h"
+#include <vector>
+namespace llvm {
+
+class Function;
+class JIT;
+class JITCodeEmitter;
+class MachineFunction;
+class MachineModuleInfo;
+class MachineMove;
+class TargetData;
+class TargetMachine;
+class TargetRegisterInfo;
+
+class JITDwarfEmitter {
+ const TargetData* TD;
+ JITCodeEmitter* JCE;
+ const TargetRegisterInfo* RI;
+ MachineModuleInfo* MMI;
+ JIT& Jit;
+ bool stackGrowthDirection;
+
+ unsigned char* EmitExceptionTable(MachineFunction* MF,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction) const;
+
+ void EmitFrameMoves(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const;
+
+ unsigned char* EmitCommonEHFrame(const Function* Personality) const;
+
+ unsigned char* EmitEHFrame(const Function* Personality,
+ unsigned char* StartBufferPtr,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction,
+ unsigned char* ExceptionTable) const;
+
+ unsigned GetExceptionTableSizeInBytes(MachineFunction* MF) const;
+
+ unsigned
+ GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const;
+
+ unsigned GetCommonEHFrameSizeInBytes(const Function* Personality) const;
+
+ unsigned GetEHFrameSizeInBytes(const Function* Personality,
+ unsigned char* StartFunction) const;
+
+public:
+
+ JITDwarfEmitter(JIT& jit);
+
+ unsigned char* EmitDwarfTable(MachineFunction& F,
+ JITCodeEmitter& JCE,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction,
+ unsigned char* &EHFramePtr);
+
+
+ unsigned GetDwarfTableSizeInBytes(MachineFunction& F,
+ JITCodeEmitter& JCE,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction);
+
+ void setModuleInfo(MachineModuleInfo* Info) {
+ MMI = Info;
+ }
+};
+
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H
diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp
new file mode 100644
index 0000000..34a9938
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -0,0 +1,1609 @@
+//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a MachineCodeEmitter object that is used by the JIT to
+// write machine code to memory and remember where relocatable values are.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jit"
+#include "JIT.h"
+#include "JITDebugRegisterer.h"
+#include "JITDwarfEmitter.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Constants.h"
+#include "llvm/Module.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRelocation.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/CodeGen/MachineCodeInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetJITInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Disassembler.h"
+#include "llvm/System/Memory.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/ValueMap.h"
+#include <algorithm>
+#ifndef NDEBUG
+#include <iomanip>
+#endif
+using namespace llvm;
+
+STATISTIC(NumBytes, "Number of bytes of machine code compiled");
+STATISTIC(NumRelos, "Number of relocations applied");
+STATISTIC(NumRetries, "Number of retries with more memory");
+static JIT *TheJIT = 0;
+
+
+// A declaration may stop being a declaration once it's fully read from bitcode.
+// This function returns true if F is fully read and is still a declaration.
+static bool isNonGhostDeclaration(const Function *F) {
+ return F->isDeclaration() && !F->isMaterializable();
+}
+
+//===----------------------------------------------------------------------===//
+// JIT lazy compilation code.
+//
+namespace {
+ class JITEmitter;
+ class JITResolverState;
+
+ template<typename ValueTy>
+ struct NoRAUWValueMapConfig : public ValueMapConfig<ValueTy> {
+ typedef JITResolverState *ExtraData;
+ static void onRAUW(JITResolverState *, Value *Old, Value *New) {
+ assert(false && "The JIT doesn't know how to handle a"
+ " RAUW on a value it has emitted.");
+ }
+ };
+
+ struct CallSiteValueMapConfig : public NoRAUWValueMapConfig<Function*> {
+ typedef JITResolverState *ExtraData;
+ static void onDelete(JITResolverState *JRS, Function *F);
+ };
+
+ class JITResolverState {
+ public:
+ typedef ValueMap<Function*, void*, NoRAUWValueMapConfig<Function*> >
+ FunctionToLazyStubMapTy;
+ typedef std::map<void*, AssertingVH<Function> > CallSiteToFunctionMapTy;
+ typedef ValueMap<Function *, SmallPtrSet<void*, 1>,
+ CallSiteValueMapConfig> FunctionToCallSitesMapTy;
+ typedef std::map<AssertingVH<GlobalValue>, void*> GlobalToIndirectSymMapTy;
+ private:
+ /// FunctionToLazyStubMap - Keep track of the lazy stub created for a
+ /// particular function so that we can reuse it if necessary.
+ FunctionToLazyStubMapTy FunctionToLazyStubMap;
+
+ /// CallSiteToFunctionMap - Keep track of the function that each lazy call
+ /// site corresponds to, and vice versa.
+ CallSiteToFunctionMapTy CallSiteToFunctionMap;
+ FunctionToCallSitesMapTy FunctionToCallSitesMap;
+
+ /// GlobalToIndirectSymMap - Keep track of the indirect symbol created for
+ /// a particular GlobalVariable so that we can reuse it if necessary.
+ GlobalToIndirectSymMapTy GlobalToIndirectSymMap;
+
+ public:
+ JITResolverState() : FunctionToLazyStubMap(this),
+ FunctionToCallSitesMap(this) {}
+
+ FunctionToLazyStubMapTy& getFunctionToLazyStubMap(
+ const MutexGuard& locked) {
+ assert(locked.holds(TheJIT->lock));
+ return FunctionToLazyStubMap;
+ }
+
+ GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& locked) {
+ assert(locked.holds(TheJIT->lock));
+ return GlobalToIndirectSymMap;
+ }
+
+ pair<void *, Function *> LookupFunctionFromCallSite(
+ const MutexGuard &locked, void *CallSite) const {
+ assert(locked.holds(TheJIT->lock));
+
+ // The address given to us for the stub may not be exactly right; it
+ // might be a little bit after the start of the stub. As such, use
+ // upper_bound to find it.
+ CallSiteToFunctionMapTy::const_iterator I =
+ CallSiteToFunctionMap.upper_bound(CallSite);
+ assert(I != CallSiteToFunctionMap.begin() &&
+ "This is not a known call site!");
+ --I;
+ return *I;
+ }
+
+ void AddCallSite(const MutexGuard &locked, void *CallSite, Function *F) {
+ assert(locked.holds(TheJIT->lock));
+
+ bool Inserted = CallSiteToFunctionMap.insert(
+ std::make_pair(CallSite, F)).second;
+ (void)Inserted;
+ assert(Inserted && "Pair was already in CallSiteToFunctionMap");
+ FunctionToCallSitesMap[F].insert(CallSite);
+ }
+
+ // Returns the Function of the stub if a stub was erased, or NULL if there
+ // was no stub. This function uses the call-site->function map to find a
+ // relevant function, but asserts that only stubs and not other call sites
+ // will be passed in.
+ Function *EraseStub(const MutexGuard &locked, void *Stub) {
+ CallSiteToFunctionMapTy::iterator C2F_I =
+ CallSiteToFunctionMap.find(Stub);
+ if (C2F_I == CallSiteToFunctionMap.end()) {
+ // Not a stub.
+ return NULL;
+ }
+
+ Function *const F = C2F_I->second;
+#ifndef NDEBUG
+ void *RealStub = FunctionToLazyStubMap.lookup(F);
+ assert(RealStub == Stub &&
+ "Call-site that wasn't a stub pass in to EraseStub");
+#endif
+ FunctionToLazyStubMap.erase(F);
+ CallSiteToFunctionMap.erase(C2F_I);
+
+ // Remove the stub from the function->call-sites map, and remove the whole
+ // entry from the map if that was the last call site.
+ FunctionToCallSitesMapTy::iterator F2C_I = FunctionToCallSitesMap.find(F);
+ assert(F2C_I != FunctionToCallSitesMap.end() &&
+ "FunctionToCallSitesMap broken");
+ bool Erased = F2C_I->second.erase(Stub);
+ (void)Erased;
+ assert(Erased && "FunctionToCallSitesMap broken");
+ if (F2C_I->second.empty())
+ FunctionToCallSitesMap.erase(F2C_I);
+
+ return F;
+ }
+
+ void EraseAllCallSites(const MutexGuard &locked, Function *F) {
+ assert(locked.holds(TheJIT->lock));
+ EraseAllCallSitesPrelocked(F);
+ }
+ void EraseAllCallSitesPrelocked(Function *F) {
+ FunctionToCallSitesMapTy::iterator F2C = FunctionToCallSitesMap.find(F);
+ if (F2C == FunctionToCallSitesMap.end())
+ return;
+ for (SmallPtrSet<void*, 1>::const_iterator I = F2C->second.begin(),
+ E = F2C->second.end(); I != E; ++I) {
+ bool Erased = CallSiteToFunctionMap.erase(*I);
+ (void)Erased;
+ assert(Erased && "Missing call site->function mapping");
+ }
+ FunctionToCallSitesMap.erase(F2C);
+ }
+ };
+
+ /// JITResolver - Keep track of, and resolve, call sites for functions that
+ /// have not yet been compiled.
+ class JITResolver {
+ typedef JITResolverState::FunctionToLazyStubMapTy FunctionToLazyStubMapTy;
+ typedef JITResolverState::CallSiteToFunctionMapTy CallSiteToFunctionMapTy;
+ typedef JITResolverState::GlobalToIndirectSymMapTy GlobalToIndirectSymMapTy;
+
+ /// LazyResolverFn - The target lazy resolver function that we actually
+ /// rewrite instructions to use.
+ TargetJITInfo::LazyResolverFn LazyResolverFn;
+
+ JITResolverState state;
+
+ /// ExternalFnToStubMap - This is the equivalent of FunctionToLazyStubMap
+ /// for external functions. TODO: Of course, external functions don't need
+ /// a lazy stub. It's actually here to make it more likely that far calls
+ /// succeed, but no single stub can guarantee that. I'll remove this in a
+ /// subsequent checkin when I actually fix far calls.
+ std::map<void*, void*> ExternalFnToStubMap;
+
+ /// revGOTMap - map addresses to indexes in the GOT
+ std::map<void*, unsigned> revGOTMap;
+ unsigned nextGOTIndex;
+
+ JITEmitter &JE;
+
+ static JITResolver *TheJITResolver;
+ public:
+ explicit JITResolver(JIT &jit, JITEmitter &je) : nextGOTIndex(0), JE(je) {
+ TheJIT = &jit;
+
+ LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
+ assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
+ TheJITResolver = this;
+ }
+
+ ~JITResolver() {
+ TheJITResolver = 0;
+ }
+
+ /// getLazyFunctionStubIfAvailable - This returns a pointer to a function's
+ /// lazy-compilation stub if it has already been created.
+ void *getLazyFunctionStubIfAvailable(Function *F);
+
+ /// getLazyFunctionStub - This returns a pointer to a function's
+ /// lazy-compilation stub, creating one on demand as needed.
+ void *getLazyFunctionStub(Function *F);
+
+ /// getExternalFunctionStub - Return a stub for the function at the
+ /// specified address, created lazily on demand.
+ void *getExternalFunctionStub(void *FnAddr);
+
+ /// getGlobalValueIndirectSym - Return an indirect symbol containing the
+ /// specified GV address.
+ void *getGlobalValueIndirectSym(GlobalValue *V, void *GVAddress);
+
+ void getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
+ SmallVectorImpl<void*> &Ptrs);
+
+ GlobalValue *invalidateStub(void *Stub);
+
+ /// getGOTIndexForAddr - Return a new or existing index in the GOT for
+ /// an address. This function only manages slots, it does not manage the
+ /// contents of the slots or the memory associated with the GOT.
+ unsigned getGOTIndexForAddr(void *addr);
+
+ /// JITCompilerFn - This function is called to resolve a stub to a compiled
+ /// address. If the LLVM Function corresponding to the stub has not yet
+ /// been compiled, this function compiles it first.
+ static void *JITCompilerFn(void *Stub);
+ };
+
+ /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
+ /// used to output functions to memory for execution.
+ class JITEmitter : public JITCodeEmitter {
+ JITMemoryManager *MemMgr;
+
+ // When outputting a function stub in the context of some other function, we
+ // save BufferBegin/BufferEnd/CurBufferPtr here.
+ uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
+
+ // When reattempting to JIT a function after running out of space, we store
+ // the estimated size of the function we're trying to JIT here, so we can
+ // ask the memory manager for at least this much space. When we
+ // successfully emit the function, we reset this back to zero.
+ uintptr_t SizeEstimate;
+
+ /// Relocations - These are the relocations that the function needs, as
+ /// emitted.
+ std::vector<MachineRelocation> Relocations;
+
+ /// MBBLocations - This vector is a mapping from MBB ID's to their address.
+ /// It is filled in by the StartMachineBasicBlock callback and queried by
+ /// the getMachineBasicBlockAddress callback.
+ std::vector<uintptr_t> MBBLocations;
+
+ /// ConstantPool - The constant pool for the current function.
+ ///
+ MachineConstantPool *ConstantPool;
+
+ /// ConstantPoolBase - A pointer to the first entry in the constant pool.
+ ///
+ void *ConstantPoolBase;
+
+ /// ConstPoolAddresses - Addresses of individual constant pool entries.
+ ///
+ SmallVector<uintptr_t, 8> ConstPoolAddresses;
+
+ /// JumpTable - The jump tables for the current function.
+ ///
+ MachineJumpTableInfo *JumpTable;
+
+ /// JumpTableBase - A pointer to the first entry in the jump table.
+ ///
+ void *JumpTableBase;
+
+ /// Resolver - This contains info about the currently resolved functions.
+ JITResolver Resolver;
+
+ /// DE - The dwarf emitter for the jit.
+ OwningPtr<JITDwarfEmitter> DE;
+
+ /// DR - The debug registerer for the jit.
+ OwningPtr<JITDebugRegisterer> DR;
+
+ /// LabelLocations - This vector is a mapping from Label ID's to their
+ /// address.
+ std::vector<uintptr_t> LabelLocations;
+
+ /// MMI - Machine module info for exception information.
+ MachineModuleInfo* MMI;
+
+ // GVSet - a set to keep track of which globals have been seen
+ SmallPtrSet<const GlobalVariable*, 8> GVSet;
+
+ // CurFn - The llvm function being emitted. Only valid during
+ // finishFunction().
+ const Function *CurFn;
+
+ /// Information about emitted code, which is passed to the
+ /// JITEventListeners. This is reset in startFunction and used in
+ /// finishFunction.
+ JITEvent_EmittedFunctionDetails EmissionDetails;
+
+ struct EmittedCode {
+ void *FunctionBody; // Beginning of the function's allocation.
+ void *Code; // The address the function's code actually starts at.
+ void *ExceptionTable;
+ EmittedCode() : FunctionBody(0), Code(0), ExceptionTable(0) {}
+ };
+ struct EmittedFunctionConfig : public ValueMapConfig<const Function*> {
+ typedef JITEmitter *ExtraData;
+ static void onDelete(JITEmitter *, const Function*);
+ static void onRAUW(JITEmitter *, const Function*, const Function*);
+ };
+ ValueMap<const Function *, EmittedCode,
+ EmittedFunctionConfig> EmittedFunctions;
+
+ // CurFnStubUses - For a given Function, a vector of stubs that it
+ // references. This facilitates the JIT detecting that a stub is no
+ // longer used, so that it may be deallocated.
+ DenseMap<AssertingVH<const Function>, SmallVector<void*, 1> > CurFnStubUses;
+
+ // StubFnRefs - For a given pointer to a stub, a set of Functions which
+ // reference the stub. When the count of a stub's references drops to zero,
+ // the stub is unused.
+ DenseMap<void *, SmallPtrSet<const Function*, 1> > StubFnRefs;
+
+ DILocation PrevDLT;
+
+ public:
+ JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM)
+ : SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0),
+ EmittedFunctions(this), PrevDLT(NULL) {
+ MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
+ if (jit.getJITInfo().needsGOT()) {
+ MemMgr->AllocateGOT();
+ DEBUG(dbgs() << "JIT is managing a GOT\n");
+ }
+
+ if (DwarfExceptionHandling || JITEmitDebugInfo) {
+ DE.reset(new JITDwarfEmitter(jit));
+ }
+ if (JITEmitDebugInfo) {
+ DR.reset(new JITDebugRegisterer(TM));
+ }
+ }
+ ~JITEmitter() {
+ delete MemMgr;
+ }
+
+ /// classof - Methods for support type inquiry through isa, cast, and
+ /// dyn_cast:
+ ///
+ static inline bool classof(const JITEmitter*) { return true; }
+ static inline bool classof(const MachineCodeEmitter*) { return true; }
+
+ JITResolver &getJITResolver() { return Resolver; }
+
+ virtual void startFunction(MachineFunction &F);
+ virtual bool finishFunction(MachineFunction &F);
+
+ void emitConstantPool(MachineConstantPool *MCP);
+ void initJumpTableInfo(MachineJumpTableInfo *MJTI);
+ void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
+
+ void startGVStub(const GlobalValue* GV,
+ unsigned StubSize, unsigned Alignment = 1);
+ void startGVStub(void *Buffer, unsigned StubSize);
+ void finishGVStub();
+ virtual void *allocIndirectGV(const GlobalValue *GV,
+ const uint8_t *Buffer, size_t Size,
+ unsigned Alignment);
+
+ /// allocateSpace - Reserves space in the current block if any, or
+ /// allocates a new one of the given size.
+ virtual void *allocateSpace(uintptr_t Size, unsigned Alignment);
+
+ /// allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
+ /// this method does not allocate memory in the current output buffer,
+ /// because a global may live longer than the current function.
+ virtual void *allocateGlobal(uintptr_t Size, unsigned Alignment);
+
+ virtual void addRelocation(const MachineRelocation &MR) {
+ Relocations.push_back(MR);
+ }
+
+ virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
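+ // The vector is grown to double the needed size so that repeated
+ // calls do not reallocate every time.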
+ if (MBBLocations.size() <= (unsigned)MBB->getNumber())
+ MBBLocations.resize((MBB->getNumber()+1)*2);
+ MBBLocations[MBB->getNumber()] = getCurrentPCValue();
+ DEBUG(dbgs() << "JIT: Emitting BB" << MBB->getNumber() << " at ["
+ << (void*) getCurrentPCValue() << "]\n");
+ }
+
+ virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
+ virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;
+
+ virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
+ assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
+ MBBLocations[MBB->getNumber()] && "MBB not emitted!");
+ return MBBLocations[MBB->getNumber()];
+ }
+
+ /// retryWithMoreMemory - Log a retry and deallocate all memory for the
+ /// given function. Increase the minimum allocation size so that we get
+ /// more memory next time.
+ void retryWithMoreMemory(MachineFunction &F);
+
+ /// deallocateMemForFunction - Deallocate all memory for the specified
+ /// function body.
+ void deallocateMemForFunction(const Function *F);
+
+ /// AddStubToCurrentFunction - Mark the current function being JIT'd as
+ /// using the stub at the specified address. Allows
+ /// deallocateMemForFunction to also remove stubs no longer referenced.
+ void AddStubToCurrentFunction(void *Stub);
+
+ virtual void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn);
+
+ virtual void emitLabel(uint64_t LabelID) {
+ if (LabelLocations.size() <= LabelID)
+ LabelLocations.resize((LabelID+1)*2);
+ LabelLocations[LabelID] = getCurrentPCValue();
+ }
+
+ virtual uintptr_t getLabelAddress(uint64_t LabelID) const {
+ assert(LabelLocations.size() > (unsigned)LabelID &&
+ LabelLocations[LabelID] && "Label not emitted!");
+ return LabelLocations[LabelID];
+ }
+
+ virtual void setModuleInfo(MachineModuleInfo* Info) {
+ MMI = Info;
+ if (DE.get()) DE->setModuleInfo(Info);
+ }
+
+ void setMemoryExecutable() {
+ MemMgr->setMemoryExecutable();
+ }
+
+ JITMemoryManager *getMemMgr() const { return MemMgr; }
+
+ private:
+ void *getPointerToGlobal(GlobalValue *GV, void *Reference,
+ bool MayNeedFarStub);
+ void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference);
+ unsigned addSizeOfGlobal(const GlobalVariable *GV, unsigned Size);
+ unsigned addSizeOfGlobalsInConstantVal(const Constant *C, unsigned Size);
+ unsigned addSizeOfGlobalsInInitializer(const Constant *Init, unsigned Size);
+ unsigned GetSizeOfGlobalsInBytes(MachineFunction &MF);
+ };
+}
+
+JITResolver *JITResolver::TheJITResolver = 0;
+
+void CallSiteValueMapConfig::onDelete(JITResolverState *JRS, Function *F) {
+ JRS->EraseAllCallSitesPrelocked(F);
+}
+
+/// getLazyFunctionStubIfAvailable - This returns a pointer to a function stub
+/// if it has already been created.
+void *JITResolver::getLazyFunctionStubIfAvailable(Function *F) {
+ MutexGuard locked(TheJIT->lock);
+
+ // If we already have a stub for this function, recycle it.
+ return state.getFunctionToLazyStubMap(locked).lookup(F);
+}
+
+/// getLazyFunctionStub - This returns a pointer to a function's lazy
+/// compilation stub, creating one on demand as needed.
+void *JITResolver::getLazyFunctionStub(Function *F) {
+ MutexGuard locked(TheJIT->lock);
+
+ // If we already have a lazy stub for this function, recycle it.
+ void *&Stub = state.getFunctionToLazyStubMap(locked)[F];
+ if (Stub) return Stub;
+
+ // Call the lazy resolver function if we are JIT'ing lazily. Otherwise we
+ // must resolve the symbol now.
+ void *Actual = TheJIT->isCompilingLazily()
+ ? (void *)(intptr_t)LazyResolverFn : (void *)0;
+
+ // If this is an external declaration, attempt to resolve the address now
+ // to place in the stub.
+ if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage()) {
+ Actual = TheJIT->getPointerToFunction(F);
+
+  // If we resolved the symbol to a null address (e.g. a weak external),
+ // don't emit a stub. Return a null pointer to the application.
+ if (!Actual) return 0;
+ }
+
+ TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
+ JE.startGVStub(F, SL.Size, SL.Alignment);
+ // Codegen a new stub, calling the lazy resolver or the actual address of the
+ // external function, if it was resolved.
+ Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual, JE);
+ JE.finishGVStub();
+
+ if (Actual != (void*)(intptr_t)LazyResolverFn) {
+ // If we are getting the stub for an external function, we really want the
+ // address of the stub in the GlobalAddressMap for the JIT, not the address
+ // of the external function.
+ TheJIT->updateGlobalMapping(F, Stub);
+ }
+
+ DEBUG(dbgs() << "JIT: Lazy stub emitted at [" << Stub << "] for function '"
+ << F->getName() << "'\n");
+
+ // Finally, keep track of the stub-to-Function mapping so that the
+ // JITCompilerFn knows which function to compile!
+ state.AddCallSite(locked, Stub, F);
+
+ // If we are JIT'ing non-lazily but need to call a function that does not
+ // exist yet, add it to the JIT's work list so that we can fill in the stub
+ // address later.
+ if (!Actual && !TheJIT->isCompilingLazily())
+ if (!isNonGhostDeclaration(F) && !F->hasAvailableExternallyLinkage())
+ TheJIT->addPendingFunction(F);
+
+ return Stub;
+}
+
+/// getGlobalValueIndirectSym - Return a lazy pointer containing the specified
+/// GV address.
+void *JITResolver::getGlobalValueIndirectSym(GlobalValue *GV, void *GVAddress) {
+ MutexGuard locked(TheJIT->lock);
+
+ // If we already have a stub for this global variable, recycle it.
+ void *&IndirectSym = state.getGlobalToIndirectSymMap(locked)[GV];
+ if (IndirectSym) return IndirectSym;
+
+ // Otherwise, codegen a new indirect symbol.
+ IndirectSym = TheJIT->getJITInfo().emitGlobalValueIndirectSym(GV, GVAddress,
+ JE);
+
+ DEBUG(dbgs() << "JIT: Indirect symbol emitted at [" << IndirectSym
+ << "] for GV '" << GV->getName() << "'\n");
+
+ return IndirectSym;
+}
+
+/// getExternalFunctionStub - Return a stub for the function at the
+/// specified address, created lazily on demand.
+void *JITResolver::getExternalFunctionStub(void *FnAddr) {
+ // If we already have a stub for this function, recycle it.
+ void *&Stub = ExternalFnToStubMap[FnAddr];
+ if (Stub) return Stub;
+
+ TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
+ JE.startGVStub(0, SL.Size, SL.Alignment);
+ Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr, JE);
+ JE.finishGVStub();
+
+ DEBUG(dbgs() << "JIT: Stub emitted at [" << Stub
+ << "] for external function at '" << FnAddr << "'\n");
+ return Stub;
+}
+
+unsigned JITResolver::getGOTIndexForAddr(void* addr) {
+ unsigned idx = revGOTMap[addr];
+ if (!idx) {
+ idx = ++nextGOTIndex;
+ revGOTMap[addr] = idx;
+ DEBUG(dbgs() << "JIT: Adding GOT entry " << idx << " for addr ["
+ << addr << "]\n");
+ }
+ return idx;
+}
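+
+// Note on the map above: GOT indices handed out here are 1-based, because
+// revGOTMap's default-constructed value of zero doubles as the "no entry yet"
+// sentinel, so GOT slot 0 is never assigned by getGOTIndexForAddr.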
+
+void JITResolver::getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
+ SmallVectorImpl<void*> &Ptrs) {
+ MutexGuard locked(TheJIT->lock);
+
+ const FunctionToLazyStubMapTy &FM = state.getFunctionToLazyStubMap(locked);
+ GlobalToIndirectSymMapTy &GM = state.getGlobalToIndirectSymMap(locked);
+
+ for (FunctionToLazyStubMapTy::const_iterator i = FM.begin(), e = FM.end();
+ i != e; ++i){
+ Function *F = i->first;
+ if (F->isDeclaration() && F->hasExternalLinkage()) {
+ GVs.push_back(i->first);
+ Ptrs.push_back(i->second);
+ }
+ }
+ for (GlobalToIndirectSymMapTy::iterator i = GM.begin(), e = GM.end();
+ i != e; ++i) {
+ GVs.push_back(i->first);
+ Ptrs.push_back(i->second);
+ }
+}
+
+GlobalValue *JITResolver::invalidateStub(void *Stub) {
+ MutexGuard locked(TheJIT->lock);
+
+ GlobalToIndirectSymMapTy &GM = state.getGlobalToIndirectSymMap(locked);
+
+ // Look up the cheap way first, to see if it's a function stub we are
+ // invalidating. If so, remove it from both the forward and reverse maps.
+ if (Function *F = state.EraseStub(locked, Stub)) {
+ return F;
+ }
+
+ // Otherwise, it might be an indirect symbol stub. Find it and remove it.
+ for (GlobalToIndirectSymMapTy::iterator i = GM.begin(), e = GM.end();
+ i != e; ++i) {
+ if (i->second != Stub)
+ continue;
+ GlobalValue *GV = i->first;
+ GM.erase(i);
+ return GV;
+ }
+
+ // Lastly, check to see if it's in the ExternalFnToStubMap.
+ for (std::map<void *, void *>::iterator i = ExternalFnToStubMap.begin(),
+ e = ExternalFnToStubMap.end(); i != e; ++i) {
+ if (i->second != Stub)
+ continue;
+ ExternalFnToStubMap.erase(i);
+ break;
+ }
+
+ return 0;
+}
+
+/// JITCompilerFn - This function is called when a lazy compilation stub has
+/// been entered. It looks up which function this stub corresponds to, compiles
+/// it if necessary, then returns the resultant function pointer.
+void *JITResolver::JITCompilerFn(void *Stub) {
+ JITResolver &JR = *TheJITResolver;
+
+ Function* F = 0;
+ void* ActualPtr = 0;
+
+ {
+ // Only lock for getting the Function. The call getPointerToFunction made
+ // in this function might trigger function materializing, which requires
+ // JIT lock to be unlocked.
+ MutexGuard locked(TheJIT->lock);
+
+ // The address given to us for the stub may not be exactly right, it might
+ // be a little bit after the stub. As such, use upper_bound to find it.
+    std::pair<void*, Function*> I =
+ JR.state.LookupFunctionFromCallSite(locked, Stub);
+ F = I.second;
+ ActualPtr = I.first;
+ }
+
+ // If we have already code generated the function, just return the address.
+ void *Result = TheJIT->getPointerToGlobalIfAvailable(F);
+
+ if (!Result) {
+ // Otherwise we don't have it, do lazy compilation now.
+
+ // If lazy compilation is disabled, emit a useful error message and abort.
+ if (!TheJIT->isCompilingLazily()) {
+ llvm_report_error("LLVM JIT requested to do lazy compilation of function '"
+ + F->getName() + "' when lazy compiles are disabled!");
+ }
+
+ DEBUG(dbgs() << "JIT: Lazily resolving function '" << F->getName()
+ << "' In stub ptr = " << Stub << " actual ptr = "
+ << ActualPtr << "\n");
+
+ Result = TheJIT->getPointerToFunction(F);
+ }
+
+ // Reacquire the lock to update the GOT map.
+ MutexGuard locked(TheJIT->lock);
+
+  // We might like to remove the call site from the CallSiteToFunction map, but
+  // we can't do that! Multiple threads could be stuck waiting to acquire the
+  // lock above. As soon as the first thread finishes compiling the function,
+  // the others will be released, and each needs to be able to find the
+  // function it has to call.
+
+ // FIXME: We could rewrite all references to this stub if we knew them.
+
+ // What we will do is set the compiled function address to map to the
+ // same GOT entry as the stub so that later clients may update the GOT
+ // if they see it still using the stub address.
+  // Note: this is done so the Resolver doesn't have to manage GOT memory.
+  // Do this without allocating map space if the target isn't using a GOT.
+  if (JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
+ JR.revGOTMap[Result] = JR.revGOTMap[Stub];
+
+ return Result;
+}
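+
+// A rough sketch of the lazy-compilation round trip implemented above: a call
+// site initially targets a stub emitted by getLazyFunctionStub; executing the
+// stub enters the target's lazy resolver, which ends up in JITCompilerFn; the
+// Function is then compiled (if it hasn't been already) via
+// getPointerToFunction, and the target-specific callback typically patches the
+// stub or call site so subsequent calls go straight to the compiled body.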
+
+//===----------------------------------------------------------------------===//
+// JITEmitter code.
+//
+void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
+ bool MayNeedFarStub) {
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ return TheJIT->getOrEmitGlobalVariable(GV);
+
+ if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+ return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false));
+
+ // If we have already compiled the function, return a pointer to its body.
+ Function *F = cast<Function>(V);
+
+ void *FnStub = Resolver.getLazyFunctionStubIfAvailable(F);
+ if (FnStub) {
+ // Return the function stub if it's already created. We do this first so
+ // that we're returning the same address for the function as any previous
+ // call. TODO: Yes, this is wrong. The lazy stub isn't guaranteed to be
+ // close enough to call.
+ AddStubToCurrentFunction(FnStub);
+ return FnStub;
+ }
+
+ // If we know the target can handle arbitrary-distance calls, try to
+ // return a direct pointer.
+ if (!MayNeedFarStub) {
+ // If we have code, go ahead and return that.
+ void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
+ if (ResultPtr) return ResultPtr;
+
+ // If this is an external function pointer, we can force the JIT to
+ // 'compile' it, which really just adds it to the map.
+ if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage())
+ return TheJIT->getPointerToFunction(F);
+ }
+
+  // Otherwise, we may need to emit a stub, and, conservatively, we
+ // always do so.
+ void *StubAddr = Resolver.getLazyFunctionStub(F);
+
+ // Add the stub to the current function's list of referenced stubs, so we can
+ // deallocate them if the current function is ever freed. It's possible to
+ // return null from getLazyFunctionStub in the case of a weak extern that
+ // fails to resolve.
+ if (StubAddr)
+ AddStubToCurrentFunction(StubAddr);
+
+ return StubAddr;
+}
+
+void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference) {
+ // Make sure GV is emitted first, and create a stub containing the fully
+ // resolved address.
+ void *GVAddress = getPointerToGlobal(V, Reference, false);
+ void *StubAddr = Resolver.getGlobalValueIndirectSym(V, GVAddress);
+
+ // Add the stub to the current function's list of referenced stubs, so we can
+ // deallocate them if the current function is ever freed.
+ AddStubToCurrentFunction(StubAddr);
+
+ return StubAddr;
+}
+
+void JITEmitter::AddStubToCurrentFunction(void *StubAddr) {
+ assert(CurFn && "Stub added to current function, but current function is 0!");
+
+ SmallVectorImpl<void*> &StubsUsed = CurFnStubUses[CurFn];
+ StubsUsed.push_back(StubAddr);
+
+ SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[StubAddr];
+ FnRefs.insert(CurFn);
+}
+
+void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
+ if (!DL.isUnknown()) {
+ DILocation CurDLT = EmissionDetails.MF->getDILocation(DL);
+
+ if (BeforePrintingInsn) {
+ if (CurDLT.getScope().getNode() != 0
+ && PrevDLT.getNode() != CurDLT.getNode()) {
+ JITEvent_EmittedFunctionDetails::LineStart NextLine;
+ NextLine.Address = getCurrentPCValue();
+ NextLine.Loc = DL;
+ EmissionDetails.LineStarts.push_back(NextLine);
+ }
+
+ PrevDLT = CurDLT;
+ }
+ }
+}
+
+static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
+ const TargetData *TD) {
+ const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
+ if (Constants.empty()) return 0;
+
+ unsigned Size = 0;
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ MachineConstantPoolEntry CPE = Constants[i];
+ unsigned AlignMask = CPE.getAlignment() - 1;
+ Size = (Size + AlignMask) & ~AlignMask;
+ const Type *Ty = CPE.getType();
+ Size += TD->getTypeAllocSize(Ty);
+ }
+ return Size;
+}
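+
+// Worked example of the padding logic above: for a pool holding an entry of
+// (size 4, align 4) followed by one of (size 8, align 8), Size becomes 4 after
+// the first entry, is rounded up to 8 for the second entry's alignment, and
+// ends at 16.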
+
+static unsigned GetJumpTableSizeInBytes(MachineJumpTableInfo *MJTI) {
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ if (JT.empty()) return 0;
+
+ unsigned NumEntries = 0;
+ for (unsigned i = 0, e = JT.size(); i != e; ++i)
+ NumEntries += JT[i].MBBs.size();
+
+ return NumEntries * MJTI->getEntrySize(*TheJIT->getTargetData());
+}
+
+static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) {
+ if (Alignment == 0) Alignment = 1;
+ // Since we do not know where the buffer will be allocated, be pessimistic.
+ return Size + Alignment;
+}
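+
+// For example, RoundUpToAlign(100, 16) returns 116: because the eventual base
+// address of the buffer is unknown at this point, reserving Size + Alignment
+// bytes is always enough to carve out Size bytes at the required alignment.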
+
+/// addSizeOfGlobal - add the size of the global (plus any alignment padding)
+/// into the running total Size.
+
+unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) {
+ const Type *ElTy = GV->getType()->getElementType();
+ size_t GVSize = (size_t)TheJIT->getTargetData()->getTypeAllocSize(ElTy);
+ size_t GVAlign =
+ (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
+ DEBUG(dbgs() << "JIT: Adding in size " << GVSize << " alignment " << GVAlign);
+ DEBUG(GV->dump());
+ // Assume code section ends with worst possible alignment, so first
+ // variable needs maximal padding.
+ if (Size==0)
+ Size = 1;
+ Size = ((Size+GVAlign-1)/GVAlign)*GVAlign;
+ Size += GVSize;
+ return Size;
+}
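+
+// Worked example: for a global with GVSize == 12 and GVAlign == 8, a running
+// Size of 0 is first bumped to 1 (the pessimistic padding for the code
+// section's unknown end alignment), rounded up to 8, and returned as 20.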
+
+/// addSizeOfGlobalsInConstantVal - find any globals that we haven't seen yet
+/// but are referenced from the constant; put them in GVSet and add their
+/// size into the running total Size.
+
+unsigned JITEmitter::addSizeOfGlobalsInConstantVal(const Constant *C,
+ unsigned Size) {
+  // If it's undefined, there are no globals to scan; return Size unchanged.
+ if (isa<UndefValue>(C))
+ return Size;
+
+ // If the value is a ConstantExpr
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ Constant *Op0 = CE->getOperand(0);
+ switch (CE->getOpcode()) {
+ case Instruction::GetElementPtr:
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast: {
+ Size = addSizeOfGlobalsInConstantVal(Op0, Size);
+ break;
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ Size = addSizeOfGlobalsInConstantVal(Op0, Size);
+ Size = addSizeOfGlobalsInConstantVal(CE->getOperand(1), Size);
+ break;
+ }
+ default: {
+ std::string msg;
+ raw_string_ostream Msg(msg);
+ Msg << "ConstantExpr not handled: " << *CE;
+ llvm_report_error(Msg.str());
+ }
+ }
+ }
+
+ if (C->getType()->getTypeID() == Type::PointerTyID)
+ if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C))
+ if (GVSet.insert(GV))
+ Size = addSizeOfGlobal(GV, Size);
+
+ return Size;
+}
+
+/// addSizeOfGlobalsInInitializer - handle any globals that we haven't seen yet
+/// but are referenced from the given initializer.
+
+unsigned JITEmitter::addSizeOfGlobalsInInitializer(const Constant *Init,
+ unsigned Size) {
+ if (!isa<UndefValue>(Init) &&
+ !isa<ConstantVector>(Init) &&
+ !isa<ConstantAggregateZero>(Init) &&
+ !isa<ConstantArray>(Init) &&
+ !isa<ConstantStruct>(Init) &&
+ Init->getType()->isFirstClassType())
+ Size = addSizeOfGlobalsInConstantVal(Init, Size);
+ return Size;
+}
+
+/// GetSizeOfGlobalsInBytes - walk the code for the function, looking for
+/// globals; then walk the initializers of those globals looking for more.
+/// If their size has not been considered yet, add it into the running total
+/// Size.
+
+unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) {
+ unsigned Size = 0;
+ GVSet.clear();
+
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+ MBB != E; ++MBB) {
+ for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ I != E; ++I) {
+ const TargetInstrDesc &Desc = I->getDesc();
+ const MachineInstr &MI = *I;
+ unsigned NumOps = Desc.getNumOperands();
+ for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) {
+ const MachineOperand &MO = MI.getOperand(CurOp);
+ if (MO.isGlobal()) {
+ GlobalValue* V = MO.getGlobal();
+ const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V);
+ if (!GV)
+ continue;
+ // If seen in previous function, it will have an entry here.
+ if (TheJIT->getPointerToGlobalIfAvailable(GV))
+ continue;
+ // If seen earlier in this function, it will have an entry here.
+ // FIXME: it should be possible to combine these tables, by
+ // assuming the addresses of the new globals in this module
+ // start at 0 (or something) and adjusting them after codegen
+ // complete. Another possibility is to grab a marker bit in GV.
+ if (GVSet.insert(GV))
+ // A variable as yet unseen. Add in its size.
+ Size = addSizeOfGlobal(GV, Size);
+ }
+ }
+ }
+ }
+ DEBUG(dbgs() << "JIT: About to look through initializers\n");
+ // Look for more globals that are referenced only from initializers.
+ // GVSet.end is computed each time because the set can grow as we go.
+ for (SmallPtrSet<const GlobalVariable *, 8>::iterator I = GVSet.begin();
+       I != GVSet.end(); ++I) {
+ const GlobalVariable* GV = *I;
+ if (GV->hasInitializer())
+ Size = addSizeOfGlobalsInInitializer(GV->getInitializer(), Size);
+ }
+
+ return Size;
+}
+
+void JITEmitter::startFunction(MachineFunction &F) {
+ DEBUG(dbgs() << "JIT: Starting CodeGen of Function "
+ << F.getFunction()->getName() << "\n");
+
+ uintptr_t ActualSize = 0;
+ // Set the memory writable, if it's not already
+ MemMgr->setMemoryWritable();
+ if (MemMgr->NeedsExactSize()) {
+ DEBUG(dbgs() << "JIT: ExactSize\n");
+ const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
+ MachineConstantPool *MCP = F.getConstantPool();
+
+    // Ensure the constant pool/jump table info is at least 16-byte aligned.
+ ActualSize = RoundUpToAlign(ActualSize, 16);
+
+ // Add the alignment of the constant pool
+ ActualSize = RoundUpToAlign(ActualSize, MCP->getConstantPoolAlignment());
+
+ // Add the constant pool size
+ ActualSize += GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+
+ if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo()) {
+      // Add the alignment of the jump table info
+ ActualSize = RoundUpToAlign(ActualSize,
+ MJTI->getEntryAlignment(*TheJIT->getTargetData()));
+
+ // Add the jump table size
+ ActualSize += GetJumpTableSizeInBytes(MJTI);
+ }
+
+ // Add the alignment for the function
+ ActualSize = RoundUpToAlign(ActualSize,
+ std::max(F.getFunction()->getAlignment(), 8U));
+
+ // Add the function size
+ ActualSize += TII->GetFunctionSizeInBytes(F);
+
+ DEBUG(dbgs() << "JIT: ActualSize before globals " << ActualSize << "\n");
+ // Add the size of the globals that will be allocated after this function.
+ // These are all the ones referenced from this function that were not
+ // previously allocated.
+ ActualSize += GetSizeOfGlobalsInBytes(F);
+ DEBUG(dbgs() << "JIT: ActualSize after globals " << ActualSize << "\n");
+ } else if (SizeEstimate > 0) {
+ // SizeEstimate will be non-zero on reallocation attempts.
+ ActualSize = SizeEstimate;
+ }
+
+ BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
+ ActualSize);
+ BufferEnd = BufferBegin+ActualSize;
+ EmittedFunctions[F.getFunction()].FunctionBody = BufferBegin;
+
+  // Ensure the constant pool/jump table info is at least 16-byte aligned.
+ emitAlignment(16);
+
+ emitConstantPool(F.getConstantPool());
+ if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
+ initJumpTableInfo(MJTI);
+
+ // About to start emitting the machine code for the function.
+ emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
+ TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);
+ EmittedFunctions[F.getFunction()].Code = CurBufferPtr;
+
+ MBBLocations.clear();
+
+ EmissionDetails.MF = &F;
+ EmissionDetails.LineStarts.clear();
+}
+
+bool JITEmitter::finishFunction(MachineFunction &F) {
+ if (CurBufferPtr == BufferEnd) {
+ // We must call endFunctionBody before retrying, because
+ // deallocateMemForFunction requires it.
+ MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
+ retryWithMoreMemory(F);
+ return true;
+ }
+
+ if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
+ emitJumpTableInfo(MJTI);
+
+ // FnStart is the start of the text, not the start of the constant pool and
+ // other per-function data.
+ uint8_t *FnStart =
+ (uint8_t *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());
+
+ // FnEnd is the end of the function's machine code.
+ uint8_t *FnEnd = CurBufferPtr;
+
+ if (!Relocations.empty()) {
+ CurFn = F.getFunction();
+ NumRelos += Relocations.size();
+
+ // Resolve the relocations to concrete pointers.
+ for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
+ MachineRelocation &MR = Relocations[i];
+ void *ResultPtr = 0;
+ if (!MR.letTargetResolve()) {
+ if (MR.isExternalSymbol()) {
+ ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol(),
+ false);
+ DEBUG(dbgs() << "JIT: Map \'" << MR.getExternalSymbol() << "\' to ["
+ << ResultPtr << "]\n");
+
+ // If the target REALLY wants a stub for this function, emit it now.
+ if (MR.mayNeedFarStub()) {
+ ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
+ }
+ } else if (MR.isGlobalValue()) {
+ ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
+ BufferBegin+MR.getMachineCodeOffset(),
+ MR.mayNeedFarStub());
+ } else if (MR.isIndirectSymbol()) {
+ ResultPtr = getPointerToGVIndirectSym(
+ MR.getGlobalValue(), BufferBegin+MR.getMachineCodeOffset());
+ } else if (MR.isBasicBlock()) {
+ ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
+ } else if (MR.isConstantPoolIndex()) {
+ ResultPtr = (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
+ } else {
+ assert(MR.isJumpTableIndex());
+ ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
+ }
+
+ MR.setResultPointer(ResultPtr);
+ }
+
+      // If we are managing the GOT and the relocation wants an index,
+      // give it one.
+ if (MR.isGOTRelative() && MemMgr->isManagingGOT()) {
+ unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
+ MR.setGOTIndex(idx);
+ if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) {
+ DEBUG(dbgs() << "JIT: GOT was out of date for " << ResultPtr
+ << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
+ << "\n");
+ ((void**)MemMgr->getGOTBase())[idx] = ResultPtr;
+ }
+ }
+ }
+
+ CurFn = 0;
+ TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
+ Relocations.size(), MemMgr->getGOTBase());
+ }
+
+ // Update the GOT entry for F to point to the new code.
+ if (MemMgr->isManagingGOT()) {
+ unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
+ if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) {
+ DEBUG(dbgs() << "JIT: GOT was out of date for " << (void*)BufferBegin
+ << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
+ << "\n");
+ ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin;
+ }
+ }
+
+ // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
+ // global variables that were referenced in the relocations.
+ MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
+
+ if (CurBufferPtr == BufferEnd) {
+ retryWithMoreMemory(F);
+ return true;
+ } else {
+ // Now that we've succeeded in emitting the function, reset the
+ // SizeEstimate back down to zero.
+ SizeEstimate = 0;
+ }
+
+ BufferBegin = CurBufferPtr = 0;
+ NumBytes += FnEnd-FnStart;
+
+ // Invalidate the icache if necessary.
+ sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart);
+
+ TheJIT->NotifyFunctionEmitted(*F.getFunction(), FnStart, FnEnd-FnStart,
+ EmissionDetails);
+
+ DEBUG(dbgs() << "JIT: Finished CodeGen of [" << (void*)FnStart
+ << "] Function: " << F.getFunction()->getName()
+ << ": " << (FnEnd-FnStart) << " bytes of text, "
+ << Relocations.size() << " relocations\n");
+
+ Relocations.clear();
+ ConstPoolAddresses.clear();
+
+ // Mark code region readable and executable if it's not so already.
+ MemMgr->setMemoryExecutable();
+
+ DEBUG(
+ if (sys::hasDisassembler()) {
+ dbgs() << "JIT: Disassembled code:\n";
+ dbgs() << sys::disassembleBuffer(FnStart, FnEnd-FnStart,
+ (uintptr_t)FnStart);
+ } else {
+ dbgs() << "JIT: Binary code:\n";
+ uint8_t* q = FnStart;
+ for (int i = 0; q < FnEnd; q += 4, ++i) {
+ if (i == 4)
+ i = 0;
+ if (i == 0)
+ dbgs() << "JIT: " << (long)(q - FnStart) << ": ";
+ bool Done = false;
+ for (int j = 3; j >= 0; --j) {
+ if (q + j >= FnEnd)
+ Done = true;
+ else
+ dbgs() << (unsigned short)q[j];
+ }
+ if (Done)
+ break;
+ dbgs() << ' ';
+ if (i == 3)
+ dbgs() << '\n';
+ }
+      dbgs() << '\n';
+ }
+ );
+
+ if (DwarfExceptionHandling || JITEmitDebugInfo) {
+ uintptr_t ActualSize = 0;
+ SavedBufferBegin = BufferBegin;
+ SavedBufferEnd = BufferEnd;
+ SavedCurBufferPtr = CurBufferPtr;
+
+ if (MemMgr->NeedsExactSize()) {
+ ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd);
+ }
+
+ BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(),
+ ActualSize);
+ BufferEnd = BufferBegin+ActualSize;
+ EmittedFunctions[F.getFunction()].ExceptionTable = BufferBegin;
+ uint8_t *EhStart;
+ uint8_t *FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd,
+ EhStart);
+ MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr,
+ FrameRegister);
+ uint8_t *EhEnd = CurBufferPtr;
+ BufferBegin = SavedBufferBegin;
+ BufferEnd = SavedBufferEnd;
+ CurBufferPtr = SavedCurBufferPtr;
+
+ if (DwarfExceptionHandling) {
+ TheJIT->RegisterTable(FrameRegister);
+ }
+
+ if (JITEmitDebugInfo) {
+ DebugInfo I;
+ I.FnStart = FnStart;
+ I.FnEnd = FnEnd;
+ I.EhStart = EhStart;
+ I.EhEnd = EhEnd;
+ DR->RegisterFunction(F.getFunction(), I);
+ }
+ }
+
+ if (MMI)
+ MMI->EndFunction();
+
+ return false;
+}
+
+void JITEmitter::retryWithMoreMemory(MachineFunction &F) {
+ DEBUG(dbgs() << "JIT: Ran out of space for native code. Reattempting.\n");
+ Relocations.clear(); // Clear the old relocations or we'll reapply them.
+ ConstPoolAddresses.clear();
+ ++NumRetries;
+ deallocateMemForFunction(F.getFunction());
+ // Try again with at least twice as much free space.
+ SizeEstimate = (uintptr_t)(2 * (BufferEnd - BufferBegin));
+}
+
+/// deallocateMemForFunction - Deallocate all memory for the specified
+/// function body. Also drop any references the function has to stubs.
+/// May be called while the Function is being destroyed inside ~Value().
+void JITEmitter::deallocateMemForFunction(const Function *F) {
+ ValueMap<const Function *, EmittedCode, EmittedFunctionConfig>::iterator
+ Emitted = EmittedFunctions.find(F);
+ if (Emitted != EmittedFunctions.end()) {
+ MemMgr->deallocateFunctionBody(Emitted->second.FunctionBody);
+ MemMgr->deallocateExceptionTable(Emitted->second.ExceptionTable);
+ TheJIT->NotifyFreeingMachineCode(Emitted->second.Code);
+
+ EmittedFunctions.erase(Emitted);
+ }
+
+ // TODO: Do we need to unregister exception handling information from libgcc
+ // here?
+
+ if (JITEmitDebugInfo) {
+ DR->UnregisterFunction(F);
+ }
+
+ // If the function did not reference any stubs, return.
+ if (CurFnStubUses.find(F) == CurFnStubUses.end())
+ return;
+
+ // For each referenced stub, erase the reference to this function, and then
+ // erase the list of referenced stubs.
+ SmallVectorImpl<void *> &StubList = CurFnStubUses[F];
+ for (unsigned i = 0, e = StubList.size(); i != e; ++i) {
+ void *Stub = StubList[i];
+
+ // If we already invalidated this stub for this function, continue.
+ if (StubFnRefs.count(Stub) == 0)
+ continue;
+
+ SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[Stub];
+ FnRefs.erase(F);
+
+ // If this function was the last reference to the stub, invalidate the stub
+ // in the JITResolver. Were there a memory manager deallocateStub routine,
+ // we could call that at this point too.
+ if (FnRefs.empty()) {
+ DEBUG(dbgs() << "\nJIT: Invalidated Stub at [" << Stub << "]\n");
+ StubFnRefs.erase(Stub);
+
+ // Invalidate the stub. If it is a GV stub, update the JIT's global
+ // mapping for that GV to zero.
+ GlobalValue *GV = Resolver.invalidateStub(Stub);
+ if (GV) {
+ TheJIT->updateGlobalMapping(GV, 0);
+ }
+ }
+ }
+ CurFnStubUses.erase(F);
+}
+
+
+void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
+ if (BufferBegin)
+ return JITCodeEmitter::allocateSpace(Size, Alignment);
+
+  // Create a new memory block if there is no active one.
+  // Care must be taken so that BufferBegin is invalidated when a
+  // block is trimmed.
+ BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment);
+ BufferEnd = BufferBegin+Size;
+ return CurBufferPtr;
+}
+
+void* JITEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
+ // Delegate this call through the memory manager.
+ return MemMgr->allocateGlobal(Size, Alignment);
+}
+
+void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
+ if (TheJIT->getJITInfo().hasCustomConstantPool())
+ return;
+
+ const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
+ if (Constants.empty()) return;
+
+ unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+ unsigned Align = MCP->getConstantPoolAlignment();
+ ConstantPoolBase = allocateSpace(Size, Align);
+ ConstantPool = MCP;
+
+ if (ConstantPoolBase == 0) return; // Buffer overflow.
+
+ DEBUG(dbgs() << "JIT: Emitted constant pool at [" << ConstantPoolBase
+ << "] (size: " << Size << ", alignment: " << Align << ")\n");
+
+ // Initialize the memory for all of the constant pool entries.
+ unsigned Offset = 0;
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ MachineConstantPoolEntry CPE = Constants[i];
+ unsigned AlignMask = CPE.getAlignment() - 1;
+ Offset = (Offset + AlignMask) & ~AlignMask;
+
+ uintptr_t CAddr = (uintptr_t)ConstantPoolBase + Offset;
+ ConstPoolAddresses.push_back(CAddr);
+ if (CPE.isMachineConstantPoolEntry()) {
+ // FIXME: add support to lower machine constant pool values into bytes!
+ llvm_report_error("Initialize memory with machine specific constant pool"
+ "entry has not been implemented!");
+ }
+ TheJIT->InitializeMemory(CPE.Val.ConstVal, (void*)CAddr);
+ DEBUG(dbgs() << "JIT: CP" << i << " at [0x";
+ dbgs().write_hex(CAddr) << "]\n");
+
+ const Type *Ty = CPE.Val.ConstVal->getType();
+ Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty);
+ }
+}
+
+void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
+ if (TheJIT->getJITInfo().hasCustomJumpTables())
+ return;
+
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ if (JT.empty()) return;
+
+ unsigned NumEntries = 0;
+ for (unsigned i = 0, e = JT.size(); i != e; ++i)
+ NumEntries += JT[i].MBBs.size();
+
+ unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getTargetData());
+
+ // Just allocate space for all the jump tables now. We will fix up the actual
+ // MBB entries in the tables after we emit the code for each block, since then
+ // we will know the final locations of the MBBs in memory.
+ JumpTable = MJTI;
+ JumpTableBase = allocateSpace(NumEntries * EntrySize,
+ MJTI->getEntryAlignment(*TheJIT->getTargetData()));
+}
+
+void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
+ if (TheJIT->getJITInfo().hasCustomJumpTables())
+ return;
+
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ if (JT.empty() || JumpTableBase == 0) return;
+
+ switch (MJTI->getEntryKind()) {
+ case MachineJumpTableInfo::EK_BlockAddress: {
+ // EK_BlockAddress - Each entry is a plain address of block, e.g.:
+ // .word LBB123
+ assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == sizeof(void*) &&
+ "Cross JIT'ing?");
+
+ // For each jump table, map each target in the jump table to the address of
+ // an emitted MachineBasicBlock.
+ intptr_t *SlotPtr = (intptr_t*)JumpTableBase;
+
+ for (unsigned i = 0, e = JT.size(); i != e; ++i) {
+ const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
+ // Store the address of the basic block for this jump table slot in the
+ // memory we allocated for the jump table in 'initJumpTableInfo'
+ for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
+ *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
+ }
+ break;
+ }
+
+ case MachineJumpTableInfo::EK_Custom32:
+ case MachineJumpTableInfo::EK_GPRel32BlockAddress:
+ case MachineJumpTableInfo::EK_LabelDifference32: {
+    assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == 4 &&
+           "Cross JIT'ing?");
+ // For each jump table, place the offset from the beginning of the table
+ // to the target address.
+ int *SlotPtr = (int*)JumpTableBase;
+
+ for (unsigned i = 0, e = JT.size(); i != e; ++i) {
+ const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
+ // Store the offset of the basic block for this jump table slot in the
+ // memory we allocated for the jump table in 'initJumpTableInfo'
+ uintptr_t Base = (uintptr_t)SlotPtr;
+ for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) {
+ uintptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]);
+        /// FIXME: Use EntryKind instead of magic "getPICJumpTableEntry" hook.
+ *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base);
+ }
+ }
+ break;
+ }
+ }
+}
+
+void JITEmitter::startGVStub(const GlobalValue* GV,
+ unsigned StubSize, unsigned Alignment) {
+ SavedBufferBegin = BufferBegin;
+ SavedBufferEnd = BufferEnd;
+ SavedCurBufferPtr = CurBufferPtr;
+
+ BufferBegin = CurBufferPtr = MemMgr->allocateStub(GV, StubSize, Alignment);
+ BufferEnd = BufferBegin+StubSize+1;
+}
+
+void JITEmitter::startGVStub(void *Buffer, unsigned StubSize) {
+ SavedBufferBegin = BufferBegin;
+ SavedBufferEnd = BufferEnd;
+ SavedCurBufferPtr = CurBufferPtr;
+
+ BufferBegin = CurBufferPtr = (uint8_t *)Buffer;
+ BufferEnd = BufferBegin+StubSize+1;
+}
+
+void JITEmitter::finishGVStub() {
+ assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
+ NumBytes += getCurrentPCOffset();
+ BufferBegin = SavedBufferBegin;
+ BufferEnd = SavedBufferEnd;
+ CurBufferPtr = SavedCurBufferPtr;
+}
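+
+// Note on the "+1" in the startGVStub overloads above: BufferEnd is set one
+// byte past the allocated stub, so a stub that fills its allocation exactly
+// still leaves CurBufferPtr != BufferEnd. This lets the assertion in
+// finishGVStub distinguish a full stub from a genuine overflow, since the
+// emitter stops advancing CurBufferPtr once it reaches BufferEnd.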
+
+void *JITEmitter::allocIndirectGV(const GlobalValue *GV,
+ const uint8_t *Buffer, size_t Size,
+ unsigned Alignment) {
+ uint8_t *IndGV = MemMgr->allocateStub(GV, Size, Alignment);
+ memcpy(IndGV, Buffer, Size);
+ return IndGV;
+}
+
+// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
+// in the constant pool that was last emitted with the 'emitConstantPool'
+// method.
+//
+uintptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
+ assert(ConstantNum < ConstantPool->getConstants().size() &&
+ "Invalid ConstantPoolIndex!");
+ return ConstPoolAddresses[ConstantNum];
+}
+
+// getJumpTableEntryAddress - Return the address of the JumpTable with index
+// 'Index' in the jump table that was last initialized with 'initJumpTableInfo'
+//
+uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
+ const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
+ assert(Index < JT.size() && "Invalid jump table index!");
+
+ unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getTargetData());
+
+ unsigned Offset = 0;
+ for (unsigned i = 0; i < Index; ++i)
+ Offset += JT[i].MBBs.size();
+
+ Offset *= EntrySize;
+
+ return (uintptr_t)((char *)JumpTableBase + Offset);
+}
+
+void JITEmitter::EmittedFunctionConfig::onDelete(
+ JITEmitter *Emitter, const Function *F) {
+ Emitter->deallocateMemForFunction(F);
+}
+void JITEmitter::EmittedFunctionConfig::onRAUW(
+ JITEmitter *, const Function*, const Function*) {
+ llvm_unreachable("The JIT doesn't know how to handle a"
+ " RAUW on a value it has emitted.");
+}
+
+
+//===----------------------------------------------------------------------===//
+// Public interface to this file
+//===----------------------------------------------------------------------===//
+
+JITCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM,
+ TargetMachine &tm) {
+ return new JITEmitter(jit, JMM, tm);
+}
+
+// getPointerToNamedFunction - This function is used as a global wrapper to
+// JIT::getPointerToNamedFunction for the purpose of resolving symbols when
+// bugpoint is debugging the JIT. In that scenario, we are loading an .so and
+// need to resolve function(s) that are being mis-codegenerated, so we need to
+// resolve their addresses at runtime, and this is the way to do it.
+extern "C" {
+ void *getPointerToNamedFunction(const char *Name) {
+ if (Function *F = TheJIT->FindFunctionNamed(Name))
+ return TheJIT->getPointerToFunction(F);
+ return TheJIT->getPointerToNamedFunction(Name);
+ }
+}
+
+// getPointerToFunctionOrStub - If the specified function has been
+// code-gen'd, return a pointer to the function. If not, compile it, or use
+// a stub to implement lazy compilation if available.
+//
+void *JIT::getPointerToFunctionOrStub(Function *F) {
+ // If we have already code generated the function, just return the address.
+ if (void *Addr = getPointerToGlobalIfAvailable(F))
+ return Addr;
+
+ // Get a stub if the target supports it.
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+ return JE->getJITResolver().getLazyFunctionStub(F);
+}
+
+void JIT::updateFunctionStub(Function *F) {
+ // Get the empty stub we generated earlier.
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+ void *Stub = JE->getJITResolver().getLazyFunctionStub(F);
+ void *Addr = getPointerToGlobalIfAvailable(F);
+ assert(Addr != Stub && "Function must have non-stub address to be updated.");
+
+ // Tell the target jit info to rewrite the stub at the specified address,
+ // rather than creating a new one.
+ TargetJITInfo::StubLayout layout = getJITInfo().getStubLayout();
+ JE->startGVStub(Stub, layout.Size);
+ getJITInfo().emitFunctionStub(F, Addr, *getCodeEmitter());
+ JE->finishGVStub();
+}
+
+/// freeMachineCodeForFunction - release machine code memory for given Function.
+///
+void JIT::freeMachineCodeForFunction(Function *F) {
+ // Delete translation for this from the ExecutionEngine, so it will get
+ // retranslated next time it is used.
+ updateGlobalMapping(F, 0);
+
+ // Free the actual memory for the function body and related stuff.
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ cast<JITEmitter>(JCE)->deallocateMemForFunction(F);
+}
diff --git a/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
new file mode 100644
index 0000000..a17caa17
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -0,0 +1,729 @@
+//===-- JITMemoryManager.cpp - Memory Allocator for JIT'd code ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DefaultJITMemoryManager class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jit"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/GlobalValue.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Memory.h"
+#include <map>
+#include <vector>
+#include <cassert>
+#include <climits>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+using namespace llvm;
+
+STATISTIC(NumSlabs, "Number of slabs of memory allocated by the JIT");
+
+JITMemoryManager::~JITMemoryManager() {}
+
+//===----------------------------------------------------------------------===//
+// Memory Block Implementation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// MemoryRangeHeader - For a range of memory, this is the header that we put
+ /// on the block of memory. It is carefully crafted to be one word of memory.
+ /// Allocated blocks have just this header, free'd blocks have FreeRangeHeader
+ /// which starts with this.
+ struct FreeRangeHeader;
+ struct MemoryRangeHeader {
+ /// ThisAllocated - This is true if this block is currently allocated. If
+ /// not, this can be converted to a FreeRangeHeader.
+ unsigned ThisAllocated : 1;
+
+ /// PrevAllocated - Keep track of whether the block immediately before us is
+ /// allocated. If not, the word immediately before this header is the size
+ /// of the previous block.
+ unsigned PrevAllocated : 1;
+
+ /// BlockSize - This is the size in bytes of this memory block,
+ /// including this header.
+ uintptr_t BlockSize : (sizeof(intptr_t)*CHAR_BIT - 2);
+
+
+ /// getBlockAfter - Return the memory block immediately after this one.
+ ///
+ MemoryRangeHeader &getBlockAfter() const {
+ return *(MemoryRangeHeader*)((char*)this+BlockSize);
+ }
+
+ /// getFreeBlockBefore - If the block before this one is free, return it,
+ /// otherwise return null.
+ FreeRangeHeader *getFreeBlockBefore() const {
+ if (PrevAllocated) return 0;
+ intptr_t PrevSize = ((intptr_t *)this)[-1];
+ return (FreeRangeHeader*)((char*)this-PrevSize);
+ }
+
+ /// FreeBlock - Turn an allocated block into a free block, adjusting
+ /// bits in the object headers, and adding an end of region memory block.
+ FreeRangeHeader *FreeBlock(FreeRangeHeader *FreeList);
+
+ /// TrimAllocationToSize - If this allocated block is significantly larger
+ /// than NewSize, split it into two pieces (where the former is NewSize
+ /// bytes, including the header), and add the new block to the free list.
+ FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList,
+ uint64_t NewSize);
+ };
+
+ /// FreeRangeHeader - For a memory block that isn't already allocated, this
+ /// keeps track of the current block and has a pointer to the next free block.
+ /// Free blocks are kept on a circularly linked list.
+ struct FreeRangeHeader : public MemoryRangeHeader {
+ FreeRangeHeader *Prev;
+ FreeRangeHeader *Next;
+
+ /// getMinBlockSize - Get the minimum size for a memory block. Blocks
+ /// smaller than this size cannot be created.
+ static unsigned getMinBlockSize() {
+ return sizeof(FreeRangeHeader)+sizeof(intptr_t);
+ }
+
+ /// SetEndOfBlockSizeMarker - The word at the end of every free block is
+ /// known to be the size of the free block. Set it for this block.
+ void SetEndOfBlockSizeMarker() {
+ void *EndOfBlock = (char*)this + BlockSize;
+ ((intptr_t *)EndOfBlock)[-1] = BlockSize;
+ }
+
+ FreeRangeHeader *RemoveFromFreeList() {
+ assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
+ Next->Prev = Prev;
+ return Prev->Next = Next;
+ }
+
+ void AddToFreeList(FreeRangeHeader *FreeList) {
+ Next = FreeList;
+ Prev = FreeList->Prev;
+ Prev->Next = this;
+ Next->Prev = this;
+ }
+
+ /// GrowBlock - The block after this block just got deallocated. Merge it
+ /// into the current block.
+ void GrowBlock(uintptr_t NewSize);
+
+ /// AllocateBlock - Mark this entire block allocated, updating freelists
+ /// etc. This returns a pointer to the circular free-list.
+ FreeRangeHeader *AllocateBlock();
+ };
+}
+
+
+/// AllocateBlock - Mark this entire block allocated, updating freelists
+/// etc. This returns a pointer to the circular free-list.
+FreeRangeHeader *FreeRangeHeader::AllocateBlock() {
+ assert(!ThisAllocated && !getBlockAfter().PrevAllocated &&
+ "Cannot allocate an allocated block!");
+ // Mark this block allocated.
+ ThisAllocated = 1;
+ getBlockAfter().PrevAllocated = 1;
+
+ // Remove it from the free list.
+ return RemoveFromFreeList();
+}
+
+/// FreeBlock - Turn an allocated block into a free block, adjusting
+/// bits in the object headers, and adding an end of region memory block.
+/// If possible, coalesce this block with neighboring blocks. Return the
+/// FreeRangeHeader to allocate from.
+FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
+ MemoryRangeHeader *FollowingBlock = &getBlockAfter();
+ assert(ThisAllocated && "This block is already free!");
+ assert(FollowingBlock->PrevAllocated && "Flags out of sync!");
+
+ FreeRangeHeader *FreeListToReturn = FreeList;
+
+ // If the block after this one is free, merge it into this block.
+ if (!FollowingBlock->ThisAllocated) {
+ FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock;
+ // "FreeList" always needs to be a valid free block. If we're about to
+ // coalesce with it, update our notion of what the free list is.
+ if (&FollowingFreeBlock == FreeList) {
+ FreeList = FollowingFreeBlock.Next;
+ FreeListToReturn = 0;
+ assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
+ }
+ FollowingFreeBlock.RemoveFromFreeList();
+
+ // Include the following block into this one.
+ BlockSize += FollowingFreeBlock.BlockSize;
+ FollowingBlock = &FollowingFreeBlock.getBlockAfter();
+
+ // Tell the block after the block we are coalescing that this block is
+ // allocated.
+ FollowingBlock->PrevAllocated = 1;
+ }
+
+ assert(FollowingBlock->ThisAllocated && "Missed coalescing?");
+
+ if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
+ PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
+ return FreeListToReturn ? FreeListToReturn : PrevFreeBlock;
+ }
+
+ // Otherwise, mark this block free.
+ FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
+ FollowingBlock->PrevAllocated = 0;
+ FreeBlock.ThisAllocated = 0;
+
+ // Link this into the linked list of free blocks.
+ FreeBlock.AddToFreeList(FreeList);
+
+ // Add a marker at the end of the block, indicating the size of this free
+ // block.
+ FreeBlock.SetEndOfBlockSizeMarker();
+ return FreeListToReturn ? FreeListToReturn : &FreeBlock;
+}
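+
+// Coalescing scenario for FreeBlock above: given adjacent blocks
+// [free A][this][free B], block B is merged into this one first (and removed
+// from the free list), then the combined range is folded into A via GrowBlock,
+// so the whole region ends up as a single free block headed by A.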
+
+/// GrowBlock - The block after this block just got deallocated. Merge it
+/// into the current block.
+void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
+ assert(NewSize > BlockSize && "Not growing block?");
+ BlockSize = NewSize;
+ SetEndOfBlockSizeMarker();
+ getBlockAfter().PrevAllocated = 0;
+}
+
+/// TrimAllocationToSize - If this allocated block is significantly larger
+/// than NewSize, split it into two pieces (where the former is NewSize
+/// bytes, including the header), and add the new block to the free list.
+FreeRangeHeader *MemoryRangeHeader::
+TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
+ assert(ThisAllocated && getBlockAfter().PrevAllocated &&
+ "Cannot deallocate part of an allocated block!");
+
+ // Don't allow blocks to be trimmed below minimum required size
+ NewSize = std::max<uint64_t>(FreeRangeHeader::getMinBlockSize(), NewSize);
+
+ // Round up size for alignment of header.
+ unsigned HeaderAlign = __alignof(FreeRangeHeader);
+  NewSize = (NewSize + (HeaderAlign-1)) & ~(HeaderAlign-1);
+
+ // Size is now the size of the block we will remove from the start of the
+ // current block.
+ assert(NewSize <= BlockSize &&
+ "Allocating more space from this block than exists!");
+
+ // If splitting this block will cause the remainder to be too small, do not
+ // split the block.
+ if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
+ return FreeList;
+
+ // Otherwise, we splice the required number of bytes out of this block, form
+ // a new block immediately after it, then mark this block allocated.
+ MemoryRangeHeader &FormerNextBlock = getBlockAfter();
+
+ // Change the size of this block.
+ BlockSize = NewSize;
+
+ // Get the new block we just sliced out and turn it into a free block.
+ FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
+ NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
+ NewNextBlock.ThisAllocated = 0;
+ NewNextBlock.PrevAllocated = 1;
+ NewNextBlock.SetEndOfBlockSizeMarker();
+ FormerNextBlock.PrevAllocated = 0;
+ NewNextBlock.AddToFreeList(FreeList);
+ return &NewNextBlock;
+}
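+
+// Worked example, assuming a 64-bit target where sizeof(MemoryRangeHeader) is
+// 8 and FreeRangeHeader::getMinBlockSize() is 32: trimming a 256-byte block to
+// NewSize == 100 first rounds NewSize up to 104 for header alignment; since
+// 256 > 104 + 32, the block is split into a 104-byte allocated block and a
+// 152-byte free block that is returned for future allocations.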
+
+//===----------------------------------------------------------------------===//
+// Memory Block Implementation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+ class DefaultJITMemoryManager;
+
+ class JITSlabAllocator : public SlabAllocator {
+ DefaultJITMemoryManager &JMM;
+ public:
+ JITSlabAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
+ virtual ~JITSlabAllocator() { }
+ virtual MemSlab *Allocate(size_t Size);
+ virtual void Deallocate(MemSlab *Slab);
+ };
+
+ /// DefaultJITMemoryManager - Manage memory for the JIT code generation.
+ /// This splits a large block of MAP_NORESERVE'd memory into two
+ /// sections, one for function stubs, one for the functions themselves. We
+ /// have to do this because we may need to emit a function stub while in the
+ /// middle of emitting a function, and we don't know how large the function we
+ /// are emitting is.
+ class DefaultJITMemoryManager : public JITMemoryManager {
+
+ // Whether to poison freed memory.
+ bool PoisonMemory;
+
+ /// LastSlab - This points to the last slab allocated and is used as the
+ /// NearBlock parameter to AllocateRWX so that we can attempt to lay out all
+ /// stubs, data, and code contiguously in memory. In general, however, this
+ /// is not possible because the NearBlock parameter is ignored on Windows
+    /// platforms and even on Unix it works on a best-effort basis.
+ sys::MemoryBlock LastSlab;
+
+ // Memory slabs allocated by the JIT. We refer to them as slabs so we don't
+ // confuse them with the blocks of memory described above.
+ std::vector<sys::MemoryBlock> CodeSlabs;
+ JITSlabAllocator BumpSlabAllocator;
+ BumpPtrAllocator StubAllocator;
+ BumpPtrAllocator DataAllocator;
+
+ // Circular list of free blocks.
+ FreeRangeHeader *FreeMemoryList;
+
+ // When emitting code into a memory block, this is the block.
+ MemoryRangeHeader *CurBlock;
+
+ uint8_t *GOTBase; // Target Specific reserved memory
+ public:
+ DefaultJITMemoryManager();
+ ~DefaultJITMemoryManager();
+
+ /// allocateNewSlab - Allocates a new MemoryBlock and remembers it as the
+ /// last slab it allocated, so that subsequent allocations follow it.
+ sys::MemoryBlock allocateNewSlab(size_t size);
+
+ /// DefaultCodeSlabSize - When we have to go map more memory, we allocate at
+ /// least this much unless more is requested.
+ static const size_t DefaultCodeSlabSize;
+
+ /// DefaultSlabSize - Allocate data into slabs of this size unless we get
+ /// an allocation above SizeThreshold.
+ static const size_t DefaultSlabSize;
+
+ /// DefaultSizeThreshold - For any allocation larger than this threshold, we
+ /// should allocate a separate slab.
+ static const size_t DefaultSizeThreshold;
+
+ void AllocateGOT();
+
+ // Testing methods.
+ virtual bool CheckInvariants(std::string &ErrorStr);
+ size_t GetDefaultCodeSlabSize() { return DefaultCodeSlabSize; }
+ size_t GetDefaultDataSlabSize() { return DefaultSlabSize; }
+ size_t GetDefaultStubSlabSize() { return DefaultSlabSize; }
+ unsigned GetNumCodeSlabs() { return CodeSlabs.size(); }
+ unsigned GetNumDataSlabs() { return DataAllocator.GetNumSlabs(); }
+ unsigned GetNumStubSlabs() { return StubAllocator.GetNumSlabs(); }
+
+ /// startFunctionBody - When a function starts, allocate a block of free
+ /// executable memory, returning a pointer to it and its actual size.
+ uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {
+
+ FreeRangeHeader* candidateBlock = FreeMemoryList;
+ FreeRangeHeader* head = FreeMemoryList;
+ FreeRangeHeader* iter = head->Next;
+
+ uintptr_t largest = candidateBlock->BlockSize;
+
+ // Search for the largest free block
+ while (iter != head) {
+ if (iter->BlockSize > largest) {
+ largest = iter->BlockSize;
+ candidateBlock = iter;
+ }
+ iter = iter->Next;
+ }
+
+ largest = largest - sizeof(MemoryRangeHeader);
+
+ // If this block isn't big enough for the allocation desired, allocate
+ // another block of memory and add it to the free list.
+ if (largest < ActualSize ||
+ largest <= FreeRangeHeader::getMinBlockSize()) {
+ DEBUG(dbgs() << "JIT: Allocating another slab of memory for function.");
+ candidateBlock = allocateNewCodeSlab((size_t)ActualSize);
+ }
+
+ // Select this candidate block for allocation
+ CurBlock = candidateBlock;
+
+ // Allocate the entire memory block.
+ FreeMemoryList = candidateBlock->AllocateBlock();
+ ActualSize = CurBlock->BlockSize - sizeof(MemoryRangeHeader);
+ return (uint8_t *)(CurBlock + 1);
+ }
+
+ /// allocateNewCodeSlab - Helper method to allocate a new slab of code
+ /// memory from the OS and add it to the free list. Returns the new
+ /// FreeRangeHeader at the base of the slab.
+ FreeRangeHeader *allocateNewCodeSlab(size_t MinSize) {
+ // If the user needs at least MinSize free memory, then we account for
+ // two MemoryRangeHeaders: the one in the user's block, and the one at the
+ // end of the slab.
+ size_t PaddedMin = MinSize + 2 * sizeof(MemoryRangeHeader);
+ size_t SlabSize = std::max(DefaultCodeSlabSize, PaddedMin);
+ sys::MemoryBlock B = allocateNewSlab(SlabSize);
+ CodeSlabs.push_back(B);
+ char *MemBase = (char*)(B.base());
+
+ // Put a tiny allocated block at the end of the memory chunk, so when
+ // FreeBlock calls getBlockAfter it doesn't fall off the end.
+ MemoryRangeHeader *EndBlock =
+ (MemoryRangeHeader*)(MemBase + B.size()) - 1;
+ EndBlock->ThisAllocated = 1;
+ EndBlock->PrevAllocated = 0;
+ EndBlock->BlockSize = sizeof(MemoryRangeHeader);
+
+ // Start out with a vast new block of free memory.
+ FreeRangeHeader *NewBlock = (FreeRangeHeader*)MemBase;
+ NewBlock->ThisAllocated = 0;
+ // Make sure getFreeBlockBefore doesn't look into unmapped memory.
+ NewBlock->PrevAllocated = 1;
+ NewBlock->BlockSize = (uintptr_t)EndBlock - (uintptr_t)NewBlock;
+ NewBlock->SetEndOfBlockSizeMarker();
+ NewBlock->AddToFreeList(FreeMemoryList);
+
+ assert(NewBlock->BlockSize - sizeof(MemoryRangeHeader) >= MinSize &&
+ "The block was too small!");
+ return NewBlock;
+ }
+
+ /// endFunctionBody - The function F is now allocated, and takes the memory
+ /// in the range [FunctionStart,FunctionEnd).
+ void endFunctionBody(const Function *F, uint8_t *FunctionStart,
+ uint8_t *FunctionEnd) {
+ assert(FunctionEnd > FunctionStart);
+ assert(FunctionStart == (uint8_t *)(CurBlock+1) &&
+ "Mismatched function start/end!");
+
+ uintptr_t BlockSize = FunctionEnd - (uint8_t *)CurBlock;
+
+ // Release the memory at the end of this block that isn't needed.
+      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList,
+                                                      BlockSize);
+ }
+
+ /// allocateSpace - Allocate a memory block of the given size. This method
+ /// cannot be called between calls to startFunctionBody and endFunctionBody.
+ uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
+ CurBlock = FreeMemoryList;
+ FreeMemoryList = FreeMemoryList->AllocateBlock();
+
+ uint8_t *result = (uint8_t *)(CurBlock + 1);
+
+ if (Alignment == 0) Alignment = 1;
+ result = (uint8_t*)(((intptr_t)result+Alignment-1) &
+ ~(intptr_t)(Alignment-1));
+
+ uintptr_t BlockSize = result + Size - (uint8_t *)CurBlock;
+      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList,
+                                                      BlockSize);
+
+ return result;
+ }
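+
+    // Alignment example for allocateSpace above: if the byte just past
+    // CurBlock's header sits at an address ending in 0x08 and Alignment is
+    // 16, result is rounded up to the next address ending in 0x10 before the
+    // block is trimmed to end at result + Size.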
+
+ /// allocateStub - Allocate memory for a function stub.
+ uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
+ unsigned Alignment) {
+ return (uint8_t*)StubAllocator.Allocate(StubSize, Alignment);
+ }
+
+ /// allocateGlobal - Allocate memory for a global.
+ uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
+ return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
+ }
+
+ /// startExceptionTable - Use startFunctionBody to allocate memory for the
+ /// function's exception table.
+ uint8_t* startExceptionTable(const Function* F, uintptr_t &ActualSize) {
+ return startFunctionBody(F, ActualSize);
+ }
+
+ /// endExceptionTable - The exception table of F is now allocated,
+ /// and takes the memory in the range [TableStart,TableEnd).
+ void endExceptionTable(const Function *F, uint8_t *TableStart,
+ uint8_t *TableEnd, uint8_t* FrameRegister) {
+ assert(TableEnd > TableStart);
+ assert(TableStart == (uint8_t *)(CurBlock+1) &&
+ "Mismatched table start/end!");
+
+ uintptr_t BlockSize = TableEnd - (uint8_t *)CurBlock;
+
+ // Release the memory at the end of this block that isn't needed.
+      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList,
+                                                      BlockSize);
+ }
+
+ uint8_t *getGOTBase() const {
+ return GOTBase;
+ }
+
+ void deallocateBlock(void *Block) {
+ // Find the block that is allocated for this function.
+ MemoryRangeHeader *MemRange = static_cast<MemoryRangeHeader*>(Block) - 1;
+ assert(MemRange->ThisAllocated && "Block isn't allocated!");
+
+ // Fill the buffer with garbage!
+ if (PoisonMemory) {
+ memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange));
+ }
+
+ // Free the memory.
+ FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);
+ }
+
+ /// deallocateFunctionBody - Deallocate all memory for the specified
+ /// function body.
+ void deallocateFunctionBody(void *Body) {
+ if (Body) deallocateBlock(Body);
+ }
+
+ /// deallocateExceptionTable - Deallocate memory for the specified
+ /// exception table.
+ void deallocateExceptionTable(void *ET) {
+ if (ET) deallocateBlock(ET);
+ }
+
+ /// setMemoryWritable - When code generation is in progress,
+ /// the code pages may need permissions changed.
+    void setMemoryWritable() {
+ for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
+ sys::Memory::setWritable(CodeSlabs[i]);
+ }
+ /// setMemoryExecutable - When code generation is done and we're ready to
+ /// start execution, the code pages may need permissions changed.
+ void setMemoryExecutable() {
+ for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
+ sys::Memory::setExecutable(CodeSlabs[i]);
+ }
+
+ /// setPoisonMemory - Controls whether we write garbage over freed memory.
+ ///
+ void setPoisonMemory(bool poison) {
+ PoisonMemory = poison;
+ }
+ };
+}
+
+MemSlab *JITSlabAllocator::Allocate(size_t Size) {
+ sys::MemoryBlock B = JMM.allocateNewSlab(Size);
+ MemSlab *Slab = (MemSlab*)B.base();
+ Slab->Size = B.size();
+ Slab->NextPtr = 0;
+ return Slab;
+}
+
+void JITSlabAllocator::Deallocate(MemSlab *Slab) {
+ sys::MemoryBlock B(Slab, Slab->Size);
+ sys::Memory::ReleaseRWX(B);
+}
+
+DefaultJITMemoryManager::DefaultJITMemoryManager()
+ :
+#ifdef NDEBUG
+ PoisonMemory(false),
+#else
+ PoisonMemory(true),
+#endif
+ LastSlab(0, 0),
+ BumpSlabAllocator(*this),
+ StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
+ DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {
+
+ // Allocate space for code.
+ sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);
+ CodeSlabs.push_back(MemBlock);
+ uint8_t *MemBase = (uint8_t*)MemBlock.base();
+
+ // We set up the memory chunk with 4 mem regions, like this:
+ // [ START
+ // [ Free #0 ] -> Large space to allocate functions from.
+ // [ Allocated #1 ] -> Tiny space to separate regions.
+ // [ Free #2 ] -> Tiny space so there is always at least 1 free block.
+ // [ Allocated #3 ] -> Tiny space to prevent looking past end of block.
+ // END ]
+ //
+ // The last three blocks are never deallocated or touched.
+
+ // Add MemoryRangeHeader to the end of the memory region, indicating that
+ // the space after the block of memory is allocated. This is block #3.
+ MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
+ Mem3->ThisAllocated = 1;
+ Mem3->PrevAllocated = 0;
+ Mem3->BlockSize = sizeof(MemoryRangeHeader);
+
+ /// Add a tiny free region so that the free list always has one entry.
+ FreeRangeHeader *Mem2 =
+ (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize());
+ Mem2->ThisAllocated = 0;
+ Mem2->PrevAllocated = 1;
+ Mem2->BlockSize = FreeRangeHeader::getMinBlockSize();
+ Mem2->SetEndOfBlockSizeMarker();
+ Mem2->Prev = Mem2; // Mem2 *is* the free list for now.
+ Mem2->Next = Mem2;
+
+ /// Add a tiny allocated region so that Mem2 is never coalesced away.
+ MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
+ Mem1->ThisAllocated = 1;
+ Mem1->PrevAllocated = 0;
+ Mem1->BlockSize = sizeof(MemoryRangeHeader);
+
+ // Add a FreeRangeHeader to the start of the function body region, indicating
+ // that the space is free. Mark the previous block allocated so we never look
+ // at it.
+ FreeRangeHeader *Mem0 = (FreeRangeHeader*)MemBase;
+ Mem0->ThisAllocated = 0;
+ Mem0->PrevAllocated = 1;
+ Mem0->BlockSize = (char*)Mem1-(char*)Mem0;
+ Mem0->SetEndOfBlockSizeMarker();
+ Mem0->AddToFreeList(Mem2);
+
+ // Start out with the freelist pointing to Mem0.
+ FreeMemoryList = Mem0;
+
+ GOTBase = NULL;
+}
+
+void DefaultJITMemoryManager::AllocateGOT() {
+ assert(GOTBase == 0 && "Cannot allocate the GOT multiple times");
+ GOTBase = new uint8_t[sizeof(void*) * 8192];
+ HasGOT = true;
+}
+
+DefaultJITMemoryManager::~DefaultJITMemoryManager() {
+ for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
+ sys::Memory::ReleaseRWX(CodeSlabs[i]);
+
+ delete[] GOTBase;
+}
+
+sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
+ // Allocate a new block close to the last one.
+ std::string ErrMsg;
+ sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : 0;
+ sys::MemoryBlock B = sys::Memory::AllocateRWX(size, LastSlabPtr, &ErrMsg);
+ if (B.base() == 0) {
+ llvm_report_error("Allocation failed when allocating new memory in the"
+ " JIT\n" + ErrMsg);
+ }
+ LastSlab = B;
+ ++NumSlabs;
+ // Initialize the slab to garbage when debugging.
+ if (PoisonMemory) {
+ memset(B.base(), 0xCD, B.size());
+ }
+ return B;
+}
+
+/// CheckInvariants - For testing only. Returns true if all internal
+/// invariants hold, or false with a helpful message in ErrorStr. For free
+/// and allocated blocks, make sure that adding BlockSize gives a valid block.
+/// For free blocks, make sure they're in the free list and that their end of
+/// block size marker is correct. This function should return an error before
+/// accessing bad memory. This function is defined here instead of in
+/// JITMemoryManagerTest.cpp so that we don't have to expose all of the
+/// implementation details of DefaultJITMemoryManager.
+bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
+ raw_string_ostream Err(ErrorStr);
+
+ // Construct the set of FreeRangeHeader pointers so we can query it
+ // efficiently.
+ llvm::SmallPtrSet<MemoryRangeHeader*, 16> FreeHdrSet;
+ FreeRangeHeader* FreeHead = FreeMemoryList;
+ FreeRangeHeader* FreeRange = FreeHead;
+
+ do {
+ // Check that the free range pointer is in the blocks we've allocated.
+ bool Found = false;
+ for (std::vector<sys::MemoryBlock>::iterator I = CodeSlabs.begin(),
+ E = CodeSlabs.end(); I != E && !Found; ++I) {
+ char *Start = (char*)I->base();
+ char *End = Start + I->size();
+ Found = (Start <= (char*)FreeRange && (char*)FreeRange < End);
+ }
+ if (!Found) {
+ Err << "Corrupt free list; points to " << FreeRange;
+ return false;
+ }
+
+ if (FreeRange->Next->Prev != FreeRange) {
+ Err << "Next and Prev pointers do not match.";
+ return false;
+ }
+
+ // Otherwise, add it to the set.
+ FreeHdrSet.insert(FreeRange);
+ FreeRange = FreeRange->Next;
+ } while (FreeRange != FreeHead);
+
+ // Go over each block, and look at each MemoryRangeHeader.
+ for (std::vector<sys::MemoryBlock>::iterator I = CodeSlabs.begin(),
+ E = CodeSlabs.end(); I != E; ++I) {
+ char *Start = (char*)I->base();
+ char *End = Start + I->size();
+
+ // Check each memory range.
+ for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = NULL;
+ Start <= (char*)Hdr && (char*)Hdr < End;
+ Hdr = &Hdr->getBlockAfter()) {
+ if (Hdr->ThisAllocated == 0) {
+ // Check that this range is in the free list.
+ if (!FreeHdrSet.count(Hdr)) {
+ Err << "Found free header at " << Hdr << " that is not in free list.";
+ return false;
+ }
+
+ // Now make sure the size marker at the end of the block is correct.
+ uintptr_t *Marker = ((uintptr_t*)&Hdr->getBlockAfter()) - 1;
+ if (!(Start <= (char*)Marker && (char*)Marker < End)) {
+ Err << "Block size in header points out of current MemoryBlock.";
+ return false;
+ }
+ if (Hdr->BlockSize != *Marker) {
+ Err << "End of block size marker (" << *Marker << ") "
+ << "and BlockSize (" << Hdr->BlockSize << ") don't match.";
+ return false;
+ }
+ }
+
+ if (LastHdr && LastHdr->ThisAllocated != Hdr->PrevAllocated) {
+ Err << "Hdr->PrevAllocated (" << Hdr->PrevAllocated << ") != "
+ << "LastHdr->ThisAllocated (" << LastHdr->ThisAllocated << ")";
+ return false;
+ } else if (!LastHdr && !Hdr->PrevAllocated) {
+ Err << "The first header should have PrevAllocated true.";
+ return false;
+ }
+
+ // Remember the last header.
+ LastHdr = Hdr;
+ }
+ }
+
+ // All invariants are preserved.
+ return true;
+}
+
+JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
+ return new DefaultJITMemoryManager();
+}
+
+// Allocate memory for code in 512K slabs.
+const size_t DefaultJITMemoryManager::DefaultCodeSlabSize = 512 * 1024;
+
+// Allocate globals and stubs in slabs of 64K. (probably 16 pages)
+const size_t DefaultJITMemoryManager::DefaultSlabSize = 64 * 1024;
+
+// Waste at most 16K at the end of each bump slab. (probably 4 pages)
+const size_t DefaultJITMemoryManager::DefaultSizeThreshold = 16 * 1024;
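
The manager defined above is reached through the JITMemoryManager interface.
A minimal client-side sketch, not part of this patch, assuming the public
header llvm/ExecutionEngine/JITMemoryManager.h in this tree (the sizes below
are arbitrary):

  #include "llvm/ExecutionEngine/JITMemoryManager.h"
  using namespace llvm;

  void sketch() {
    // CreateDefaultMemManager() returns the DefaultJITMemoryManager above.
    JITMemoryManager *JMM = JITMemoryManager::CreateDefaultMemManager();

    // Stubs and globals come from the two bump-pointer allocators, so they
    // never fragment the code slabs. The GlobalValue* argument is unused by
    // the default implementation, so 0 is passed here purely for illustration.
    uint8_t *Stub   = JMM->allocateStub(0, /*StubSize=*/16, /*Alignment=*/4);
    uint8_t *Global = JMM->allocateGlobal(/*Size=*/128, /*Alignment=*/16);
    (void)Stub; (void)Global;

    delete JMM;  // the destructor releases the RWX code slabs
  }
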
diff --git a/lib/ExecutionEngine/JIT/Makefile b/lib/ExecutionEngine/JIT/Makefile
new file mode 100644
index 0000000..aafa3d9
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/Makefile
@@ -0,0 +1,38 @@
+##===- lib/ExecutionEngine/JIT/Makefile --------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMJIT
+
+# Get the $(ARCH) setting
+include $(LEVEL)/Makefile.config
+
+# Enable the X86 JIT if compiling on X86
+ifeq ($(ARCH), x86)
+ ENABLE_X86_JIT = 1
+endif
+
+# This flag can also be used on the command line to force inclusion
+# of the X86 JIT on non-X86 hosts
+ifdef ENABLE_X86_JIT
+ CPPFLAGS += -DENABLE_X86_JIT
+endif
+
+# Enable the Sparc JIT if compiling on Sparc
+ifeq ($(ARCH), Sparc)
+ ENABLE_SPARC_JIT = 1
+endif
+
+# This flag can also be used on the command line to force inclusion
+# of the Sparc JIT on non-Sparc hosts
+ifdef ENABLE_SPARC_JIT
+ CPPFLAGS += -DENABLE_SPARC_JIT
+endif
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp b/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
new file mode 100644
index 0000000..2baf979
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
@@ -0,0 +1,174 @@
+//===-- OProfileJITEventListener.cpp - Tell OProfile about JITted code ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object that calls into OProfile to tell
+// it about JITted functions. For now, we only record function names and sizes,
+// but eventually we'll also record line number information.
+//
+// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
+// definition of the interface we're using.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "oprofile-jit-event-listener"
+#include "llvm/Function.h"
+#include "llvm/Metadata.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Errno.h"
+#include "llvm/Config/config.h"
+#include <stddef.h>
+using namespace llvm;
+
+#if USE_OPROFILE
+
+#include <opagent.h>
+
+namespace {
+
+class OProfileJITEventListener : public JITEventListener {
+ op_agent_t Agent;
+public:
+ OProfileJITEventListener();
+ ~OProfileJITEventListener();
+
+ virtual void NotifyFunctionEmitted(const Function &F,
+ void *FnStart, size_t FnSize,
+ const EmittedFunctionDetails &Details);
+ virtual void NotifyFreeingMachineCode(void *OldPtr);
+};
+
+OProfileJITEventListener::OProfileJITEventListener()
+ : Agent(op_open_agent()) {
+ if (Agent == NULL) {
+ const std::string err_str = sys::StrError();
+ DEBUG(dbgs() << "Failed to connect to OProfile agent: " << err_str << "\n");
+ } else {
+ DEBUG(dbgs() << "Connected to OProfile agent.\n");
+ }
+}
+
+OProfileJITEventListener::~OProfileJITEventListener() {
+ if (Agent != NULL) {
+ if (op_close_agent(Agent) == -1) {
+ const std::string err_str = sys::StrError();
+ DEBUG(dbgs() << "Failed to disconnect from OProfile agent: "
+ << err_str << "\n");
+ } else {
+ DEBUG(dbgs() << "Disconnected from OProfile agent.\n");
+ }
+ }
+}
+
+class FilenameCache {
+ // Holds the filename of each Scope, so that we can pass a null-terminated
+ // string into oprofile. Use an AssertingVH rather than a ValueMap because we
+ // shouldn't be modifying any MDNodes while this map is alive.
+ DenseMap<AssertingVH<MDNode>, std::string> Filenames;
+
+ public:
+ const char *getFilename(DIScope Scope) {
+ std::string &Filename = Filenames[Scope.getNode()];
+ if (Filename.empty()) {
+ Filename = Scope.getFilename();
+ }
+ return Filename.c_str();
+ }
+};
+
+static debug_line_info LineStartToOProfileFormat(
+ const MachineFunction &MF, FilenameCache &Filenames,
+ uintptr_t Address, DebugLoc Loc) {
+ debug_line_info Result;
+ Result.vma = Address;
+ DILocation DILoc = MF.getDILocation(Loc);
+ Result.lineno = DILoc.getLineNumber();
+ Result.filename = Filenames.getFilename(DILoc.getScope());
+ DEBUG(dbgs() << "Mapping " << reinterpret_cast<void*>(Result.vma) << " to "
+ << Result.filename << ":" << Result.lineno << "\n");
+ return Result;
+}
+
+// Adds the just-emitted function to the symbol table.
+void OProfileJITEventListener::NotifyFunctionEmitted(
+ const Function &F, void *FnStart, size_t FnSize,
+ const EmittedFunctionDetails &Details) {
+ assert(F.hasName() && FnStart != 0 && "Bad symbol to add");
+ if (op_write_native_code(Agent, F.getName().data(),
+ reinterpret_cast<uint64_t>(FnStart),
+ FnStart, FnSize) == -1) {
+ DEBUG(dbgs() << "Failed to tell OProfile about native function "
+ << F.getName() << " at ["
+ << FnStart << "-" << ((char*)FnStart + FnSize) << "]\n");
+ return;
+ }
+
+ // Now we convert the line number information from the address/DebugLoc format
+ // in Details to the address/filename/lineno format that OProfile expects.
+ // OProfile 0.9.4 (and maybe later versions) has a bug that causes it to
+ // ignore line numbers for addresses above 4G.
+ FilenameCache Filenames;
+ std::vector<debug_line_info> LineInfo;
+ LineInfo.reserve(1 + Details.LineStarts.size());
+ if (!Details.MF->getDefaultDebugLoc().isUnknown()) {
+ LineInfo.push_back(LineStartToOProfileFormat(
+ *Details.MF, Filenames,
+ reinterpret_cast<uintptr_t>(FnStart),
+ Details.MF->getDefaultDebugLoc()));
+ }
+ for (std::vector<EmittedFunctionDetails::LineStart>::const_iterator
+ I = Details.LineStarts.begin(), E = Details.LineStarts.end();
+ I != E; ++I) {
+ LineInfo.push_back(LineStartToOProfileFormat(
+ *Details.MF, Filenames, I->Address, I->Loc));
+ }
+ if (!LineInfo.empty()) {
+ if (op_write_debug_line_info(Agent, FnStart,
+ LineInfo.size(), &*LineInfo.begin()) == -1) {
+ DEBUG(dbgs()
+ << "Failed to tell OProfile about line numbers for native function "
+ << F.getName() << " at ["
+ << FnStart << "-" << ((char*)FnStart + FnSize) << "]\n");
+ }
+ }
+}
+
+// Removes the being-deleted function from the symbol table.
+void OProfileJITEventListener::NotifyFreeingMachineCode(void *FnStart) {
+ assert(FnStart && "Invalid function pointer");
+ if (op_unload_native_code(Agent, reinterpret_cast<uint64_t>(FnStart)) == -1) {
+ DEBUG(dbgs()
+ << "Failed to tell OProfile about unload of native function at "
+ << FnStart << "\n");
+ }
+}
+
+} // anonymous namespace.
+
+namespace llvm {
+JITEventListener *createOProfileJITEventListener() {
+ return new OProfileJITEventListener;
+}
+}
+
+#else // USE_OPROFILE
+
+namespace llvm {
+// By defining this to return NULL, we can let clients call it unconditionally,
+// even if they haven't configured with the OProfile libraries.
+JITEventListener *createOProfileJITEventListener() {
+ return NULL;
+}
+} // namespace llvm
+
+#endif // USE_OPROFILE
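
Since the stub above returns NULL when LLVM is configured without OProfile,
clients can attach the listener without conditional compilation. A minimal
sketch, assuming the declaration lives in
llvm/ExecutionEngine/JITEventListener.h and that the engine exposes
RegisterJITEventListener as elsewhere in this tree:

  #include "llvm/ExecutionEngine/ExecutionEngine.h"
  #include "llvm/ExecutionEngine/JITEventListener.h"

  void attachOProfile(llvm::ExecutionEngine *EE) {
    // NULL means LLVM was built without OProfile support; checking here
    // avoids relying on RegisterJITEventListener to tolerate NULL.
    if (llvm::JITEventListener *L = llvm::createOProfileJITEventListener())
      EE->RegisterJITEventListener(L);
  }
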
diff --git a/lib/ExecutionEngine/JIT/TargetSelect.cpp b/lib/ExecutionEngine/JIT/TargetSelect.cpp
new file mode 100644
index 0000000..3349c33
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/TargetSelect.cpp
@@ -0,0 +1,91 @@
+//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This just asks the TargetRegistry for the appropriate JIT to use, and allows
+// the user to specify a specific one on the commandline with -march=x. Clients
+// should initialize targets prior to calling createJIT.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Host.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace llvm;
+
+/// selectTarget - Pick a target either via -march or by guessing the native
+/// arch. Add any CPU features specified via -mcpu or -mattr.
+TargetMachine *JIT::selectTarget(Module *Mod,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs,
+ std::string *ErrorStr) {
+ Triple TheTriple(Mod->getTargetTriple());
+ if (TheTriple.getTriple().empty())
+ TheTriple.setTriple(sys::getHostTriple());
+
+ // Adjust the triple to match what the user requested.
+ const Target *TheTarget = 0;
+ if (!MArch.empty()) {
+ for (TargetRegistry::iterator it = TargetRegistry::begin(),
+ ie = TargetRegistry::end(); it != ie; ++it) {
+ if (MArch == it->getName()) {
+ TheTarget = &*it;
+ break;
+ }
+ }
+
+ if (!TheTarget) {
+ if (ErrorStr)
+ *ErrorStr = "No available targets are compatible with this -march, "
+ "see -version for the available targets.\n";
+ return 0;
+ }
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // module/host triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ std::string Error;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
+ if (TheTarget == 0) {
+ if (ErrorStr)
+ *ErrorStr = Error;
+ return 0;
+ }
+ }
+
+ if (!TheTarget->hasJIT()) {
+ errs() << "WARNING: This target JIT is not designed for the host you are"
+ << " running. If bad things happen, please choose a different "
+ << "-march switch.\n";
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (!MCPU.empty() || !MAttrs.empty()) {
+ SubtargetFeatures Features;
+ Features.setCPU(MCPU);
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ // Allocate a target...
+ TargetMachine *Target =
+ TheTarget->createTargetMachine(TheTriple.getTriple(), FeaturesStr);
+ assert(Target && "Could not allocate target machine!");
+ return Target;
+}
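
Clients do not call selectTarget() directly; it runs underneath JIT creation.
A minimal sketch of the contract stated in the file comment (initialize
targets before creating the JIT), assuming the EngineBuilder interface and
InitializeNativeTarget() from llvm/Target/TargetSelect.h in this tree:

  #include "llvm/ExecutionEngine/ExecutionEngine.h"
  #include "llvm/Module.h"
  #include "llvm/Target/TargetSelect.h"

  llvm::ExecutionEngine *makeJIT(llvm::Module *M, std::string &Err) {
    // Register the host target and its JIT so selectTarget() can find
    // them in the TargetRegistry.
    llvm::InitializeNativeTarget();
    return llvm::EngineBuilder(M)
        .setEngineKind(llvm::EngineKind::JIT)
        .setErrorStr(&Err)
        .create();
  }
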
diff --git a/lib/ExecutionEngine/Makefile b/lib/ExecutionEngine/Makefile
new file mode 100644
index 0000000..e0e050e
--- /dev/null
+++ b/lib/ExecutionEngine/Makefile
@@ -0,0 +1,13 @@
+##===- lib/ExecutionEngine/Makefile ------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../..
+LIBRARYNAME = LLVMExecutionEngine
+PARALLEL_DIRS = Interpreter JIT
+
+include $(LEVEL)/Makefile.common