Diffstat (limited to 'lib')
540 files changed, 34155 insertions, 18210 deletions
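Before the hunks, a brief illustration of the reasoning added to isObjectSmallerThan() in the BasicAliasAnalysis.cpp hunk further down: an "object size" query answered from an interior pointer (the malloc(100) / q = p+80 example in that patch's comment) reports the bytes reachable from the pointer, not the size of the whole allocation, so the patch bails out unless the pointer is known to be the object's base. The sketch below is a minimal standalone model of that idea; Object, Pointer, and the Offset-based "is this a base pointer" test are hypothetical stand-ins, not LLVM API (the real patch uses isIdentifiedObject()).

```cpp
#include <cstdint>
#include <cstdio>

struct Object  { std::uint64_t Size; };                       // the whole allocation
struct Pointer { const Object *Obj; std::uint64_t Offset; };  // possibly interior

// getObjectSize()-style query: bytes reachable from this particular pointer.
std::uint64_t bytesFromPointer(const Pointer &P) { return P.Obj->Size - P.Offset; }

// isObjectSmallerThan() needs the size of the *entire* object, so an interior
// pointer must either be rewound to the base or rejected; the patch rejects it.
bool isWholeObjectSmallerThan(const Pointer &P, std::uint64_t Size) {
  if (P.Offset != 0)          // stand-in for "not an identified base pointer": give up
    return false;
  return P.Obj->Size < Size;
}

int main() {
  Object Alloc{100};          // char *p = (char*)malloc(100);
  Pointer Q{&Alloc, 80};      // char *q = p + 80;
  std::printf("%llu\n", (unsigned long long)bytesFromPointer(Q));   // prints 20
  std::printf("%d\n", isWholeObjectSmallerThan(Q, 50) ? 1 : 0);     // prints 0: bail out
}
```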
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp index e58dde3..a571463 100644 --- a/lib/Analysis/AliasAnalysisEvaluator.cpp +++ b/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -44,6 +44,8 @@ static cl::opt<bool> PrintMod("print-mod", cl::ReallyHidden); static cl::opt<bool> PrintRef("print-ref", cl::ReallyHidden); static cl::opt<bool> PrintModRef("print-modref", cl::ReallyHidden); +static cl::opt<bool> EvalTBAA("evaluate-tbaa", cl::ReallyHidden); + namespace { class AAEval : public FunctionPass { unsigned NoAlias, MayAlias, PartialAlias, MustAlias; @@ -123,6 +125,15 @@ PrintModRefResults(const char *Msg, bool P, CallSite CSA, CallSite CSB, } } +static inline void +PrintLoadStoreResults(const char *Msg, bool P, const Value *V1, + const Value *V2, const Module *M) { + if (P) { + errs() << " " << Msg << ": " << *V1 + << " <-> " << *V2 << '\n'; + } +} + static inline bool isInterestingPointer(Value *V) { return V->getType()->isPointerTy() && !isa<ConstantPointerNull>(V); @@ -133,6 +144,8 @@ bool AAEval::runOnFunction(Function &F) { SetVector<Value *> Pointers; SetVector<CallSite> CallSites; + SetVector<Value *> Loads; + SetVector<Value *> Stores; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) if (I->getType()->isPointerTy()) // Add all pointer arguments. @@ -141,6 +154,10 @@ bool AAEval::runOnFunction(Function &F) { for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { if (I->getType()->isPointerTy()) // Add all pointer instructions. Pointers.insert(&*I); + if (EvalTBAA && isa<LoadInst>(&*I)) + Loads.insert(&*I); + if (EvalTBAA && isa<StoreInst>(&*I)) + Stores.insert(&*I); Instruction &Inst = *I; if (CallSite CS = cast<Value>(&Inst)) { Value *Callee = CS.getCalledValue(); @@ -197,6 +214,61 @@ bool AAEval::runOnFunction(Function &F) { } } + if (EvalTBAA) { + // iterate over all pairs of load, store + for (SetVector<Value *>::iterator I1 = Loads.begin(), E = Loads.end(); + I1 != E; ++I1) { + for (SetVector<Value *>::iterator I2 = Stores.begin(), E2 = Stores.end(); + I2 != E2; ++I2) { + switch (AA.alias(AA.getLocation(cast<LoadInst>(*I1)), + AA.getLocation(cast<StoreInst>(*I2)))) { + case AliasAnalysis::NoAlias: + PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2, + F.getParent()); + ++NoAlias; break; + case AliasAnalysis::MayAlias: + PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2, + F.getParent()); + ++MayAlias; break; + case AliasAnalysis::PartialAlias: + PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2, + F.getParent()); + ++PartialAlias; break; + case AliasAnalysis::MustAlias: + PrintLoadStoreResults("MustAlias", PrintMustAlias, *I1, *I2, + F.getParent()); + ++MustAlias; break; + } + } + } + + // iterate over all pairs of store, store + for (SetVector<Value *>::iterator I1 = Stores.begin(), E = Stores.end(); + I1 != E; ++I1) { + for (SetVector<Value *>::iterator I2 = Stores.begin(); I2 != I1; ++I2) { + switch (AA.alias(AA.getLocation(cast<StoreInst>(*I1)), + AA.getLocation(cast<StoreInst>(*I2)))) { + case AliasAnalysis::NoAlias: + PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2, + F.getParent()); + ++NoAlias; break; + case AliasAnalysis::MayAlias: + PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2, + F.getParent()); + ++MayAlias; break; + case AliasAnalysis::PartialAlias: + PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2, + F.getParent()); + ++PartialAlias; break; + case AliasAnalysis::MustAlias: + PrintLoadStoreResults("MustAlias", 
PrintMustAlias, *I1, *I2, + F.getParent()); + ++MustAlias; break; + } + } + } + } + // Mod/ref alias analysis: compare all pairs of calls and values for (SetVector<CallSite>::iterator C = CallSites.begin(), Ce = CallSites.end(); C != Ce; ++C) { diff --git a/lib/Analysis/Analysis.cpp b/lib/Analysis/Analysis.cpp index 66e416c..349c417 100644 --- a/lib/Analysis/Analysis.cpp +++ b/lib/Analysis/Analysis.cpp @@ -11,6 +11,8 @@ #include "llvm-c/Initialization.h" #include "llvm/Analysis/Verifier.h" #include "llvm/InitializePasses.h" +#include "llvm/IR/Module.h" +#include "llvm/PassRegistry.h" #include <cstring> using namespace llvm; diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 4139336..f8509dd 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -88,7 +88,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &TD, const TargetLibraryInfo &TLI, bool RoundToAlign = false) { uint64_t Size; - if (getUnderlyingObjectSize(V, Size, &TD, &TLI, RoundToAlign)) + if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign)) return Size; return AliasAnalysis::UnknownSize; } @@ -98,6 +98,35 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &TD, static bool isObjectSmallerThan(const Value *V, uint64_t Size, const DataLayout &TD, const TargetLibraryInfo &TLI) { + // Note that the meanings of the "object" are slightly different in the + // following contexts: + // c1: llvm::getObjectSize() + // c2: llvm.objectsize() intrinsic + // c3: isObjectSmallerThan() + // c1 and c2 share the same meaning; however, the meaning of "object" in c3 + // refers to the "entire object". + // + // Consider this example: + // char *p = (char*)malloc(100) + // char *q = p+80; + // + // In the context of c1 and c2, the "object" pointed by q refers to the + // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20. + // + // However, in the context of c3, the "object" refers to the chunk of memory + // being allocated. So, the "object" has 100 bytes, and q points to the middle + // the "object". In case q is passed to isObjectSmallerThan() as the 1st + // parameter, before the llvm::getObjectSize() is called to get the size of + // entire object, we should: + // - either rewind the pointer q to the base-address of the object in + // question (in this case rewind to p), or + // - just give up. It is up to caller to make sure the pointer is pointing + // to the base address the object. + // + // We go for 2nd option for simplicity. + if (!isIdentifiedObject(V)) + return false; + // This function needs to use the aligned object size because we allow // reads a bit past the end given sufficient alignment. uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true); @@ -851,9 +880,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, // pointers, figure out if the indexes to the GEP tell us anything about the // derived pointer. if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) { + // Do the base pointers alias? + AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0, + UnderlyingV2, UnknownSize, 0); + // Check for geps of non-aliasing underlying pointers where the offsets are // identical. - if (V1Size == V2Size) { + if ((BaseAlias == MayAlias) && V1Size == V2Size) { // Do the base pointers alias assuming type and size. 
AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1TBAAInfo, UnderlyingV2, @@ -881,10 +914,6 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, GEP1VariableIndices.clear(); } } - - // Do the base pointers alias? - AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0, - UnderlyingV2, UnknownSize, 0); // If we get a No or May, then return it immediately, no amount of analysis // will improve this situation. diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index 09d7608..bc0dffc 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -17,6 +17,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Analysis/ConstantFolding.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/Analysis/ValueTracking.h" @@ -550,7 +551,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, if (Opc == Instruction::And && DL) { - unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()); + unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType()); APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0); APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0); ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL); @@ -880,19 +881,20 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI); } -/// ConstantFoldConstantExpression - Attempt to fold the constant expression -/// using the specified DataLayout. If successful, the constant result is -/// result is returned, if not, null is returned. -Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, - const DataLayout *TD, - const TargetLibraryInfo *TLI) { - SmallVector<Constant*, 8> Ops; - for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); - i != e; ++i) { +static Constant * +ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD, + const TargetLibraryInfo *TLI, + SmallPtrSet<ConstantExpr *, 4> &FoldedOps) { + SmallVector<Constant *, 8> Ops; + for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; + ++i) { Constant *NewC = cast<Constant>(*i); - // Recursively fold the ConstantExpr's operands. - if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) - NewC = ConstantFoldConstantExpression(NewCE, TD, TLI); + // Recursively fold the ConstantExpr's operands. If we have already folded + // a ConstantExpr, we don't have to process it again. + if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) { + if (FoldedOps.insert(NewCE)) + NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps); + } Ops.push_back(NewC); } @@ -902,6 +904,16 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI); } +/// ConstantFoldConstantExpression - Attempt to fold the constant expression +/// using the specified DataLayout. If successful, the constant result is +/// result is returned, if not, null is returned. +Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, + const DataLayout *TD, + const TargetLibraryInfo *TLI) { + SmallPtrSet<ConstantExpr *, 4> FoldedOps; + return ConstantFoldConstantExpressionImpl(CE, TD, TLI, FoldedOps); +} + /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the /// specified opcode and operands. 
If successful, the constant result is /// returned, if not, null is returned. Note that this function can fail when diff --git a/lib/Analysis/CostModel.cpp b/lib/Analysis/CostModel.cpp index df70d15..98a7780 100644 --- a/lib/Analysis/CostModel.cpp +++ b/lib/Analysis/CostModel.cpp @@ -88,6 +88,23 @@ static bool isReverseVectorMask(SmallVector<int, 16> &Mask) { return true; } +static TargetTransformInfo::OperandValueKind getOperandInfo(Value *V) { + TargetTransformInfo::OperandValueKind OpInfo = + TargetTransformInfo::OK_AnyValue; + + // Check for a splat of a constant. + ConstantDataVector *CDV = 0; + if ((CDV = dyn_cast<ConstantDataVector>(V))) + if (CDV->getSplatValue() != NULL) + OpInfo = TargetTransformInfo::OK_UniformConstantValue; + ConstantVector *CV = 0; + if ((CV = dyn_cast<ConstantVector>(V))) + if (CV->getSplatValue() != NULL) + OpInfo = TargetTransformInfo::OK_UniformConstantValue; + + return OpInfo; +} + unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const { if (!TTI) return -1; @@ -121,7 +138,12 @@ unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const { case Instruction::And: case Instruction::Or: case Instruction::Xor: { - return TTI->getArithmeticInstrCost(I->getOpcode(), I->getType()); + TargetTransformInfo::OperandValueKind Op1VK = + getOperandInfo(I->getOperand(0)); + TargetTransformInfo::OperandValueKind Op2VK = + getOperandInfo(I->getOperand(1)); + return TTI->getArithmeticInstrCost(I->getOpcode(), I->getType(), Op1VK, + Op2VK); } case Instruction::Select: { const SelectInst *SI = cast<SelectInst>(I); diff --git a/lib/Analysis/IPA/IPA.cpp b/lib/Analysis/IPA/IPA.cpp index aa5164e..1c1816d 100644 --- a/lib/Analysis/IPA/IPA.cpp +++ b/lib/Analysis/IPA/IPA.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "llvm/InitializePasses.h" +#include "llvm/PassRegistry.h" #include "llvm-c/Initialization.h" using namespace llvm; diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp index 4a3c74e..bf77451 100644 --- a/lib/Analysis/InstructionSimplify.cpp +++ b/lib/Analysis/InstructionSimplify.cpp @@ -1711,7 +1711,7 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, // subobject at its beginning) or function, both are pointers to one past the // last element of the same array object, or one is a pointer to one past the // end of one array object and the other is a pointer to the start of a -// different array object that happens to immediately follow the first array +// different array object that happens to immediately follow the first array // object in the address space.) // // C11's version is more restrictive, however there's no reason why an argument diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp index d490d54..9c0d8ac 100644 --- a/lib/Analysis/MemoryBuiltins.cpp +++ b/lib/Analysis/MemoryBuiltins.cpp @@ -364,26 +364,6 @@ bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD, return true; } -/// \brief Compute the size of the underlying object pointed by Ptr. Returns -/// true and the object size in Size if successful, and false otherwise. -/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas, -/// byval arguments, and global variables. 
-bool llvm::getUnderlyingObjectSize(const Value *Ptr, uint64_t &Size, - const DataLayout *TD, - const TargetLibraryInfo *TLI, - bool RoundToAlign) { - if (!TD) - return false; - - ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(), RoundToAlign); - SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr)); - if (!Visitor.knownSize(Data)) - return false; - - Size = Data.first.getZExtValue(); - return true; -} - STATISTIC(ObjectVisitorArgument, "Number of arguments with unsolved size and offset"); @@ -409,23 +389,16 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD, SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) { V = V->stripPointerCasts(); + if (Instruction *I = dyn_cast<Instruction>(V)) { + // If we have already seen this instruction, bail out. Cycles can happen in + // unreachable code after constant propagation. + if (!SeenInsts.insert(I)) + return unknown(); - if (isa<Instruction>(V) || isa<GEPOperator>(V)) { - // Return cached value or insert unknown in cache if size of V was not - // computed yet in order to avoid recursions in PHis. - std::pair<CacheMapTy::iterator, bool> CacheVal = - CacheMap.insert(std::make_pair(V, unknown())); - if (!CacheVal.second) - return CacheVal.first->second; - - SizeOffsetType Result; if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) - Result = visitGEPOperator(*GEP); - else - Result = visit(cast<Instruction>(*V)); - return CacheMap[V] = Result; + return visitGEPOperator(*GEP); + return visit(*I); } - if (Argument *A = dyn_cast<Argument>(V)) return visitArgument(*A); if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V)) @@ -439,6 +412,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) { if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { if (CE->getOpcode() == Instruction::IntToPtr) return unknown(); // clueless + if (CE->getOpcode() == Instruction::GetElementPtr) + return visitGEPOperator(cast<GEPOperator>(*CE)); } DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V @@ -572,21 +547,9 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) { return unknown(); } -SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &PHI) { - if (PHI.getNumIncomingValues() == 0) - return unknown(); - - SizeOffsetType Ret = compute(PHI.getIncomingValue(0)); - if (!bothKnown(Ret)) - return unknown(); - - // Verify that all PHI incoming pointers have the same size and offset. - for (unsigned i = 1, e = PHI.getNumIncomingValues(); i != e; ++i) { - SizeOffsetType EdgeData = compute(PHI.getIncomingValue(i)); - if (!bothKnown(EdgeData) || EdgeData != Ret) - return unknown(); - } - return Ret; +SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) { + // too complex to analyze statically. + return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) { diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index 1faa046..0e8529a 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // This file implements an analysis that determines, for a given memory -// operation, what preceding memory operations it depends on. It builds on +// operation, what preceding memory operations it depends on. It builds on // alias analysis information, and tries to provide a lazy, caching interface to // a common kind of alias information query. 
// @@ -47,12 +47,10 @@ STATISTIC(NumCacheCompleteNonLocalPtr, "Number of block queries that were completely cached"); // Limit for the number of instructions to scan in a block. -// FIXME: Figure out what a sane value is for this. -// (500 is relatively insane.) -static const int BlockScanLimit = 500; +static const int BlockScanLimit = 100; char MemoryDependenceAnalysis::ID = 0; - + // Register this pass... INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep", "Memory Dependence Analysis", false, true) @@ -99,7 +97,7 @@ bool MemoryDependenceAnalysis::runOnFunction(Function &) { /// RemoveFromReverseMap - This is a helper function that removes Val from /// 'Inst's set in ReverseMap. If the set becomes empty, remove Inst's entry. template <typename KeyTy> -static void RemoveFromReverseMap(DenseMap<Instruction*, +static void RemoveFromReverseMap(DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> > &ReverseMap, Instruction *Inst, KeyTy Val) { typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator @@ -123,7 +121,8 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst, if (LI->isUnordered()) { Loc = AA->getLocation(LI); return AliasAnalysis::Ref; - } else if (LI->getOrdering() == Monotonic) { + } + if (LI->getOrdering() == Monotonic) { Loc = AA->getLocation(LI); return AliasAnalysis::ModRef; } @@ -135,7 +134,8 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst, if (SI->isUnordered()) { Loc = AA->getLocation(SI); return AliasAnalysis::Mod; - } else if (SI->getOrdering() == Monotonic) { + } + if (SI->getOrdering() == Monotonic) { Loc = AA->getLocation(SI); return AliasAnalysis::ModRef; } @@ -196,13 +196,13 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall, // Walk backwards through the block, looking for dependencies while (ScanIt != BB->begin()) { // Limit the amount of scanning we do so we don't end up with quadratic - // running time on extreme testcases. + // running time on extreme testcases. --Limit; if (!Limit) return MemDepResult::getUnknown(); Instruction *Inst = --ScanIt; - + // If this inst is a memory op, get the pointer it accessed AliasAnalysis::Location Loc; AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA); @@ -251,7 +251,7 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall, /// /// MemLocBase, MemLocOffset are lazily computed here the first time the /// base/offs of memloc is needed. -static bool +static bool isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc, const Value *&MemLocBase, int64_t &MemLocOffs, @@ -289,25 +289,25 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, if (LI->getParent()->getParent()->getAttributes(). hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread)) return 0; - + // Get the base of this load. int64_t LIOffs = 0; - const Value *LIBase = + const Value *LIBase = GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD); - + // If the two pointers are not based on the same pointer, we can't tell that // they are related. if (LIBase != MemLocBase) return 0; - + // Okay, the two values are based on the same pointer, but returned as // no-alias. This happens when we have things like two byte loads at "P+1" // and "P+3". Check to see if increasing the size of the "LI" load up to its // alignment (or the largest native integer type) will allow us to load all // the bits required by MemLoc. - + // If MemLoc is before LI, then no widening of LI will help us out. 
if (MemLocOffs < LIOffs) return 0; - + // Get the alignment of the load in bytes. We assume that it is safe to load // any legal integer up to this size without a problem. For example, if we're // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can @@ -316,15 +316,15 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, unsigned LoadAlign = LI->getAlignment(); int64_t MemLocEnd = MemLocOffs+MemLocSize; - + // If no amount of rounding up will let MemLoc fit into LI, then bail out. if (LIOffs+LoadAlign < MemLocEnd) return 0; - + // This is the size of the load to try. Start with the next larger power of // two. unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U; NewLoadByteSize = NextPowerOf2(NewLoadByteSize); - + while (1) { // If this load size is bigger than our known alignment or would not fit // into a native integer register, then we fail. @@ -343,7 +343,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, // If a load of this width would include all of MemLoc, then we succeed. if (LIOffs+NewLoadByteSize >= MemLocEnd) return NewLoadByteSize; - + NewLoadByteSize <<= 1; } } @@ -355,7 +355,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, /// instruction as well; this function may take advantage of the metadata /// annotated to the query instruction to refine the result. MemDepResult MemoryDependenceAnalysis:: -getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, +getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, BasicBlock::iterator ScanIt, BasicBlock *BB, Instruction *QueryInst) { @@ -382,7 +382,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { // Debug intrinsics don't (and can't) cause dependences. if (isa<DbgInfoIntrinsic>(II)) continue; - + // If we reach a lifetime begin or end marker, then the query ends here // because the value is undefined. if (II->getIntrinsicID() == Intrinsic::lifetime_start) { @@ -406,10 +406,10 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, return MemDepResult::getClobber(LI); AliasAnalysis::Location LoadLoc = AA->getLocation(LI); - + // If we found a pointer, check if it could be the same as our pointer. AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc); - + if (isLoad) { if (R == AliasAnalysis::NoAlias) { // If this is an over-aligned integer load (for example, @@ -423,10 +423,10 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase, MemLocOffset, LI, TD)) return MemDepResult::getClobber(Inst); - + continue; } - + // Must aliased loads are defs of each other. if (R == AliasAnalysis::MustAlias) return MemDepResult::getDef(Inst); @@ -441,7 +441,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, if (R == AliasAnalysis::PartialAlias) return MemDepResult::getClobber(Inst); #endif - + // Random may-alias loads don't depend on each other without a // dependence. continue; @@ -458,7 +458,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, // Stores depend on may/must aliased loads. return MemDepResult::getDef(Inst); } - + if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { // Atomic stores have complications involved. // FIXME: This is overly conservative. 
@@ -474,10 +474,10 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, // Ok, this store might clobber the query pointer. Check to see if it is // a must alias: in this case, we want to return this as a def. AliasAnalysis::Location StoreLoc = AA->getLocation(SI); - + // If we found a pointer, check if it could be the same as our pointer. AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc); - + if (R == AliasAnalysis::NoAlias) continue; if (R == AliasAnalysis::MustAlias) @@ -498,7 +498,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo(); if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) { const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD); - + if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr)) return MemDepResult::getDef(Inst); // Be conservative if the accessed pointer may alias the allocation. @@ -532,7 +532,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, return MemDepResult::getClobber(Inst); } } - + // No dependence found. If this is the entry block of the function, it is // unknown, otherwise it is non-local. if (BB != &BB->getParent()->getEntryBlock()) @@ -544,25 +544,25 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, /// depends. MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) { Instruction *ScanPos = QueryInst; - + // Check for a cached result MemDepResult &LocalCache = LocalDeps[QueryInst]; - + // If the cached entry is non-dirty, just return it. Note that this depends // on MemDepResult's default constructing to 'dirty'. if (!LocalCache.isDirty()) return LocalCache; - + // Otherwise, if we have a dirty entry, we know we can start the scan at that // instruction, which may save us some work. if (Instruction *Inst = LocalCache.getInst()) { ScanPos = Inst; - + RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst); } - + BasicBlock *QueryParent = QueryInst->getParent(); - + // Do the scan. if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) { // No dependence found. If this is the entry block of the function, it is @@ -591,11 +591,11 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) { // Non-memory instruction. LocalCache = MemDepResult::getUnknown(); } - + // Remember the result! if (Instruction *I = LocalCache.getInst()) ReverseLocalDeps[I].insert(QueryInst); - + return LocalCache; } @@ -636,7 +636,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { /// the uncached case, this starts out as the set of predecessors we care /// about. SmallVector<BasicBlock*, 32> DirtyBlocks; - + if (!Cache.empty()) { // Okay, we have a cache entry. If we know it is not dirty, just return it // with no computation. @@ -644,17 +644,17 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { ++NumCacheNonLocal; return Cache; } - + // If we already have a partially computed set of results, scan them to // determine what is dirty, seeding our initial DirtyBlocks worklist. for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end(); I != E; ++I) if (I->getResult().isDirty()) DirtyBlocks.push_back(I->getBB()); - + // Sort the cache so that we can do fast binary search lookups below. 
std::sort(Cache.begin(), Cache.end()); - + ++NumCacheDirtyNonLocal; //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: " // << Cache.size() << " cached: " << *QueryInst; @@ -665,45 +665,45 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { DirtyBlocks.push_back(*PI); ++NumUncacheNonLocal; } - + // isReadonlyCall - If this is a read-only call, we can be more aggressive. bool isReadonlyCall = AA->onlyReadsMemory(QueryCS); SmallPtrSet<BasicBlock*, 64> Visited; - + unsigned NumSortedEntries = Cache.size(); DEBUG(AssertSorted(Cache)); - + // Iterate while we still have blocks to update. while (!DirtyBlocks.empty()) { BasicBlock *DirtyBB = DirtyBlocks.back(); DirtyBlocks.pop_back(); - + // Already processed this block? if (!Visited.insert(DirtyBB)) continue; - + // Do a binary search to see if we already have an entry for this block in // the cache set. If so, find it. DEBUG(AssertSorted(Cache, NumSortedEntries)); - NonLocalDepInfo::iterator Entry = + NonLocalDepInfo::iterator Entry = std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries, NonLocalDepEntry(DirtyBB)); if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB) --Entry; - + NonLocalDepEntry *ExistingResult = 0; - if (Entry != Cache.begin()+NumSortedEntries && + if (Entry != Cache.begin()+NumSortedEntries && Entry->getBB() == DirtyBB) { // If we already have an entry, and if it isn't already dirty, the block // is done. if (!Entry->getResult().isDirty()) continue; - + // Otherwise, remember this slot so we can update the value. ExistingResult = &*Entry; } - + // If the dirty entry has a pointer, start scanning from it so we don't have // to rescan the entire block. BasicBlock::iterator ScanPos = DirtyBB->end(); @@ -715,10 +715,10 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { QueryCS.getInstruction()); } } - + // Find out if this block has a local dependency for QueryInst. MemDepResult Dep; - + if (ScanPos != DirtyBB->begin()) { Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB); } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) { @@ -728,14 +728,14 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { } else { Dep = MemDepResult::getNonFuncLocal(); } - + // If we had a dirty entry for the block, update it. Otherwise, just add // a new entry. if (ExistingResult) ExistingResult->setResult(Dep); else Cache.push_back(NonLocalDepEntry(DirtyBB, Dep)); - + // If the block has a dependency (i.e. it isn't completely transparent to // the value), remember the association! if (!Dep.isNonLocal()) { @@ -744,14 +744,14 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { if (Instruction *Inst = Dep.getInst()) ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction()); } else { - + // If the block *is* completely transparent to the load, we need to check // the predecessors of this block. Add them to our worklist. for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI) DirtyBlocks.push_back(*PI); } } - + return Cache; } @@ -769,9 +769,9 @@ getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad, assert(Loc.Ptr->getType()->isPointerTy() && "Can't get pointer deps of a non-pointer!"); Result.clear(); - + PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD); - + // This is the set of blocks we've inspected, and the pointer we consider in // each block. Because of critical edges, we currently bail out if querying // a block with multiple different pointers. 
This can happen during PHI @@ -794,7 +794,7 @@ MemDepResult MemoryDependenceAnalysis:: GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc, bool isLoad, BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) { - + // Do a binary search to see if we already have an entry for this block in // the cache set. If so, find it. NonLocalDepInfo::iterator Entry = @@ -802,18 +802,18 @@ GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc, NonLocalDepEntry(BB)); if (Entry != Cache->begin() && (Entry-1)->getBB() == BB) --Entry; - + NonLocalDepEntry *ExistingResult = 0; if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB) ExistingResult = &*Entry; - + // If we have a cached entry, and it is non-dirty, use it as the value for // this dependency. if (ExistingResult && !ExistingResult->getResult().isDirty()) { ++NumCacheNonLocalPtr; return ExistingResult->getResult(); - } - + } + // Otherwise, we have to scan for the value. If we have a dirty cache // entry, start scanning from its position, otherwise we scan from the end // of the block. @@ -823,30 +823,30 @@ GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc, "Instruction invalidated?"); ++NumCacheDirtyNonLocalPtr; ScanPos = ExistingResult->getResult().getInst(); - + // Eliminating the dirty entry from 'Cache', so update the reverse info. ValueIsLoadPair CacheKey(Loc.Ptr, isLoad); RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey); } else { ++NumUncacheNonLocalPtr; } - + // Scan the block for the dependency. MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB); - + // If we had a dirty entry for the block, update it. Otherwise, just add // a new entry. if (ExistingResult) ExistingResult->setResult(Dep); else Cache->push_back(NonLocalDepEntry(BB, Dep)); - + // If the block has a dependency (i.e. it isn't completely transparent to // the value), remember the reverse association because we just added it // to Cache! if (!Dep.isDef() && !Dep.isClobber()) return Dep; - + // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently // update MemDep when we remove instructions. Instruction *Inst = Dep.getInst(); @@ -859,7 +859,7 @@ GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc, /// SortNonLocalDepInfoCache - Sort the a NonLocalDepInfo cache, given a certain /// number of elements in the array that are already properly ordered. This is /// optimized for the case when only a few entries are added. -static void +static void SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache, unsigned NumSortedEntries) { switch (Cache.size() - NumSortedEntries) { @@ -911,7 +911,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, SmallVectorImpl<NonLocalDepResult> &Result, DenseMap<BasicBlock*, Value*> &Visited, bool SkipFirstBlock) { - + // Look up the cached info for Pointer. ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad); @@ -925,7 +925,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // Get the NLPI for CacheKey, inserting one into the map if it doesn't // already have one. 
- std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair = + std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair = NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI)); NonLocalPointerInfo *CacheInfo = &Pair.first->second; @@ -987,14 +987,14 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB()); if (VI == Visited.end() || VI->second == Pointer.getAddr()) continue; - + // We have a pointer mismatch in a block. Just return clobber, saying // that something was clobbered in this result. We could also do a // non-fully cached query, but there is little point in doing this. return true; } } - + Value *Addr = Pointer.getAddr(); for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); I != E; ++I) { @@ -1005,7 +1005,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, ++NumCacheCompleteNonLocalPtr; return false; } - + // Otherwise, either this is a new block, a block with an invalid cache // pointer or one that we're about to invalidate by putting more info into it // than its valid cache info. If empty, the result will be valid cache info, @@ -1014,10 +1014,10 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock); else CacheInfo->Pair = BBSkipFirstBlockPair(); - + SmallVector<BasicBlock*, 32> Worklist; Worklist.push_back(StartBB); - + // PredList used inside loop. SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList; @@ -1028,10 +1028,10 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // revisit blocks after we insert info for them. unsigned NumSortedEntries = Cache->size(); DEBUG(AssertSorted(*Cache)); - + while (!Worklist.empty()) { BasicBlock *BB = Worklist.pop_back_val(); - + // Skip the first block if we have it. if (!SkipFirstBlock) { // Analyze the dependency of *Pointer in FromBB. See if we already have @@ -1043,14 +1043,14 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, DEBUG(AssertSorted(*Cache, NumSortedEntries)); MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache, NumSortedEntries); - + // If we got a Def or Clobber, add this to the list of results. if (!Dep.isNonLocal() && DT->isReachableFromEntry(BB)) { Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr())); continue; } } - + // If 'Pointer' is an instruction defined in this block, then we need to do // phi translation to change it into a value live in the predecessor block. // If not, we just add the predecessors to the worklist and scan them with @@ -1067,7 +1067,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, NewBlocks.push_back(*PI); continue; } - + // If we have seen this block before, but it was with a different // pointer then we have a phi translation failure and we have to treat // this as a clobber. @@ -1082,12 +1082,12 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, Worklist.append(NewBlocks.begin(), NewBlocks.end()); continue; } - + // We do need to do phi translation, if we know ahead of time we can't phi // translate this value, don't even try. if (!Pointer.IsPotentiallyPHITranslatable()) goto PredTranslationFailure; - + // We may have added values to the cache list before this PHI translation. // If so, we haven't done anything to ensure that the cache remains sorted. 
// Sort it now (if needed) so that recursive invocations of @@ -1110,7 +1110,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, PredPointer.PHITranslateValue(BB, Pred, 0); Value *PredPtrVal = PredPointer.getAddr(); - + // Check to see if we have already visited this pred block with another // pointer. If so, we can't do this lookup. This failure can occur // with PHI translation when a critical edge exists and the PHI node in @@ -1127,14 +1127,14 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // the analysis and can ignore it. if (InsertRes.first->second == PredPtrVal) continue; - + // Otherwise, the block was previously analyzed with a different // pointer. We can't represent the result of this case, so we just // treat this as a phi translation failure. // Make sure to clean up the Visited map before continuing on to // PredTranslationFailure. - for (unsigned i = 0; i < PredList.size(); i++) + for (unsigned i = 0, n = PredList.size(); i < n; ++i) Visited.erase(PredList[i].first); goto PredTranslationFailure; @@ -1143,10 +1143,10 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // Actually process results here; this need to be a separate loop to avoid // calling getNonLocalPointerDepFromBB for blocks we don't want to return - // any results for. (getNonLocalPointerDepFromBB will modify our + // any results for. (getNonLocalPointerDepFromBB will modify our // datastructures in ways the code after the PredTranslationFailure label // doesn't expect.) - for (unsigned i = 0; i < PredList.size(); i++) { + for (unsigned i = 0, n = PredList.size(); i < n; ++i) { BasicBlock *Pred = PredList[i].first; PHITransAddr &PredPointer = PredList[i].second; Value *PredPtrVal = PredPointer.getAddr(); @@ -1186,12 +1186,12 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, continue; } } - + // Refresh the CacheInfo/Cache pointer so that it isn't invalidated. CacheInfo = &NonLocalPointerDeps[CacheKey]; Cache = &CacheInfo->NonLocalDeps; NumSortedEntries = Cache->size(); - + // Since we did phi translation, the "Cache" set won't contain all of the // results for the query. This is ok (we can still use it to accelerate // specific block queries) but we can't do the fastpath "return all @@ -1204,20 +1204,20 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // The following code is "failure"; we can't produce a sane translation // for the given block. It assumes that we haven't modified any of // our datastructures while processing the current block. - + if (Cache == 0) { // Refresh the CacheInfo/Cache pointer if it got invalidated. CacheInfo = &NonLocalPointerDeps[CacheKey]; Cache = &CacheInfo->NonLocalDeps; NumSortedEntries = Cache->size(); } - + // Since we failed phi translation, the "Cache" set won't contain all of the // results for the query. This is ok (we can still use it to accelerate // specific block queries) but we can't do the fastpath "return all // results from the set". Clear out the indicator for this. CacheInfo->Pair = BBSkipFirstBlockPair(); - + // If *nothing* works, mark the pointer as unknown. // // If this is the magic first block, return this as a clobber of the whole @@ -1225,12 +1225,12 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // we have to bail out. 
if (SkipFirstBlock) return true; - + for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) { assert(I != Cache->rend() && "Didn't find current block??"); if (I->getBB() != BB) continue; - + assert(I->getResult().isNonLocal() && "Should only be here with transparent block"); I->setResult(MemDepResult::getUnknown()); @@ -1250,23 +1250,23 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, /// CachedNonLocalPointerInfo, remove it. void MemoryDependenceAnalysis:: RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) { - CachedNonLocalPointerInfo::iterator It = + CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P); if (It == NonLocalPointerDeps.end()) return; - + // Remove all of the entries in the BB->val map. This involves removing // instructions from the reverse map. NonLocalDepInfo &PInfo = It->second.NonLocalDeps; - + for (unsigned i = 0, e = PInfo.size(); i != e; ++i) { Instruction *Target = PInfo[i].getResult().getInst(); if (Target == 0) continue; // Ignore non-local dep results. assert(Target->getParent() == PInfo[i].getBB()); - + // Eliminating the dirty entry from 'Cache', so update the reverse info. RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P); } - + // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo). NonLocalPointerDeps.erase(It); } @@ -1321,20 +1321,20 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { // Remove this local dependency info. LocalDeps.erase(LocalDepEntry); } - + // If we have any cached pointer dependencies on this instruction, remove // them. If the instruction has non-pointer type, then it can't be a pointer // base. - + // Remove it from both the load info and the store info. The instruction // can't be in either of these maps if it is non-pointer. if (RemInst->getType()->isPointerTy()) { RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false)); RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true)); } - + // Loop over all of the things that depend on the instruction we're removing. - // + // SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd; // If we find RemInst as a clobber or Def in any of the maps for other values, @@ -1346,29 +1346,29 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { MemDepResult NewDirtyVal; if (!RemInst->isTerminator()) NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst)); - + ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst); if (ReverseDepIt != ReverseLocalDeps.end()) { SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second; // RemInst can't be the terminator if it has local stuff depending on it. assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) && "Nothing can locally depend on a terminator"); - + for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(), E = ReverseDeps.end(); I != E; ++I) { Instruction *InstDependingOnRemInst = *I; assert(InstDependingOnRemInst != RemInst && "Already removed our local dep info"); - + LocalDeps[InstDependingOnRemInst] = NewDirtyVal; - + // Make sure to remember that new things depend on NewDepInst. 
assert(NewDirtyVal.getInst() && "There is no way something else can have " "a local dep on this if it is a terminator!"); - ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(), + ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst)); } - + ReverseLocalDeps.erase(ReverseDepIt); // Add new reverse deps after scanning the set, to avoid invalidating the @@ -1379,25 +1379,25 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { ReverseDepsToAdd.pop_back(); } } - + ReverseDepIt = ReverseNonLocalDeps.find(RemInst); if (ReverseDepIt != ReverseNonLocalDeps.end()) { SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second; for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end(); I != E; ++I) { assert(*I != RemInst && "Already removed NonLocalDep info for RemInst"); - + PerInstNLInfo &INLD = NonLocalDeps[*I]; // The information is now dirty! INLD.second = true; - - for (NonLocalDepInfo::iterator DI = INLD.first.begin(), + + for (NonLocalDepInfo::iterator DI = INLD.first.begin(), DE = INLD.first.end(); DI != DE; ++DI) { if (DI->getResult().getInst() != RemInst) continue; - + // Convert to a dirty entry for the subsequent instruction. DI->setResult(NewDirtyVal); - + if (Instruction *NextI = NewDirtyVal.getInst()) ReverseDepsToAdd.push_back(std::make_pair(NextI, *I)); } @@ -1412,7 +1412,7 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { ReverseDepsToAdd.pop_back(); } } - + // If the instruction is in ReverseNonLocalPtrDeps then it appears as a // value in the NonLocalPointerDeps info. ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt = @@ -1420,45 +1420,45 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) { SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second; SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd; - + for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(), E = Set.end(); I != E; ++I) { ValueIsLoadPair P = *I; assert(P.getPointer() != RemInst && "Already removed NonLocalPointerDeps info for RemInst"); - + NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps; - + // The cache is not valid for any specific block anymore. NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair(); - + // Update any entries for RemInst to use the instruction after it. for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end(); DI != DE; ++DI) { if (DI->getResult().getInst() != RemInst) continue; - + // Convert to a dirty entry for the subsequent instruction. DI->setResult(NewDirtyVal); - + if (Instruction *NewDirtyInst = NewDirtyVal.getInst()) ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P)); } - + // Re-sort the NonLocalDepInfo. Changing the dirty entry to its // subsequent value may invalidate the sortedness. 
std::sort(NLPDI.begin(), NLPDI.end()); } - + ReverseNonLocalPtrDeps.erase(ReversePtrDepIt); - + while (!ReversePtrDepsToAdd.empty()) { ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first] .insert(ReversePtrDepsToAdd.back().second); ReversePtrDepsToAdd.pop_back(); } } - - + + assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?"); AA->deleteValue(RemInst); DEBUG(verifyRemoved(RemInst)); @@ -1472,7 +1472,7 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const { assert(I->second.getInst() != D && "Inst occurs in data structures"); } - + for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(), E = NonLocalPointerDeps.end(); I != E; ++I) { assert(I->first.getPointer() != D && "Inst occurs in NLPD map key"); @@ -1481,7 +1481,7 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const { II != E; ++II) assert(II->getResult().getInst() != D && "Inst occurs as NLPD value"); } - + for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(), E = NonLocalDeps.end(); I != E; ++I) { assert(I->first != D && "Inst occurs in data structures"); @@ -1490,7 +1490,7 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const { EE = INLD.first.end(); II != EE; ++II) assert(II->getResult().getInst() != D && "Inst occurs in data structures"); } - + for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(), E = ReverseLocalDeps.end(); I != E; ++I) { assert(I->first != D && "Inst occurs in data structures"); @@ -1498,7 +1498,7 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const { EE = I->second.end(); II != EE; ++II) assert(*II != D && "Inst occurs in data structures"); } - + for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(), E = ReverseNonLocalDeps.end(); I != E; ++I) { @@ -1507,17 +1507,17 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const { EE = I->second.end(); II != EE; ++II) assert(*II != D && "Inst occurs in data structures"); } - + for (ReverseNonLocalPtrDepTy::const_iterator I = ReverseNonLocalPtrDeps.begin(), E = ReverseNonLocalPtrDeps.end(); I != E; ++I) { assert(I->first != D && "Inst occurs in rev NLPD map"); - + for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(), E = I->second.end(); II != E; ++II) assert(*II != ValueIsLoadPair(D, false) && *II != ValueIsLoadPair(D, true) && "Inst occurs in ReverseNonLocalPtrDeps map"); } - + } diff --git a/lib/Analysis/PathProfileVerifier.cpp b/lib/Analysis/PathProfileVerifier.cpp index 745d8c6..48d7d05 100644 --- a/lib/Analysis/PathProfileVerifier.cpp +++ b/lib/Analysis/PathProfileVerifier.cpp @@ -84,7 +84,7 @@ bool PathProfileVerifier::runOnModule (Module &M) { for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { if (F->isDeclaration()) continue; - arrayMap[0][F->begin()][0] = i++; + arrayMap[(BasicBlock*)0][F->begin()][0] = i++; for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { TerminatorInst *TI = BB->getTerminator(); @@ -125,7 +125,7 @@ bool PathProfileVerifier::runOnModule (Module &M) { << currentPath->getCount() << "\n"); // setup the entry edge (normally path profiling doesn't care about this) if (currentPath->getFirstBlockInPath() == &F->getEntryBlock()) - edgeArray[arrayMap[0][currentPath->getFirstBlockInPath()][0]] + edgeArray[arrayMap[(BasicBlock*)0][currentPath->getFirstBlockInPath()][0]] += currentPath->getCount(); for( ProfilePathEdgeIterator nextEdge = pev->begin(), diff --git a/lib/Analysis/ProfileInfo.cpp b/lib/Analysis/ProfileInfo.cpp index 
2daa7d4..9626a48 100644 --- a/lib/Analysis/ProfileInfo.cpp +++ b/lib/Analysis/ProfileInfo.cpp @@ -249,7 +249,7 @@ const BasicBlock *ProfileInfoT<Function,BasicBlock>:: succ_const_iterator Succ = succ_begin(BB), End = succ_end(BB); if (Succ == End) { - P[0] = BB; + P[(const BasicBlock*)0] = BB; if (Mode & GetPathToExit) { hasFoundPath = true; BB = 0; @@ -752,10 +752,10 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) { Succ != End; ++Succ) { Path P; GetPath(*Succ, 0, P, GetPathToExit); - if (Dest && Dest != P[0]) { + if (Dest && Dest != P[(const BasicBlock*)0]) { AllEdgesHaveSameReturn = false; } - Dest = P[0]; + Dest = P[(const BasicBlock*)0]; } if (AllEdgesHaveSameReturn) { if(EstimateMissingEdges(BB)) { @@ -927,7 +927,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) { Path P; const BasicBlock *Dest = GetPath(BB, 0, P, GetPathToExit | GetPathWithNewEdges); - Dest = P[0]; + Dest = P[(const BasicBlock*)0]; if (!Dest) continue; if (getEdgeWeight(getEdge(Dest,0)) == MissingValue) { diff --git a/lib/Analysis/RegionInfo.cpp b/lib/Analysis/RegionInfo.cpp index fad5074..81ef650 100644 --- a/lib/Analysis/RegionInfo.cpp +++ b/lib/Analysis/RegionInfo.cpp @@ -79,6 +79,38 @@ void Region::replaceExit(BasicBlock *BB) { exit = BB; } +void Region::replaceEntryRecursive(BasicBlock *NewEntry) { + std::vector<Region *> RegionQueue; + BasicBlock *OldEntry = getEntry(); + + RegionQueue.push_back(this); + while (!RegionQueue.empty()) { + Region *R = RegionQueue.back(); + RegionQueue.pop_back(); + + R->replaceEntry(NewEntry); + for (Region::const_iterator RI = R->begin(), RE = R->end(); RI != RE; ++RI) + if ((*RI)->getEntry() == OldEntry) + RegionQueue.push_back(*RI); + } +} + +void Region::replaceExitRecursive(BasicBlock *NewExit) { + std::vector<Region *> RegionQueue; + BasicBlock *OldExit = getExit(); + + RegionQueue.push_back(this); + while (!RegionQueue.empty()) { + Region *R = RegionQueue.back(); + RegionQueue.pop_back(); + + R->replaceExit(NewExit); + for (Region::const_iterator RI = R->begin(), RE = R->end(); RI != RE; ++RI) + if ((*RI)->getExit() == OldExit) + RegionQueue.push_back(*RI); + } +} + bool Region::contains(const BasicBlock *B) const { BasicBlock *BB = const_cast<BasicBlock*>(B); diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 07d8329..6ea915f 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -4230,6 +4230,25 @@ ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { return Max ? Max : SE->getCouldNotCompute(); } +bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, + ScalarEvolution *SE) const { + if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S)) + return true; + + if (!ExitNotTaken.ExitingBlock) + return false; + + for (const ExitNotTakenInfo *ENT = &ExitNotTaken; + ENT != 0; ENT = ENT->getNextExit()) { + + if (ENT->ExactNotTaken != SE->getCouldNotCompute() + && SE->hasOperand(ENT->ExactNotTaken, S)) { + return true; + } + } + return false; +} + /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( @@ -6940,6 +6959,17 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { BlockDispositions.erase(S); UnsignedRanges.erase(S); SignedRanges.erase(S); + + for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I = + BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) { + BackedgeTakenInfo &BEInfo = I->second; + if (BEInfo.hasOperand(S, this)) { + BEInfo.clear(); + BackedgeTakenCounts.erase(I++); + } + else + ++I; + } } typedef DenseMap<const Loop *, std::string> VerifyMap; diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp index 976cd87..64f8e96 100644 --- a/lib/Analysis/TargetTransformInfo.cpp +++ b/lib/Analysis/TargetTransformInfo.cpp @@ -150,8 +150,10 @@ unsigned TargetTransformInfo::getMaximumUnrollFactor() const { } unsigned TargetTransformInfo::getArithmeticInstrCost(unsigned Opcode, - Type *Ty) const { - return PrevTTI->getArithmeticInstrCost(Opcode, Ty); + Type *Ty, + OperandValueKind Op1Info, + OperandValueKind Op2Info) const { + return PrevTTI->getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info); } unsigned TargetTransformInfo::getShuffleCost(ShuffleKind Kind, Type *Tp, @@ -495,7 +497,8 @@ struct NoTTI : ImmutablePass, TargetTransformInfo { return 1; } - unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const { + unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind, + OperandValueKind) const { return 1; } diff --git a/lib/Analysis/TypeBasedAliasAnalysis.cpp b/lib/Analysis/TypeBasedAliasAnalysis.cpp index 68e43b2..bbf3c3a 100644 --- a/lib/Analysis/TypeBasedAliasAnalysis.cpp +++ b/lib/Analysis/TypeBasedAliasAnalysis.cpp @@ -71,6 +71,7 @@ using namespace llvm; // achieved by stripping the !tbaa tags from IR, but this option is sometimes // more convenient. static cl::opt<bool> EnableTBAA("enable-tbaa", cl::init(true)); +static cl::opt<bool> EnableStructPathTBAA("struct-path-tbaa", cl::init(false)); namespace { /// TBAANode - This is a simple wrapper around an MDNode which provides a @@ -109,6 +110,97 @@ namespace { return CI->getValue()[0]; } }; + + /// This is a simple wrapper around an MDNode which provides a + /// higher-level interface by hiding the details of how alias analysis + /// information is encoded in its operands. + class TBAAStructTagNode { + /// This node should be created with createTBAAStructTagNode. + const MDNode *Node; + + public: + TBAAStructTagNode() : Node(0) {} + explicit TBAAStructTagNode(const MDNode *N) : Node(N) {} + + /// Get the MDNode for this TBAAStructTagNode. + const MDNode *getNode() const { return Node; } + + const MDNode *getBaseType() const { + return dyn_cast_or_null<MDNode>(Node->getOperand(0)); + } + const MDNode *getAccessType() const { + return dyn_cast_or_null<MDNode>(Node->getOperand(1)); + } + uint64_t getOffset() const { + return cast<ConstantInt>(Node->getOperand(2))->getZExtValue(); + } + /// TypeIsImmutable - Test if this TBAAStructTagNode represents a type for + /// objects which are not modified (by any means) in the context where this + /// AliasAnalysis is relevant. + bool TypeIsImmutable() const { + if (Node->getNumOperands() < 4) + return false; + ConstantInt *CI = dyn_cast<ConstantInt>(Node->getOperand(3)); + if (!CI) + return false; + return CI->getValue()[0]; + } + }; + + /// This is a simple wrapper around an MDNode which provides a + /// higher-level interface by hiding the details of how alias analysis + /// information is encoded in its operands. 
+ class TBAAStructTypeNode { + /// This node should be created with createTBAAStructTypeNode. + const MDNode *Node; + + public: + TBAAStructTypeNode() : Node(0) {} + explicit TBAAStructTypeNode(const MDNode *N) : Node(N) {} + + /// Get the MDNode for this TBAAStructTypeNode. + const MDNode *getNode() const { return Node; } + + /// Get this TBAAStructTypeNode's field in the type DAG with + /// given offset. Update the offset to be relative to the field type. + TBAAStructTypeNode getParent(uint64_t &Offset) const { + // Parent can be omitted for the root node. + if (Node->getNumOperands() < 2) + return TBAAStructTypeNode(); + + // Special handling for a scalar type node. + if (Node->getNumOperands() <= 3) { + MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(1)); + if (!P) + return TBAAStructTypeNode(); + return TBAAStructTypeNode(P); + } + + // Assume the offsets are in order. We return the previous field if + // the current offset is bigger than the given offset. + unsigned TheIdx = 0; + for (unsigned Idx = 1; Idx < Node->getNumOperands(); Idx += 2) { + uint64_t Cur = cast<ConstantInt>(Node->getOperand(Idx + 1))-> + getZExtValue(); + if (Cur > Offset) { + assert(Idx >= 3 && + "TBAAStructTypeNode::getParent should have an offset match!"); + TheIdx = Idx - 2; + break; + } + } + // Move along the last field. + if (TheIdx == 0) + TheIdx = Node->getNumOperands() - 2; + uint64_t Cur = cast<ConstantInt>(Node->getOperand(TheIdx + 1))-> + getZExtValue(); + Offset -= Cur; + MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(TheIdx)); + if (!P) + return TBAAStructTypeNode(); + return TBAAStructTypeNode(P); + } + }; } namespace { @@ -137,6 +229,7 @@ namespace { } bool Aliases(const MDNode *A, const MDNode *B) const; + bool PathAliases(const MDNode *A, const MDNode *B) const; private: virtual void getAnalysisUsage(AnalysisUsage &AU) const; @@ -171,6 +264,9 @@ TypeBasedAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { bool TypeBasedAliasAnalysis::Aliases(const MDNode *A, const MDNode *B) const { + if (EnableStructPathTBAA) + return PathAliases(A, B); + // Keep track of the root node for A and B. TBAANode RootA, RootB; @@ -209,6 +305,67 @@ TypeBasedAliasAnalysis::Aliases(const MDNode *A, return false; } +/// Test whether the struct-path tag represented by A may alias the +/// struct-path tag represented by B. +bool +TypeBasedAliasAnalysis::PathAliases(const MDNode *A, + const MDNode *B) const { + // Keep track of the root node for A and B. + TBAAStructTypeNode RootA, RootB; + TBAAStructTagNode TagA(A), TagB(B); + + // TODO: We need to check if AccessType of TagA encloses AccessType of + // TagB to support aggregate AccessType. If yes, return true. + + // Start from the base type of A, follow the edge with the correct offset in + // the type DAG and adjust the offset until we reach the base type of B or + // until we reach the Root node. + // Compare the adjusted offset once we have the same base. + + // Climb the type DAG from base type of A to see if we reach base type of B. + const MDNode *BaseA = TagA.getBaseType(); + const MDNode *BaseB = TagB.getBaseType(); + uint64_t OffsetA = TagA.getOffset(), OffsetB = TagB.getOffset(); + for (TBAAStructTypeNode T(BaseA); ; ) { + if (T.getNode() == BaseB) + // Base type of A encloses base type of B, check if the offsets match. + return OffsetA == OffsetB; + + RootA = T; + // Follow the edge with the correct offset, OffsetA will be adjusted to + // be relative to the field type. 
+ T = T.getParent(OffsetA); + if (!T.getNode()) + break; + } + + // Reset OffsetA and climb the type DAG from base type of B to see if we reach + // base type of A. + OffsetA = TagA.getOffset(); + for (TBAAStructTypeNode T(BaseB); ; ) { + if (T.getNode() == BaseA) + // Base type of B encloses base type of A, check if the offsets match. + return OffsetA == OffsetB; + + RootB = T; + // Follow the edge with the correct offset, OffsetB will be adjusted to + // be relative to the field type. + T = T.getParent(OffsetB); + if (!T.getNode()) + break; + } + + // Neither node is an ancestor of the other. + + // If they have different roots, they're part of different potentially + // unrelated type systems, so we must be conservative. + if (RootA.getNode() != RootB.getNode()) + return true; + + // If they have the same root, then we've proved there's no alias. + return false; +} + AliasAnalysis::AliasResult TypeBasedAliasAnalysis::alias(const Location &LocA, const Location &LocB) { @@ -240,7 +397,8 @@ bool TypeBasedAliasAnalysis::pointsToConstantMemory(const Location &Loc, // If this is an "immutable" type, we can assume the pointer is pointing // to constant memory. - if (TBAANode(M).TypeIsImmutable()) + if ((!EnableStructPathTBAA && TBAANode(M).TypeIsImmutable()) || + (EnableStructPathTBAA && TBAAStructTagNode(M).TypeIsImmutable())) return true; return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); @@ -256,7 +414,8 @@ TypeBasedAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) { // If this is an "immutable" type, we can assume the call doesn't write // to memory. if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) - if (TBAANode(M).TypeIsImmutable()) + if ((!EnableStructPathTBAA && TBAANode(M).TypeIsImmutable()) || + (EnableStructPathTBAA && TBAAStructTagNode(M).TypeIsImmutable())) Min = OnlyReadsMemory; return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min); @@ -298,3 +457,55 @@ TypeBasedAliasAnalysis::getModRefInfo(ImmutableCallSite CS1, return AliasAnalysis::getModRefInfo(CS1, CS2); } + +MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) { + if (!A || !B) + return NULL; + + if (A == B) + return A; + + // For struct-path aware TBAA, we use the access type of the tag. + if (EnableStructPathTBAA) { + A = cast_or_null<MDNode>(A->getOperand(1)); + if (!A) return 0; + B = cast_or_null<MDNode>(B->getOperand(1)); + if (!B) return 0; + } + + SmallVector<MDNode *, 4> PathA; + MDNode *T = A; + while (T) { + PathA.push_back(T); + T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0; + } + + SmallVector<MDNode *, 4> PathB; + T = B; + while (T) { + PathB.push_back(T); + T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0; + } + + int IA = PathA.size() - 1; + int IB = PathB.size() - 1; + + MDNode *Ret = 0; + while (IA >= 0 && IB >=0) { + if (PathA[IA] == PathB[IB]) + Ret = PathA[IA]; + else + break; + --IA; + --IB; + } + if (!EnableStructPathTBAA) + return Ret; + + if (!Ret) + return 0; + // We need to convert from a type node to a tag node. 
+ Type *Int64 = IntegerType::get(A->getContext(), 64); + Value *Ops[3] = { Ret, Ret, ConstantInt::get(Int64, 0) }; + return MDNode::get(A->getContext(), Ops); +} diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp index f46383b..e7a9f2a 100644 --- a/lib/AsmParser/LLLexer.cpp +++ b/lib/AsmParser/LLLexer.cpp @@ -582,6 +582,7 @@ lltok::Kind LLLexer::LexIdentifier() { KEYWORD(optsize); KEYWORD(readnone); KEYWORD(readonly); + KEYWORD(returned); KEYWORD(returns_twice); KEYWORD(signext); KEYWORD(sret); diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp index c8da1f8..998bca5 100644 --- a/lib/AsmParser/LLParser.cpp +++ b/lib/AsmParser/LLParser.cpp @@ -878,8 +878,9 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B, // Target-independent attributes: case lltok::kw_align: { - // As a hack, we allow "align 2" on functions as a synonym for "alignstack - // 2". + // As a hack, we allow function alignment to be initially parsed as an + // attribute on a function declaration/definition or added to an attribute + // group and later moved to the alignment field. unsigned Alignment; if (inAttrGrp) { Lex.Lex(); @@ -943,6 +944,7 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B, case lltok::kw_nest: case lltok::kw_noalias: case lltok::kw_nocapture: + case lltok::kw_returned: case lltok::kw_sret: HaveError |= Error(Lex.getLoc(), @@ -1155,21 +1157,35 @@ bool LLParser::ParseOptionalParamAttrs(AttrBuilder &B) { case lltok::kw_nest: B.addAttribute(Attribute::Nest); break; case lltok::kw_noalias: B.addAttribute(Attribute::NoAlias); break; case lltok::kw_nocapture: B.addAttribute(Attribute::NoCapture); break; + case lltok::kw_returned: B.addAttribute(Attribute::Returned); break; case lltok::kw_signext: B.addAttribute(Attribute::SExt); break; case lltok::kw_sret: B.addAttribute(Attribute::StructRet); break; case lltok::kw_zeroext: B.addAttribute(Attribute::ZExt); break; - case lltok::kw_alignstack: case lltok::kw_nounwind: - case lltok::kw_alwaysinline: case lltok::kw_optsize: - case lltok::kw_inlinehint: case lltok::kw_readnone: - case lltok::kw_minsize: case lltok::kw_readonly: - case lltok::kw_naked: case lltok::kw_returns_twice: - case lltok::kw_nobuiltin: case lltok::kw_sanitize_address: - case lltok::kw_noimplicitfloat: case lltok::kw_sanitize_memory: - case lltok::kw_noinline: case lltok::kw_sanitize_thread: - case lltok::kw_nonlazybind: case lltok::kw_ssp: - case lltok::kw_noredzone: case lltok::kw_sspreq: - case lltok::kw_noreturn: case lltok::kw_uwtable: + case lltok::kw_alignstack: + case lltok::kw_alwaysinline: + case lltok::kw_inlinehint: + case lltok::kw_minsize: + case lltok::kw_naked: + case lltok::kw_nobuiltin: + case lltok::kw_noduplicate: + case lltok::kw_noimplicitfloat: + case lltok::kw_noinline: + case lltok::kw_nonlazybind: + case lltok::kw_noredzone: + case lltok::kw_noreturn: + case lltok::kw_nounwind: + case lltok::kw_optsize: + case lltok::kw_readnone: + case lltok::kw_readonly: + case lltok::kw_returns_twice: + case lltok::kw_sanitize_address: + case lltok::kw_sanitize_memory: + case lltok::kw_sanitize_thread: + case lltok::kw_ssp: + case lltok::kw_sspreq: + case lltok::kw_sspstrong: + case lltok::kw_uwtable: HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute"); break; } @@ -1195,24 +1211,39 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) { case lltok::kw_zeroext: B.addAttribute(Attribute::ZExt); break; // Error handling. 
- case lltok::kw_sret: case lltok::kw_nocapture: - case lltok::kw_byval: case lltok::kw_nest: + case lltok::kw_align: + case lltok::kw_byval: + case lltok::kw_nest: + case lltok::kw_nocapture: + case lltok::kw_returned: + case lltok::kw_sret: HaveError |= Error(Lex.getLoc(), "invalid use of parameter-only attribute"); break; - case lltok::kw_align: case lltok::kw_noreturn: - case lltok::kw_alignstack: case lltok::kw_nounwind: - case lltok::kw_alwaysinline: case lltok::kw_optsize: - case lltok::kw_inlinehint: case lltok::kw_readnone: - case lltok::kw_minsize: case lltok::kw_readonly: - case lltok::kw_naked: case lltok::kw_returns_twice: - case lltok::kw_nobuiltin: case lltok::kw_sanitize_address: - case lltok::kw_noduplicate: case lltok::kw_sanitize_memory: - case lltok::kw_noimplicitfloat: case lltok::kw_sanitize_thread: - case lltok::kw_noinline: case lltok::kw_ssp: - case lltok::kw_nonlazybind: case lltok::kw_sspreq: - case lltok::kw_noredzone: case lltok::kw_sspstrong: - case lltok::kw_uwtable: + case lltok::kw_alignstack: + case lltok::kw_alwaysinline: + case lltok::kw_inlinehint: + case lltok::kw_minsize: + case lltok::kw_naked: + case lltok::kw_nobuiltin: + case lltok::kw_noduplicate: + case lltok::kw_noimplicitfloat: + case lltok::kw_noinline: + case lltok::kw_nonlazybind: + case lltok::kw_noredzone: + case lltok::kw_noreturn: + case lltok::kw_nounwind: + case lltok::kw_optsize: + case lltok::kw_readnone: + case lltok::kw_readonly: + case lltok::kw_returns_twice: + case lltok::kw_sanitize_address: + case lltok::kw_sanitize_memory: + case lltok::kw_sanitize_thread: + case lltok::kw_ssp: + case lltok::kw_sspreq: + case lltok::kw_sspstrong: + case lltok::kw_uwtable: HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute"); break; } @@ -4232,7 +4263,9 @@ int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) { if (ParseTypeAndValue(Ptr, Loc, PFS)) return true; - if (!Ptr->getType()->getScalarType()->isPointerTy()) + Type *BaseType = Ptr->getType(); + PointerType *BasePointerType = dyn_cast<PointerType>(BaseType->getScalarType()); + if (!BasePointerType) return Error(Loc, "base of getelementptr must be a pointer"); SmallVector<Value*, 16> Indices; @@ -4257,7 +4290,10 @@ int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) { Indices.push_back(Val); } - if (!GetElementPtrInst::getIndexedType(Ptr->getType(), Indices)) + if (!Indices.empty() && !BasePointerType->getElementType()->isSized()) + return Error(Loc, "base element of getelementptr must be sized"); + + if (!GetElementPtrInst::getIndexedType(BaseType, Indices)) return Error(Loc, "invalid getelementptr indices"); Inst = GetElementPtrInst::Create(Ptr, Indices); if (InBounds) diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h index cd25ba3..3bf54fa 100644 --- a/lib/AsmParser/LLToken.h +++ b/lib/AsmParser/LLToken.h @@ -114,6 +114,7 @@ namespace lltok { kw_optsize, kw_readnone, kw_readonly, + kw_returned, kw_returns_twice, kw_signext, kw_ssp, diff --git a/lib/Bitcode/Reader/BitReader.cpp b/lib/Bitcode/Reader/BitReader.cpp index 5cd6c55..23630e5 100644 --- a/lib/Bitcode/Reader/BitReader.cpp +++ b/lib/Bitcode/Reader/BitReader.cpp @@ -10,6 +10,7 @@ #include "llvm-c/BitReader.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" #include "llvm/Support/MemoryBuffer.h" #include <cstring> #include <string> diff --git a/lib/Bitcode/Reader/BitstreamReader.cpp b/lib/Bitcode/Reader/BitstreamReader.cpp index 942346b..9dafe2a 
100644 --- a/lib/Bitcode/Reader/BitstreamReader.cpp +++ b/lib/Bitcode/Reader/BitstreamReader.cpp @@ -292,7 +292,7 @@ void BitstreamCursor::ReadAbbrevRecord() { Abbv->Add(BitCodeAbbrevOp(0)); continue; } - + Abbv->Add(BitCodeAbbrevOp(E, Data)); } else Abbv->Add(BitCodeAbbrevOp(E)); diff --git a/lib/Bitcode/Writer/BitWriter.cpp b/lib/Bitcode/Writer/BitWriter.cpp index 9f51c35..985208c 100644 --- a/lib/Bitcode/Writer/BitWriter.cpp +++ b/lib/Bitcode/Writer/BitWriter.cpp @@ -9,6 +9,7 @@ #include "llvm-c/BitWriter.h" #include "llvm/Bitcode/ReaderWriter.h" +#include "llvm/IR/Module.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; diff --git a/lib/Bitcode/Writer/ValueEnumerator.cpp b/lib/Bitcode/Writer/ValueEnumerator.cpp index 4f19dd0..8bac6da 100644 --- a/lib/Bitcode/Writer/ValueEnumerator.cpp +++ b/lib/Bitcode/Writer/ValueEnumerator.cpp @@ -60,7 +60,7 @@ ValueEnumerator::ValueEnumerator(const Module *M) { I != E; ++I) EnumerateValue(I->getAliasee()); - // Insert constants and metadata that are named at module level into the slot + // Insert constants and metadata that are named at module level into the slot // pool so that the module symbol table can refer to them... EnumerateValueSymbolTable(M->getValueSymbolTable()); EnumerateNamedMetadata(M); diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index d1ea027..76ebe9a 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -1,6 +1,7 @@ # `Support' and `TableGen' libraries are added on the top-level CMakeLists.txt add_subdirectory(IR) +add_subdirectory(IRReader) add_subdirectory(CodeGen) add_subdirectory(Bitcode) add_subdirectory(Transforms) diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp index dd7282c..4731af5 100644 --- a/lib/CodeGen/Analysis.cpp +++ b/lib/CodeGen/Analysis.cpp @@ -201,62 +201,161 @@ ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) { } } +static bool isNoopBitcast(Type *T1, Type *T2, + const TargetLowering& TLI) { + return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) || + (isa<VectorType>(T1) && isa<VectorType>(T2) && + TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2))); +} -/// getNoopInput - If V is a noop (i.e., lowers to no machine code), look -/// through it (and any transitive noop operands to it) and return its input -/// value. This is used to determine if a tail call can be formed. -/// -static const Value *getNoopInput(const Value *V, const TargetLowering &TLI) { - // If V is not an instruction, it can't be looked through. - const Instruction *I = dyn_cast<Instruction>(V); - if (I == 0 || !I->hasOneUse() || I->getNumOperands() == 0) return V; - - Value *Op = I->getOperand(0); +/// sameNoopInput - Return true if V1 == V2, else if either V1 or V2 is a noop +/// (i.e., lowers to no machine code), look through it (and any transitive noop +/// operands to it) and check if it has the same noop input value. This is +/// used to determine if a tail call can be formed. +static bool sameNoopInput(const Value *V1, const Value *V2, + SmallVectorImpl<unsigned> &Els1, + SmallVectorImpl<unsigned> &Els2, + const TargetLowering &TLI) { + using std::swap; + bool swapParity = false; + bool equalEls = Els1 == Els2; + while (true) { + if ((equalEls && V1 == V2) || isa<UndefValue>(V1) || isa<UndefValue>(V2)) { + if (swapParity) + // Revert to original Els1 and Els2 to avoid confusing recursive calls + swap(Els1, Els2); + return true; + } - // Look through truly no-op truncates. 
- if (isa<TruncInst>(I) && - TLI.isTruncateFree(I->getOperand(0)->getType(), I->getType())) - return getNoopInput(I->getOperand(0), TLI); - - // Look through truly no-op bitcasts. - if (isa<BitCastInst>(I)) { - // No type change at all. - if (Op->getType() == I->getType()) - return getNoopInput(Op, TLI); + // Try to look through V1; if V1 is not an instruction, it can't be looked + // through. + const Instruction *I = dyn_cast<Instruction>(V1); + const Value *NoopInput = 0; + if (I != 0 && I->getNumOperands() > 0) { + Value *Op = I->getOperand(0); + if (isa<TruncInst>(I)) { + // Look through truly no-op truncates. + if (TLI.isTruncateFree(Op->getType(), I->getType())) + NoopInput = Op; + } else if (isa<BitCastInst>(I)) { + // Look through truly no-op bitcasts. + if (isNoopBitcast(Op->getType(), I->getType(), TLI)) + NoopInput = Op; + } else if (isa<GetElementPtrInst>(I)) { + // Look through getelementptr + if (cast<GetElementPtrInst>(I)->hasAllZeroIndices()) + NoopInput = Op; + } else if (isa<IntToPtrInst>(I)) { + // Look through inttoptr. + // Make sure this isn't a truncating or extending cast. We could + // support this eventually, but don't bother for now. + if (!isa<VectorType>(I->getType()) && + TLI.getPointerTy().getSizeInBits() == + cast<IntegerType>(Op->getType())->getBitWidth()) + NoopInput = Op; + } else if (isa<PtrToIntInst>(I)) { + // Look through ptrtoint. + // Make sure this isn't a truncating or extending cast. We could + // support this eventually, but don't bother for now. + if (!isa<VectorType>(I->getType()) && + TLI.getPointerTy().getSizeInBits() == + cast<IntegerType>(I->getType())->getBitWidth()) + NoopInput = Op; + } else if (isa<CallInst>(I)) { + // Look through call + for (User::const_op_iterator i = I->op_begin(), + // Skip Callee + e = I->op_end() - 1; + i != e; ++i) { + unsigned attrInd = i - I->op_begin() + 1; + if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) && + isNoopBitcast((*i)->getType(), I->getType(), TLI)) { + NoopInput = *i; + break; + } + } + } else if (isa<InvokeInst>(I)) { + // Look through invoke + for (User::const_op_iterator i = I->op_begin(), + // Skip BB, BB, Callee + e = I->op_end() - 3; + i != e; ++i) { + unsigned attrInd = i - I->op_begin() + 1; + if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) && + isNoopBitcast((*i)->getType(), I->getType(), TLI)) { + NoopInput = *i; + break; + } + } + } + } - // Pointer to pointer cast. - if (Op->getType()->isPointerTy() && I->getType()->isPointerTy()) - return getNoopInput(Op, TLI); - - if (isa<VectorType>(Op->getType()) && isa<VectorType>(I->getType()) && - TLI.isTypeLegal(EVT::getEVT(Op->getType())) && - TLI.isTypeLegal(EVT::getEVT(I->getType()))) - return getNoopInput(Op, TLI); - } - - // Look through inttoptr. - if (isa<IntToPtrInst>(I) && !isa<VectorType>(I->getType())) { - // Make sure this isn't a truncating or extending cast. We could support - // this eventually, but don't bother for now. - if (TLI.getPointerTy().getSizeInBits() == - cast<IntegerType>(Op->getType())->getBitWidth()) - return getNoopInput(Op, TLI); - } + if (NoopInput) { + V1 = NoopInput; + continue; + } - // Look through ptrtoint. - if (isa<PtrToIntInst>(I) && !isa<VectorType>(I->getType())) { - // Make sure this isn't a truncating or extending cast. We could support - // this eventually, but don't bother for now. 
- if (TLI.getPointerTy().getSizeInBits() == - cast<IntegerType>(I->getType())->getBitWidth()) - return getNoopInput(Op, TLI); + // If we already swapped, avoid infinite loop + if (swapParity) + break; + + // Otherwise, swap V1<->V2, Els1<->Els2 + swap(V1, V2); + swap(Els1, Els2); + swapParity = !swapParity; } + for (unsigned n = 0; n < 2; ++n) { + if (isa<InsertValueInst>(V1)) { + if (isa<StructType>(V1->getType())) { + // Look through insertvalue + unsigned i, e; + for (i = 0, e = cast<StructType>(V1->getType())->getNumElements(); + i != e; ++i) { + const Value *InScalar = FindInsertedValue(const_cast<Value*>(V1), i); + if (InScalar == 0) + break; + Els1.push_back(i); + if (!sameNoopInput(InScalar, V2, Els1, Els2, TLI)) { + Els1.pop_back(); + break; + } + Els1.pop_back(); + } + if (i == e) { + if (swapParity) + swap(Els1, Els2); + return true; + } + } + } else if (!Els1.empty() && isa<ExtractValueInst>(V1)) { + const ExtractValueInst *EVI = cast<ExtractValueInst>(V1); + unsigned i = Els1.back(); + // If the scalar value being inserted is an extractvalue of the right + // index from the call, then everything is good. + if (isa<StructType>(EVI->getOperand(0)->getType()) && + EVI->getNumIndices() == 1 && EVI->getIndices()[0] == i) { + // Look through extractvalue + Els1.pop_back(); + if (sameNoopInput(EVI->getOperand(0), V2, Els1, Els2, TLI)) { + Els1.push_back(i); + if (swapParity) + swap(Els1, Els2); + return true; + } + Els1.push_back(i); + } + } - // Otherwise it's not something we can look through. - return V; -} + swap(V1, V2); + swap(Els1, Els2); + swapParity = !swapParity; + } + if (swapParity) + swap(Els1, Els2); + return false; +} /// Test if the given instruction is in a position to be optimized /// with a tail-call. This roughly means that it's in a block with @@ -264,7 +363,8 @@ static const Value *getNoopInput(const Value *V, const TargetLowering &TLI) { /// between it and the return. /// /// This function only tests target-independent requirements. -bool llvm::isInTailCallPosition(ImmutableCallSite CS,const TargetLowering &TLI){ +bool llvm::isInTailCallPosition(ImmutableCallSite CS, + const TargetLowering &TLI) { const Instruction *I = CS.getInstruction(); const BasicBlock *ExitBB = I->getParent(); const TerminatorInst *Term = ExitBB->getTerminator(); @@ -322,28 +422,7 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS,const TargetLowering &TLI){ CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt)) return false; - // Otherwise, make sure the unmodified return value of I is the return value. - // We handle two cases: multiple return values + scalars. - Value *RetVal = Ret->getOperand(0); - if (!isa<InsertValueInst>(RetVal) || !isa<StructType>(RetVal->getType())) - // Handle scalars first. - return getNoopInput(Ret->getOperand(0), TLI) == I; - - // If this is an aggregate return, look through the insert/extract values and - // see if each is transparent. - for (unsigned i = 0, e =cast<StructType>(RetVal->getType())->getNumElements(); - i != e; ++i) { - const Value *InScalar = FindInsertedValue(RetVal, i); - if (InScalar == 0) return false; - InScalar = getNoopInput(InScalar, TLI); - - // If the scalar value being inserted is an extractvalue of the right index - // from the call, then everything is good. 
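The call and invoke cases in sameNoopInput above key off the new 'returned' parameter attribute, using 1-based attribute indices (index 0 is the return value). A hedged, stand-alone form of the same check for a plain call; the helper name is illustrative:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Return the argument the call is declared to forward to its result via
    // the 'returned' attribute, or null if no such argument exists.
    static const Value *getReturnedArgument(const CallInst *CI) {
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
        if (CI->paramHasAttr(i + 1, Attribute::Returned))
          return CI->getArgOperand(i);
      return 0;
    }
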
- const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(InScalar); - if (EVI == 0 || EVI->getOperand(0) != I || EVI->getNumIndices() != 1 || - EVI->getIndices()[0] != i) - return false; - } - - return true; + // Otherwise, make sure the return value and I have the same value + SmallVector<unsigned, 4> Els1, Els2; + return sameNoopInput(Ret->getOperand(0), I, Els1, Els2, TLI); } diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index d4a745d..84162ace 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -133,9 +133,13 @@ const DataLayout &AsmPrinter::getDataLayout() const { return *TM.getDataLayout(); } +StringRef AsmPrinter::getTargetTriple() const { + return TM.getTargetTriple(); +} + /// getCurrentSection() - Return the current section we are emitting to. const MCSection *AsmPrinter::getCurrentSection() const { - return OutStreamer.getCurrentSection(); + return OutStreamer.getCurrentSection().first; } @@ -813,7 +817,7 @@ void AsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc) const { // caller might be in the middle of an dwarf expression. We should // probably assert that Reg >= 0 once debug info generation is more mature. - if (int Offset = MLoc.getOffset()) { + if (MLoc.isIndirect()) { if (Reg < 32) { OutStreamer.AddComment( dwarf::OperationEncodingString(dwarf::DW_OP_breg0 + Reg)); @@ -824,7 +828,7 @@ void AsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc) const { OutStreamer.AddComment(Twine(Reg)); EmitULEB128(Reg); } - EmitSLEB128(Offset); + EmitSLEB128(MLoc.getOffset()); } else { if (Reg < 32) { OutStreamer.AddComment( @@ -1213,7 +1217,7 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI, bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) { if (GV->getName() == "llvm.used") { if (MAI->hasNoDeadStrip()) // No need to emit this at all. - EmitLLVMUsedList(GV->getInitializer()); + EmitLLVMUsedList(cast<ConstantArray>(GV->getInitializer())); return true; } @@ -1256,11 +1260,8 @@ bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) { /// EmitLLVMUsedList - For targets that define a MAI::UsedDirective, mark each /// global in the specified llvm.used list for which emitUsedDirectiveFor /// is true, as being used with this directive. -void AsmPrinter::EmitLLVMUsedList(const Constant *List) { +void AsmPrinter::EmitLLVMUsedList(const ConstantArray *InitList) { // Should be an array of 'i8*'. - const ConstantArray *InitList = dyn_cast<ConstantArray>(List); - if (InitList == 0) return; - for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) { const GlobalValue *GV = dyn_cast<GlobalValue>(InitList->getOperand(i)->stripPointerCasts()); diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp index 156acac..31e42d4 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp @@ -141,7 +141,7 @@ void AsmPrinter::EmitTTypeReference(const GlobalValue *GV, void AsmPrinter::EmitSectionOffset(const MCSymbol *Label, const MCSymbol *SectionLabel) const { // On COFF targets, we have to emit the special .secrel32 directive. 
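EmitDwarfRegOp above now keys on MLoc.isIndirect() rather than on a non-zero offset when choosing between a register location and a memory location at register plus offset. A hedged illustration of the two MachineLocation forms it distinguishes (register and offset values are placeholders):

    #include "llvm/MC/MachineLocation.h"
    using namespace llvm;

    void locationForms(unsigned FrameReg) {
      MachineLocation InRegister;   // the variable's value lives in the register
      InRegister.set(FrameReg);     // emitted above as DW_OP_reg<n>

      MachineLocation InMemory;     // the value lives at [FrameReg - 8]
      InMemory.set(FrameReg, -8);   // emitted above as DW_OP_breg<n> -8
    }
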
- if (MAI->getDwarfSectionOffsetDirective()) { + if (MAI->needsDwarfSectionOffsetDirective()) { OutStreamer.EmitCOFFSecRel32(Label); return; } diff --git a/lib/CodeGen/AsmPrinter/CMakeLists.txt b/lib/CodeGen/AsmPrinter/CMakeLists.txt index 58fe2ed..8d15c06 100644 --- a/lib/CodeGen/AsmPrinter/CMakeLists.txt +++ b/lib/CodeGen/AsmPrinter/CMakeLists.txt @@ -9,6 +9,7 @@ add_llvm_library(LLVMAsmPrinter DwarfCompileUnit.cpp DwarfDebug.cpp DwarfException.cpp + ErlangGCPrinter.cpp OcamlGCPrinter.cpp Win64Exception.cpp ) diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp index bbb0432..57e0acd 100644 --- a/lib/CodeGen/AsmPrinter/DIE.cpp +++ b/lib/CodeGen/AsmPrinter/DIE.cpp @@ -144,7 +144,7 @@ void DIE::print(raw_ostream &O, unsigned IncIndent) { O << "Size: " << Size << "\n"; } - const SmallVector<DIEAbbrevData, 8> &Data = Abbrev.getData(); + const SmallVectorImpl<DIEAbbrevData> &Data = Abbrev.getData(); IndentCount += 2; for (unsigned i = 0, N = Data.size(); i < N; ++i) { @@ -324,7 +324,7 @@ void DIEEntry::print(raw_ostream &O) { /// unsigned DIEBlock::ComputeSize(AsmPrinter *AP) { if (!Size) { - const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev.getData(); + const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev.getData(); for (unsigned i = 0, N = Values.size(); i < N; ++i) Size += Values[i]->SizeOf(AP, AbbrevData[i].getForm()); } @@ -343,7 +343,7 @@ void DIEBlock::EmitValue(AsmPrinter *Asm, unsigned Form) const { case dwarf::DW_FORM_block: Asm->EmitULEB128(Size); break; } - const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev.getData(); + const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev.getData(); for (unsigned i = 0, N = Values.size(); i < N; ++i) Values[i]->EmitValue(Asm, AbbrevData[i].getForm()); } diff --git a/lib/CodeGen/AsmPrinter/DIE.h b/lib/CodeGen/AsmPrinter/DIE.h index d087c54..c332aa2 100644 --- a/lib/CodeGen/AsmPrinter/DIE.h +++ b/lib/CodeGen/AsmPrinter/DIE.h @@ -66,7 +66,7 @@ namespace llvm { /// Data - Raw data bytes for abbreviation. /// - SmallVector<DIEAbbrevData, 8> Data; + SmallVector<DIEAbbrevData, 12> Data; public: DIEAbbrev(uint16_t T, uint16_t C) : Tag(T), ChildrenFlag(C), Data() {} @@ -75,7 +75,7 @@ namespace llvm { uint16_t getTag() const { return Tag; } unsigned getNumber() const { return Number; } uint16_t getChildrenFlag() const { return ChildrenFlag; } - const SmallVector<DIEAbbrevData, 8> &getData() const { return Data; } + const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; } void setTag(uint16_t T) { Tag = T; } void setChildrenFlag(uint16_t CF) { ChildrenFlag = CF; } void setNumber(unsigned N) { Number = N; } @@ -108,7 +108,7 @@ namespace llvm { //===--------------------------------------------------------------------===// /// DIE - A structured debug information entry. Has an abbreviation which - /// describes it's organization. + /// describes its organization. class DIEValue; class DIE { @@ -133,7 +133,7 @@ namespace llvm { /// Attribute values. 
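The DIE and DIEAbbrev accessors in this hunk now return SmallVectorImpl references, so callers are no longer tied to a particular inline element count (which also lets DIEAbbrev grow its inline storage from 8 to 12 elements without touching users). A small illustration of why the Impl form is the right parameter type:

    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    // Binds to a SmallVector of any inline size.
    static unsigned countValues(const SmallVectorImpl<int> &V) { return V.size(); }

    void smallVectorImplDemo() {
      SmallVector<int, 8>  A(3, 0);
      SmallVector<int, 12> B(5, 0);
      countValues(A); // fine
      countValues(B); // also fine; a 'const SmallVector<int, 8> &' parameter
                      // would not accept B
    }
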
/// - SmallVector<DIEValue*, 32> Values; + SmallVector<DIEValue*, 12> Values; // Private data for print() mutable unsigned IndentCount; @@ -150,7 +150,7 @@ namespace llvm { unsigned getOffset() const { return Offset; } unsigned getSize() const { return Size; } const std::vector<DIE *> &getChildren() const { return Children; } - const SmallVector<DIEValue*, 32> &getValues() const { return Values; } + const SmallVectorImpl<DIEValue*> &getValues() const { return Values; } DIE *getParent() const { return Parent; } /// Climb up the parent chain to get the compile unit DIE this DIE belongs /// to. diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 1c743c2..89abcff 100644 --- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -685,7 +685,7 @@ bool CompileUnit::addConstantValue(DIE *Die, const APInt &Val, return true; } -/// addTemplateParams - Add template parameters in buffer. +/// addTemplateParams - Add template parameters into buffer. void CompileUnit::addTemplateParams(DIE &Buffer, DIArray TParams) { // Add template parameters. for (unsigned i = 0, e = TParams.getNumElements(); i != e; ++i) { @@ -707,7 +707,7 @@ DIE *CompileUnit::getOrCreateContextDIE(DIDescriptor Context) { return getOrCreateNameSpace(DINameSpace(Context)); else if (Context.isSubprogram()) return getOrCreateSubprogramDIE(DISubprogram(Context)); - else + else return getDIE(Context); } @@ -1366,7 +1366,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) { } } else if (const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(GV.getConstant())) { - // AT_const_value was added when the static memeber was created. To avoid + // AT_const_value was added when the static member was created. To avoid // emitting AT_const_value multiple times, we only add AT_const_value when // it is not a static member. if (!IsStaticMember) diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index b169602..ee6308c 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -55,7 +55,7 @@ static cl::opt<bool> UnknownLocations("use-unknown-locations", cl::Hidden, cl::init(false)); static cl::opt<bool> GenerateDwarfPubNamesSection("generate-dwarf-pubnames", - cl::Hidden, cl::ZeroOrMore, cl::init(false), + cl::Hidden, cl::init(false), cl::desc("Generate DWARF pubnames section")); namespace { @@ -170,12 +170,13 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M) DwarfInfoSectionSym = DwarfAbbrevSectionSym = 0; DwarfStrSectionSym = TextSectionSym = 0; DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = DwarfLineSectionSym = 0; + DwarfAddrSectionSym = 0; DwarfAbbrevDWOSectionSym = DwarfStrDWOSectionSym = 0; FunctionBeginSym = FunctionEndSym = 0; // Turn on accelerator tables and older gdb compatibility // for Darwin. - bool IsDarwin = Triple(M->getTargetTriple()).isOSDarwin(); + bool IsDarwin = Triple(A->getTargetTriple()).isOSDarwin(); if (DarwinGDBCompat == Default) { if (IsDarwin) IsDarwinGDBCompat = true; @@ -643,7 +644,7 @@ unsigned DwarfDebug::getOrCreateSourceID(StringRef FileName, // We look up the CUID/file/dir by concatenating them with a zero byte. SmallString<128> NamePair; - NamePair += CUID; + NamePair += utostr(CUID); NamePair += '\0'; NamePair += DirName; NamePair += '\0'; // Zero bytes are not allowed in paths. 
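The switch from 'NamePair += CUID' to 'NamePair += utostr(CUID)' above matters because SmallString's operator+= treats an integer operand as a single character, so the old code appended one raw byte (presumably a NUL byte for CUID 0, exactly the separator the key format reserves) rather than the CUID's decimal digits. A hedged illustration:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringExtras.h"
    using namespace llvm;

    void keyForCU(unsigned CUID) {
      SmallString<32> Raw, Printed;
      Raw += CUID;             // appends one char with value CUID ('\0' when CUID is 0)
      Printed += utostr(CUID); // appends the decimal digits, e.g. "42"
    }
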
@@ -681,9 +682,12 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) { NewCU->addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2, DIUnit.getLanguage()); NewCU->addString(Die, dwarf::DW_AT_name, FN); + // 2.17.1 requires that we use DW_AT_low_pc for a single entry point - // into an entity. We're using 0 (or a NULL label) for this. - NewCU->addLabelAddress(Die, dwarf::DW_AT_low_pc, NULL); + // into an entity. We're using 0 (or a NULL label) for this. For + // split dwarf it's in the skeleton CU so omit it here. + if (!useSplitDwarf()) + NewCU->addLabelAddress(Die, dwarf::DW_AT_low_pc, NULL); // Define start line table label for each Compile Unit. MCSymbol *LineTableStartSym = Asm->GetTempSymbol("line_table_start", @@ -692,20 +696,25 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) { NewCU->getUniqueID()); // DW_AT_stmt_list is a offset of line number information for this - // compile unit in debug_line section. + // compile unit in debug_line section. For split dwarf this is + // left in the skeleton CU and so not included. // The line table entries are not always emitted in assembly, so it // is not okay to use line_table_start here. - if (Asm->MAI->doesDwarfUseRelocationsAcrossSections()) - NewCU->addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, - NewCU->getUniqueID() == 0 ? - Asm->GetTempSymbol("section_line") : LineTableStartSym); - else if (NewCU->getUniqueID() == 0) - NewCU->addUInt(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0); - else - NewCU->addDelta(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, - LineTableStartSym, DwarfLineSectionSym); + if (!useSplitDwarf()) { + if (Asm->MAI->doesDwarfUseRelocationsAcrossSections()) + NewCU->addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, + NewCU->getUniqueID() == 0 ? + Asm->GetTempSymbol("section_line") : LineTableStartSym); + else if (NewCU->getUniqueID() == 0) + NewCU->addUInt(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0); + else + NewCU->addDelta(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, + LineTableStartSym, DwarfLineSectionSym); + } - if (!CompilationDir.empty()) + // If we're using split dwarf the compilation dir is going to be in the + // skeleton CU and so we don't need to duplicate it here. + if (!useSplitDwarf() && !CompilationDir.empty()) NewCU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir); if (DIUnit.isOptimized()) NewCU->addFlag(Die, dwarf::DW_AT_APPLE_optimized); @@ -721,13 +730,6 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) { if (!FirstCU) FirstCU = NewCU; - if (useSplitDwarf()) { - // This should be a unique identifier when we want to build .dwp files. - NewCU->addUInt(Die, dwarf::DW_AT_GNU_dwo_id, dwarf::DW_FORM_data8, 0); - // Now construct the skeleton CU associated. 
- constructSkeletonCU(N); - } - InfoHolder.addUnit(NewCU); CUMap.insert(std::make_pair(N, NewCU)); @@ -761,6 +763,23 @@ void DwarfDebug::constructSubprogramDIE(CompileUnit *TheCU, TheCU->addGlobalName(SP.getName(), SubprogramDie); } +void DwarfDebug::constructImportedModuleDIE(CompileUnit *TheCU, + const MDNode *N) { + DIImportedModule Module(N); + if (!Module.Verify()) + return; + DIE *IMDie = new DIE(dwarf::DW_TAG_imported_module); + TheCU->insertDIE(Module, IMDie); + DIE *NSDie = TheCU->getOrCreateNameSpace(Module.getNameSpace()); + unsigned FileID = getOrCreateSourceID(Module.getContext().getFilename(), + Module.getContext().getDirectory(), + TheCU->getUniqueID()); + TheCU->addUInt(IMDie, dwarf::DW_AT_decl_file, 0, FileID); + TheCU->addUInt(IMDie, dwarf::DW_AT_decl_line, 0, Module.getLineNumber()); + TheCU->addDIEEntry(IMDie, dwarf::DW_AT_import, dwarf::DW_FORM_ref4, NSDie); + TheCU->addToContextOwner(IMDie, Module.getContext()); +} + // Emit all Dwarf sections that should come prior to the content. Create // global DIEs and emit initial debug info sections. This is invoked by // the target AsmPrinter. @@ -794,6 +813,20 @@ void DwarfDebug::beginModule() { DIArray RetainedTypes = CUNode.getRetainedTypes(); for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i) CU->getOrCreateTypeDIE(RetainedTypes.getElement(i)); + // Emit imported_modules last so that the relevant context is already + // available. + DIArray ImportedModules = CUNode.getImportedModules(); + for (unsigned i = 0, e = ImportedModules.getNumElements(); i != e; ++i) + constructImportedModuleDIE(CU, ImportedModules.getElement(i)); + // If we're splitting the dwarf out now that we've got the entire + // CU then construct a skeleton CU based upon it. + if (useSplitDwarf()) { + // This should be a unique identifier when we want to build .dwp files. + CU->addUInt(CU->getCUDie(), dwarf::DW_AT_GNU_dwo_id, + dwarf::DW_FORM_data8, 0); + // Now construct the skeleton CU associated. + constructSkeletonCU(CUNode); + } } // Tell MMI that we have debug info. @@ -1098,7 +1131,13 @@ static DotDebugLocEntry getDebugLocEntry(AsmPrinter *Asm, } if (MI->getOperand(0).isReg() && MI->getOperand(1).isImm()) { MachineLocation MLoc; - MLoc.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm()); + // TODO: Currently an offset of 0 in a DBG_VALUE means + // we need to generate a direct register value. + // There is no way to specify an indirect value with offset 0. + if (MI->getOperand(1).getImm() == 0) + MLoc.set(MI->getOperand(0).getReg()); + else + MLoc.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm()); return DotDebugLocEntry(FLabel, SLabel, MLoc, Var); } if (MI->getOperand(0).isImm()) @@ -1671,8 +1710,8 @@ DwarfUnits::computeSizeAndOffset(DIE *Die, unsigned Offset) { // Start the size with the size of abbreviation code. Offset += MCAsmInfo::getULEB128Size(AbbrevNumber); - const SmallVector<DIEValue*, 32> &Values = Die->getValues(); - const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev->getData(); + const SmallVectorImpl<DIEValue*> &Values = Die->getValues(); + const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev->getData(); // Size the DIE attribute values. for (unsigned i = 0, N = Values.size(); i < N; ++i) @@ -1699,7 +1738,7 @@ DwarfUnits::computeSizeAndOffset(DIE *Die, unsigned Offset) { void DwarfUnits::computeSizeAndOffsets() { // Offset from the beginning of debug info section. 
unsigned AccuOffset = 0; - for (SmallVector<CompileUnit *, 1>::iterator I = CUs.begin(), + for (SmallVectorImpl<CompileUnit *>::iterator I = CUs.begin(), E = CUs.end(); I != E; ++I) { (*I)->setDebugInfoOffset(AccuOffset); unsigned Offset = @@ -1739,9 +1778,12 @@ void DwarfDebug::emitSectionLabels() { emitSectionSym(Asm, TLOF.getDwarfPubTypesSection()); DwarfStrSectionSym = emitSectionSym(Asm, TLOF.getDwarfStrSection(), "info_string"); - if (useSplitDwarf()) + if (useSplitDwarf()) { DwarfStrDWOSectionSym = emitSectionSym(Asm, TLOF.getDwarfStrDWOSection(), "skel_string"); + DwarfAddrSectionSym = + emitSectionSym(Asm, TLOF.getDwarfAddrSection(), "addr_sec"); + } DwarfDebugRangeSectionSym = emitSectionSym(Asm, TLOF.getDwarfRangesSection(), "debug_range"); @@ -1766,8 +1808,8 @@ void DwarfDebug::emitDIE(DIE *Die, std::vector<DIEAbbrev *> *Abbrevs) { dwarf::TagString(Abbrev->getTag())); Asm->EmitULEB128(AbbrevNumber); - const SmallVector<DIEValue*, 32> &Values = Die->getValues(); - const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev->getData(); + const SmallVectorImpl<DIEValue*> &Values = Die->getValues(); + const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev->getData(); // Emit the DIE attribute values. for (unsigned i = 0, N = Values.size(); i < N; ++i) { @@ -1855,7 +1897,7 @@ void DwarfUnits::emitUnits(DwarfDebug *DD, const MCSection *ASection, const MCSymbol *ASectionSym) { Asm->OutStreamer.SwitchSection(USection); - for (SmallVector<CompileUnit *, 1>::iterator I = CUs.begin(), + for (SmallVectorImpl<CompileUnit *>::iterator I = CUs.begin(), E = CUs.end(); I != E; ++I) { CompileUnit *TheCU = *I; DIE *Die = TheCU->getCUDie(); @@ -1891,7 +1933,7 @@ void DwarfUnits::emitUnits(DwarfDebug *DD, unsigned DwarfUnits::getCUOffset(DIE *Die) { assert(Die->getTag() == dwarf::DW_TAG_compile_unit && "Input DIE should be compile unit in getCUOffset."); - for (SmallVector<CompileUnit *, 1>::iterator I = CUs.begin(), + for (SmallVectorImpl<CompileUnit *>::iterator I = CUs.begin(), E = CUs.end(); I != E; ++I) { CompileUnit *TheCU = *I; if (TheCU->getCUDie() == Die) @@ -2284,7 +2326,7 @@ void DwarfDebug::emitDebugLoc() { if (DotDebugLocEntries.empty()) return; - for (SmallVector<DotDebugLocEntry, 4>::iterator + for (SmallVectorImpl<DotDebugLocEntry>::iterator I = DotDebugLocEntries.begin(), E = DotDebugLocEntries.end(); I != E; ++I) { DotDebugLocEntry &Entry = *I; @@ -2298,7 +2340,7 @@ void DwarfDebug::emitDebugLoc() { unsigned char Size = Asm->getDataLayout().getPointerSize(); Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0)); unsigned index = 1; - for (SmallVector<DotDebugLocEntry, 4>::iterator + for (SmallVectorImpl<DotDebugLocEntry>::iterator I = DotDebugLocEntries.begin(), E = DotDebugLocEntries.end(); I != E; ++I, ++index) { DotDebugLocEntry &Entry = *I; @@ -2391,7 +2433,7 @@ void DwarfDebug::emitDebugRanges() { Asm->OutStreamer.SwitchSection( Asm->getObjFileLowering().getDwarfRangesSection()); unsigned char Size = Asm->getDataLayout().getPointerSize(); - for (SmallVector<const MCSymbol *, 8>::iterator + for (SmallVectorImpl<const MCSymbol *>::iterator I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end(); I != E; ++I) { if (*I) @@ -2449,13 +2491,13 @@ void DwarfDebug::emitDebugInlineInfo() { Asm->OutStreamer.AddComment("Address Size (in bytes)"); Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); - for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(), + for (SmallVectorImpl<const MDNode *>::iterator I = InlinedSPNodes.begin(), E = InlinedSPNodes.end(); 
I != E; ++I) { const MDNode *Node = *I; DenseMap<const MDNode *, SmallVector<InlineInfoLabels, 4> >::iterator II = InlineInfo.find(Node); - SmallVector<InlineInfoLabels, 4> &Labels = II->second; + SmallVectorImpl<InlineInfoLabels> &Labels = II->second; DISubprogram SP(Node); StringRef LName = SP.getLinkageName(); StringRef Name = SP.getName(); @@ -2474,7 +2516,7 @@ void DwarfDebug::emitDebugInlineInfo() { DwarfStrSectionSym); Asm->EmitULEB128(Labels.size(), "Inline count"); - for (SmallVector<InlineInfoLabels, 4>::iterator LI = Labels.begin(), + for (SmallVectorImpl<InlineInfoLabels>::iterator LI = Labels.begin(), LE = Labels.end(); LI != LE; ++LI) { if (Asm->isVerbose()) Asm->OutStreamer.AddComment("DIE offset"); Asm->EmitInt32(LI->second->getOffset()); @@ -2509,9 +2551,14 @@ CompileUnit *DwarfDebug::constructSkeletonCU(const MDNode *N) { // This should be a unique identifier when we want to build .dwp files. NewCU->addUInt(Die, dwarf::DW_AT_GNU_dwo_id, dwarf::DW_FORM_data8, 0); - // FIXME: The addr base should be relative for each compile unit, however, - // this one is going to be 0 anyhow. - NewCU->addUInt(Die, dwarf::DW_AT_GNU_addr_base, dwarf::DW_FORM_sec_offset, 0); + // Relocate to the beginning of the addr_base section, else 0 for the + // beginning of the one for this compile unit. + if (Asm->MAI->doesDwarfUseRelocationsAcrossSections()) + NewCU->addLabel(Die, dwarf::DW_AT_GNU_addr_base, dwarf::DW_FORM_sec_offset, + DwarfAddrSectionSym); + else + NewCU->addUInt(Die, dwarf::DW_AT_GNU_addr_base, + dwarf::DW_FORM_sec_offset, 0); // 2.17.1 requires that we use DW_AT_low_pc for a single entry point // into an entity. We're using 0, or a NULL label for this. @@ -2519,6 +2566,7 @@ CompileUnit *DwarfDebug::constructSkeletonCU(const MDNode *N) { // DW_AT_stmt_list is a offset of line number information for this // compile unit in debug_line section. + // FIXME: Should handle multiple compile units. if (Asm->MAI->doesDwarfUseRelocationsAcrossSections()) NewCU->addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_sec_offset, DwarfLineSectionSym); diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.h b/lib/CodeGen/AsmPrinter/DwarfDebug.h index 81e345e..1eb7e3e 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.h +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.h @@ -392,7 +392,7 @@ class DwarfDebug { // section offsets and are created by EmitSectionLabels. MCSymbol *DwarfInfoSectionSym, *DwarfAbbrevSectionSym; MCSymbol *DwarfStrSectionSym, *TextSectionSym, *DwarfDebugRangeSectionSym; - MCSymbol *DwarfDebugLocSectionSym, *DwarfLineSectionSym; + MCSymbol *DwarfDebugLocSectionSym, *DwarfLineSectionSym, *DwarfAddrSectionSym; MCSymbol *FunctionBeginSym, *FunctionEndSym; MCSymbol *DwarfAbbrevDWOSectionSym, *DwarfStrDWOSectionSym; @@ -555,6 +555,9 @@ private: /// \brief Construct subprogram DIE. void constructSubprogramDIE(CompileUnit *TheCU, const MDNode *N); + /// \brief Construct import_module DIE. + void constructImportedModuleDIE(CompileUnit *TheCU, const MDNode *N); + /// \brief Register a source line with debug info. Returns the unique /// label that was emitted and which provides correspondence to the /// source line list. 
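The new constructImportedModuleDIE hook declared above (and defined earlier in DwarfDebug.cpp) attaches DW_TAG_imported_module entries whose DW_AT_import references a namespace DIE; in C++ terms these describe using-directives. A hedged source-level example of code a front end would describe this way:

    namespace math {
      int gcd(int a, int b) { return b ? gcd(b, a % b) : a; }
    }

    int callerGCD(int a, int b) {
      using namespace math; // described by a DW_TAG_imported_module entry
                            // whose DW_AT_import points at namespace 'math'
      return gcd(a, b);
    }
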
diff --git a/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp b/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp new file mode 100644 index 0000000..a8fb66d --- /dev/null +++ b/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp @@ -0,0 +1,120 @@ +//===-- ErlangGCPrinter.cpp - Erlang/OTP frametable emitter -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the compiler plugin that is used in order to emit +// garbage collection information in a convenient layout for parsing and +// loading in the Erlang/OTP runtime. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/GCs.h" +#include "llvm/CodeGen/GCMetadataPrinter.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Metadata.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCSectionELF.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Target/TargetLoweringObjectFile.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +namespace { + + class ErlangGCPrinter : public GCMetadataPrinter { + public: + void beginAssembly(AsmPrinter &AP); + void finishAssembly(AsmPrinter &AP); + }; + +} + +static GCMetadataPrinterRegistry::Add<ErlangGCPrinter> +X("erlang", "erlang-compatible garbage collector"); + +void llvm::linkErlangGCPrinter() { } + +void ErlangGCPrinter::beginAssembly(AsmPrinter &AP) { } + +void ErlangGCPrinter::finishAssembly(AsmPrinter &AP) { + MCStreamer &OS = AP.OutStreamer; + unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize(); + + // Put this in a custom .note section. + AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getContext() + .getELFSection(".note.gc", ELF::SHT_PROGBITS, 0, + SectionKind::getDataRel())); + + // For each function... + for (iterator FI = begin(), FE = end(); FI != FE; ++FI) { + GCFunctionInfo &MD = **FI; + + /** A compact GC layout. Emit this data structure: + * + * struct { + * int16_t PointCount; + * void *SafePointAddress[PointCount]; + * int16_t StackFrameSize; (in words) + * int16_t StackArity; + * int16_t LiveCount; + * int16_t LiveOffsets[LiveCount]; + * } __gcmap_<FUNCTIONNAME>; + **/ + + // Align to address width. + AP.EmitAlignment(IntPtrSize == 4 ? 2 : 3); + + // Emit PointCount. + OS.AddComment("safe point count"); + AP.EmitInt16(MD.size()); + + // And each safe point... + for (GCFunctionInfo::iterator PI = MD.begin(), PE = MD.end(); PI != PE; + ++PI) { + // Emit the address of the safe point. + OS.AddComment("safe point address"); + MCSymbol *Label = PI->Label; + AP.EmitLabelPlusOffset(Label/*Hi*/, 0/*Offset*/, 4/*Size*/); + } + + // Stack information never change in safe points! Only print info from the + // first call-site. + GCFunctionInfo::iterator PI = MD.begin(); + + // Emit the stack frame size. + OS.AddComment("stack frame size (in words)"); + AP.EmitInt16(MD.getFrameSize() / IntPtrSize); + + // Emit stack arity, i.e. the number of stacked arguments. + unsigned RegisteredArgs = IntPtrSize == 4 ? 5 : 6; + unsigned StackArity = MD.getFunction().arg_size() > RegisteredArgs ? 
+ MD.getFunction().arg_size() - RegisteredArgs : 0; + OS.AddComment("stack arity"); + AP.EmitInt16(StackArity); + + // Emit the number of live roots in the function. + OS.AddComment("live root count"); + AP.EmitInt16(MD.live_size(PI)); + + // And for each live root... + for (GCFunctionInfo::live_iterator LI = MD.live_begin(PI), + LE = MD.live_end(PI); + LI != LE; ++LI) { + // Emit live root's offset within the stack frame. + OS.AddComment("stack index (offset / wordsize)"); + AP.EmitInt16(LI->StackOffset / IntPtrSize); + } + } +} diff --git a/lib/CodeGen/BasicTargetTransformInfo.cpp b/lib/CodeGen/BasicTargetTransformInfo.cpp index 4cd1b80..4a99184 100644 --- a/lib/CodeGen/BasicTargetTransformInfo.cpp +++ b/lib/CodeGen/BasicTargetTransformInfo.cpp @@ -85,7 +85,9 @@ public: virtual unsigned getNumberOfRegisters(bool Vector) const; virtual unsigned getMaximumUnrollFactor() const; virtual unsigned getRegisterBitWidth(bool Vector) const; - virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const; + virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, + OperandValueKind, + OperandValueKind) const; virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index, Type *SubTp) const; virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, @@ -193,27 +195,34 @@ unsigned BasicTTI::getMaximumUnrollFactor() const { return 1; } -unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const { +unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, + OperandValueKind, + OperandValueKind) const { // Check if any of the operands are vector operands. int ISD = TLI->InstructionOpcodeToISD(Opcode); assert(ISD && "Invalid opcode"); std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty); + bool IsFloat = Ty->getScalarType()->isFloatingPointTy(); + // Assume that floating point arithmetic operations cost twice as much as + // integer operations. + unsigned OpCost = (IsFloat ? 2 : 1); + if (TLI->isOperationLegalOrPromote(ISD, LT.second)) { // The operation is legal. Assume it costs 1. - // If the type is split to multiple registers, assume that thre is some + // If the type is split to multiple registers, assume that there is some // overhead to this. // TODO: Once we have extract/insert subvector cost we need to use them. if (LT.first > 1) - return LT.first * 2; - return LT.first * 1; + return LT.first * 2 * OpCost; + return LT.first * 1 * OpCost; } if (!TLI->isOperationExpand(ISD, LT.second)) { // If the operation is custom lowered then assume // thare the code is twice as expensive. - return LT.first * 2; + return LT.first * 2 * OpCost; } // Else, assume that we need to scalarize this op. @@ -226,7 +235,7 @@ unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const { } // We don't know anything about this scalar instruction. 
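Stepping back to the ErlangGCPrinter added above: each function's __gcmap_ record is a sequence of 16-bit fields plus safe-point addresses emitted as 4-byte label values, in the order documented in the emitter's comment. A hedged sketch of a runtime-side reader for that layout (struct and function names are illustrative):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct GCMap {
      std::vector<uint32_t> SafePoints;  // safe point addresses (4 bytes each)
      uint16_t FrameSizeWords;           // stack frame size, in words
      uint16_t StackArity;               // number of stacked arguments
      std::vector<uint16_t> LiveOffsets; // live root stack indices
    };

    static const uint8_t *read16(const uint8_t *P, uint16_t &V) {
      std::memcpy(&V, P, sizeof(V));
      return P + sizeof(V);
    }

    GCMap parseGCMap(const uint8_t *P) {
      GCMap M;
      uint16_t PointCount;
      P = read16(P, PointCount);
      for (uint16_t i = 0; i != PointCount; ++i) {
        uint32_t Addr;
        std::memcpy(&Addr, P, sizeof(Addr));
        P += sizeof(Addr);
        M.SafePoints.push_back(Addr);
      }
      P = read16(P, M.FrameSizeWords);
      P = read16(P, M.StackArity);
      uint16_t LiveCount;
      P = read16(P, LiveCount);
      for (uint16_t i = 0; i != LiveCount; ++i) {
        uint16_t Off;
        P = read16(P, Off);
        M.LiveOffsets.push_back(Off);
      }
      return M;
    }
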
- return 1; + return OpCost; } unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index, diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt index ddc7ada..56aa330 100644 --- a/lib/CodeGen/CMakeLists.txt +++ b/lib/CodeGen/CMakeLists.txt @@ -7,13 +7,13 @@ add_llvm_library(LLVMCodeGen CalcSpillWeights.cpp CallingConvLower.cpp CodeGen.cpp - CodePlacementOpt.cpp CriticalAntiDepBreaker.cpp DFAPacketizer.cpp DeadMachineInstructionElim.cpp DwarfEHPrepare.cpp EarlyIfConversion.cpp EdgeBundles.cpp + ErlangGC.cpp ExecutionDepsFix.cpp ExpandISelPseudos.cpp ExpandPostRAPseudos.cpp diff --git a/lib/CodeGen/CalcSpillWeights.cpp b/lib/CodeGen/CalcSpillWeights.cpp index dee339a..38ae17d 100644 --- a/lib/CodeGen/CalcSpillWeights.cpp +++ b/lib/CodeGen/CalcSpillWeights.cpp @@ -117,7 +117,7 @@ void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) { float totalWeight = 0; SmallPtrSet<MachineInstr*, 8> visited; - // Find the best physreg hist and the best virtreg hint. + // Find the best physreg hint and the best virtreg hint. float bestPhys = 0, bestVirt = 0; unsigned hintPhys = 0, hintVirt = 0; diff --git a/lib/CodeGen/CodeGen.cpp b/lib/CodeGen/CodeGen.cpp index a33b672..c641991 100644 --- a/lib/CodeGen/CodeGen.cpp +++ b/lib/CodeGen/CodeGen.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "llvm/InitializePasses.h" +#include "llvm/PassRegistry.h" #include "llvm-c/Initialization.h" using namespace llvm; @@ -22,7 +23,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeBasicTTIPass(Registry); initializeBranchFolderPassPass(Registry); initializeCalculateSpillWeightsPass(Registry); - initializeCodePlacementOptPass(Registry); initializeDeadMachineInstructionElimPass(Registry); initializeEarlyIfConverterPass(Registry); initializeExpandPostRAPass(Registry); diff --git a/lib/CodeGen/CodePlacementOpt.cpp b/lib/CodeGen/CodePlacementOpt.cpp deleted file mode 100644 index 2451844..0000000 --- a/lib/CodeGen/CodePlacementOpt.cpp +++ /dev/null @@ -1,423 +0,0 @@ -//===-- CodePlacementOpt.cpp - Code Placement pass. -----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the pass that optimizes code placement and aligns loop -// headers to target-specific alignment boundaries. 
-// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "code-placement" -#include "llvm/CodeGen/Passes.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineLoopInfo.h" -#include "llvm/Support/Compiler.h" -#include "llvm/Support/Debug.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetLowering.h" -#include "llvm/Target/TargetMachine.h" -using namespace llvm; - -STATISTIC(NumLoopsAligned, "Number of loops aligned"); -STATISTIC(NumIntraElim, "Number of intra loop branches eliminated"); -STATISTIC(NumIntraMoved, "Number of intra loop branches moved"); - -namespace { - class CodePlacementOpt : public MachineFunctionPass { - const MachineLoopInfo *MLI; - const TargetInstrInfo *TII; - const TargetLowering *TLI; - - public: - static char ID; - CodePlacementOpt() : MachineFunctionPass(ID) {} - - virtual bool runOnMachineFunction(MachineFunction &MF); - - virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired<MachineLoopInfo>(); - AU.addPreservedID(MachineDominatorsID); - MachineFunctionPass::getAnalysisUsage(AU); - } - - private: - bool HasFallthrough(MachineBasicBlock *MBB); - bool HasAnalyzableTerminator(MachineBasicBlock *MBB); - void Splice(MachineFunction &MF, - MachineFunction::iterator InsertPt, - MachineFunction::iterator Begin, - MachineFunction::iterator End); - bool EliminateUnconditionalJumpsToTop(MachineFunction &MF, - MachineLoop *L); - bool MoveDiscontiguousLoopBlocks(MachineFunction &MF, - MachineLoop *L); - bool OptimizeIntraLoopEdgesInLoopNest(MachineFunction &MF, MachineLoop *L); - bool OptimizeIntraLoopEdges(MachineFunction &MF); - bool AlignLoops(MachineFunction &MF); - bool AlignLoop(MachineFunction &MF, MachineLoop *L, unsigned Align); - }; - - char CodePlacementOpt::ID = 0; -} // end anonymous namespace - -char &llvm::CodePlacementOptID = CodePlacementOpt::ID; -INITIALIZE_PASS(CodePlacementOpt, "code-placement", - "Code Placement Optimizer", false, false) - -/// HasFallthrough - Test whether the given branch has a fallthrough, either as -/// a plain fallthrough or as a fallthrough case of a conditional branch. -/// -bool CodePlacementOpt::HasFallthrough(MachineBasicBlock *MBB) { - MachineBasicBlock *TBB = 0, *FBB = 0; - SmallVector<MachineOperand, 4> Cond; - if (TII->AnalyzeBranch(*MBB, TBB, FBB, Cond)) - return false; - // This conditional branch has no fallthrough. - if (FBB) - return false; - // An unconditional branch has no fallthrough. - if (Cond.empty() && TBB) - return false; - // It has a fallthrough. - return true; -} - -/// HasAnalyzableTerminator - Test whether AnalyzeBranch will succeed on MBB. -/// This is called before major changes are begun to test whether it will be -/// possible to complete the changes. -/// -/// Target-specific code is hereby encouraged to make AnalyzeBranch succeed -/// whenever possible. -/// -bool CodePlacementOpt::HasAnalyzableTerminator(MachineBasicBlock *MBB) { - // Conservatively ignore EH landing pads. - if (MBB->isLandingPad()) return false; - - // Aggressively handle return blocks and similar constructs. - if (MBB->succ_empty()) return true; - - // Ask the target's AnalyzeBranch if it can handle this block. - MachineBasicBlock *TBB = 0, *FBB = 0; - SmallVector<MachineOperand, 4> Cond; - // Make sure the terminator is understood. - if (TII->AnalyzeBranch(*MBB, TBB, FBB, Cond)) - return false; - // Ignore blocks which look like they might have EH-related control flow. 
- // AnalyzeBranch thinks it knows how to analyze such things, but it doesn't - // recognize the possibility of a control transfer through an unwind. - // Such blocks contain EH_LABEL instructions, however they may be in the - // middle of the block. Instead of searching for them, just check to see - // if the CFG disagrees with AnalyzeBranch. - if (1u + !Cond.empty() != MBB->succ_size()) - return false; - // Make sure we have the option of reversing the condition. - if (!Cond.empty() && TII->ReverseBranchCondition(Cond)) - return false; - return true; -} - -/// Splice - Move the sequence of instructions [Begin,End) to just before -/// InsertPt. Update branch instructions as needed to account for broken -/// fallthrough edges and to take advantage of newly exposed fallthrough -/// opportunities. -/// -void CodePlacementOpt::Splice(MachineFunction &MF, - MachineFunction::iterator InsertPt, - MachineFunction::iterator Begin, - MachineFunction::iterator End) { - assert(Begin != MF.begin() && End != MF.begin() && InsertPt != MF.begin() && - "Splice can't change the entry block!"); - MachineFunction::iterator OldBeginPrior = prior(Begin); - MachineFunction::iterator OldEndPrior = prior(End); - - MF.splice(InsertPt, Begin, End); - - prior(Begin)->updateTerminator(); - OldBeginPrior->updateTerminator(); - OldEndPrior->updateTerminator(); -} - -/// EliminateUnconditionalJumpsToTop - Move blocks which unconditionally jump -/// to the loop top to the top of the loop so that they have a fall through. -/// This can introduce a branch on entry to the loop, but it can eliminate a -/// branch within the loop. See the @simple case in -/// test/CodeGen/X86/loop_blocks.ll for an example of this. -bool CodePlacementOpt::EliminateUnconditionalJumpsToTop(MachineFunction &MF, - MachineLoop *L) { - bool Changed = false; - MachineBasicBlock *TopMBB = L->getTopBlock(); - - bool BotHasFallthrough = HasFallthrough(L->getBottomBlock()); - - if (TopMBB == MF.begin() || - HasAnalyzableTerminator(prior(MachineFunction::iterator(TopMBB)))) { - new_top: - for (MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin(), - PE = TopMBB->pred_end(); PI != PE; ++PI) { - MachineBasicBlock *Pred = *PI; - if (Pred == TopMBB) continue; - if (HasFallthrough(Pred)) continue; - if (!L->contains(Pred)) continue; - - // Verify that we can analyze all the loop entry edges before beginning - // any changes which will require us to be able to analyze them. - if (Pred == MF.begin()) - continue; - if (!HasAnalyzableTerminator(Pred)) - continue; - if (!HasAnalyzableTerminator(prior(MachineFunction::iterator(Pred)))) - continue; - - // Move the block. - DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << Pred->getNumber() - << " to top of loop.\n"); - Changed = true; - - // Move it and all the blocks that can reach it via fallthrough edges - // exclusively, to keep existing fallthrough edges intact. - MachineFunction::iterator Begin = Pred; - MachineFunction::iterator End = llvm::next(Begin); - while (Begin != MF.begin()) { - MachineFunction::iterator Prior = prior(Begin); - if (Prior == MF.begin()) - break; - // Stop when a non-fallthrough edge is found. - if (!HasFallthrough(Prior)) - break; - // Stop if a block which could fall-through out of the loop is found. - if (Prior->isSuccessor(End)) - break; - // If we've reached the top, stop scanning. 
- if (Prior == MachineFunction::iterator(TopMBB)) { - // We know top currently has a fall through (because we just checked - // it) which would be lost if we do the transformation, so it isn't - // worthwhile to do the transformation unless it would expose a new - // fallthrough edge. - if (!Prior->isSuccessor(End)) - goto next_pred; - // Otherwise we can stop scanning and proceed to move the blocks. - break; - } - // If we hit a switch or something complicated, don't move anything - // for this predecessor. - if (!HasAnalyzableTerminator(prior(MachineFunction::iterator(Prior)))) - break; - // Ok, the block prior to Begin will be moved along with the rest. - // Extend the range to include it. - Begin = Prior; - ++NumIntraMoved; - } - - // Move the blocks. - Splice(MF, TopMBB, Begin, End); - - // Update TopMBB. - TopMBB = L->getTopBlock(); - - // We have a new loop top. Iterate on it. We shouldn't have to do this - // too many times if BranchFolding has done a reasonable job. - goto new_top; - next_pred:; - } - } - - // If the loop previously didn't exit with a fall-through and it now does, - // we eliminated a branch. - if (Changed && - !BotHasFallthrough && - HasFallthrough(L->getBottomBlock())) { - ++NumIntraElim; - } - - return Changed; -} - -/// MoveDiscontiguousLoopBlocks - Move any loop blocks that are not in the -/// portion of the loop contiguous with the header. This usually makes the loop -/// contiguous, provided that AnalyzeBranch can handle all the relevant -/// branching. See the @cfg_islands case in test/CodeGen/X86/loop_blocks.ll -/// for an example of this. -bool CodePlacementOpt::MoveDiscontiguousLoopBlocks(MachineFunction &MF, - MachineLoop *L) { - bool Changed = false; - MachineBasicBlock *TopMBB = L->getTopBlock(); - MachineBasicBlock *BotMBB = L->getBottomBlock(); - - // Determine a position to move orphaned loop blocks to. If TopMBB is not - // entered via fallthrough and BotMBB is exited via fallthrough, prepend them - // to the top of the loop to avoid losing that fallthrough. Otherwise append - // them to the bottom, even if it previously had a fallthrough, on the theory - // that it's worth an extra branch to keep the loop contiguous. - MachineFunction::iterator InsertPt = - llvm::next(MachineFunction::iterator(BotMBB)); - bool InsertAtTop = false; - if (TopMBB != MF.begin() && - !HasFallthrough(prior(MachineFunction::iterator(TopMBB))) && - HasFallthrough(BotMBB)) { - InsertPt = TopMBB; - InsertAtTop = true; - } - - // Keep a record of which blocks are in the portion of the loop contiguous - // with the loop header. - SmallPtrSet<MachineBasicBlock *, 8> ContiguousBlocks; - for (MachineFunction::iterator I = TopMBB, - E = llvm::next(MachineFunction::iterator(BotMBB)); I != E; ++I) - ContiguousBlocks.insert(I); - - // Find non-contigous blocks and fix them. - if (InsertPt != MF.begin() && HasAnalyzableTerminator(prior(InsertPt))) - for (MachineLoop::block_iterator BI = L->block_begin(), BE = L->block_end(); - BI != BE; ++BI) { - MachineBasicBlock *BB = *BI; - - // Verify that we can analyze all the loop entry edges before beginning - // any changes which will require us to be able to analyze them. - if (!HasAnalyzableTerminator(BB)) - continue; - if (!HasAnalyzableTerminator(prior(MachineFunction::iterator(BB)))) - continue; - - // If the layout predecessor is part of the loop, this block will be - // processed along with it. This keeps them in their relative order. 
- if (BB != MF.begin() && - L->contains(prior(MachineFunction::iterator(BB)))) - continue; - - // Check to see if this block is already contiguous with the main - // portion of the loop. - if (!ContiguousBlocks.insert(BB)) - continue; - - // Move the block. - DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << BB->getNumber() - << " to be contiguous with loop.\n"); - Changed = true; - - // Process this block and all loop blocks contiguous with it, to keep - // them in their relative order. - MachineFunction::iterator Begin = BB; - MachineFunction::iterator End = llvm::next(MachineFunction::iterator(BB)); - for (; End != MF.end(); ++End) { - if (!L->contains(End)) break; - if (!HasAnalyzableTerminator(End)) break; - ContiguousBlocks.insert(End); - ++NumIntraMoved; - } - - // If we're inserting at the bottom of the loop, and the code we're - // moving originally had fall-through successors, bring the sucessors - // up with the loop blocks to preserve the fall-through edges. - if (!InsertAtTop) - for (; End != MF.end(); ++End) { - if (L->contains(End)) break; - if (!HasAnalyzableTerminator(End)) break; - if (!HasFallthrough(prior(End))) break; - } - - // Move the blocks. This may invalidate TopMBB and/or BotMBB, but - // we don't need them anymore at this point. - Splice(MF, InsertPt, Begin, End); - } - - return Changed; -} - -/// OptimizeIntraLoopEdgesInLoopNest - Reposition loop blocks to minimize -/// intra-loop branching and to form contiguous loops. -/// -/// This code takes the approach of making minor changes to the existing -/// layout to fix specific loop-oriented problems. Also, it depends on -/// AnalyzeBranch, which can't understand complex control instructions. -/// -bool CodePlacementOpt::OptimizeIntraLoopEdgesInLoopNest(MachineFunction &MF, - MachineLoop *L) { - bool Changed = false; - - // Do optimization for nested loops. - for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) - Changed |= OptimizeIntraLoopEdgesInLoopNest(MF, *I); - - // Do optimization for this loop. - Changed |= EliminateUnconditionalJumpsToTop(MF, L); - Changed |= MoveDiscontiguousLoopBlocks(MF, L); - - return Changed; -} - -/// OptimizeIntraLoopEdges - Reposition loop blocks to minimize -/// intra-loop branching and to form contiguous loops. -/// -bool CodePlacementOpt::OptimizeIntraLoopEdges(MachineFunction &MF) { - bool Changed = false; - - if (!TLI->shouldOptimizeCodePlacement()) - return Changed; - - // Do optimization for each loop in the function. - for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); - I != E; ++I) - if (!(*I)->getParentLoop()) - Changed |= OptimizeIntraLoopEdgesInLoopNest(MF, *I); - - return Changed; -} - -/// AlignLoops - Align loop headers to target preferred alignments. -/// -bool CodePlacementOpt::AlignLoops(MachineFunction &MF) { - const Function *F = MF.getFunction(); - if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, - Attribute::OptimizeForSize)) - return false; - - unsigned Align = TLI->getPrefLoopAlignment(); - if (!Align) - return false; // Don't care about loop alignment. - - bool Changed = false; - - for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); - I != E; ++I) - Changed |= AlignLoop(MF, *I, Align); - - return Changed; -} - -/// AlignLoop - Align loop headers to target preferred alignments. -/// -bool CodePlacementOpt::AlignLoop(MachineFunction &MF, MachineLoop *L, - unsigned Align) { - bool Changed = false; - - // Do alignment for nested loops. 
- for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) - Changed |= AlignLoop(MF, *I, Align); - - L->getTopBlock()->setAlignment(Align); - Changed = true; - ++NumLoopsAligned; - - return Changed; -} - -bool CodePlacementOpt::runOnMachineFunction(MachineFunction &MF) { - MLI = &getAnalysis<MachineLoopInfo>(); - if (MLI->empty()) - return false; // No loops. - - TLI = MF.getTarget().getTargetLowering(); - TII = MF.getTarget().getInstrInfo(); - - bool Changed = OptimizeIntraLoopEdges(MF); - - Changed |= AlignLoops(MF); - - return Changed; -} diff --git a/lib/CodeGen/ErlangGC.cpp b/lib/CodeGen/ErlangGC.cpp new file mode 100644 index 0000000..8a1e2d9 --- /dev/null +++ b/lib/CodeGen/ErlangGC.cpp @@ -0,0 +1,81 @@ +//===-- ErlangGC.cpp - Erlang/OTP GC strategy -------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Erlang/OTP runtime-compatible garbage collector +// (e.g. defines safe points, root initialization etc.) +// +// The frametable emitter is in ErlangGCPrinter.cpp. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GCs.h" +#include "llvm/CodeGen/GCStrategy.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +namespace { + + class ErlangGC : public GCStrategy { + MCSymbol *InsertLabel(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + DebugLoc DL) const; + public: + ErlangGC(); + bool findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF); + }; + +} + +static GCRegistry::Add<ErlangGC> +X("erlang", "erlang-compatible garbage collector"); + +void llvm::linkErlangGC() { } + +ErlangGC::ErlangGC() { + InitRoots = false; + NeededSafePoints = 1 << GC::PostCall; + UsesMetadata = true; + CustomRoots = false; + CustomSafePoints = true; +} + +MCSymbol *ErlangGC::InsertLabel(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + DebugLoc DL) const { + const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo(); + MCSymbol *Label = MBB.getParent()->getContext().CreateTempSymbol(); + BuildMI(MBB, MI, DL, TII->get(TargetOpcode::GC_LABEL)).addSym(Label); + return Label; +} + +bool ErlangGC::findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF) { + for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE; + ++BBI) + for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end(); + MI != ME; ++MI) + + if (MI->getDesc().isCall()) { + + // Do not treat tail call sites as safe points. + if (MI->getDesc().isTerminator()) + continue; + + /* Code copied from VisitCallPoint(...) */ + MachineBasicBlock::iterator RAI = MI; ++RAI; + MCSymbol* Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc()); + FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc()); + } + + return false; +} diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp index 9958d7d..eaff93f 100644 --- a/lib/CodeGen/IfConversion.cpp +++ b/lib/CodeGen/IfConversion.cpp @@ -1054,6 +1054,10 @@ bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) { // Copy instructions in the true block, predicate them, and add them to // the entry block. 
CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs); + + // RemoveExtraEdges won't work if the block has an unanalyzable branch, so + // explicitly remove CvtBBI as a successor. + BBI.BB->removeSuccessor(CvtBBI->BB); } else { PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond, Redefs); @@ -1146,6 +1150,10 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) { // Copy instructions in the true block, predicate them, and add them to // the entry block. CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs, true); + + // RemoveExtraEdges won't work if the block has an unanalyzable branch, so + // explicitly remove CvtBBI as a successor. + BBI.BB->removeSuccessor(CvtBBI->BB); } else { // Predicate the 'true' block after removing its branch. CvtBBI->NonPredSize -= TII->RemoveBranch(*CvtBBI->BB); diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp index 3b28e6a..7793e96 100644 --- a/lib/CodeGen/LiveRangeEdit.cpp +++ b/lib/CodeGen/LiveRangeEdit.cpp @@ -77,7 +77,7 @@ bool LiveRangeEdit::anyRematerializable(AliasAnalysis *aa) { /// OrigIdx are also available with the same value at UseIdx. bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx, - SlotIndex UseIdx) { + SlotIndex UseIdx) const { OrigIdx = OrigIdx.getRegSlot(true); UseIdx = UseIdx.getRegSlot(true); for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) { diff --git a/lib/CodeGen/LocalStackSlotAllocation.cpp b/lib/CodeGen/LocalStackSlotAllocation.cpp index 352ef94..26a1176 100644 --- a/lib/CodeGen/LocalStackSlotAllocation.cpp +++ b/lib/CodeGen/LocalStackSlotAllocation.cpp @@ -46,13 +46,16 @@ namespace { class FrameRef { MachineBasicBlock::iterator MI; // Instr referencing the frame int64_t LocalOffset; // Local offset of the frame idx referenced + int FrameIdx; // The frame index public: - FrameRef(MachineBasicBlock::iterator I, int64_t Offset) : - MI(I), LocalOffset(Offset) {} + FrameRef(MachineBasicBlock::iterator I, int64_t Offset, int Idx) : + MI(I), LocalOffset(Offset), FrameIdx(Idx) {} bool operator<(const FrameRef &RHS) const { return LocalOffset < RHS.LocalOffset; } - MachineBasicBlock::iterator getMachineInstr() { return MI; } + MachineBasicBlock::iterator getMachineInstr() const { return MI; } + int64_t getLocalOffset() const { return LocalOffset; } + int getFrameIndex() const { return FrameIdx; } }; class LocalStackSlotPass: public MachineFunctionPass { @@ -194,22 +197,15 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) { } static inline bool -lookupCandidateBaseReg(const SmallVector<std::pair<unsigned, int64_t>, 8> &Regs, - std::pair<unsigned, int64_t> &RegOffset, +lookupCandidateBaseReg(int64_t BaseOffset, int64_t FrameSizeAdjust, int64_t LocalFrameOffset, const MachineInstr *MI, const TargetRegisterInfo *TRI) { - unsigned e = Regs.size(); - for (unsigned i = 0; i < e; ++i) { - RegOffset = Regs[i]; - // Check if the relative offset from the where the base register references - // to the target address is in range for the instruction. - int64_t Offset = FrameSizeAdjust + LocalFrameOffset - RegOffset.second; - if (TRI->isFrameOffsetLegal(MI, Offset)) - return true; - } - return false; + // Check if the relative offset from the where the base register references + // to the target address is in range for the instruction. 
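To make the simplified helper's check concrete, here is a small editorial sketch of the arithmetic it performs; the numeric values are hypothetical and only illustrate the FrameSizeAdjust + LocalFrameOffset - BaseOffset computation that follows.

  // Hypothetical values, chosen only for illustration (not part of the patch).
  int64_t FrameSizeAdjust  = 64;  // local frame size when the stack grows down
  int64_t LocalFrameOffset = -8;  // local offset of the referenced frame object
  int64_t BaseOffset       = 32;  // offset the candidate base register covers
  int64_t Offset = FrameSizeAdjust + LocalFrameOffset - BaseOffset;  // = 24
  // The candidate base register is reusable only if
  // TRI->isFrameOffsetLegal(MI, Offset) accepts a displacement of 24
  // for this instruction's addressing mode.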
+ int64_t Offset = FrameSizeAdjust + LocalFrameOffset - BaseOffset; + return TRI->isFrameOffsetLegal(MI, Offset); } bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) { @@ -233,9 +229,6 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) { // choose the first one). SmallVector<FrameRef, 64> FrameReferenceInsns; - // A base register definition is a register + offset pair. - SmallVector<std::pair<unsigned, int64_t>, 8> BaseRegisters; - for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) { for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) { MachineInstr *MI = I; @@ -258,8 +251,12 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) { // Don't try this with values not in the local block. if (!MFI->isObjectPreAllocated(MI->getOperand(i).getIndex())) break; + int Idx = MI->getOperand(i).getIndex(); + int64_t LocalOffset = LocalOffsets[Idx]; + if (!TRI->needsFrameBaseReg(MI, LocalOffset)) + break; FrameReferenceInsns. - push_back(FrameRef(MI, LocalOffsets[MI->getOperand(i).getIndex()])); + push_back(FrameRef(MI, LocalOffset, Idx)); break; } } @@ -271,86 +268,106 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) { MachineBasicBlock *Entry = Fn.begin(); + unsigned BaseReg = 0; + int64_t BaseOffset = 0; + // Loop through the frame references and allocate for them as necessary. for (int ref = 0, e = FrameReferenceInsns.size(); ref < e ; ++ref) { - MachineBasicBlock::iterator I = - FrameReferenceInsns[ref].getMachineInstr(); + FrameRef &FR = FrameReferenceInsns[ref]; + MachineBasicBlock::iterator I = FR.getMachineInstr(); MachineInstr *MI = I; - for (unsigned idx = 0, e = MI->getNumOperands(); idx != e; ++idx) { - // Consider replacing all frame index operands that reference - // an object allocated in the local block. - if (MI->getOperand(idx).isFI()) { - int FrameIdx = MI->getOperand(idx).getIndex(); - - assert(MFI->isObjectPreAllocated(FrameIdx) && - "Only pre-allocated locals expected!"); - - DEBUG(dbgs() << "Considering: " << *MI); - if (TRI->needsFrameBaseReg(MI, LocalOffsets[FrameIdx])) { - unsigned BaseReg = 0; - int64_t Offset = 0; - int64_t FrameSizeAdjust = - StackGrowsDown ? MFI->getLocalFrameSize() : 0; - - DEBUG(dbgs() << " Replacing FI in: " << *MI); - - // If we have a suitable base register available, use it; otherwise - // create a new one. Note that any offset encoded in the - // instruction itself will be taken into account by the target, - // so we don't have to adjust for it here when reusing a base - // register. - std::pair<unsigned, int64_t> RegOffset; - if (lookupCandidateBaseReg(BaseRegisters, RegOffset, - FrameSizeAdjust, - LocalOffsets[FrameIdx], - MI, TRI)) { - DEBUG(dbgs() << " Reusing base register " << - RegOffset.first << "\n"); - // We found a register to reuse. - BaseReg = RegOffset.first; - Offset = FrameSizeAdjust + LocalOffsets[FrameIdx] - - RegOffset.second; - } else { - // No previously defined register was in range, so create a - // new one. - int64_t InstrOffset = TRI->getFrameIndexInstrOffset(MI, idx); - const MachineFunction *MF = MI->getParent()->getParent(); - const TargetRegisterClass *RC = TRI->getPointerRegClass(*MF); - BaseReg = Fn.getRegInfo().createVirtualRegister(RC); - - DEBUG(dbgs() << " Materializing base register " << BaseReg << - " at frame local offset " << - LocalOffsets[FrameIdx] + InstrOffset << "\n"); - - // Tell the target to insert the instruction to initialize - // the base register. 
- // MachineBasicBlock::iterator InsertionPt = Entry->begin(); - TRI->materializeFrameBaseRegister(Entry, BaseReg, FrameIdx, - InstrOffset); - - // The base register already includes any offset specified - // by the instruction, so account for that so it doesn't get - // applied twice. - Offset = -InstrOffset; - - int64_t BaseOffset = FrameSizeAdjust + LocalOffsets[FrameIdx] + - InstrOffset; - BaseRegisters.push_back( - std::pair<unsigned, int64_t>(BaseReg, BaseOffset)); - ++NumBaseRegisters; - UsedBaseReg = true; - } - assert(BaseReg != 0 && "Unable to allocate virtual base register!"); - - // Modify the instruction to use the new base register rather - // than the frame index operand. - TRI->resolveFrameIndex(I, BaseReg, Offset); - DEBUG(dbgs() << "Resolved: " << *MI); - - ++NumReplacements; - } + int64_t LocalOffset = FR.getLocalOffset(); + int FrameIdx = FR.getFrameIndex(); + assert(MFI->isObjectPreAllocated(FrameIdx) && + "Only pre-allocated locals expected!"); + + DEBUG(dbgs() << "Considering: " << *MI); + + unsigned idx = 0; + for (unsigned f = MI->getNumOperands(); idx != f; ++idx) { + if (!MI->getOperand(idx).isFI()) + continue; + + if (FrameIdx == I->getOperand(idx).getIndex()) + break; + } + + assert(idx < MI->getNumOperands() && "Cannot find FI operand"); + + int64_t Offset = 0; + int64_t FrameSizeAdjust = StackGrowsDown ? MFI->getLocalFrameSize() : 0; + + DEBUG(dbgs() << " Replacing FI in: " << *MI); + + // If we have a suitable base register available, use it; otherwise + // create a new one. Note that any offset encoded in the + // instruction itself will be taken into account by the target, + // so we don't have to adjust for it here when reusing a base + // register. + if (UsedBaseReg && lookupCandidateBaseReg(BaseOffset, FrameSizeAdjust, + LocalOffset, MI, TRI)) { + DEBUG(dbgs() << " Reusing base register " << BaseReg << "\n"); + // We found a register to reuse. + Offset = FrameSizeAdjust + LocalOffset - BaseOffset; + } else { + // No previously defined register was in range, so create a // new one. + + int64_t InstrOffset = TRI->getFrameIndexInstrOffset(MI, idx); + + int64_t PrevBaseOffset = BaseOffset; + BaseOffset = FrameSizeAdjust + LocalOffset + InstrOffset; + + // We'd like to avoid creating single-use virtual base registers. + // Because the FrameRefs are in sorted order, and we've already + // processed all FrameRefs before this one, just check whether or not + // the next FrameRef will be able to reuse this new register. If not, + // then don't bother creating it. + bool CanReuse = false; + for (int refn = ref + 1; refn < e; ++refn) { + FrameRef &FRN = FrameReferenceInsns[refn]; + MachineBasicBlock::iterator J = FRN.getMachineInstr(); + MachineInstr *MIN = J; + + CanReuse = lookupCandidateBaseReg(BaseOffset, FrameSizeAdjust, + FRN.getLocalOffset(), MIN, TRI); + break; } + + if (!CanReuse) { + BaseOffset = PrevBaseOffset; + continue; + } + + const MachineFunction *MF = MI->getParent()->getParent(); + const TargetRegisterClass *RC = TRI->getPointerRegClass(*MF); + BaseReg = Fn.getRegInfo().createVirtualRegister(RC); + + DEBUG(dbgs() << " Materializing base register " << BaseReg << + " at frame local offset " << LocalOffset + InstrOffset << "\n"); + + // Tell the target to insert the instruction to initialize + // the base register. 
+ // MachineBasicBlock::iterator InsertionPt = Entry->begin(); + TRI->materializeFrameBaseRegister(Entry, BaseReg, FrameIdx, + InstrOffset); + + // The base register already includes any offset specified + // by the instruction, so account for that so it doesn't get + // applied twice. + Offset = -InstrOffset; + + ++NumBaseRegisters; + UsedBaseReg = true; } + assert(BaseReg != 0 && "Unable to allocate virtual base register!"); + + // Modify the instruction to use the new base register rather + // than the frame index operand. + TRI->resolveFrameIndex(I, BaseReg, Offset); + DEBUG(dbgs() << "Resolved: " << *MI); + + ++NumReplacements; } + return UsedBaseReg; } diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index 898e165..78e9950 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -37,7 +37,7 @@ using namespace llvm; MachineBasicBlock::MachineBasicBlock(MachineFunction &mf, const BasicBlock *bb) : BB(bb), Number(-1), xParent(&mf), Alignment(0), IsLandingPad(false), - AddressTaken(false) { + AddressTaken(false), CachedMCSymbol(NULL) { Insts.Parent = this; } @@ -48,12 +48,16 @@ MachineBasicBlock::~MachineBasicBlock() { /// getSymbol - Return the MCSymbol for this basic block. /// MCSymbol *MachineBasicBlock::getSymbol() const { - const MachineFunction *MF = getParent(); - MCContext &Ctx = MF->getContext(); - const char *Prefix = Ctx.getAsmInfo().getPrivateGlobalPrefix(); - return Ctx.GetOrCreateSymbol(Twine(Prefix) + "BB" + - Twine(MF->getFunctionNumber()) + "_" + - Twine(getNumber())); + if (!CachedMCSymbol) { + const MachineFunction *MF = getParent(); + MCContext &Ctx = MF->getContext(); + const char *Prefix = Ctx.getAsmInfo().getPrivateGlobalPrefix(); + CachedMCSymbol = Ctx.GetOrCreateSymbol(Twine(Prefix) + "BB" + + Twine(MF->getFunctionNumber()) + + "_" + Twine(getNumber())); + } + + return CachedMCSymbol; } diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp index 3b09c6b..bfba503 100644 --- a/lib/CodeGen/MachineBlockPlacement.cpp +++ b/lib/CodeGen/MachineBlockPlacement.cpp @@ -39,6 +39,7 @@ #include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/Support/Allocator.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" @@ -52,6 +53,11 @@ STATISTIC(CondBranchTakenFreq, STATISTIC(UncondBranchTakenFreq, "Potential frequency of taking unconditional branches"); +static cl::opt<unsigned> AlignAllBlock("align-all-blocks", + cl::desc("Force the alignment of all " + "blocks in the function."), + cl::init(0), cl::Hidden); + namespace { class BlockChain; /// \brief Type for our function-wide basic block -> block chain mapping. @@ -1061,7 +1067,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { } // Align this block if the layout predecessor's edge into this block is - // cold relative to the block. When this is true, othe predecessors make up + // cold relative to the block. When this is true, other predecessors make up // all of the hot entries into the block and thus alignment is likely to be // important. BranchProbability LayoutProb = MBPI->getEdgeProbability(LayoutPred, *BI); @@ -1088,6 +1094,12 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) { BlockToChain.clear(); ChainAllocator.DestroyAll(); + if (AlignAllBlock) + // Align all of the blocks in the function to a specific alignment. 
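As a usage note (an assumption, since align-all-blocks is a hidden cl::opt rather than a documented flag), the option would typically be exercised through an invocation along the lines of llc -align-all-blocks=4 foo.ll, and the value is handed straight to setAlignment(), which at this point in the code base interprets it as the log2 of the byte alignment, so 4 would request 16-byte aligned blocks.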
+ for (MachineFunction::iterator FI = F.begin(), FE = F.end(); + FI != FE; ++FI) + FI->setAlignment(AlignAllBlock); + // We always return true as we have no way to track whether the final order // differs from the original order. return true; diff --git a/lib/CodeGen/MachineModuleInfo.cpp b/lib/CodeGen/MachineModuleInfo.cpp index 0ea9ae0..8af9d05 100644 --- a/lib/CodeGen/MachineModuleInfo.cpp +++ b/lib/CodeGen/MachineModuleInfo.cpp @@ -326,8 +326,7 @@ void MachineModuleInfo::AnalyzeModule(const Module &M) { if (!GV || !GV->hasInitializer()) return; // Should be an array of 'i8*'. - const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer()); - if (InitList == 0) return; + const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer()); for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) if (const Function *F = diff --git a/lib/CodeGen/MachineRegisterInfo.cpp b/lib/CodeGen/MachineRegisterInfo.cpp index 1af00e8..68372f6 100644 --- a/lib/CodeGen/MachineRegisterInfo.cpp +++ b/lib/CodeGen/MachineRegisterInfo.cpp @@ -15,6 +15,8 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" +#include "llvm/Support/raw_os_ostream.h" + using namespace llvm; MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) @@ -106,13 +108,59 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){ /// clearVirtRegs - Remove all virtual registers (after physreg assignment). void MachineRegisterInfo::clearVirtRegs() { #ifndef NDEBUG - for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i) - assert(VRegInfo[TargetRegisterInfo::index2VirtReg(i)].second == 0 && - "Vreg use list non-empty still?"); + for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i) { + unsigned Reg = TargetRegisterInfo::index2VirtReg(i); + if (!VRegInfo[Reg].second) + continue; + verifyUseList(Reg); + llvm_unreachable("Remaining virtual register operands"); + } #endif VRegInfo.clear(); } +void MachineRegisterInfo::verifyUseList(unsigned Reg) const { +#ifndef NDEBUG + bool Valid = true; + for (reg_iterator I = reg_begin(Reg), E = reg_end(); I != E; ++I) { + MachineOperand *MO = &I.getOperand(); + MachineInstr *MI = MO->getParent(); + if (!MI) { + errs() << PrintReg(Reg, TRI) << " use list MachineOperand " << MO + << " has no parent instruction.\n"; + Valid = false; + } + MachineOperand *MO0 = &MI->getOperand(0); + unsigned NumOps = MI->getNumOperands(); + if (!(MO >= MO0 && MO < MO0+NumOps)) { + errs() << PrintReg(Reg, TRI) << " use list MachineOperand " << MO + << " doesn't belong to parent MI: " << *MI; + Valid = false; + } + if (!MO->isReg()) { + errs() << PrintReg(Reg, TRI) << " MachineOperand " << MO << ": " << *MO + << " is not a register\n"; + Valid = false; + } + if (MO->getReg() != Reg) { + errs() << PrintReg(Reg, TRI) << " use-list MachineOperand " << MO << ": " + << *MO << " is the wrong register\n"; + Valid = false; + } + } + assert(Valid && "Invalid use list"); +#endif +} + +void MachineRegisterInfo::verifyUseLists() const { +#ifndef NDEBUG + for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i) + verifyUseList(TargetRegisterInfo::index2VirtReg(i)); + for (unsigned i = 1, e = TRI->getNumRegs(); i != e; ++i) + verifyUseList(i); +#endif +} + /// Add MO to the linked list of operands for its register. 
void MachineRegisterInfo::addRegOperandToUseList(MachineOperand *MO) { assert(!MO->isOnRegUseList() && "Already on list"); diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp index c872355..fff6b2b 100644 --- a/lib/CodeGen/MachineScheduler.cpp +++ b/lib/CodeGen/MachineScheduler.cpp @@ -51,7 +51,11 @@ static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden, static bool ViewMISchedDAGs = false; #endif // NDEBUG -// Experimental heuristics +// FIXME: remove this flag after initial testing. It should always be a good +// thing. +static cl::opt<bool> EnableCopyConstrain("misched-vcopy", cl::Hidden, + cl::desc("Constrain vreg copies."), cl::init(true)); + static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden, cl::desc("Enable load clustering."), cl::init(true)); @@ -305,7 +309,7 @@ void MachineScheduler::print(raw_ostream &O, const Module* m) const { #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void ReadyQueue::dump() { - dbgs() << Name << ": "; + dbgs() << " " << Name << ": "; for (unsigned i = 0, e = Queue.size(); i < e; ++i) dbgs() << Queue[i]->NodeNum << " "; dbgs() << "\n"; @@ -323,6 +327,10 @@ ScheduleDAGMI::~ScheduleDAGMI() { delete SchedImpl; } +bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) { + return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU); +} + bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) { if (SuccSU != &ExitSU) { // Do not use WillCreateCycle, it assumes SD scheduling. @@ -404,6 +412,8 @@ void ScheduleDAGMI::releasePredecessors(SUnit *SU) { } } +/// This is normally called from the main scheduler loop but may also be invoked +/// by the scheduling strategy to perform additional code motion. void ScheduleDAGMI::moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos) { // Advance RegionBegin if the first instruction moves down. @@ -505,6 +515,14 @@ updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) { if ((int)NewMaxPressure[ID] > MaxUnits) MaxUnits = NewMaxPressure[ID]; } + DEBUG( + for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) { + unsigned Limit = TRI->getRegPressureSetLimit(i); + if (NewMaxPressure[i] > Limit ) { + dbgs() << " " << TRI->getRegPressureSetName(i) << ": " + << NewMaxPressure[i] << " > " << Limit << "\n"; + } + }); } /// schedule - Called back from MachineScheduler::runOnMachineFunction @@ -905,6 +923,184 @@ void MacroFusion::apply(ScheduleDAGMI *DAG) { } //===----------------------------------------------------------------------===// +// CopyConstrain - DAG post-processing to encourage copy elimination. +//===----------------------------------------------------------------------===// + +namespace { +/// \brief Post-process the DAG to create weak edges from all uses of a copy to +/// the one use that defines the copy's source vreg, most likely an induction +/// variable increment. +class CopyConstrain : public ScheduleDAGMutation { + // Transient state. + SlotIndex RegionBeginIdx; + // RegionEndIdx is the slot index of the last non-debug instruction in the + // scheduling region. So we may have RegionBeginIdx == RegionEndIdx. + SlotIndex RegionEndIdx; +public: + CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {} + + virtual void apply(ScheduleDAGMI *DAG); + +protected: + void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG); +}; +} // anonymous + +/// constrainLocalCopy handles two possibilities: +/// 1) Local src: +/// I0: = dst +/// I1: src = ... 
+/// I2: = dst +/// I3: dst = src (copy) +/// (create pred->succ edges I0->I1, I2->I1) +/// +/// 2) Local copy: +/// I0: dst = src (copy) +/// I1: = dst +/// I2: src = ... +/// I3: = dst +/// (create pred->succ edges I1->I2, I3->I2) +/// +/// Although the MachineScheduler is currently constrained to single blocks, +/// this algorithm should handle extended blocks. An EBB is a set of +/// contiguously numbered blocks such that the previous block in the EBB is +/// always the single predecessor. +void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) { + LiveIntervals *LIS = DAG->getLIS(); + MachineInstr *Copy = CopySU->getInstr(); + + // Check for pure vreg copies. + unsigned SrcReg = Copy->getOperand(1).getReg(); + if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) + return; + + unsigned DstReg = Copy->getOperand(0).getReg(); + if (!TargetRegisterInfo::isVirtualRegister(DstReg)) + return; + + // Check if either the dest or source is local. If it's live across a back + // edge, it's not local. Note that if both vregs are live across the back + // edge, we cannot successfully constrain the copy without cyclic scheduling. + unsigned LocalReg = DstReg; + unsigned GlobalReg = SrcReg; + LiveInterval *LocalLI = &LIS->getInterval(LocalReg); + if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) { + LocalReg = SrcReg; + GlobalReg = DstReg; + LocalLI = &LIS->getInterval(LocalReg); + if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) + return; + } + LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg); + + // Find the global segment after the start of the local LI. + LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex()); + // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a + // local live range. We could create edges from other global uses to the local + // start, but the coalescer should have already eliminated these cases, so + // don't bother dealing with it. + if (GlobalSegment == GlobalLI->end()) + return; + + // If GlobalSegment is killed at the LocalLI->start, the call to find() + // returned the next global segment. But if GlobalSegment overlaps with + // LocalLI->start, then advance to the next segment. If a hole in GlobalLI + // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole. + if (GlobalSegment->contains(LocalLI->beginIndex())) + ++GlobalSegment; + + if (GlobalSegment == GlobalLI->end()) + return; + + // Check if GlobalLI contains a hole in the vicinity of LocalLI. + if (GlobalSegment != GlobalLI->begin()) { + // Two address defs have no hole. + if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end, + GlobalSegment->start)) { + return; + } + // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise + // it would be a disconnected component in the live range. + assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() && + "Disconnected LRG within the scheduling region."); + } + MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start); + if (!GlobalDef) + return; + + SUnit *GlobalSU = DAG->getSUnit(GlobalDef); + if (!GlobalSU) + return; + + // GlobalDef is the bottom of the GlobalLI hole. Open the hole by + // constraining the uses of the last local def to precede GlobalDef.
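For orientation, an editorial walk-through of the "Local src" case from the comment above (not part of the patch): LocalReg ends up being src, whose live range I1..I3 stays inside the region, and GlobalReg is dst, whose incoming value is read at I0 and I2. GlobalSegment then starts at the copy's own def of dst, so GlobalSU is I3, while LastLocalSU and FirstLocalSU are both I1; the loops below collect I0 and I2 through their anti-dependences on dst and produce exactly the weak edges I0->I1 and I2->I1 listed in the comment.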
+ SmallVector<SUnit*,8> LocalUses; + const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex()); + MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def); + SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef); + for (SUnit::const_succ_iterator + I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end(); + I != E; ++I) { + if (I->getKind() != SDep::Data || I->getReg() != LocalReg) + continue; + if (I->getSUnit() == GlobalSU) + continue; + if (!DAG->canAddEdge(GlobalSU, I->getSUnit())) + return; + LocalUses.push_back(I->getSUnit()); + } + // Open the top of the GlobalLI hole by constraining any earlier global uses + // to precede the start of LocalLI. + SmallVector<SUnit*,8> GlobalUses; + MachineInstr *FirstLocalDef = + LIS->getInstructionFromIndex(LocalLI->beginIndex()); + SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef); + for (SUnit::const_pred_iterator + I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) { + if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg) + continue; + if (I->getSUnit() == FirstLocalSU) + continue; + if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit())) + return; + GlobalUses.push_back(I->getSUnit()); + } + DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n"); + // Add the weak edges. + for (SmallVectorImpl<SUnit*>::const_iterator + I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) { + DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU(" + << GlobalSU->NodeNum << ")\n"); + DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak)); + } + for (SmallVectorImpl<SUnit*>::const_iterator + I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) { + DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU(" + << FirstLocalSU->NodeNum << ")\n"); + DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak)); + } +} + +/// \brief Callback from DAG postProcessing to create weak edges to encourage +/// copy elimination. +void CopyConstrain::apply(ScheduleDAGMI *DAG) { + MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end()); + if (FirstPos == DAG->end()) + return; + RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos); + RegionEndIdx = DAG->getLIS()->getInstructionIndex( + &*priorNonDebug(DAG->end(), DAG->begin())); + + for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) { + SUnit *SU = &DAG->SUnits[Idx]; + if (!SU->getInstr()->isCopy()) + continue; + + constrainLocalCopy(SU, DAG); + } +} + +//===----------------------------------------------------------------------===// // ConvergingScheduler - Implementation of the standard MachineSchedStrategy. //===----------------------------------------------------------------------===// @@ -916,7 +1112,7 @@ public: /// Represent the type of SchedCandidate found within a single queue. /// pickNodeBidirectional depends on these listed by decreasing priority. 
enum CandReason { - NoCand, SingleExcess, SingleCritical, Cluster, + NoCand, PhysRegCopy, SingleExcess, SingleCritical, Cluster, Weak, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce, TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse, NodeOrder}; @@ -1191,8 +1387,10 @@ protected: const RegPressureTracker &RPTracker, SchedCandidate &Candidate); + void reschedulePhysRegCopies(SUnit *SU, bool isTop); + #ifndef NDEBUG - void traceCandidate(const SchedCandidate &Cand, const SchedBoundary &Zone); + void traceCandidate(const SchedCandidate &Cand); #endif }; } // namespace @@ -1243,8 +1441,6 @@ void ConvergingScheduler::initialize(ScheduleDAGMI *dag) { Top.init(DAG, SchedModel, &Rem); Bot.init(DAG, SchedModel, &Rem); - DAG->computeDFSResult(); - // Initialize resource counts. // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or @@ -1341,6 +1537,8 @@ void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) { for (ReadyQueue::iterator I = Available.begin(), E = Available.end(); I != E; ++I) { unsigned L = getUnscheduledLatency(*I); + DEBUG(dbgs() << " " << Available.getName() + << " RemLatency SU(" << (*I)->NodeNum << ") " << L << '\n'); if (L > RemLatency) RemLatency = L; } @@ -1351,10 +1549,13 @@ void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) { RemLatency = L; } unsigned CriticalPathLimit = Rem->CriticalPath + SchedModel->getILPWindow(); + DEBUG(dbgs() << " " << Available.getName() + << " ExpectedLatency " << ExpectedLatency + << " CP Limit " << CriticalPathLimit << '\n'); if (RemLatency + ExpectedLatency >= CriticalPathLimit && RemLatency > Rem->getMaxRemainingCount(SchedModel)) { Policy.ReduceLatency = true; - DEBUG(dbgs() << "Increase ILP: " << Available.getName() << '\n'); + DEBUG(dbgs() << " Increase ILP: " << Available.getName() << '\n'); } } @@ -1403,8 +1604,8 @@ void ConvergingScheduler::SchedBoundary::bumpCycle() { CheckPending = true; IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle); - DEBUG(dbgs() << " *** " << Available.getName() << " cycle " - << CurrCycle << '\n'); + DEBUG(dbgs() << " " << Available.getName() + << " Cycle: " << CurrCycle << '\n'); } /// Add the given processor resource to this scheduled zone. 
@@ -1571,7 +1772,8 @@ void ConvergingScheduler::balanceZones( if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount) > (int)SchedModel->getLatencyFactor()) { CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx; - DEBUG(dbgs() << "Balance " << CriticalZone.Available.getName() << " reduce " + DEBUG(dbgs() << " Balance " << CriticalZone.Available.getName() + << " reduce " << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name << '\n'); } @@ -1582,7 +1784,8 @@ void ConvergingScheduler::balanceZones( if ((int)(OppositeZone.ExpectedCount - OppositeCount) > (int)SchedModel->getLatencyFactor()) { OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx; - DEBUG(dbgs() << "Balance " << OppositeZone.Available.getName() << " demand " + DEBUG(dbgs() << " Balance " << OppositeZone.Available.getName() + << " demand " << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name << '\n'); } @@ -1606,7 +1809,7 @@ void ConvergingScheduler::checkResourceLimits( if (Top.CritResIdx != Rem.CritResIdx) { TopCand.Policy.ReduceResIdx = Top.CritResIdx; BotCand.Policy.ReduceResIdx = Bot.CritResIdx; - DEBUG(dbgs() << "Reduce scheduled " + DEBUG(dbgs() << " Reduce scheduled " << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n'); } return; @@ -1623,7 +1826,7 @@ void ConvergingScheduler::checkResourceLimits( && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) { TopCand.Policy.ReduceLatency = true; BotCand.Policy.ReduceLatency = true; - DEBUG(dbgs() << "Reduce scheduled latency " << Top.ExpectedLatency + DEBUG(dbgs() << " Reduce scheduled latency " << Top.ExpectedLatency << " + " << Bot.ExpectedLatency << '\n'); } return; @@ -1662,7 +1865,7 @@ initResourceDelta(const ScheduleDAGMI *DAG, } /// Return true if this heuristic determines order. -static bool tryLess(unsigned TryVal, unsigned CandVal, +static bool tryLess(int TryVal, int CandVal, ConvergingScheduler::SchedCandidate &TryCand, ConvergingScheduler::SchedCandidate &Cand, ConvergingScheduler::CandReason Reason) { @@ -1678,7 +1881,7 @@ static bool tryLess(unsigned TryVal, unsigned CandVal, return false; } -static bool tryGreater(unsigned TryVal, unsigned CandVal, +static bool tryGreater(int TryVal, int CandVal, ConvergingScheduler::SchedCandidate &TryCand, ConvergingScheduler::SchedCandidate &Cand, ConvergingScheduler::CandReason Reason) { @@ -1698,6 +1901,34 @@ static unsigned getWeakLeft(const SUnit *SU, bool isTop) { return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft; } +/// Minimize physical register live ranges. Regalloc wants them adjacent to +/// their physreg def/use. +/// +/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf +/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled +/// with the operation that produces or consumes the physreg. We'll do this when +/// regalloc has support for parallel copies. +static int biasPhysRegCopy(const SUnit *SU, bool isTop) { + const MachineInstr *MI = SU->getInstr(); + if (!MI->isCopy()) + return 0; + + unsigned ScheduledOper = isTop ? 1 : 0; + unsigned UnscheduledOper = isTop ? 0 : 1; + // If we have already scheduled the physreg produce/consumer, immediately + // schedule the copy. + if (TargetRegisterInfo::isPhysicalRegister( + MI->getOperand(ScheduledOper).getReg())) + return 1; + // If the physreg is at the boundary, defer it. Otherwise schedule it + // immediately to free the dependent. We can hoist the copy later. + bool AtBoundary = isTop ? 
!SU->NumSuccsLeft : !SU->NumPredsLeft; + if (TargetRegisterInfo::isPhysicalRegister( + MI->getOperand(UnscheduledOper).getReg())) + return AtBoundary ? -1 : 1; + return 0; +} + /// Apply a set of heuristics to a new candidate. Heuristics are currently /// hierarchical. This may be more efficient than a graduated cost model because /// we don't need to evaluate all aspects of the model for each node in the @@ -1725,6 +1956,12 @@ void ConvergingScheduler::tryCandidate(SchedCandidate &Cand, TryCand.Reason = NodeOrder; return; } + + if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()), + biasPhysRegCopy(Cand.SU, Zone.isTop()), + TryCand, Cand, PhysRegCopy)) + return; + // Avoid exceeding the target's limit. if (tryLess(TryCand.RPDelta.Excess.UnitIncrease, Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess)) @@ -1751,12 +1988,16 @@ void ConvergingScheduler::tryCandidate(SchedCandidate &Cand, if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, TryCand, Cand, Cluster)) return; - // Currently, weak edges are for clustering, so we hard-code that reason. - // However, deferring the current TryCand will not change Cand's reason. + + // Weak edges are for clustering and other constraints. + // + // Deferring TryCand here does not change Cand's reason. This is good in the + // sense that a bad candidate shouldn't affect a previous candidate's + // goodness, but bad in that it is asymmetric and depends on queue order. CandReason OrigReason = Cand.Reason; if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), getWeakLeft(Cand.SU, Zone.isTop()), - TryCand, Cand, Cluster)) { + TryCand, Cand, Weak)) { Cand.Reason = OrigReason; return; } @@ -1827,20 +2068,20 @@ static bool compareRPDelta(const RegPressureDelta &LHS, // Avoid increasing the max critical pressure in the scheduled region. if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) { - DEBUG(dbgs() << "RP excess top - bot: " + DEBUG(dbgs() << " RP excess top - bot: " << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n'); return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease; } // Avoid increasing the max critical pressure in the scheduled region. if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) { - DEBUG(dbgs() << "RP critical top - bot: " + DEBUG(dbgs() << " RP critical top - bot: " << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease) << '\n'); return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease; } // Avoid increasing the max pressure of the entire region.
if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) { - DEBUG(dbgs() << "RP current top - bot: " + DEBUG(dbgs() << " RP current top - bot: " << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease) << '\n'); return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease; @@ -1853,9 +2094,11 @@ const char *ConvergingScheduler::getReasonStr( ConvergingScheduler::CandReason Reason) { switch (Reason) { case NoCand: return "NOCAND "; + case PhysRegCopy: return "PREG-COPY"; case SingleExcess: return "REG-EXCESS"; case SingleCritical: return "REG-CRIT "; case Cluster: return "CLUSTER "; + case Weak: return "WEAK "; case SingleMax: return "REG-MAX "; case MultiPressure: return "REG-MULTI "; case ResourceReduce: return "RES-REDUCE"; @@ -1870,9 +2113,7 @@ const char *ConvergingScheduler::getReasonStr( llvm_unreachable("Unknown reason!"); } -void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand, - const SchedBoundary &Zone) { - const char *Label = getReasonStr(Cand.Reason); +void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) { PressureElement P; unsigned ResIdx = 0; unsigned Latency = 0; @@ -1907,21 +2148,21 @@ void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand, Latency = Cand.SU->getDepth(); break; } - dbgs() << Label << " " << Zone.Available.getName() << " "; + dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason); if (P.isValid()) - dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease - << " "; + dbgs() << " " << TRI->getRegPressureSetName(P.PSetID) + << ":" << P.UnitIncrease << " "; else - dbgs() << " "; + dbgs() << " "; if (ResIdx) - dbgs() << SchedModel->getProcResource(ResIdx)->Name << " "; + dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " "; else - dbgs() << " "; + dbgs() << " "; if (Latency) - dbgs() << Latency << " cycles "; + dbgs() << " " << Latency << " cycles "; else - dbgs() << " "; - Cand.SU->dump(DAG); + dbgs() << " "; + dbgs() << '\n'; } #endif @@ -1950,15 +2191,14 @@ void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, if (TryCand.ResDelta == SchedResourceDelta()) TryCand.initResourceDelta(DAG, SchedModel); Cand.setBest(TryCand); - DEBUG(traceCandidate(Cand, Zone)); + DEBUG(traceCandidate(Cand)); } } } static void tracePick(const ConvergingScheduler::SchedCandidate &Cand, bool IsTop) { - DEBUG(dbgs() << "Pick " << (IsTop ? "top" : "bot") - << " SU(" << Cand.SU->NodeNum << ") " + DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ") << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n'); } @@ -1968,10 +2208,12 @@ SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) { // efficient, but also provides the best heuristics for CriticalPSets. if (SUnit *SU = Bot.pickOnlyChoice()) { IsTopNode = false; + DEBUG(dbgs() << "Pick Top NOCAND\n"); return SU; } if (SUnit *SU = Top.pickOnlyChoice()) { IsTopNode = true; + DEBUG(dbgs() << "Pick Bot NOCAND\n"); return SU; } CandPolicy NoPolicy; @@ -2069,24 +2311,53 @@ SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) { if (SU->isBottomReady()) Bot.removeReady(SU); - DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom") - << " Scheduling Instruction in cycle " - << (IsTopNode ? 
Top.CurrCycle : Bot.CurrCycle) << '\n'; - SU->dump(DAG)); + DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr()); return SU; } +void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) { + + MachineBasicBlock::iterator InsertPos = SU->getInstr(); + if (!isTop) + ++InsertPos; + SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs; + + // Find already scheduled copies with a single physreg dependence and move + // them just above the scheduled instruction. + for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end(); + I != E; ++I) { + if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg())) + continue; + SUnit *DepSU = I->getSUnit(); + if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1) + continue; + MachineInstr *Copy = DepSU->getInstr(); + if (!Copy->isCopy()) + continue; + DEBUG(dbgs() << " Rescheduling physreg copy "; + I->getSUnit()->dump(DAG)); + DAG->moveInstruction(Copy, InsertPos); + } +} + /// Update the scheduler's state after scheduling a node. This is the same node /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update /// it's state based on the current cycle before MachineSchedStrategy does. +/// +/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling +/// them here. See comments in biasPhysRegCopy. void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) { if (IsTopNode) { SU->TopReadyCycle = Top.CurrCycle; Top.bumpNode(SU); + if (SU->hasPhysRegUses) + reschedulePhysRegCopies(SU, true); } else { SU->BotReadyCycle = Bot.CurrCycle; Bot.bumpNode(SU); + if (SU->hasPhysRegDefs) + reschedulePhysRegCopies(SU, false); } } @@ -2097,6 +2368,12 @@ static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) { "-misched-topdown incompatible with -misched-bottomup"); ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler()); // Register DAG post-processors. + // + // FIXME: extend the mutation API to allow earlier mutations to instantiate + // data and pass it to later mutations. Have a single mutation that gathers + // the interesting nodes in one pass. 
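Since the FIXME above discusses the mutation API, a minimal editorial sketch of how another post-processor would hook in the same way may help; the DumpCopiesMutation name and its behavior are invented for illustration, and the sketch only reuses calls that already appear in this patch.

  namespace {
  // Hypothetical mutation: just report which scheduling units are COPYs.
  struct DumpCopiesMutation : ScheduleDAGMutation {
    virtual void apply(ScheduleDAGMI *DAG) {
      for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx)
        if (DAG->SUnits[Idx].getInstr()->isCopy())
          DEBUG(dbgs() << "copy: SU(" << DAG->SUnits[Idx].NodeNum << ")\n");
    }
  };
  } // anonymous
  // Registered next to the existing mutations:
  //   DAG->addMutation(new DumpCopiesMutation());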
+ if (EnableCopyConstrain) + DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI)); if (EnableLoadCluster) DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI)); if (EnableMacroFusion) @@ -2186,12 +2463,12 @@ public: SUnit *SU = ReadyQ.back(); ReadyQ.pop_back(); IsTopNode = false; - DEBUG(dbgs() << "*** Scheduling " << "SU(" << SU->NodeNum << "): " - << *SU->getInstr() + DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") " << " ILP: " << DAG->getDFSResult()->getILP(SU) << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @" << DAG->getDFSResult()->getSubtreeLevel( - DAG->getDFSResult()->getSubtreeID(SU)) << '\n'); + DAG->getDFSResult()->getSubtreeID(SU)) << '\n' + << "Scheduling " << *SU->getInstr()); return SU; } diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp index 5bf0176..00f702c 100644 --- a/lib/CodeGen/MachineTraceMetrics.cpp +++ b/lib/CodeGen/MachineTraceMetrics.cpp @@ -18,6 +18,7 @@ #include "llvm/CodeGen/Passes.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetRegisterInfo.h" @@ -57,6 +58,8 @@ bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) { MF->getTarget().getSubtarget<TargetSubtargetInfo>(); SchedModel.init(*ST.getSchedModel(), &ST, TII); BlockInfo.resize(MF->getNumBlockIDs()); + ProcResourceCycles.resize(MF->getNumBlockIDs() * + SchedModel.getNumProcResourceKinds()); return false; } @@ -85,9 +88,13 @@ MachineTraceMetrics::getResources(const MachineBasicBlock *MBB) { return FBI; // Compute resource usage in the block. - // FIXME: Compute per-functional unit counts. FBI->HasCalls = false; unsigned InstrCount = 0; + + // Add up per-processor resource cycles as well. + unsigned PRKinds = SchedModel.getNumProcResourceKinds(); + SmallVector<unsigned, 32> PRCycles(PRKinds); + for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end(); I != E; ++I) { const MachineInstr *MI = I; @@ -96,11 +103,43 @@ MachineTraceMetrics::getResources(const MachineBasicBlock *MBB) { ++InstrCount; if (MI->isCall()) FBI->HasCalls = true; + + // Count processor resources used. + if (!SchedModel.hasInstrSchedModel()) + continue; + const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(MI); + if (!SC->isValid()) + continue; + + for (TargetSchedModel::ProcResIter + PI = SchedModel.getWriteProcResBegin(SC), + PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) { + assert(PI->ProcResourceIdx < PRKinds && "Bad processor resource kind"); + PRCycles[PI->ProcResourceIdx] += PI->Cycles; + } } FBI->InstrCount = InstrCount; + + // Scale the resource cycles so they are comparable. 
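To illustrate the scaling performed below with hypothetical numbers: if this block uses PRCycles[K] = 4 raw cycles of resource kind K and SchedModel.getResourceFactor(K) is 3, then ProcResourceCycles[PROffset + K] is stored as 4 * 3 = 12 scaled units, and the debug dump further down recovers the raw count by dividing by the same factor (12 / 3 = 4).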
+ unsigned PROffset = MBB->getNumber() * PRKinds; + for (unsigned K = 0; K != PRKinds; ++K) + ProcResourceCycles[PROffset + K] = + PRCycles[K] * SchedModel.getResourceFactor(K); + return FBI; } +ArrayRef<unsigned> +MachineTraceMetrics::getProcResourceCycles(unsigned MBBNum) const { + assert(BlockInfo[MBBNum].hasResources() && + "getResources() must be called before getProcResourceCycles()"); + unsigned PRKinds = SchedModel.getNumProcResourceKinds(); + assert((MBBNum+1) * PRKinds <= ProcResourceCycles.size()); + return ArrayRef<unsigned>(ProcResourceCycles.data() + MBBNum * PRKinds, + PRKinds); +} + + //===----------------------------------------------------------------------===// // Ensemble utility functions //===----------------------------------------------------------------------===// @@ -108,6 +147,9 @@ MachineTraceMetrics::getResources(const MachineBasicBlock *MBB) { MachineTraceMetrics::Ensemble::Ensemble(MachineTraceMetrics *ct) : MTM(*ct) { BlockInfo.resize(MTM.BlockInfo.size()); + unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds(); + ProcResourceDepths.resize(MTM.BlockInfo.size() * PRKinds); + ProcResourceHeights.resize(MTM.BlockInfo.size() * PRKinds); } // Virtual destructor serves as an anchor. @@ -123,21 +165,32 @@ MachineTraceMetrics::Ensemble::getLoopFor(const MachineBasicBlock *MBB) const { void MachineTraceMetrics::Ensemble:: computeDepthResources(const MachineBasicBlock *MBB) { TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()]; + unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds(); + unsigned PROffset = MBB->getNumber() * PRKinds; // Compute resources from trace above. The top block is simple. if (!TBI->Pred) { TBI->InstrDepth = 0; TBI->Head = MBB->getNumber(); + std::fill(ProcResourceDepths.begin() + PROffset, + ProcResourceDepths.begin() + PROffset + PRKinds, 0); return; } // Compute from the block above. A post-order traversal ensures the // predecessor is always computed first. - TraceBlockInfo *PredTBI = &BlockInfo[TBI->Pred->getNumber()]; + unsigned PredNum = TBI->Pred->getNumber(); + TraceBlockInfo *PredTBI = &BlockInfo[PredNum]; assert(PredTBI->hasValidDepth() && "Trace above has not been computed yet"); const FixedBlockInfo *PredFBI = MTM.getResources(TBI->Pred); TBI->InstrDepth = PredTBI->InstrDepth + PredFBI->InstrCount; TBI->Head = PredTBI->Head; + + // Compute per-resource depths. + ArrayRef<unsigned> PredPRDepths = getProcResourceDepths(PredNum); + ArrayRef<unsigned> PredPRCycles = MTM.getProcResourceCycles(PredNum); + for (unsigned K = 0; K != PRKinds; ++K) + ProcResourceDepths[PROffset + K] = PredPRDepths[K] + PredPRCycles[K]; } // Update resource-related information in the TraceBlockInfo for MBB. @@ -145,22 +198,33 @@ computeDepthResources(const MachineBasicBlock *MBB) { void MachineTraceMetrics::Ensemble:: computeHeightResources(const MachineBasicBlock *MBB) { TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()]; + unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds(); + unsigned PROffset = MBB->getNumber() * PRKinds; // Compute resources for the current block. TBI->InstrHeight = MTM.getResources(MBB)->InstrCount; + ArrayRef<unsigned> PRCycles = MTM.getProcResourceCycles(MBB->getNumber()); // The trace tail is done. if (!TBI->Succ) { TBI->Tail = MBB->getNumber(); + std::copy(PRCycles.begin(), PRCycles.end(), + ProcResourceHeights.begin() + PROffset); return; } // Compute from the block below. A post-order traversal ensures the // predecessor is always computed first. 
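
The per-resource bookkeeping added above pre-scales raw cycle counts by a resource factor so that kinds with different unit counts can be compared, and later code takes the maximum as the resource-limited bound. A rough standalone illustration of that arithmetic; the machine, the factors, and the LCM-based formula are assumptions for the example, not taken from any real schedule model.

// Illustrative only: scale per-kind cycles so a 2-unit and a 1-unit resource
// become comparable, then take the max as the resource-limited length.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical machine: one FP unit, two load/store units.
  // One common scaling trick is factor = LCM(units) / units-of-this-kind,
  // so every scaled count reads "cycles if this kind were the bottleneck".
  struct Kind { const char *Name; unsigned Units; unsigned RawCycles; };
  std::vector<Kind> Kinds = {{"FPU", 1, 6}, {"LSU", 2, 10}};
  const unsigned LCMUnits = 2;

  unsigned Max = 0;
  for (const Kind &K : Kinds) {
    unsigned Factor = LCMUnits / K.Units;       // FPU -> 2, LSU -> 1
    unsigned Scaled = K.RawCycles * Factor;     // FPU -> 12, LSU -> 10
    std::printf("%s: %u raw ops -> %u scaled\n", K.Name, K.RawCycles, Scaled);
    Max = std::max(Max, Scaled);
  }
  // Convert back to cycles by dividing out the common factor, in the spirit
  // of the final getCycles()-style conversion in the patch.
  std::printf("resource-limited length: %u cycles\n", Max / LCMUnits);
}
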
- TraceBlockInfo *SuccTBI = &BlockInfo[TBI->Succ->getNumber()]; + unsigned SuccNum = TBI->Succ->getNumber(); + TraceBlockInfo *SuccTBI = &BlockInfo[SuccNum]; assert(SuccTBI->hasValidHeight() && "Trace below has not been computed yet"); TBI->InstrHeight += SuccTBI->InstrHeight; TBI->Tail = SuccTBI->Tail; + + // Compute per-resource heights. + ArrayRef<unsigned> SuccPRHeights = getProcResourceHeights(SuccNum); + for (unsigned K = 0; K != PRKinds; ++K) + ProcResourceHeights[PROffset + K] = SuccPRHeights[K] + PRCycles[K]; } // Check if depth resources for MBB are valid and return the TBI. @@ -181,6 +245,35 @@ getHeightResources(const MachineBasicBlock *MBB) const { return TBI->hasValidHeight() ? TBI : 0; } +/// Get an array of processor resource depths for MBB. Indexed by processor +/// resource kind, this array contains the scaled processor resources consumed +/// by all blocks preceding MBB in its trace. It does not include instructions +/// in MBB. +/// +/// Compare TraceBlockInfo::InstrDepth. +ArrayRef<unsigned> +MachineTraceMetrics::Ensemble:: +getProcResourceDepths(unsigned MBBNum) const { + unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds(); + assert((MBBNum+1) * PRKinds <= ProcResourceDepths.size()); + return ArrayRef<unsigned>(ProcResourceDepths.data() + MBBNum * PRKinds, + PRKinds); +} + +/// Get an array of processor resource heights for MBB. Indexed by processor +/// resource kind, this array contains the scaled processor resources consumed +/// by this block and all blocks following it in its trace. +/// +/// Compare TraceBlockInfo::InstrHeight. +ArrayRef<unsigned> +MachineTraceMetrics::Ensemble:: +getProcResourceHeights(unsigned MBBNum) const { + unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds(); + assert((MBBNum+1) * PRKinds <= ProcResourceHeights.size()); + return ArrayRef<unsigned>(ProcResourceHeights.data() + MBBNum * PRKinds, + PRKinds); +} + //===----------------------------------------------------------------------===// // Trace Selection Strategies //===----------------------------------------------------------------------===// @@ -713,11 +806,24 @@ computeInstrDepths(const MachineBasicBlock *MBB) { SmallVector<DataDep, 8> Deps; while (!Stack.empty()) { MBB = Stack.pop_back_val(); - DEBUG(dbgs() << "Depths for BB#" << MBB->getNumber() << ":\n"); + DEBUG(dbgs() << "\nDepths for BB#" << MBB->getNumber() << ":\n"); TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()]; TBI.HasValidInstrDepths = true; TBI.CriticalPath = 0; + // Print out resource depths here as well. + DEBUG({ + dbgs() << format("%7u Instructions\n", TBI.InstrDepth); + ArrayRef<unsigned> PRDepths = getProcResourceDepths(MBB->getNumber()); + for (unsigned K = 0; K != PRDepths.size(); ++K) + if (PRDepths[K]) { + unsigned Factor = MTM.SchedModel.getResourceFactor(K); + dbgs() << format("%6uc @ ", MTM.getCycles(PRDepths[K])) + << MTM.SchedModel.getProcResource(K)->Name << " (" + << PRDepths[K]/Factor << " ops x" << Factor << ")\n"; + } + }); + // Also compute the critical path length through MBB when possible. 
if (TBI.HasValidInstrHeights) TBI.CriticalPath = computeCrossBlockCriticalPath(TBI); @@ -928,6 +1034,18 @@ computeInstrHeights(const MachineBasicBlock *MBB) { TBI.HasValidInstrHeights = true; TBI.CriticalPath = 0; + DEBUG({ + dbgs() << format("%7u Instructions\n", TBI.InstrHeight); + ArrayRef<unsigned> PRHeights = getProcResourceHeights(MBB->getNumber()); + for (unsigned K = 0; K != PRHeights.size(); ++K) + if (PRHeights[K]) { + unsigned Factor = MTM.SchedModel.getResourceFactor(K); + dbgs() << format("%6uc @ ", MTM.getCycles(PRHeights[K])) + << MTM.SchedModel.getProcResource(K)->Name << " (" + << PRHeights[K]/Factor << " ops x" << Factor << ")\n"; + } + }); + // Get dependencies from PHIs in the trace successor. const MachineBasicBlock *Succ = TBI.Succ; // If MBB is the last block in the trace, and it has a back-edge to the @@ -1058,27 +1176,66 @@ MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr *PHI) const { } unsigned MachineTraceMetrics::Trace::getResourceDepth(bool Bottom) const { - // For now, we compute the resource depth from instruction count / issue - // width. Eventually, we should compute resource depth per functional unit - // and return the max. + // Find the limiting processor resource. + // Numbers have been pre-scaled to be comparable. + unsigned PRMax = 0; + ArrayRef<unsigned> PRDepths = TE.getProcResourceDepths(getBlockNum()); + if (Bottom) { + ArrayRef<unsigned> PRCycles = TE.MTM.getProcResourceCycles(getBlockNum()); + for (unsigned K = 0; K != PRDepths.size(); ++K) + PRMax = std::max(PRMax, PRDepths[K] + PRCycles[K]); + } else { + for (unsigned K = 0; K != PRDepths.size(); ++K) + PRMax = std::max(PRMax, PRDepths[K]); + } + // Convert to cycle count. + PRMax = TE.MTM.getCycles(PRMax); + unsigned Instrs = TBI.InstrDepth; if (Bottom) Instrs += TE.MTM.BlockInfo[getBlockNum()].InstrCount; if (unsigned IW = TE.MTM.SchedModel.getIssueWidth()) Instrs /= IW; // Assume issue width 1 without a schedule model. - return Instrs; + return std::max(Instrs, PRMax); } + unsigned MachineTraceMetrics::Trace:: -getResourceLength(ArrayRef<const MachineBasicBlock*> Extrablocks) const { +getResourceLength(ArrayRef<const MachineBasicBlock*> Extrablocks, + ArrayRef<const MCSchedClassDesc*> ExtraInstrs) const { + // Add up resources above and below the center block. + ArrayRef<unsigned> PRDepths = TE.getProcResourceDepths(getBlockNum()); + ArrayRef<unsigned> PRHeights = TE.getProcResourceHeights(getBlockNum()); + unsigned PRMax = 0; + for (unsigned K = 0; K != PRDepths.size(); ++K) { + unsigned PRCycles = PRDepths[K] + PRHeights[K]; + for (unsigned I = 0; I != Extrablocks.size(); ++I) + PRCycles += TE.MTM.getProcResourceCycles(Extrablocks[I]->getNumber())[K]; + for (unsigned I = 0; I != ExtraInstrs.size(); ++I) { + const MCSchedClassDesc* SC = ExtraInstrs[I]; + if (!SC->isValid()) + continue; + for (TargetSchedModel::ProcResIter + PI = TE.MTM.SchedModel.getWriteProcResBegin(SC), + PE = TE.MTM.SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) { + if (PI->ProcResourceIdx != K) + continue; + PRCycles += (PI->Cycles * TE.MTM.SchedModel.getResourceFactor(K)); + } + } + PRMax = std::max(PRMax, PRCycles); + } + // Convert to cycle count. + PRMax = TE.MTM.getCycles(PRMax); + unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight; for (unsigned i = 0, e = Extrablocks.size(); i != e; ++i) Instrs += TE.MTM.getResources(Extrablocks[i])->InstrCount; if (unsigned IW = TE.MTM.SchedModel.getIssueWidth()) Instrs /= IW; // Assume issue width 1 without a schedule model. 
- return Instrs; + return std::max(Instrs, PRMax); } void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const { diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index 4b12300..037043f 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -472,6 +472,9 @@ void MachineVerifier::visitMachineFunctionBefore() { if (MInfo.Succs.size() != I->succ_size()) report("MBB has duplicate entries in its successor list.", I); } + + // Check that the register use lists are sane. + MRI->verifyUseLists(); } // Does iterator point to a and b as the first two elements? diff --git a/lib/CodeGen/Passes.cpp b/lib/CodeGen/Passes.cpp index 6e1cad3..bfbc062 100644 --- a/lib/CodeGen/Passes.cpp +++ b/lib/CodeGen/Passes.cpp @@ -39,12 +39,9 @@ static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden, static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden, cl::desc("Disable pre-register allocation tail duplication")); static cl::opt<bool> DisableBlockPlacement("disable-block-placement", - cl::Hidden, cl::desc("Disable the probability-driven block placement, and " - "re-enable the old code placement pass")); + cl::Hidden, cl::desc("Disable probability-driven block placement")); static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats", cl::Hidden, cl::desc("Collect probability-driven block placement stats")); -static cl::opt<bool> DisableCodePlace("disable-code-place", cl::Hidden, - cl::desc("Disable code placement")); static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden, cl::desc("Disable Stack Slot Coloring")); static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden, @@ -96,9 +93,10 @@ static cl::opt<bool> EarlyLiveIntervals("early-live-intervals", cl::Hidden, /// simple binary flags that either suppress the pass or do nothing. /// i.e. -disable-mypass=false has no effect. /// These should be converted to boolOrDefault in order to use applyOverride. -static AnalysisID applyDisable(AnalysisID PassID, bool Override) { +static IdentifyingPassPtr applyDisable(IdentifyingPassPtr PassID, + bool Override) { if (Override) - return 0; + return IdentifyingPassPtr(); return PassID; } @@ -106,19 +104,20 @@ static AnalysisID applyDisable(AnalysisID PassID, bool Override) { /// flags with ternary conditions. TargetID is passed through by default. The /// pass is suppressed when the option is false. When the option is true, the /// StandardID is selected if the target provides no default. -static AnalysisID applyOverride(AnalysisID TargetID, cl::boolOrDefault Override, - AnalysisID StandardID) { +static IdentifyingPassPtr applyOverride(IdentifyingPassPtr TargetID, + cl::boolOrDefault Override, + AnalysisID StandardID) { switch (Override) { case cl::BOU_UNSET: return TargetID; case cl::BOU_TRUE: - if (TargetID) + if (TargetID.isValid()) return TargetID; if (StandardID == 0) report_fatal_error("Target cannot enable pass"); return StandardID; case cl::BOU_FALSE: - return 0; + return IdentifyingPassPtr(); } llvm_unreachable("Invalid command line option state"); } @@ -135,7 +134,8 @@ static AnalysisID applyOverride(AnalysisID TargetID, cl::boolOrDefault Override, /// StandardID may be a pseudo ID. In that case TargetID is the name of the real /// pass to run. This allows multiple options to control a single pass depending /// on where in the pipeline that pass is added. 
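
A small model of the override semantics documented above: an unset flag keeps whatever the target substituted, true forces the standard pass back in when the target dropped it, and false suppresses the pass. std::optional stands in for IdentifyingPassPtr here; none of this is the real TargetPassConfig interface.

// Simplified model of applyOverride(): how a ternary command-line flag
// interacts with a target-provided substitution. Illustrative names only.
#include <cassert>
#include <optional>

enum class Flag { Unset, True, False };

// std::optional<int> plays the role of IdentifyingPassPtr: empty means
// "no pass", a value means "run this pass ID".
std::optional<int> applyOverride(std::optional<int> TargetID, Flag Override,
                                 int StandardID) {
  switch (Override) {
  case Flag::Unset: return TargetID;                    // target decides
  case Flag::True:  return TargetID ? TargetID
                                    : std::optional<int>(StandardID);
  case Flag::False: return std::nullopt;                // user disabled it
  }
  return std::nullopt;
}

int main() {
  const int Standard = 7, Substitute = 9;
  assert(applyOverride(Substitute, Flag::Unset, Standard) == Substitute);
  assert(applyOverride(std::nullopt, Flag::True, Standard) == Standard);
  assert(!applyOverride(Substitute, Flag::False, Standard).has_value());
}
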
-static AnalysisID overridePass(AnalysisID StandardID, AnalysisID TargetID) { +static IdentifyingPassPtr overridePass(AnalysisID StandardID, + IdentifyingPassPtr TargetID) { if (StandardID == &PostRASchedulerID) return applyDisable(TargetID, DisablePostRA); @@ -149,10 +149,7 @@ static AnalysisID overridePass(AnalysisID StandardID, AnalysisID TargetID) { return applyDisable(TargetID, DisableEarlyTailDup); if (StandardID == &MachineBlockPlacementID) - return applyDisable(TargetID, DisableCodePlace); - - if (StandardID == &CodePlacementOptID) - return applyDisable(TargetID, DisableCodePlace); + return applyDisable(TargetID, DisableBlockPlacement); if (StandardID == &StackSlotColoringID) return applyDisable(TargetID, DisableSSC); @@ -206,11 +203,11 @@ public: // user interface. For example, a target may disable a standard pass by // default by substituting a pass ID of zero, and the user may still enable // that standard pass with an explicit command line option. - DenseMap<AnalysisID,AnalysisID> TargetPasses; + DenseMap<AnalysisID,IdentifyingPassPtr> TargetPasses; /// Store the pairs of <AnalysisID, AnalysisID> of which the second pass /// is inserted after each instance of the first one. - SmallVector<std::pair<AnalysisID, AnalysisID>, 4> InsertedPasses; + SmallVector<std::pair<AnalysisID, IdentifyingPassPtr>, 4> InsertedPasses; }; } // namespace llvm @@ -245,9 +242,13 @@ TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm) /// Insert InsertedPassID pass after TargetPassID. void TargetPassConfig::insertPass(AnalysisID TargetPassID, - AnalysisID InsertedPassID) { - assert(TargetPassID != InsertedPassID && "Insert a pass after itself!"); - std::pair<AnalysisID, AnalysisID> P(TargetPassID, InsertedPassID); + IdentifyingPassPtr InsertedPassID) { + assert(((!InsertedPassID.isInstance() && + TargetPassID != InsertedPassID.getID()) || + (InsertedPassID.isInstance() && + TargetPassID != InsertedPassID.getInstance()->getPassID())) && + "Insert a pass after itself!"); + std::pair<AnalysisID, IdentifyingPassPtr> P(TargetPassID, InsertedPassID); Impl->InsertedPasses.push_back(P); } @@ -271,12 +272,12 @@ void TargetPassConfig::setOpt(bool &Opt, bool Val) { } void TargetPassConfig::substitutePass(AnalysisID StandardID, - AnalysisID TargetID) { + IdentifyingPassPtr TargetID) { Impl->TargetPasses[StandardID] = TargetID; } -AnalysisID TargetPassConfig::getPassSubstitution(AnalysisID ID) const { - DenseMap<AnalysisID, AnalysisID>::const_iterator +IdentifyingPassPtr TargetPassConfig::getPassSubstitution(AnalysisID ID) const { + DenseMap<AnalysisID, IdentifyingPassPtr>::const_iterator I = Impl->TargetPasses.find(ID); if (I == Impl->TargetPasses.end()) return ID; @@ -309,24 +310,39 @@ void TargetPassConfig::addPass(Pass *P) { /// Add a CodeGen pass at this point in the pipeline after checking for target /// and command line overrides. +/// +/// addPass cannot return a pointer to the pass instance because is internal the +/// PassManager and the instance we create here may already be freed. 
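
The IdentifyingPassPtr introduced by this change holds either a pass ID or an already-constructed pass instance, which is why the reworked addPass has to branch on isInstance(). A bare-bones stand-in that captures the same two-state idea; ToyPassPtr and ToyPass are invented names, not the LLVM classes.

// Two-state holder: either an opaque ID used to construct a pass later,
// or a concrete instance handed in by the target. Illustrative only.
#include <cassert>
#include <cstdio>

struct ToyPass { const char *Name; };

class ToyPassPtr {
  const void *ID = nullptr;    // identity of a standard pass
  ToyPass *Instance = nullptr; // or a pre-built instance
public:
  ToyPassPtr() = default;
  ToyPassPtr(const void *Id) : ID(Id) {}
  ToyPassPtr(ToyPass *P) : Instance(P) {}
  bool isValid() const { return ID || Instance; }
  bool isInstance() const { return Instance != nullptr; }
  const void *getID() const { assert(ID); return ID; }
  ToyPass *getInstance() const { assert(Instance); return Instance; }
};

int main() {
  static char SomePassID;          // address used as an identity token
  ToyPass Custom{"my-custom-pass"};

  ToyPassPtr ById(&SomePassID);
  ToyPassPtr ByInstance(&Custom);
  ToyPassPtr Empty;

  assert(ById.isValid() && !ById.isInstance());
  assert(ByInstance.isInstance());
  assert(!Empty.isValid());
  std::printf("instance pass: %s\n", ByInstance.getInstance()->Name);
}
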
AnalysisID TargetPassConfig::addPass(AnalysisID PassID) { - AnalysisID TargetID = getPassSubstitution(PassID); - AnalysisID FinalID = overridePass(PassID, TargetID); - if (FinalID == 0) - return FinalID; - - Pass *P = Pass::createPass(FinalID); - if (!P) - llvm_unreachable("Pass ID not registered"); - addPass(P); + IdentifyingPassPtr TargetID = getPassSubstitution(PassID); + IdentifyingPassPtr FinalPtr = overridePass(PassID, TargetID); + if (!FinalPtr.isValid()) + return 0; + + Pass *P; + if (FinalPtr.isInstance()) + P = FinalPtr.getInstance(); + else { + P = Pass::createPass(FinalPtr.getID()); + if (!P) + llvm_unreachable("Pass ID not registered"); + } + AnalysisID FinalID = P->getPassID(); + addPass(P); // Ends the lifetime of P. + // Add the passes after the pass P if there is any. - for (SmallVector<std::pair<AnalysisID, AnalysisID>, 4>::iterator + for (SmallVector<std::pair<AnalysisID, IdentifyingPassPtr>, 4>::iterator I = Impl->InsertedPasses.begin(), E = Impl->InsertedPasses.end(); I != E; ++I) { if ((*I).first == PassID) { - assert((*I).second && "Illegal Pass ID!"); - Pass *NP = Pass::createPass((*I).second); - assert(NP && "Pass ID not registered"); + assert((*I).second.isValid() && "Illegal Pass ID!"); + Pass *NP; + if ((*I).second.isInstance()) + NP = (*I).second.getInstance(); + else { + NP = Pass::createPass((*I).second.getID()); + assert(NP && "Pass ID not registered"); + } addPass(NP); } } @@ -693,14 +709,6 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) { addPass(&VirtRegRewriterID); printAndVerify("After Virtual Register Rewriter"); - // FinalizeRegAlloc is convenient until MachineInstrBundles is more mature, - // but eventually, all users of it should probably be moved to addPostRA and - // it can go away. Currently, it's the intended place for targets to run - // FinalizeMachineBundles, because passes other than MachineScheduling an - // RegAlloc itself may not be aware of bundles. - if (addFinalizeRegAlloc()) - printAndVerify("After RegAlloc finalization"); - // Perform stack slot coloring and post-ra machine LICM. // // FIXME: Re-enable coloring with register when it's capable of adding @@ -742,16 +750,7 @@ bool TargetPassConfig::addGCPasses() { /// Add standard basic block placement passes. void TargetPassConfig::addBlockPlacement() { - AnalysisID PassID = 0; - if (!DisableBlockPlacement) { - // MachineBlockPlacement is a new pass which subsumes the functionality of - // CodPlacementOpt. The old code placement pass can be restored by - // disabling block placement, but eventually it will be removed. - PassID = addPass(&MachineBlockPlacementID); - } else { - PassID = addPass(&CodePlacementOptID); - } - if (PassID) { + if (addPass(&MachineBlockPlacementID)) { // Run a separate pass to collect block placement statistics. 
if (EnableBlockPlacementStats) addPass(&MachineBlockPlacementStatsID); diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index 855a8c5..959dd7d 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -55,7 +55,6 @@ INITIALIZE_PASS_END(PEI, "prologepilog", "Prologue/Epilogue Insertion & Frame Finalization", false, false) -STATISTIC(NumVirtualFrameRegs, "Number of virtual frame regs encountered"); STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged"); STATISTIC(NumBytesStackSpace, "Number of bytes used for stack in all functions"); @@ -548,9 +547,11 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) { const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo(); if (RS && TFI.hasFP(Fn) && RegInfo->useFPForScavengingIndex(Fn) && !RegInfo->needsStackRealignment(Fn)) { - int SFI = RS->getScavengingFrameIndex(); - if (SFI >= 0) - AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign); + SmallVector<int, 2> SFIs; + RS->getScavengingFrameIndices(SFIs); + for (SmallVector<int, 2>::iterator I = SFIs.begin(), + IE = SFIs.end(); I != IE; ++I) + AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign); } // FIXME: Once this is working, then enable flag will change to a target @@ -593,7 +594,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) { continue; if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex) continue; - if (RS && (int)i == RS->getScavengingFrameIndex()) + if (RS && RS->isScavengingFrameIndex((int)i)) continue; if (MFI->isDeadObjectIndex(i)) continue; @@ -615,7 +616,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) { continue; if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex) continue; - if (RS && (int)i == RS->getScavengingFrameIndex()) + if (RS && RS->isScavengingFrameIndex((int)i)) continue; if (MFI->isDeadObjectIndex(i)) continue; @@ -631,9 +632,11 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) { // stack pointer. if (RS && (!TFI.hasFP(Fn) || RegInfo->needsStackRealignment(Fn) || !RegInfo->useFPForScavengingIndex(Fn))) { - int SFI = RS->getScavengingFrameIndex(); - if (SFI >= 0) - AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign); + SmallVector<int, 2> SFIs; + RS->getScavengingFrameIndices(SFIs); + for (SmallVector<int, 2>::iterator I = SFIs.begin(), + IE = SFIs.end(); I != IE; ++I) + AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign); } if (!TFI.targetHandlesStackFrameRounding()) { @@ -816,14 +819,28 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) { E = Fn.end(); BB != E; ++BB) { RS->enterBasicBlock(BB); - unsigned VirtReg = 0; - unsigned ScratchReg = 0; int SPAdj = 0; // The instruction stream may change in the loop, so check BB->end() // directly. for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) { + // We might end up here again with a NULL iterator if we scavenged a + // register for which we inserted spill code for definition by what was + // originally the first instruction in BB. + if (I == MachineBasicBlock::iterator(NULL)) + I = BB->begin(); + MachineInstr *MI = I; + MachineBasicBlock::iterator J = llvm::next(I); + MachineBasicBlock::iterator P = I == BB->begin() ? + MachineBasicBlock::iterator(NULL) : llvm::prior(I); + + // RS should process this instruction before we might scavenge at this + // location. 
This is because we might be replacing a virtual register + // defined by this instruction, and if so, registers killed by this + // instruction are available, and defined registers are not. + RS->forward(I); + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { if (MI->getOperand(i).isReg()) { MachineOperand &MO = MI->getOperand(i); @@ -833,29 +850,47 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) { if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; - ++NumVirtualFrameRegs; - - // Have we already allocated a scratch register for this virtual? - if (Reg != VirtReg) { - // When we first encounter a new virtual register, it - // must be a definition. - assert(MI->getOperand(i).isDef() && - "frame index virtual missing def!"); - // Scavenge a new scratch register - VirtReg = Reg; - const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg); - ScratchReg = RS->scavengeRegister(RC, I, SPAdj); - ++NumScavengedRegs; - } + // When we first encounter a new virtual register, it + // must be a definition. + assert(MI->getOperand(i).isDef() && + "frame index virtual missing def!"); + // Scavenge a new scratch register + const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg); + unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj); + + ++NumScavengedRegs; + // Replace this reference to the virtual register with the // scratch register. assert (ScratchReg && "Missing scratch register!"); - MI->getOperand(i).setReg(ScratchReg); + Fn.getRegInfo().replaceRegWith(Reg, ScratchReg); + // Because this instruction was processed by the RS before this + // register was allocated, make sure that the RS now records the + // register as being used. + RS->setUsed(ScratchReg); } } - RS->forward(I); - ++I; + + // If the scavenger needed to use one of its spill slots, the + // spill code will have been inserted in between I and J. This is a + // problem because we need the spill code before I: Move I to just + // prior to J. + if (I != llvm::prior(J)) { + BB->splice(J, BB, I); + + // Before we move I, we need to prepare the RS to visit I again. + // Specifically, RS will assert if it sees uses of registers that + // it believes are undefined. Because we have already processed + // register kills in I, when it visits I again, it will believe that + // those registers are undefined. To avoid this situation, unprocess + // the instruction I. + assert(RS->getCurrentPosition() == I && + "The register scavenger has an unexpected position"); + I = P; + RS->unprocess(P); + } else + ++I; } } } diff --git a/lib/CodeGen/RegAllocBasic.cpp b/lib/CodeGen/RegAllocBasic.cpp index 0b6dc68..7fcfe9e 100644 --- a/lib/CodeGen/RegAllocBasic.cpp +++ b/lib/CodeGen/RegAllocBasic.cpp @@ -63,7 +63,7 @@ class RABasic : public MachineFunctionPass, public RegAllocBase MachineFunction *MF; // state - std::auto_ptr<Spiller> SpillerInstance; + OwningPtr<Spiller> SpillerInstance; std::priority_queue<LiveInterval*, std::vector<LiveInterval*>, CompSpillWeight> Queue; diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp index 6d84176..9eed1fc 100644 --- a/lib/CodeGen/RegAllocGreedy.cpp +++ b/lib/CodeGen/RegAllocGreedy.cpp @@ -78,7 +78,7 @@ class RAGreedy : public MachineFunctionPass, LiveDebugVariables *DebugVars; // state - std::auto_ptr<Spiller> SpillerInstance; + OwningPtr<Spiller> SpillerInstance; std::priority_queue<std::pair<unsigned, unsigned> > Queue; unsigned NextCascade; @@ -166,8 +166,8 @@ class RAGreedy : public MachineFunctionPass, }; // splitting state. 
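
Several files in this change replace std::auto_ptr with OwningPtr, and PBQPBuilder::build now returns a raw pointer that the caller wraps, releasing ownership with take(). A hedged sketch of that hand-off using a minimal owning pointer; ToyOwningPtr and buildProblem are illustrative only.

// Minimal single-owner pointer mirroring the take()/reset() hand-off used
// after the auto_ptr removal. Not the real llvm::OwningPtr.
#include <cassert>
#include <cstdio>

template <typename T> class ToyOwningPtr {
  T *Ptr = nullptr;
public:
  ToyOwningPtr() = default;
  explicit ToyOwningPtr(T *P) : Ptr(P) {}
  ~ToyOwningPtr() { delete Ptr; }
  ToyOwningPtr(const ToyOwningPtr &) = delete;
  ToyOwningPtr &operator=(const ToyOwningPtr &) = delete;
  void reset(T *P) { delete Ptr; Ptr = P; }
  T *take() { T *Tmp = Ptr; Ptr = nullptr; return Tmp; } // caller now owns it
  T *operator->() const { return Ptr; }
  explicit operator bool() const { return Ptr != nullptr; }
};

struct Problem { int Rounds = 0; };

// The builder returns a raw pointer (as PBQPBuilder::build now does); it can
// still use an owning pointer internally and release it at the end.
Problem *buildProblem() {
  ToyOwningPtr<Problem> P(new Problem());
  P->Rounds = 1;
  return P.take();           // transfer ownership to the caller
}

int main() {
  ToyOwningPtr<Problem> Prob(buildProblem()); // caller wraps the raw pointer
  assert(Prob && Prob->Rounds == 1);
  std::printf("rounds: %d\n", Prob->Rounds);
} // Prob's destructor frees the Problem exactly once
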
- std::auto_ptr<SplitAnalysis> SA; - std::auto_ptr<SplitEditor> SE; + OwningPtr<SplitAnalysis> SA; + OwningPtr<SplitEditor> SE; /// Cached per-block interference maps InterferenceCache IntfCache; diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp index 607edac..15a88e2 100644 --- a/lib/CodeGen/RegAllocPBQP.cpp +++ b/lib/CodeGen/RegAllocPBQP.cpp @@ -34,6 +34,7 @@ #include "llvm/CodeGen/RegAllocPBQP.h" #include "RegisterCoalescer.h" #include "Spiller.h" +#include "llvm/ADT/OwningPtr.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/CodeGen/CalcSpillWeights.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" @@ -89,8 +90,8 @@ public: static char ID; /// Construct a PBQP register allocator. - RegAllocPBQP(std::auto_ptr<PBQPBuilder> b, char *cPassID=0) - : MachineFunctionPass(ID), builder(b), customPassID(cPassID) { + RegAllocPBQP(OwningPtr<PBQPBuilder> &b, char *cPassID=0) + : MachineFunctionPass(ID), builder(b.take()), customPassID(cPassID) { initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); initializeLiveIntervalsPass(*PassRegistry::getPassRegistry()); initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry()); @@ -121,7 +122,7 @@ private: typedef std::set<unsigned> RegSet; - std::auto_ptr<PBQPBuilder> builder; + OwningPtr<PBQPBuilder> builder; char *customPassID; @@ -132,7 +133,7 @@ private: const MachineLoopInfo *loopInfo; MachineRegisterInfo *mri; - std::auto_ptr<Spiller> spiller; + OwningPtr<Spiller> spiller; LiveIntervals *lis; LiveStacks *lss; VirtRegMap *vrm; @@ -186,16 +187,15 @@ unsigned PBQPRAProblem::getPRegForOption(unsigned vreg, unsigned option) const { return allowedSet[option - 1]; } -std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf, - const LiveIntervals *lis, - const MachineLoopInfo *loopInfo, - const RegSet &vregs) { +PBQPRAProblem *PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis, + const MachineLoopInfo *loopInfo, + const RegSet &vregs) { LiveIntervals *LIS = const_cast<LiveIntervals*>(lis); MachineRegisterInfo *mri = &mf->getRegInfo(); const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo(); - std::auto_ptr<PBQPRAProblem> p(new PBQPRAProblem()); + OwningPtr<PBQPRAProblem> p(new PBQPRAProblem()); PBQP::Graph &g = p->getGraph(); RegSet pregs; @@ -282,7 +282,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf, } } - return p; + return p.take(); } void PBQPBuilder::addSpillCosts(PBQP::Vector &costVec, @@ -311,13 +311,12 @@ void PBQPBuilder::addInterferenceCosts( } } -std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build( - MachineFunction *mf, +PBQPRAProblem *PBQPBuilderWithCoalescing::build(MachineFunction *mf, const LiveIntervals *lis, const MachineLoopInfo *loopInfo, const RegSet &vregs) { - std::auto_ptr<PBQPRAProblem> p = PBQPBuilder::build(mf, lis, loopInfo, vregs); + OwningPtr<PBQPRAProblem> p(PBQPBuilder::build(mf, lis, loopInfo, vregs)); PBQP::Graph &g = p->getGraph(); const TargetMachine &tm = mf->getTarget(); @@ -391,7 +390,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build( } } - return p; + return p.take(); } void PBQPBuilderWithCoalescing::addPhysRegCoalesce(PBQP::Vector &costVec, @@ -584,8 +583,8 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) { while (!pbqpAllocComplete) { DEBUG(dbgs() << " PBQP Regalloc round " << round << ":\n"); - std::auto_ptr<PBQPRAProblem> problem = - builder->build(mf, lis, loopInfo, vregsToAlloc); + OwningPtr<PBQPRAProblem> problem( + builder->build(mf, lis, loopInfo, 
vregsToAlloc)); #ifndef NDEBUG if (pbqpDumpGraphs) { @@ -621,18 +620,18 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) { } FunctionPass* llvm::createPBQPRegisterAllocator( - std::auto_ptr<PBQPBuilder> builder, + OwningPtr<PBQPBuilder> &builder, char *customPassID) { return new RegAllocPBQP(builder, customPassID); } FunctionPass* llvm::createDefaultPBQPRegisterAllocator() { - if (pbqpCoalescing) { - return createPBQPRegisterAllocator( - std::auto_ptr<PBQPBuilder>(new PBQPBuilderWithCoalescing())); - } // else - return createPBQPRegisterAllocator( - std::auto_ptr<PBQPBuilder>(new PBQPBuilder())); + OwningPtr<PBQPBuilder> Builder; + if (pbqpCoalescing) + Builder.reset(new PBQPBuilderWithCoalescing()); + else + Builder.reset(new PBQPBuilder()); + return createPBQPRegisterAllocator(Builder); } #undef DEBUG_TYPE diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp index 6da901f..f82ccbe 100644 --- a/lib/CodeGen/RegisterScavenging.cpp +++ b/lib/CodeGen/RegisterScavenging.cpp @@ -45,9 +45,11 @@ bool RegScavenger::isAliasUsed(unsigned Reg) const { } void RegScavenger::initRegState() { - ScavengedReg = 0; - ScavengedRC = NULL; - ScavengeRestore = NULL; + for (SmallVector<ScavengedInfo, 2>::iterator I = Scavenged.begin(), + IE = Scavenged.end(); I != IE; ++I) { + I->Reg = 0; + I->Restore = NULL; + } // All registers started out unused. RegsAvailable.set(); @@ -108,27 +110,11 @@ void RegScavenger::addRegWithSubRegs(BitVector &BV, unsigned Reg) { BV.set(*SubRegs); } -void RegScavenger::forward() { - // Move ptr forward. - if (!Tracking) { - MBBI = MBB->begin(); - Tracking = true; - } else { - assert(MBBI != MBB->end() && "Already past the end of the basic block!"); - MBBI = llvm::next(MBBI); - } - assert(MBBI != MBB->end() && "Already at the end of the basic block!"); +void RegScavenger::determineKillsAndDefs() { + assert(Tracking && "Must be tracking to determine kills and defs"); MachineInstr *MI = MBBI; - - if (MI == ScavengeRestore) { - ScavengedReg = 0; - ScavengedRC = NULL; - ScavengeRestore = NULL; - } - - if (MI->isDebugValue()) - return; + assert(!MI->isDebugValue() && "Debug values have no kills or defs"); // Find out which registers are early clobbered, killed, defined, and marked // def-dead in this instruction. @@ -145,7 +131,7 @@ void RegScavenger::forward() { if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); - if (!Reg || isReserved(Reg)) + if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) || isReserved(Reg)) continue; if (MO.isUse()) { @@ -162,6 +148,53 @@ void RegScavenger::forward() { addRegWithSubRegs(DefRegs, Reg); } } +} + +void RegScavenger::unprocess() { + assert(Tracking && "Cannot unprocess because we're not tracking"); + + MachineInstr *MI = MBBI; + if (!MI->isDebugValue()) { + determineKillsAndDefs(); + + // Commit the changes. + setUsed(KillRegs); + setUnused(DefRegs); + } + + if (MBBI == MBB->begin()) { + MBBI = MachineBasicBlock::iterator(NULL); + Tracking = false; + } else + --MBBI; +} + +void RegScavenger::forward() { + // Move ptr forward. 
+ if (!Tracking) { + MBBI = MBB->begin(); + Tracking = true; + } else { + assert(MBBI != MBB->end() && "Already past the end of the basic block!"); + MBBI = llvm::next(MBBI); + } + assert(MBBI != MBB->end() && "Already at the end of the basic block!"); + + MachineInstr *MI = MBBI; + + for (SmallVector<ScavengedInfo, 2>::iterator I = Scavenged.begin(), + IE = Scavenged.end(); I != IE; ++I) { + if (I->Restore != MI) + continue; + + I->Reg = 0; + I->Restore = NULL; + } + + if (MI->isDebugValue()) + return; + + determineKillsAndDefs(); // Verify uses and defs. #ifndef NDEBUG @@ -170,7 +203,7 @@ void RegScavenger::forward() { if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); - if (!Reg || isReserved(Reg)) + if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) || isReserved(Reg)) continue; if (MO.isUse()) { if (MO.isUndef()) @@ -360,37 +393,47 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC, return SReg; } - assert(ScavengedReg == 0 && - "Scavenger slot is live, unable to scavenge another register!"); + // Find an available scavenging slot. + unsigned SI; + for (SI = 0; SI < Scavenged.size(); ++SI) + if (Scavenged[SI].Reg == 0) + break; + + if (SI == Scavenged.size()) { + // We need to scavenge a register but have no spill slot, the target + // must know how to do it (if not, we'll assert below). + Scavenged.push_back(ScavengedInfo()); + } // Avoid infinite regress - ScavengedReg = SReg; + Scavenged[SI].Reg = SReg; // If the target knows how to save/restore the register, let it do so; // otherwise, use the emergency stack spill slot. if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) { // Spill the scavenged register before I. - assert(ScavengingFrameIndex >= 0 && + assert(Scavenged[SI].FrameIndex >= 0 && "Cannot scavenge register without an emergency spill slot!"); - TII->storeRegToStackSlot(*MBB, I, SReg, true, ScavengingFrameIndex, RC,TRI); + TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex, + RC, TRI); MachineBasicBlock::iterator II = prior(I); unsigned FIOperandNum = getFrameIndexOperandNum(II); TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this); // Restore the scavenged register before its use (or first terminator). - TII->loadRegFromStackSlot(*MBB, UseMI, SReg, ScavengingFrameIndex, RC, TRI); + TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex, + RC, TRI); II = prior(UseMI); FIOperandNum = getFrameIndexOperandNum(II); TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this); } - ScavengeRestore = prior(UseMI); + Scavenged[SI].Restore = prior(UseMI); // Doing this here leads to infinite regress. - // ScavengedReg = SReg; - ScavengedRC = RC; + // Scavenged[SI].Reg = SReg; DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) << "\n"); diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 71e7a21..e4da6a4 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -262,6 +262,9 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { if (UseOp < 0) Dep = SDep(SU, SDep::Artificial); else { + // Set the hasPhysRegDefs only for physreg defs that have a use within + // the scheduling region. 
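
With this change the scavenger keeps a list of ScavengedInfo slots rather than a single scavenged register, and scavengeRegister reuses the first slot whose Reg is 0 before growing the list. A toy version of that slot search; SlotInfo and findOrAddSlot are made-up names for illustration.

// Toy slot search: reuse a free ScavengedInfo-style entry if one exists,
// otherwise grow the list. Standalone illustration, not the LLVM class.
#include <cstddef>
#include <cstdio>
#include <vector>

struct SlotInfo {
  unsigned Reg = 0;      // 0 means the slot is free
  int FrameIndex = -1;   // emergency spill slot, if any
};

std::size_t findOrAddSlot(std::vector<SlotInfo> &Scavenged) {
  std::size_t SI = 0;
  for (; SI < Scavenged.size(); ++SI)
    if (Scavenged[SI].Reg == 0)
      break;
  if (SI == Scavenged.size())
    Scavenged.push_back(SlotInfo()); // no free slot: make room for one more
  return SI;
}

int main() {
  std::vector<SlotInfo> Scavenged(1);       // one pre-allocated slot
  std::size_t A = findOrAddSlot(Scavenged);
  Scavenged[A].Reg = 11;                    // slot now in use
  std::size_t B = findOrAddSlot(Scavenged); // forces a second slot
  std::printf("slots used: %zu and %zu (total %zu)\n", A, B, Scavenged.size());
  Scavenged[A].Reg = 0;                     // restore point reached: slot freed
  std::printf("reused slot: %zu\n", findOrAddSlot(Scavenged));
}
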
+ SU->hasPhysRegDefs = true; Dep = SDep(SU, SDep::Data, *Alias); RegUse = UseSU->getInstr(); Dep.setMinLatency( @@ -318,6 +321,7 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { } if (!MO.isDef()) { + SU->hasPhysRegUses = true; // Either insert a new Reg2SUnits entry with an empty SUnits list, or // retrieve the existing SUnits list for this register's uses. // Push this SUnit on the use list. diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 61603e1..85f5df9 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -205,6 +205,7 @@ namespace { SDValue visitCTTZ_ZERO_UNDEF(SDNode *N); SDValue visitCTPOP(SDNode *N); SDValue visitSELECT(SDNode *N); + SDValue visitVSELECT(SDNode *N); SDValue visitSELECT_CC(SDNode *N); SDValue visitSETCC(SDNode *N); SDValue visitSIGN_EXTEND(SDNode *N); @@ -243,7 +244,6 @@ namespace { SDValue visitCONCAT_VECTORS(SDNode *N); SDValue visitEXTRACT_SUBVECTOR(SDNode *N); SDValue visitVECTOR_SHUFFLE(SDNode *N); - SDValue visitMEMBARRIER(SDNode *N); SDValue XformToShuffleWithZero(SDNode *N); SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS); @@ -1127,6 +1127,7 @@ SDValue DAGCombiner::visit(SDNode *N) { case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N); case ISD::CTPOP: return visitCTPOP(N); case ISD::SELECT: return visitSELECT(N); + case ISD::VSELECT: return visitVSELECT(N); case ISD::SELECT_CC: return visitSELECT_CC(N); case ISD::SETCC: return visitSETCC(N); case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N); @@ -1165,7 +1166,6 @@ SDValue DAGCombiner::visit(SDNode *N) { case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N); case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N); case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N); - case ISD::MEMBARRIER: return visitMEMBARRIER(N); } return SDValue(); } @@ -4164,6 +4164,46 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) { return SDValue(); } +SDValue DAGCombiner::visitVSELECT(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); + DebugLoc DL = N->getDebugLoc(); + + // Canonicalize integer abs. 
+ // vselect (setg[te] X, 0), X, -X -> + // vselect (setgt X, -1), X, -X -> + // vselect (setl[te] X, 0), -X, X -> + // Y = sra (X, size(X)-1); xor (add (X, Y), Y) + if (N0.getOpcode() == ISD::SETCC) { + SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); + ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); + bool isAbs = false; + bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); + + if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) || + (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) && + N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1)) + isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode()); + else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) && + N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1)) + isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode()); + + if (isAbs) { + EVT VT = LHS.getValueType(); + SDValue Shift = DAG.getNode( + ISD::SRA, DL, VT, LHS, + DAG.getConstant(VT.getScalarType().getSizeInBits() - 1, VT)); + SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift); + AddToWorkList(Shift.getNode()); + AddToWorkList(Add.getNode()); + return DAG.getNode(ISD::XOR, DL, VT, Add, Shift); + } + } + + return SDValue(); +} + SDValue DAGCombiner::visitSELECT_CC(SDNode *N) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); @@ -4453,7 +4493,9 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) { if (N0.getOpcode() == ISD::SETCC) { // sext(setcc) -> sext_in_reg(vsetcc) for vectors. // Only do this before legalize for now. - if (VT.isVector() && !LegalOperations) { + if (VT.isVector() && !LegalOperations && + TLI.getBooleanContents(true) == + TargetLowering::ZeroOrNegativeOneBooleanContent) { EVT N0VT = N0.getOperand(0).getValueType(); // On some architectures (such as SSE/NEON/etc) the SETCC result type is // of the same size as the compared operands. Only optimize sext(setcc()) @@ -5835,14 +5877,25 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(1), N1)); + // No FP constant should be created after legalization as Instruction + // Selection pass has hard time in dealing with FP constant. + // + // We don't need test this condition for transformation like following, as + // the DAG being transformed implies it is legal to take FP constant as + // operand. 
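
The new visitVSELECT combine above canonicalizes the compare-and-negate form of integer absolute value into Y = X >> (bits-1); (X + Y) ^ Y. A quick scalar check of that identity (the combine emits the same SRA/ADD/XOR sequence element-wise on vectors):

// Branchless abs identity used by the new visitVSELECT combine:
//   y = x >> 31 (arithmetic);  abs(x) == (x + y) ^ y
#include <cassert>
#include <cstdint>
#include <cstdio>

std::int32_t branchlessAbs(std::int32_t X) {
  std::int32_t Y = X >> 31;       // all ones if negative, all zeros otherwise
  return (X + Y) ^ Y;
}

int main() {
  for (std::int32_t X : {-5, -1, 0, 1, 42, -12345}) {
    std::int32_t Expected = X < 0 ? -X : X;
    assert(branchlessAbs(X) == Expected);
  }
  std::printf("abs(-5) = %d\n", branchlessAbs(-5));
}
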
+ // + // (fadd (fmul c, x), x) -> (fmul c+1, x) + // + bool AllowNewFpConst = (Level < AfterLegalizeDAG); + // If allow, fold (fadd (fneg x), x) -> 0.0 - if (DAG.getTarget().Options.UnsafeFPMath && + if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath && N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) { return DAG.getConstantFP(0.0, VT); } // If allow, fold (fadd x, (fneg x)) -> 0.0 - if (DAG.getTarget().Options.UnsafeFPMath && + if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath && N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) { return DAG.getConstantFP(0.0, VT); } @@ -5944,7 +5997,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { } } - if (N0.getOpcode() == ISD::FADD) { + if (N0.getOpcode() == ISD::FADD && AllowNewFpConst) { ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N0.getOperand(0)); // (fadd (fadd x, x), x) -> (fmul 3.0, x) if (!CFP && N0.getOperand(0) == N0.getOperand(1) && @@ -5954,7 +6007,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { } } - if (N1.getOpcode() == ISD::FADD) { + if (N1.getOpcode() == ISD::FADD && AllowNewFpConst) { ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0)); // (fadd x, (fadd x, x)) -> (fmul 3.0, x) if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) && @@ -5965,7 +6018,8 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { } // (fadd (fadd x, x), (fadd x, x)) -> (fmul 4.0, x) - if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD && + if (AllowNewFpConst && + N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD && N0.getOperand(0) == N0.getOperand(1) && N1.getOperand(0) == N1.getOperand(1) && N0.getOperand(0) == N1.getOperand(0)) { @@ -6811,9 +6865,9 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) { MVT::Other, Chain, Tmp, N2); } - // visitXOR has changed XOR's operands. - Op0 = TheXor->getOperand(0); - Op1 = TheXor->getOperand(1); + // visitXOR has changed XOR's operands or replaced the XOR completely, + // bail out. + return SDValue(N, 0); } } @@ -7098,25 +7152,40 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() == BasePtr.getNode() && "Expected BasePtr operand"); - APInt OV = - cast<ConstantSDNode>(Offset)->getAPIntValue(); - if (AM == ISD::PRE_DEC) - OV = -OV; + // We need to replace ptr0 in the following expression: + // x0 * offset0 + y0 * ptr0 = t0 + // knowing that + // x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store) + // + // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the + // indexed load/store and the expresion that needs to be re-written. + // + // Therefore, we have: + // t0 = (x0 * offset0 - x1 * y0 * y1 *offset1) + (y0 * y1) * t1 ConstantSDNode *CN = cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx)); - APInt CNV = CN->getAPIntValue(); - if (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) - CNV += OV; - else - CNV -= OV; + int X0, X1, Y0, Y1; + APInt Offset0 = CN->getAPIntValue(); + APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue(); - SDValue NewOp1 = Result.getValue(isLoad ? 1 : 0); - SDValue NewOp2 = DAG.getConstant(CNV, CN->getValueType(0)); - if (OffsetIdx == 0) - std::swap(NewOp1, NewOp2); + X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1; + Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1; + X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1; + Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1; - SDValue NewUse = DAG.getNode(OtherUses[i]->getOpcode(), + unsigned Opcode = (Y0 * Y1 < 0) ? 
ISD::SUB : ISD::ADD; + + APInt CNV = Offset0; + if (X0 < 0) CNV = -CNV; + if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1; + else CNV = CNV - Offset1; + + // We can now generate the new expression. + SDValue NewOp1 = DAG.getConstant(CNV, CN->getValueType(0)); + SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0); + + SDValue NewUse = DAG.getNode(Opcode, OtherUses[i]->getDebugLoc(), OtherUses[i]->getValueType(0), NewOp1, NewOp2); DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse); @@ -7699,16 +7768,82 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { return SDValue(); } -/// Returns the base pointer and an integer offset from that object. -static std::pair<SDValue, int64_t> GetPointerBaseAndOffset(SDValue Ptr) { - if (Ptr->getOpcode() == ISD::ADD && isa<ConstantSDNode>(Ptr->getOperand(1))) { - int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue(); - SDValue Base = Ptr->getOperand(0); - return std::make_pair(Base, Offset); +/// Helper struct to parse and store a memory address as base + index + offset. +/// We ignore sign extensions when it is safe to do so. +/// The following two expressions are not equivalent. To differentiate we need +/// to store whether there was a sign extension involved in the index +/// computation. +/// (load (i64 add (i64 copyfromreg %c) +/// (i64 signextend (add (i8 load %index) +/// (i8 1)))) +/// vs +/// +/// (load (i64 add (i64 copyfromreg %c) +/// (i64 signextend (i32 add (i32 signextend (i8 load %index)) +/// (i32 1))))) +struct BaseIndexOffset { + SDValue Base; + SDValue Index; + int64_t Offset; + bool IsIndexSignExt; + + BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {} + + BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset, + bool IsIndexSignExt) : + Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {} + + bool equalBaseIndex(const BaseIndexOffset &Other) { + return Other.Base == Base && Other.Index == Index && + Other.IsIndexSignExt == IsIndexSignExt; } - return std::make_pair(Ptr, 0); -} + /// Parses tree in Ptr for base, index, offset addresses. + static BaseIndexOffset match(SDValue Ptr) { + bool IsIndexSignExt = false; + + // Just Base or possibly anything else. + if (Ptr->getOpcode() != ISD::ADD) + return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt); + + // Base + offset. + if (isa<ConstantSDNode>(Ptr->getOperand(1))) { + int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue(); + return BaseIndexOffset(Ptr->getOperand(0), SDValue(), Offset, + IsIndexSignExt); + } + + // Look at Base + Index + Offset cases. + SDValue Base = Ptr->getOperand(0); + SDValue IndexOffset = Ptr->getOperand(1); + + // Skip signextends. + if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) { + IndexOffset = IndexOffset->getOperand(0); + IsIndexSignExt = true; + } + + // Either the case of Base + Index (no offset) or something else. + if (IndexOffset->getOpcode() != ISD::ADD) + return BaseIndexOffset(Base, IndexOffset, 0, IsIndexSignExt); + + // Now we have the case of Base + Index + offset. + SDValue Index = IndexOffset->getOperand(0); + SDValue Offset = IndexOffset->getOperand(1); + + if (!isa<ConstantSDNode>(Offset)) + return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt); + + // Ignore signextends. 
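
The rewrite in CombineToPreIndexedLoadStore above eliminates ptr0 from other address computations using the sign factors x0, x1, y0, y1 in {-1, 1}. A small numeric check of the derived formula t0 = (x0*offset0 - x1*y0*y1*offset1) + (y0*y1)*t1, with arbitrary test constants:

// Check the offset-folding identity:
// given  t0 = x0*off0 + y0*p   and  t1 = x1*off1 + y1*p   with x,y in {-1,1},
// then   t0 == (x0*off0 - x1*y0*y1*off1) + (y0*y1)*t1   for any p.
#include <cassert>
#include <cstdio>

int main() {
  const int Signs[2] = {-1, 1};
  for (int x0 : Signs) for (int y0 : Signs)
  for (int x1 : Signs) for (int y1 : Signs) {
    const int off0 = 8, off1 = 12, p = 1000;   // arbitrary test values
    int t0 = x0 * off0 + y0 * p;
    int t1 = x1 * off1 + y1 * p;
    int rewritten = (x0 * off0 - x1 * y0 * y1 * off1) + (y0 * y1) * t1;
    assert(rewritten == t0);
  }
  std::printf("offset rewrite identity holds for all sign combinations\n");
}
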
+ if (Index->getOpcode() == ISD::SIGN_EXTEND) { + Index = Index->getOperand(0); + IsIndexSignExt = true; + } else IsIndexSignExt = false; + + int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue(); + return BaseIndexOffset(Base, Index, Off, IsIndexSignExt); + } +}; /// Holds a pointer to an LSBaseSDNode as well as information on where it /// is located in a sequence of memory operations connected by a chain. @@ -7755,16 +7890,16 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE) return false; - // This holds the base pointer and the offset in bytes from the base pointer. - std::pair<SDValue, int64_t> BasePtr = - GetPointerBaseAndOffset(St->getBasePtr()); + // This holds the base pointer, index, and the offset in bytes from the base + // pointer. + BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr()); // We must have a base and an offset. - if (!BasePtr.first.getNode()) + if (!BasePtr.Base.getNode()) return false; // Do not handle stores to undef base pointers. - if (BasePtr.first.getOpcode() == ISD::UNDEF) + if (BasePtr.Base.getOpcode() == ISD::UNDEF) return false; // Save the LoadSDNodes that we find in the chain. @@ -7786,11 +7921,10 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { break; // Find the base pointer and offset for this memory node. - std::pair<SDValue, int64_t> Ptr = - GetPointerBaseAndOffset(Index->getBasePtr()); + BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr()); // Check that the base pointer is the same as the original one. - if (Ptr.first.getNode() != BasePtr.first.getNode()) + if (!Ptr.equalBaseIndex(BasePtr)) break; // Check that the alignment is the same. @@ -7816,7 +7950,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { break; // We found a potential memory operand to merge. - StoreNodes.push_back(MemOpLink(Index, Ptr.second, Seq++)); + StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++)); // Find the next memory operand in the chain. If the next operand in the // chain is a store then move up and continue the scan with the next @@ -7903,6 +8037,14 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW); if (TLI.isTypeLegal(StoreTy)) LastLegalType = i+1; + // Or check whether a truncstore is legal. + else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) == + TargetLowering::TypePromoteInteger) { + EVT LegalizedStoredValueTy = + TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType()); + if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy)) + LastLegalType = i+1; + } // Find a legal type for the vector store. EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1); @@ -8013,7 +8155,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { // Find acceptable loads. Loads need to have the same chain (token factor), // must not be zext, volatile, indexed, and they must be consecutive. - SDValue LdBasePtr; + BaseIndexOffset LdBasePtr; for (unsigned i=0; i<LastConsecutiveStore+1; ++i) { StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue()); @@ -8039,21 +8181,19 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { if (Ld->getMemoryVT() != MemVT) break; - std::pair<SDValue, int64_t> LdPtr = - GetPointerBaseAndOffset(Ld->getBasePtr()); - + BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr()); // If this is not the first ptr that we check. 
- if (LdBasePtr.getNode()) { + if (LdBasePtr.Base.getNode()) { // The base ptr must be the same. - if (LdPtr.first != LdBasePtr) + if (!LdPtr.equalBaseIndex(LdBasePtr)) break; } else { // Check that all other base pointers are the same as this one. - LdBasePtr = LdPtr.first; + LdBasePtr = LdPtr; } // We found a potential memory operand to merge. - LoadNodes.push_back(MemOpLink(Ld, LdPtr.second, 0)); + LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0)); } if (LoadNodes.size() < 2) @@ -8088,6 +8228,17 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW); if (TLI.isTypeLegal(StoreTy)) LastLegalIntegerType = i + 1; + // Or check whether a truncstore and extload is legal. + else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) == + TargetLowering::TypePromoteInteger) { + EVT LegalizedStoredValueTy = + TLI.getTypeToTransformTo(*DAG.getContext(), StoreTy); + if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) && + TLI.isLoadExtLegal(ISD::ZEXTLOAD, StoreTy) && + TLI.isLoadExtLegal(ISD::SEXTLOAD, StoreTy) && + TLI.isLoadExtLegal(ISD::EXTLOAD, StoreTy)) + LastLegalIntegerType = i+1; + } } // Only use vector types if the vector type is larger than the integer type. @@ -8971,6 +9122,45 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { if (ISD::allOperandsUndef(N)) return DAG.getUNDEF(N->getValueType(0)); + // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR + // nodes often generate nop CONCAT_VECTOR nodes. + // Scan the CONCAT_VECTOR operands and look for a CONCAT operations that + // place the incoming vectors at the exact same location. + SDValue SingleSource = SDValue(); + unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements(); + + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + SDValue Op = N->getOperand(i); + + if (Op.getOpcode() == ISD::UNDEF) + continue; + + // Check if this is the identity extract: + if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR) + return SDValue(); + + // Find the single incoming vector for the extract_subvector. + if (SingleSource.getNode()) { + if (Op.getOperand(0) != SingleSource) + return SDValue(); + } else { + SingleSource = Op.getOperand(0); + } + + unsigned IdentityIndex = i * PartNumElem; + ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1)); + // The extract index must be constant. + if (!CS) + return SDValue(); + + // Check that we are reading from the identity index. + if (CS->getZExtValue() != IdentityIndex) + return SDValue(); + } + + if (SingleSource.getNode()) + return SingleSource; + return SDValue(); } @@ -8978,12 +9168,32 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) { EVT NVT = N->getValueType(0); SDValue V = N->getOperand(0); + if (V->getOpcode() == ISD::CONCAT_VECTORS) { + // Combine: + // (extract_subvec (concat V1, V2, ...), i) + // Into: + // Vi if possible + // Only operand 0 is checked as 'concat' assumes all inputs of the same type. 
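
The CONCAT_VECTORS combine above folds a concat rebuilt from in-order extract_subvector pieces of one source back to that source; each piece must start at i * PartNumElem. A plain index-arithmetic version of that check, using a toy Piece record instead of SDNodes (it also ignores the UNDEF-operand case the real code allows):

// Identity-concat detection in miniature: a concat of extracts is a no-op
// iff every piece i reads from the single source at offset i * PieceElems.
#include <cstdio>
#include <vector>

struct Piece { int SourceId; unsigned StartElem; }; // toy extract_subvector

bool isIdentityConcat(const std::vector<Piece> &Pieces, unsigned PieceElems) {
  if (Pieces.empty())
    return false;
  int Source = Pieces.front().SourceId;
  for (unsigned I = 0; I < Pieces.size(); ++I)
    if (Pieces[I].SourceId != Source || Pieces[I].StartElem != I * PieceElems)
      return false;
  return true;
}

int main() {
  // Two 4-element extracts from vector #0 at offsets 0 and 4: a no-op concat.
  std::vector<Piece> Nop = {{0, 0}, {0, 4}};
  // Same pieces swapped: still one source, but not the identity order.
  std::vector<Piece> Swapped = {{0, 4}, {0, 0}};
  std::printf("nop: %d, swapped: %d\n", isIdentityConcat(Nop, 4),
              isIdentityConcat(Swapped, 4));
}
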
+ if (V->getOperand(0).getValueType() != NVT) + return SDValue(); + unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); + unsigned NumElems = NVT.getVectorNumElements(); + assert((Idx % NumElems) == 0 && + "IDX in concat is not a multiple of the result vector length."); + return V->getOperand(Idx / NumElems); + } + + // Skip bitcasting + if (V->getOpcode() == ISD::BITCAST) + V = V.getOperand(0); + if (V->getOpcode() == ISD::INSERT_SUBVECTOR) { + DebugLoc dl = N->getDebugLoc(); // Handle only simple case where vector being inserted and vector // being extracted are of same type, and are half size of larger vectors. EVT BigVT = V->getOperand(0).getValueType(); EVT SmallVT = V->getOperand(1).getValueType(); - if (NVT != SmallVT || NVT.getSizeInBits()*2 != BigVT.getSizeInBits()) + if (!NVT.bitsEq(SmallVT) || NVT.getSizeInBits()*2 != BigVT.getSizeInBits()) return SDValue(); // Only handle cases where both indexes are constants with the same type. @@ -8996,31 +9206,57 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) { // Combine: // (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx) // Into: - // indices are equal => V1 + // indices are equal or bit offsets are equal => V1 // otherwise => (extract_subvec V1, ExtIdx) - if (InsIdx->getZExtValue() == ExtIdx->getZExtValue()) - return V->getOperand(1); - return DAG.getNode(ISD::EXTRACT_SUBVECTOR, N->getDebugLoc(), NVT, - V->getOperand(0), N->getOperand(1)); + if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() == + ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits()) + return DAG.getNode(ISD::BITCAST, dl, NVT, V->getOperand(1)); + return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT, + DAG.getNode(ISD::BITCAST, dl, + N->getOperand(0).getValueType(), + V->getOperand(0)), N->getOperand(1)); } } - if (V->getOpcode() == ISD::CONCAT_VECTORS) { - // Combine: - // (extract_subvec (concat V1, V2, ...), i) - // Into: - // Vi if possible - // Only operand 0 is checked as 'concat' assumes all inputs of the same type. - if (V->getOperand(0).getValueType() != NVT) + return SDValue(); +} + +// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat. +static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) { + EVT VT = N->getValueType(0); + unsigned NumElts = VT.getVectorNumElements(); + + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); + + SmallVector<SDValue, 4> Ops; + EVT ConcatVT = N0.getOperand(0).getValueType(); + unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements(); + unsigned NumConcats = NumElts / NumElemsPerConcat; + + // Look at every vector that's inserted. We're looking for exact + // subvector-sized copies from a concatenated vector + for (unsigned I = 0; I != NumConcats; ++I) { + // Make sure we're dealing with a copy. 
+ unsigned Begin = I * NumElemsPerConcat; + if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0) return SDValue(); - unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); - unsigned NumElems = NVT.getVectorNumElements(); - assert((Idx % NumElems) == 0 && - "IDX in concat is not a multiple of the result vector length."); - return V->getOperand(Idx / NumElems); + + for (unsigned J = 1; J != NumElemsPerConcat; ++J) { + if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J)) + return SDValue(); + } + + unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat; + if (FirstElt < N0.getNumOperands()) + Ops.push_back(N0.getOperand(FirstElt)); + else + Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands())); } - return SDValue(); + return DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, Ops.data(), + Ops.size()); } SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { @@ -9124,6 +9360,17 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { } } + if (N0.getOpcode() == ISD::CONCAT_VECTORS && + Level < AfterLegalizeVectorOps && + (N1.getOpcode() == ISD::UNDEF || + (N1.getOpcode() == ISD::CONCAT_VECTORS && + N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) { + SDValue V = partitionShuffleOfConcats(N, DAG); + + if (V.getNode()) + return V; + } + // If this shuffle node is simply a swizzle of another shuffle node, // and it reverses the swizzle of the previous shuffle then we can // optimize shuffle(shuffle(x, undef), undef) -> x. @@ -9160,59 +9407,6 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { return SDValue(); } -SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) { - if (!TLI.getShouldFoldAtomicFences()) - return SDValue(); - - SDValue atomic = N->getOperand(0); - switch (atomic.getOpcode()) { - case ISD::ATOMIC_CMP_SWAP: - case ISD::ATOMIC_SWAP: - case ISD::ATOMIC_LOAD_ADD: - case ISD::ATOMIC_LOAD_SUB: - case ISD::ATOMIC_LOAD_AND: - case ISD::ATOMIC_LOAD_OR: - case ISD::ATOMIC_LOAD_XOR: - case ISD::ATOMIC_LOAD_NAND: - case ISD::ATOMIC_LOAD_MIN: - case ISD::ATOMIC_LOAD_MAX: - case ISD::ATOMIC_LOAD_UMIN: - case ISD::ATOMIC_LOAD_UMAX: - break; - default: - return SDValue(); - } - - SDValue fence = atomic.getOperand(0); - if (fence.getOpcode() != ISD::MEMBARRIER) - return SDValue(); - - switch (atomic.getOpcode()) { - case ISD::ATOMIC_CMP_SWAP: - return SDValue(DAG.UpdateNodeOperands(atomic.getNode(), - fence.getOperand(0), - atomic.getOperand(1), atomic.getOperand(2), - atomic.getOperand(3)), atomic.getResNo()); - case ISD::ATOMIC_SWAP: - case ISD::ATOMIC_LOAD_ADD: - case ISD::ATOMIC_LOAD_SUB: - case ISD::ATOMIC_LOAD_AND: - case ISD::ATOMIC_LOAD_OR: - case ISD::ATOMIC_LOAD_XOR: - case ISD::ATOMIC_LOAD_NAND: - case ISD::ATOMIC_LOAD_MIN: - case ISD::ATOMIC_LOAD_MAX: - case ISD::ATOMIC_LOAD_UMIN: - case ISD::ATOMIC_LOAD_UMAX: - return SDValue(DAG.UpdateNodeOperands(atomic.getNode(), - fence.getOperand(0), - atomic.getOperand(1), atomic.getOperand(2)), - atomic.getResNo()); - default: - return SDValue(); - } -} - /// XformToShuffleWithZero - Returns a vector_shuffle if it able to transform /// an AND to a vector_shuffle with the destination vector and a zero vector. /// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. 
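A standalone sketch of the mask test in partitionShuffleOfConcats above: a shuffle of two concatenated vectors can itself become a single concat when every chunk of NumElemsPerConcat mask entries starts at a subvector boundary and is consecutive. Plain ints stand in for mask elements and operands.

#include <cstdio>
#include <vector>

// Returns the subvector indices to concatenate, or an empty vector on failure.
static std::vector<unsigned>
partitionMask(const std::vector<int> &Mask, unsigned NumElemsPerConcat) {
  std::vector<unsigned> Parts;
  for (unsigned Begin = 0; Begin < Mask.size(); Begin += NumElemsPerConcat) {
    if (Mask[Begin] % NumElemsPerConcat != 0)
      return {};                                 // not a whole-subvector copy
    for (unsigned J = 1; J != NumElemsPerConcat; ++J)
      if (Mask[Begin + J - 1] + 1 != Mask[Begin + J])
        return {};                               // elements are not consecutive
    Parts.push_back(Mask[Begin] / NumElemsPerConcat);
  }
  return Parts;
}

int main() {
  // shuffle (concat A,B), (concat C,D) with mask <4,5,6,7,12,13,14,15>
  // selects whole subvectors B and D, so it folds to concat(B, D).
  std::vector<int> Mask = {4, 5, 6, 7, 12, 13, 14, 15};
  for (unsigned P : partitionMask(Mask, 4))
    std::printf("part %u\n", P);                 // prints 1 then 3
}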
==> diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index 10e2dc6..288499a 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -1183,6 +1183,8 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits()); MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm)); + assert (MaterialReg != 0 && "Unable to materialize imm."); + if (MaterialReg == 0) return 0; } return FastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, @@ -1503,3 +1505,61 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { return true; } + +bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) { + assert(LI->hasOneUse() && + "tryToFoldLoad expected a LoadInst with a single use"); + // We know that the load has a single use, but don't know what it is. If it + // isn't one of the folded instructions, then we can't succeed here. Handle + // this by scanning the single-use users of the load until we get to FoldInst. + unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs. + + const Instruction *TheUser = LI->use_back(); + while (TheUser != FoldInst && // Scan up until we find FoldInst. + // Stay in the right block. + TheUser->getParent() == FoldInst->getParent() && + --MaxUsers) { // Don't scan too far. + // If there are multiple or no uses of this instruction, then bail out. + if (!TheUser->hasOneUse()) + return false; + + TheUser = TheUser->use_back(); + } + + // If we didn't find the fold instruction, then we failed to collapse the + // sequence. + if (TheUser != FoldInst) + return false; + + // Don't try to fold volatile loads. Target has to deal with alignment + // constraints. + if (LI->isVolatile()) + return false; + + // Figure out which vreg this is going into. If there is no assigned vreg yet + // then there actually was no reference to it. Perhaps the load is referenced + // by a dead instruction. + unsigned LoadReg = getRegForValue(LI); + if (LoadReg == 0) + return false; + + // We can't fold if this vreg has no uses or more than one use. Multiple uses + // may mean that the instruction got lowered to multiple MIs, or the use of + // the loaded value ended up being multiple operands of the result. + if (!MRI.hasOneUse(LoadReg)) + return false; + + MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg); + MachineInstr *User = &*RI; + + // Set the insertion point properly. Folding the load can cause generation of + // other random instructions (like sign extends) for addressing modes; make + // sure they get inserted in a logical place before the new instruction. + FuncInfo.InsertPt = User; + FuncInfo.MBB = User->getParent(); + + // Ask the target to try folding the load. + return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI); +} + + diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 51cc254..2a1d8c2 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -2759,8 +2759,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) { Results.push_back(DAG.getConstant(0, MVT::i32)); Results.push_back(Node->getOperand(0)); break; - case ISD::ATOMIC_FENCE: - case ISD::MEMBARRIER: { + case ISD::ATOMIC_FENCE: { // If the target didn't lower this, lower it to '__sync_synchronize()' call // FIXME: handle "fence singlethread" more efficiently. 
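A standalone sketch of the bounded single-use walk performed by FastISel::tryToFoldLoad above: starting from the load's only user, keep following single-use instructions in the same block, giving up after a small budget, and succeed only if the walk reaches the instruction we want to fold into. The Inst struct is a stand-in for llvm::Instruction.

#include <cstdio>

struct Inst {
  const Inst *OnlyUser;  // null if the instruction has zero or multiple uses
  int Block;             // parent block id
};

static bool reachesFoldInst(const Inst *LoadUser, const Inst *FoldInst) {
  unsigned MaxUsers = 6;                 // don't scan huge single-use chains
  const Inst *TheUser = LoadUser;
  while (TheUser != FoldInst &&          // scan until we find FoldInst
         TheUser->Block == FoldInst->Block &&
         --MaxUsers) {                   // don't scan too far
    if (!TheUser->OnlyUser)              // zero or many uses: bail out
      return false;
    TheUser = TheUser->OnlyUser;
  }
  return TheUser == FoldInst;
}

int main() {
  // A: x = load; B: y = icmp x, 42; C: br y  -- folding the load into C.
  Inst C = {nullptr, 0};
  Inst B = {&C, 0};
  std::printf("%s\n", reachesFoldInst(&B, &C) ? "foldable" : "not foldable");
}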
TargetLowering::ArgListTy Args; diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index 6a05cf8..de217d8 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -824,6 +824,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) { case ISD::LOAD: ExpandFloatRes_LOAD(N, Lo, Hi); break; case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: ExpandFloatRes_XINT_TO_FP(N, Lo, Hi); break; + case ISD::FREM: ExpandFloatRes_FREM(N, Lo, Hi); break; } // If Lo/Hi is null, the sub-method took care of registering results etc. @@ -1051,6 +1052,16 @@ void DAGTypeLegalizer::ExpandFloatRes_FPOWI(SDNode *N, GetPairElements(Call, Lo, Hi); } +void DAGTypeLegalizer::ExpandFloatRes_FREM(SDNode *N, + SDValue &Lo, SDValue &Hi) { + SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), + RTLIB::REM_F32, RTLIB::REM_F64, + RTLIB::REM_F80, RTLIB::REM_F128, + RTLIB::REM_PPCF128), + N, false); + GetPairElements(Call, Lo, Hi); +} + void DAGTypeLegalizer::ExpandFloatRes_FRINT(SDNode *N, SDValue &Lo, SDValue &Hi) { SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index beeb6b3..cd2f060 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -515,7 +515,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) { // Only use the result of getSetCCResultType if it is legal, // otherwise just use the promoted result type (NVT). if (!TLI.isTypeLegal(SVT)) - SVT = NVT; + SVT = NVT; DebugLoc dl = N->getDebugLoc(); assert(SVT.isVector() == N->getOperand(0).getValueType().isVector() && @@ -550,7 +550,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N) { SDValue LHS = GetPromotedInteger(N->getOperand(0)); SDValue RHS = GetPromotedInteger(N->getOperand(1)); return DAG.getNode(N->getOpcode(), N->getDebugLoc(), - LHS.getValueType(), LHS, RHS); + LHS.getValueType(), LHS, RHS); } SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) { @@ -777,7 +777,6 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) { Res = PromoteIntOp_CONVERT_RNDSAT(N); break; case ISD::INSERT_VECTOR_ELT: Res = PromoteIntOp_INSERT_VECTOR_ELT(N, OpNo);break; - case ISD::MEMBARRIER: Res = PromoteIntOp_MEMBARRIER(N); break; case ISD::SCALAR_TO_VECTOR: Res = PromoteIntOp_SCALAR_TO_VECTOR(N); break; case ISD::VSELECT: @@ -961,17 +960,6 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, N->getOperand(1), Idx), 0); } -SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) { - SDValue NewOps[6]; - DebugLoc dl = N->getDebugLoc(); - NewOps[0] = N->getOperand(0); - for (unsigned i = 1; i < array_lengthof(NewOps); ++i) { - SDValue Flag = GetPromotedInteger(N->getOperand(i)); - NewOps[i] = DAG.getZeroExtendInReg(Flag, dl, MVT::i1); - } - return SDValue(DAG.UpdateNodeOperands(N, NewOps, array_lengthof(NewOps)), 0); -} - SDValue DAGTypeLegalizer::PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N) { // Integer SCALAR_TO_VECTOR operands are implicitly truncated, so just promote // the operand in place. 
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index e26d165..b6436bf 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -735,6 +735,9 @@ void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) { SDValue &OpEntry = PromotedIntegers[Op]; assert(OpEntry.getNode() == 0 && "Node is already promoted!"); OpEntry = Result; + + // Propagate node ordering + DAG.AssignOrdering(Result.getNode(), DAG.GetOrdering(Op.getNode())); } void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) { @@ -746,6 +749,9 @@ void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) { SDValue &OpEntry = SoftenedFloats[Op]; assert(OpEntry.getNode() == 0 && "Node is already converted to integer!"); OpEntry = Result; + + // Propagate node ordering + DAG.AssignOrdering(Result.getNode(), DAG.GetOrdering(Op.getNode())); } void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) { @@ -760,6 +766,9 @@ void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) { SDValue &OpEntry = ScalarizedVectors[Op]; assert(OpEntry.getNode() == 0 && "Node is already scalarized!"); OpEntry = Result; + + // Propagate node ordering + DAG.AssignOrdering(Result.getNode(), DAG.GetOrdering(Op.getNode())); } void DAGTypeLegalizer::GetExpandedInteger(SDValue Op, SDValue &Lo, @@ -787,6 +796,10 @@ void DAGTypeLegalizer::SetExpandedInteger(SDValue Op, SDValue Lo, assert(Entry.first.getNode() == 0 && "Node already expanded"); Entry.first = Lo; Entry.second = Hi; + + // Propagate ordering + DAG.AssignOrdering(Lo.getNode(), DAG.GetOrdering(Op.getNode())); + DAG.AssignOrdering(Hi.getNode(), DAG.GetOrdering(Op.getNode())); } void DAGTypeLegalizer::GetExpandedFloat(SDValue Op, SDValue &Lo, @@ -814,6 +827,10 @@ void DAGTypeLegalizer::SetExpandedFloat(SDValue Op, SDValue Lo, assert(Entry.first.getNode() == 0 && "Node already expanded"); Entry.first = Lo; Entry.second = Hi; + + // Propagate ordering + DAG.AssignOrdering(Lo.getNode(), DAG.GetOrdering(Op.getNode())); + DAG.AssignOrdering(Hi.getNode(), DAG.GetOrdering(Op.getNode())); } void DAGTypeLegalizer::GetSplitVector(SDValue Op, SDValue &Lo, @@ -843,6 +860,10 @@ void DAGTypeLegalizer::SetSplitVector(SDValue Op, SDValue Lo, assert(Entry.first.getNode() == 0 && "Node already split"); Entry.first = Lo; Entry.second = Hi; + + // Propagate ordering + DAG.AssignOrdering(Lo.getNode(), DAG.GetOrdering(Op.getNode())); + DAG.AssignOrdering(Hi.getNode(), DAG.GetOrdering(Op.getNode())); } void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) { @@ -854,6 +875,9 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) { SDValue &OpEntry = WidenedVectors[Op]; assert(OpEntry.getNode() == 0 && "Node already widened!"); OpEntry = Result; + + // Propagate node ordering + DAG.AssignOrdering(Result.getNode(), DAG.GetOrdering(Op.getNode())); } @@ -919,8 +943,11 @@ bool DAGTypeLegalizer::CustomLowerNode(SDNode *N, EVT VT, bool LegalizeResult) { // Make everything that once used N's values now use those in Results instead. 
assert(Results.size() == N->getNumValues() && "Custom lowering returned the wrong number of results!"); - for (unsigned i = 0, e = Results.size(); i != e; ++i) + for (unsigned i = 0, e = Results.size(); i != e; ++i) { ReplaceValueWith(SDValue(N, i), Results[i]); + // Propagate node ordering + DAG.AssignOrdering(Results[i].getNode(), DAG.GetOrdering(N)); + } return true; } diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h index 27b3cf2..1c4274a 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -270,7 +270,6 @@ private: SDValue PromoteIntOp_EXTRACT_ELEMENT(SDNode *N); SDValue PromoteIntOp_EXTRACT_VECTOR_ELT(SDNode *N); SDValue PromoteIntOp_CONCAT_VECTORS(SDNode *N); - SDValue PromoteIntOp_MEMBARRIER(SDNode *N); SDValue PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N); SDValue PromoteIntOp_SELECT(SDNode *N, unsigned OpNo); SDValue PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo); @@ -465,6 +464,7 @@ private: void ExpandFloatRes_FP_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FPOW (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FPOWI (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FREM (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FRINT (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FSIN (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FSQRT (SDNode *N, SDValue &Lo, SDValue &Hi); @@ -581,6 +581,7 @@ private: SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N); SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo); SDValue SplitVecOp_CONCAT_VECTORS(SDNode *N); + SDValue SplitVecOp_TRUNCATE(SDNode *N); SDValue SplitVecOp_VSETCC(SDNode *N); SDValue SplitVecOp_FP_ROUND(SDNode *N); diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 5ec8535..04c6bfd 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -1046,6 +1046,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) { case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break; case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break; case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break; + case ISD::TRUNCATE: Res = SplitVecOp_TRUNCATE(N); break; case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break; case ISD::STORE: Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo); @@ -1062,7 +1063,6 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) { case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: case ISD::FTRUNC: - case ISD::TRUNCATE: case ISD::SIGN_EXTEND: case ISD::ZERO_EXTEND: case ISD::ANY_EXTEND: @@ -1272,8 +1272,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) { DebugLoc DL = N->getDebugLoc(); - // The input operands all must have the same type, and we know the result the - // result type is valid. Convert this to a buildvector which extracts all the + // The input operands all must have the same type, and we know the result + // type is valid. Convert this to a buildvector which extracts all the // input elements. // TODO: If the input elements are power-two vectors, we could convert this to // a new CONCAT_VECTORS node with elements that are half-wide. 
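A standalone sketch of what the comment in SplitVecOp_CONCAT_VECTORS describes: when the inputs of a CONCAT_VECTORS need splitting, the node is rebuilt as a BUILD_VECTOR fed by one extract per input element, in operand order. std::vector<int> stands in for the vector operands.

#include <cstdio>
#include <vector>

// Flatten the operands of a conceptual CONCAT_VECTORS into the element list a
// BUILD_VECTOR would be given.
static std::vector<int>
concatAsBuildVector(const std::vector<std::vector<int>> &Operands) {
  std::vector<int> Elts;
  for (const std::vector<int> &Op : Operands)
    for (int Elt : Op)
      Elts.push_back(Elt);           // extract_vector_elt Op, i
  return Elts;
}

int main() {
  std::vector<int> Elts = concatAsBuildVector({{1, 2}, {3, 4}, {5, 6}});
  for (int E : Elts)
    std::printf("%d ", E);           // 1 2 3 4 5 6
  std::printf("\n");
}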
@@ -1293,6 +1293,66 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) { &Elts[0], Elts.size()); } +SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) { + // The result type is legal, but the input type is illegal. If splitting + // ends up with the result type of each half still being legal, just + // do that. If, however, that would result in an illegal result type, + // we can try to get more clever with power-two vectors. Specifically, + // split the input type, but also widen the result element size, then + // concatenate the halves and truncate again. For example, consider a target + // where v8i8 is legal and v8i32 is not (ARM, which doesn't have 256-bit + // vectors). To perform a "%res = v8i8 trunc v8i32 %in" we do: + // %inlo = v4i32 extract_subvector %in, 0 + // %inhi = v4i32 extract_subvector %in, 4 + // %lo16 = v4i16 trunc v4i32 %inlo + // %hi16 = v4i16 trunc v4i32 %inhi + // %in16 = v8i16 concat_vectors v4i16 %lo16, v4i16 %hi16 + // %res = v8i8 trunc v8i16 %in16 + // + // Without this transform, the original truncate would end up being + // scalarized, which is pretty much always a last resort. + SDValue InVec = N->getOperand(0); + EVT InVT = InVec->getValueType(0); + EVT OutVT = N->getValueType(0); + unsigned NumElements = OutVT.getVectorNumElements(); + // Widening should have already made sure this is a power-two vector + // if we're trying to split it at all. assert() that's true, just in case. + assert(!(NumElements & 1) && "Splitting vector, but not in half!"); + + unsigned InElementSize = InVT.getVectorElementType().getSizeInBits(); + unsigned OutElementSize = OutVT.getVectorElementType().getSizeInBits(); + + // If the input elements are only 1/2 the width of the result elements, + // just use the normal splitting. Our trick only work if there's room + // to split more than once. + if (InElementSize <= OutElementSize * 2) + return SplitVecOp_UnaryOp(N); + DebugLoc DL = N->getDebugLoc(); + + // Extract the halves of the input via extract_subvector. + EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), + InVT.getVectorElementType(), NumElements/2); + SDValue InLoVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, InVec, + DAG.getIntPtrConstant(0)); + SDValue InHiVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, InVec, + DAG.getIntPtrConstant(NumElements/2)); + // Truncate them to 1/2 the element size. + EVT HalfElementVT = EVT::getIntegerVT(*DAG.getContext(), InElementSize/2); + EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, + NumElements/2); + SDValue HalfLo = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, InLoVec); + SDValue HalfHi = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, InHiVec); + // Concatenate them to get the full intermediate truncation result. + EVT InterVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements); + SDValue InterVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InterVT, HalfLo, + HalfHi); + // Now finish up by truncating all the way down to the original result + // type. This should normally be something that ends up being legal directly, + // but in theory if a target has very wide vectors and an annoyingly + // restricted set of legal types, this split can chain to build things up. 
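A standalone sketch of the element-level fact SplitVecOp_TRUNCATE relies on: truncating in two stages (i32 -> i16 on each half, concatenate, then i16 -> i8) produces exactly the same bytes as truncating i32 -> i8 directly, because truncation only keeps the low bits. Scalar loops stand in for the v8i32 -> v8i8 DAG nodes named in the comment above.

#include <array>
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  std::array<uint32_t, 8> In = {0x11223344, 0xdeadbeef, 0x80, 0x1ff,
                                0xffffffff, 0x7f, 0xcafe, 0x12345678};

  // Direct truncation: %res = v8i8 trunc v8i32 %in.
  std::array<uint8_t, 8> Direct;
  for (unsigned i = 0; i != 8; ++i)
    Direct[i] = static_cast<uint8_t>(In[i]);

  // Staged truncation: split, trunc each half to i16, concat, trunc to i8.
  std::array<uint16_t, 8> Mid;
  for (unsigned i = 0; i != 4; ++i) {
    Mid[i] = static_cast<uint16_t>(In[i]);          // %lo16
    Mid[i + 4] = static_cast<uint16_t>(In[i + 4]);  // %hi16
  }
  std::array<uint8_t, 8> Staged;
  for (unsigned i = 0; i != 8; ++i)
    Staged[i] = static_cast<uint8_t>(Mid[i]);       // %res

  assert(Direct == Staged);
  std::printf("staged and direct truncation agree\n");
}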
+ return DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec); +} + SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) { assert(N->getValueType(0).isVector() && N->getOperand(0).getValueType().isVector() && diff --git a/lib/CodeGen/SelectionDAG/SDNodeOrdering.h b/lib/CodeGen/SelectionDAG/SDNodeOrdering.h index d2269f8..7e7b897 100644 --- a/lib/CodeGen/SelectionDAG/SDNodeOrdering.h +++ b/lib/CodeGen/SelectionDAG/SDNodeOrdering.h @@ -33,8 +33,10 @@ class SDNodeOrdering { public: SDNodeOrdering() {} - void add(const SDNode *Node, unsigned O) { - OrderMap[Node] = O; + void add(const SDNode *Node, unsigned NewOrder) { + unsigned &OldOrder = OrderMap[Node]; + if (OldOrder == 0 || (OldOrder > 0 && NewOrder < OldOrder)) + OldOrder = NewOrder; } void remove(const SDNode *Node) { DenseMap<const SDNode*, unsigned>::iterator Itr = OrderMap.find(Node); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 35707e8..5e30c25 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1917,7 +1917,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero, } case ISD::LOAD: { LoadSDNode *LD = cast<LoadSDNode>(Op); - if (ISD::isZEXTLoad(Op.getNode())) { + // If this is a ZEXTLoad and we are looking at the loaded value. + if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { EVT VT = LD->getMemoryVT(); unsigned MemBits = VT.getScalarType().getSizeInBits(); KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); @@ -2287,17 +2288,20 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{ break; } - // Handle LOADX separately here. EXTLOAD case will fallthrough. - if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { - unsigned ExtType = LD->getExtensionType(); - switch (ExtType) { - default: break; - case ISD::SEXTLOAD: // '17' bits known - Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); - return VTBits-Tmp+1; - case ISD::ZEXTLOAD: // '16' bits known - Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); - return VTBits-Tmp; + // If we are looking at the loaded value of the SDNode. + if (Op.getResNo() == 0) { + // Handle LOADX separately here. EXTLOAD case will fallthrough. + if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { + unsigned ExtType = LD->getExtensionType(); + switch (ExtType) { + default: break; + case ISD::SEXTLOAD: // '17' bits known + Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); + return VTBits-Tmp+1; + case ISD::ZEXTLOAD: // '16' bits known + Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); + return VTBits-Tmp; + } } } @@ -2781,7 +2785,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT, } // Handle the scalar case first. - if (Outputs.size() == 1) + if (Scalar1 && Scalar2) return Outputs.back(); // Otherwise build a big vector out of the scalar elements we generated. 
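A standalone sketch of the policy SDNodeOrdering::add now implements above: the map records the first (smallest non-zero) order ever assigned to a node instead of letting later assignments overwrite it. std::map stands in for DenseMap.

#include <cassert>
#include <cstdio>
#include <map>

struct OrderingMap {
  std::map<const void *, unsigned> OrderMap;

  // Keep the smallest non-zero order ever assigned to Node.
  void add(const void *Node, unsigned NewOrder) {
    unsigned &OldOrder = OrderMap[Node];   // value-initialized to 0 if new
    if (OldOrder == 0 || NewOrder < OldOrder)
      OldOrder = NewOrder;
  }
  unsigned get(const void *Node) const {
    auto It = OrderMap.find(Node);
    return It == OrderMap.end() ? 0 : It->second;
  }
};

int main() {
  int Node = 0;
  OrderingMap M;
  M.add(&Node, 7);
  M.add(&Node, 3);   // earlier order wins
  M.add(&Node, 9);   // later order is ignored
  assert(M.get(&Node) == 3);
  std::printf("order = %u\n", M.get(&Node));
}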
@@ -5248,14 +5252,14 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT) { SDVTList VTs = getVTList(VT); - return getMachineNode(Opcode, dl, VTs, 0, 0); + return getMachineNode(Opcode, dl, VTs, ArrayRef<SDValue>()); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, SDValue Op1) { SDVTList VTs = getVTList(VT); SDValue Ops[] = { Op1 }; - return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * @@ -5263,7 +5267,7 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, SDValue Op1, SDValue Op2) { SDVTList VTs = getVTList(VT); SDValue Ops[] = { Op1, Op2 }; - return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * @@ -5271,20 +5275,20 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, SDValue Op1, SDValue Op2, SDValue Op3) { SDVTList VTs = getVTList(VT); SDValue Ops[] = { Op1, Op2, Op3 }; - return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, - const SDValue *Ops, unsigned NumOps) { + ArrayRef<SDValue> Ops) { SDVTList VTs = getVTList(VT); - return getMachineNode(Opcode, dl, VTs, Ops, NumOps); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2) { SDVTList VTs = getVTList(VT1, VT2); - return getMachineNode(Opcode, dl, VTs, 0, 0); + return getMachineNode(Opcode, dl, VTs, ArrayRef<SDValue>()); } MachineSDNode * @@ -5292,7 +5296,7 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2, SDValue Op1) { SDVTList VTs = getVTList(VT1, VT2); SDValue Ops[] = { Op1 }; - return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * @@ -5300,7 +5304,7 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) { SDVTList VTs = getVTList(VT1, VT2); SDValue Ops[] = { Op1, Op2 }; - return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * @@ -5309,15 +5313,15 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, SDValue Op2, SDValue Op3) { SDVTList VTs = getVTList(VT1, VT2); SDValue Ops[] = { Op1, Op2, Op3 }; - return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2, - const SDValue *Ops, unsigned NumOps) { + ArrayRef<SDValue> Ops) { SDVTList VTs = getVTList(VT1, VT2); - return getMachineNode(Opcode, dl, VTs, Ops, NumOps); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * @@ -5326,7 +5330,7 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, SDValue Op1, SDValue Op2) { SDVTList VTs = getVTList(VT1, VT2, VT3); SDValue Ops[] = { Op1, Op2 }; - return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * @@ -5335,39 +5339,41 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, SDValue Op1, SDValue Op2, SDValue Op3) { SDVTList VTs = getVTList(VT1, VT2, VT3); SDValue Ops[] = { Op1, Op2, Op3 }; - return 
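A standalone sketch of why the getMachineNode overloads above can drop the explicit array_lengthof(Ops) argument: an ArrayRef-style view deduces the length from a C array at the call site, and an empty view replaces the old (0, 0) pointer/count pair. This is a minimal illustration of the idea, not LLVM's ArrayRef.

#include <cstddef>
#include <cstdio>

template <typename T> class ArrayView {
  const T *Data;
  std::size_t Length;
public:
  ArrayView() : Data(nullptr), Length(0) {}
  template <std::size_t N>
  ArrayView(const T (&Arr)[N]) : Data(Arr), Length(N) {}  // length deduced
  const T *data() const { return Data; }
  std::size_t size() const { return Length; }
};

static void emitNode(ArrayView<int> Ops) {
  std::printf("node with %u operands\n", (unsigned)Ops.size());
}

int main() {
  int Ops[] = {1, 2, 3};
  emitNode(Ops);               // no array_lengthof(Ops) at the call site
  emitNode(ArrayView<int>());  // the old (0, 0) pair becomes an empty view
}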
getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops)); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2, EVT VT3, - const SDValue *Ops, unsigned NumOps) { + ArrayRef<SDValue> Ops) { SDVTList VTs = getVTList(VT1, VT2, VT3); - return getMachineNode(Opcode, dl, VTs, Ops, NumOps); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2, EVT VT3, EVT VT4, - const SDValue *Ops, unsigned NumOps) { + ArrayRef<SDValue> Ops) { SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); - return getMachineNode(Opcode, dl, VTs, Ops, NumOps); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, ArrayRef<EVT> ResultTys, - const SDValue *Ops, unsigned NumOps) { + ArrayRef<SDValue> Ops) { SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size()); - return getMachineNode(Opcode, dl, VTs, Ops, NumOps); + return getMachineNode(Opcode, dl, VTs, Ops); } MachineSDNode * SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, - const SDValue *Ops, unsigned NumOps) { + ArrayRef<SDValue> OpsArray) { bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; MachineSDNode *N; void *IP = 0; + const SDValue *Ops = OpsArray.data(); + unsigned NumOps = OpsArray.size(); if (DoCSE) { FoldingSetNodeID ID; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 33d100e..2ded723 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -314,7 +314,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, } else { Ctx.emitError(ErrMsg); } - report_fatal_error("Cannot handle scalar-to-vector conversion!"); + return DAG.getUNDEF(ValueVT); } if (ValueVT.getVectorNumElements() == 1 && @@ -4914,7 +4914,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { case Intrinsic::fmuladd: { EVT VT = TLI.getValueType(I.getType()); if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && - TLI.isOperationLegalOrCustom(ISD::FMA, VT) && TLI.isFMAFasterThanMulAndAdd(VT)){ setValue(&I, DAG.getNode(ISD::FMA, dl, getValue(I.getArgOperand(0)).getValueType(), @@ -5233,6 +5232,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, Entry.isSRet = true; Entry.isNest = false; Entry.isByVal = false; + Entry.isReturned = false; Entry.Alignment = Align; Args.push_back(Entry); RetTy = Type::getVoidTy(FTy->getContext()); @@ -5250,13 +5250,14 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, Entry.Node = ArgNode; Entry.Ty = V->getType(); unsigned attrInd = i - CS.arg_begin() + 1; - Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt); - Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt); - Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg); - Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet); - Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest); - Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal); - Entry.Alignment = CS.getParamAlignment(attrInd); + Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt); + Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt); + Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg); + Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet); + Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest); + 
Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal); + Entry.isReturned = CS.paramHasAttr(attrInd, Attribute::Returned); + Entry.Alignment = CS.getParamAlignment(attrInd); Args.push_back(Entry); } @@ -6170,10 +6171,17 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { MatchedRegs.RegVTs.push_back(RegVT); MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo(); for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag); - i != e; ++i) - MatchedRegs.Regs.push_back - (RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT))); - + i != e; ++i) { + if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) + MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC)); + else { + LLVMContext &Ctx = *DAG.getContext(); + Ctx.emitError(CS.getInstruction(), "inline asm error: This value" + " type register class is not natively supported!"); + report_fatal_error("inline asm error: This value type register " + "class is not natively supported!"); + } + } // Use the produced MatchedRegs object to MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(), Chain, &Flag, CS.getInstruction()); @@ -6390,6 +6398,28 @@ void SelectionDAGBuilder::visitVACopy(const CallInst &I) { /// migrated to using LowerCall, this hook should be integrated into SDISel. std::pair<SDValue, SDValue> TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { + // Handle the incoming return values from the call. + CLI.Ins.clear(); + SmallVector<EVT, 4> RetTys; + ComputeValueVTs(*this, CLI.RetTy, RetTys); + for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { + EVT VT = RetTys[I]; + MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT); + unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT); + for (unsigned i = 0; i != NumRegs; ++i) { + ISD::InputArg MyFlags; + MyFlags.VT = RegisterVT; + MyFlags.Used = CLI.IsReturnValueUsed; + if (CLI.RetSExt) + MyFlags.Flags.setSExt(); + if (CLI.RetZExt) + MyFlags.Flags.setZExt(); + if (CLI.IsInReg) + MyFlags.Flags.setInReg(); + CLI.Ins.push_back(MyFlags); + } + } + // Handle all of the outgoing arguments. CLI.Outs.clear(); CLI.OutVals.clear(); @@ -6443,6 +6473,26 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { else if (Args[i].isZExt) ExtendKind = ISD::ZERO_EXTEND; + // Conservatively only handle 'returned' on non-vectors for now + if (Args[i].isReturned && !Op.getValueType().isVector()) { + assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues && + "unexpected use of 'returned'"); + // Before passing 'returned' to the target lowering code, ensure that + // either the register MVT and the actual EVT are the same size or that + // the return value and argument are extended in the same way; in these + // cases it's safe to pass the argument register value unchanged as the + // return register value (although it's at the target's option whether + // to do so) + // TODO: allow code generation to take advantage of partially preserved + // registers rather than clobbering the entire register when the + // parameter extension method is not compatible with the return + // extension method + if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) || + (ExtendKind != ISD::ANY_EXTEND && + CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt)) + Flags.setReturned(); + } + getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CS ? 
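A standalone sketch of the safety condition spelled out in the comment above for propagating the 'returned' attribute: the argument register may double as the return register only if the register type already covers the whole value, or the argument and return value are extended the same way. Simple booleans and bit widths stand in for the CLI and Args fields.

#include <cassert>
#include <cstdio>

enum class Ext { Any, Sign, Zero };

// Mirrors the condition guarding Flags.setReturned() above.
static bool canMarkReturned(unsigned NumParts, unsigned PartBits,
                            unsigned ValueBits, Ext ArgExt,
                            bool RetSExt, bool RetZExt) {
  bool ArgSExt = ArgExt == Ext::Sign;
  bool ArgZExt = ArgExt == Ext::Zero;
  return (NumParts * PartBits == ValueBits) ||
         (ArgExt != Ext::Any && RetSExt == ArgSExt && RetZExt == ArgZExt);
}

int main() {
  // i32 argument passed in one 32-bit part: sizes match, always safe.
  assert(canMarkReturned(1, 32, 32, Ext::Any, false, false));
  // i8 zero-extended to a 32-bit register, return also zero-extended: safe.
  assert(canMarkReturned(1, 32, 8, Ext::Zero, false, true));
  // i8 any-extended but the return expects a sign extension: not safe.
  assert(!canMarkReturned(1, 32, 8, Ext::Any, true, false));
  std::printf("ok\n");
}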
CLI.CS->getInstruction() : 0, ExtendKind); @@ -6462,28 +6512,6 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { } } - // Handle the incoming return values from the call. - CLI.Ins.clear(); - SmallVector<EVT, 4> RetTys; - ComputeValueVTs(*this, CLI.RetTy, RetTys); - for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { - EVT VT = RetTys[I]; - MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT); - unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT); - for (unsigned i = 0; i != NumRegs; ++i) { - ISD::InputArg MyFlags; - MyFlags.VT = RegisterVT; - MyFlags.Used = CLI.IsReturnValueUsed; - if (CLI.RetSExt) - MyFlags.Flags.setSExt(); - if (CLI.RetZExt) - MyFlags.Flags.setZExt(); - if (CLI.IsInReg) - MyFlags.Flags.setInReg(); - CLI.Ins.push_back(MyFlags); - } - } - SmallVector<SDValue, 4> InVals; CLI.Chain = LowerCall(CLI, InVals); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp index 3b5823b..47b0391 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -54,7 +54,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const { case ISD::DELETED_NODE: return "<<Deleted Node!>>"; #endif case ISD::PREFETCH: return "Prefetch"; - case ISD::MEMBARRIER: return "MemBarrier"; case ISD::ATOMIC_FENCE: return "AtomicFence"; case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap"; case ISD::ATOMIC_SWAP: return "AtomicSwap"; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index c3b6276..e21f26e 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -63,12 +63,16 @@ STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected"); STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel"); STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG"); STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path"); +STATISTIC(NumEntryBlocks, "Number of entry blocks encountered"); +STATISTIC(NumFastIselFailLowerArguments, + "Number of entry blocks where fast isel failed to lower arguments"); #ifndef NDEBUG static cl::opt<bool> EnableFastISelVerbose2("fast-isel-verbose2", cl::Hidden, cl::desc("Enable extra verbose messages in the \"fast\" " "instruction selector")); + // Terminators STATISTIC(NumFastIselFailRet,"Fast isel fails on Ret"); STATISTIC(NumFastIselFailBr,"Fast isel fails on Br"); @@ -742,7 +746,7 @@ public: } // end anonymous namespace void SelectionDAGISel::DoInstructionSelection() { - DEBUG(errs() << "===== Instruction selection begins: BB#" + DEBUG(dbgs() << "===== Instruction selection begins: BB#" << FuncInfo->MBB->getNumber() << " '" << FuncInfo->MBB->getName() << "'\n"); @@ -785,8 +789,12 @@ void SelectionDAGISel::DoInstructionSelection() { if (ResNode == Node || Node->getOpcode() == ISD::DELETED_NODE) continue; // Replace node. - if (ResNode) + if (ResNode) { + // Propagate ordering + CurDAG->AssignOrdering(ResNode, CurDAG->GetOrdering(Node)); + ReplaceUses(Node, ResNode); + } // If after the replacement this node is not used any more, // remove this dead node. 
@@ -797,7 +805,7 @@ void SelectionDAGISel::DoInstructionSelection() { CurDAG->setRoot(Dummy.getValue()); } - DEBUG(errs() << "===== Instruction selection ends:\n"); + DEBUG(dbgs() << "===== Instruction selection ends:\n"); PostprocessISelDAG(); } @@ -827,84 +835,6 @@ void SelectionDAGISel::PrepareEHLandingPad() { if (Reg) MBB->addLiveIn(Reg); } -/// TryToFoldFastISelLoad - We're checking to see if we can fold the specified -/// load into the specified FoldInst. Note that we could have a sequence where -/// multiple LLVM IR instructions are folded into the same machineinstr. For -/// example we could have: -/// A: x = load i32 *P -/// B: y = icmp A, 42 -/// C: br y, ... -/// -/// In this scenario, LI is "A", and FoldInst is "C". We know about "B" (and -/// any other folded instructions) because it is between A and C. -/// -/// If we succeed in folding the load into the operation, return true. -/// -bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI, - const Instruction *FoldInst, - FastISel *FastIS) { - // We know that the load has a single use, but don't know what it is. If it - // isn't one of the folded instructions, then we can't succeed here. Handle - // this by scanning the single-use users of the load until we get to FoldInst. - unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs. - - const Instruction *TheUser = LI->use_back(); - while (TheUser != FoldInst && // Scan up until we find FoldInst. - // Stay in the right block. - TheUser->getParent() == FoldInst->getParent() && - --MaxUsers) { // Don't scan too far. - // If there are multiple or no uses of this instruction, then bail out. - if (!TheUser->hasOneUse()) - return false; - - TheUser = TheUser->use_back(); - } - - // If we didn't find the fold instruction, then we failed to collapse the - // sequence. - if (TheUser != FoldInst) - return false; - - // Don't try to fold volatile loads. Target has to deal with alignment - // constraints. - if (LI->isVolatile()) return false; - - // Figure out which vreg this is going into. If there is no assigned vreg yet - // then there actually was no reference to it. Perhaps the load is referenced - // by a dead instruction. - unsigned LoadReg = FastIS->getRegForValue(LI); - if (LoadReg == 0) - return false; - - // Check to see what the uses of this vreg are. If it has no uses, or more - // than one use (at the machine instr level) then we can't fold it. - MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(LoadReg); - if (RI == RegInfo->reg_end()) - return false; - - // See if there is exactly one use of the vreg. If there are multiple uses, - // then the instruction got lowered to multiple machine instructions or the - // use of the loaded value ended up being multiple operands of the result, in - // either case, we can't fold this. - MachineRegisterInfo::reg_iterator PostRI = RI; ++PostRI; - if (PostRI != RegInfo->reg_end()) - return false; - - assert(RI.getOperand().isUse() && - "The only use of the vreg must be a use, we haven't emitted the def!"); - - MachineInstr *User = &*RI; - - // Set the insertion point properly. Folding the load can cause generation of - // other random instructions (like sign extends) for addressing modes, make - // sure they get inserted in a logical place before the new instruction. - FuncInfo->InsertPt = User; - FuncInfo->MBB = User->getParent(); - - // Ask the target to try folding the load. 
- return FastIS->TryToFoldLoad(User, RI.getOperandNo(), LI); -} - /// isFoldedOrDeadInstruction - Return true if the specified instruction is /// side-effect free and is either dead or folded into a generated instruction. /// Return false if it needs to be emitted. @@ -1050,9 +980,12 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // Emit code for any incoming arguments. This must happen before // beginning FastISel on the entry block. if (LLVMBB == &Fn.getEntryBlock()) { + ++NumEntryBlocks; + // Lower any arguments needed in this block if this is the entry block. if (!FastIS->LowerArguments()) { // Fast isel failed to lower these arguments + ++NumFastIselFailLowerArguments; if (EnableFastISelAbortArgs) llvm_unreachable("FastISel didn't lower all arguments"); @@ -1102,7 +1035,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { } if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) && BeforeInst->hasOneUse() && - TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), Inst, FastIS)) { + FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) { // If we succeeded, don't re-select the load. BI = llvm::next(BasicBlock::const_iterator(BeforeInst)); --NumFastIselRemaining; @@ -1174,8 +1107,10 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { FastIS->recomputeInsertPt(); } else { // Lower any arguments needed in this block if this is the entry block. - if (LLVMBB == &Fn.getEntryBlock()) + if (LLVMBB == &Fn.getEntryBlock()) { + ++NumEntryBlocks; LowerArguments(Fn); + } } if (Begin != BI) @@ -1767,7 +1702,7 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain, if (!NowDeadNodes.empty()) CurDAG->RemoveDeadNodes(NowDeadNodes); - DEBUG(errs() << "ISEL: Match complete!\n"); + DEBUG(dbgs() << "ISEL: Match complete!\n"); } enum ChainResult { @@ -2272,9 +2207,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, SmallVector<SDNode*, 3> ChainNodesMatched; SmallVector<SDNode*, 3> GlueResultNodesMatched; - DEBUG(errs() << "ISEL: Starting pattern match on root node: "; + DEBUG(dbgs() << "ISEL: Starting pattern match on root node: "; NodeToMatch->dump(CurDAG); - errs() << '\n'); + dbgs() << '\n'); // Determine where to start the interpreter. Normally we start at opcode #0, // but if the state machine starts with an OPC_SwitchOpcode, then we @@ -2286,7 +2221,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, // Already computed the OpcodeOffset table, just index into it. if (N.getOpcode() < OpcodeOffset.size()) MatcherIndex = OpcodeOffset[N.getOpcode()]; - DEBUG(errs() << " Initial Opcode index to " << MatcherIndex << "\n"); + DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n"); } else if (MatcherTable[0] == OPC_SwitchOpcode) { // Otherwise, the table isn't computed, but the state machine does start @@ -2353,7 +2288,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, if (!Result) break; - DEBUG(errs() << " Skipped scope entry (due to false predicate) at " + DEBUG(dbgs() << " Skipped scope entry (due to false predicate) at " << "index " << MatcherIndexOfPredicate << ", continuing at " << FailIndex << "\n"); ++NumDAGIselRetries; @@ -2483,7 +2418,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, if (CaseSize == 0) break; // Otherwise, execute the case we found. 
- DEBUG(errs() << " OpcodeSwitch from " << SwitchStart + DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to " << MatcherIndex << "\n"); continue; } @@ -2515,7 +2450,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, if (CaseSize == 0) break; // Otherwise, execute the case we found. - DEBUG(errs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString() + DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString() << "] from " << SwitchStart << " to " << MatcherIndex<<'\n'); continue; } @@ -2783,7 +2718,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, // If this is a normal EmitNode command, just create the new node and // add the results to the RecordedNodes list. Res = CurDAG->getMachineNode(TargetOpc, NodeToMatch->getDebugLoc(), - VTList, Ops.data(), Ops.size()); + VTList, Ops); // Add all the non-glue/non-chain results to the RecordedNodes list. for (unsigned i = 0, e = VTs.size(); i != e; ++i) { @@ -2859,9 +2794,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, ->setMemRefs(MemRefs, MemRefs + NumMemRefs); } - DEBUG(errs() << " " + DEBUG(dbgs() << " " << (Opcode == OPC_MorphNodeTo ? "Morphed" : "Created") - << " node: "; Res->dump(CurDAG); errs() << "\n"); + << " node: "; Res->dump(CurDAG); dbgs() << "\n"); // If this was a MorphNodeTo then we're completely done! if (Opcode == OPC_MorphNodeTo) { @@ -2936,7 +2871,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, // If the code reached this point, then the match failed. See if there is // another child to try in the current 'Scope', otherwise pop it until we // find a case to check. - DEBUG(errs() << " Match failed at index " << CurrentOpcodeIndex << "\n"); + DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex << "\n"); ++NumDAGIselRetries; while (1) { if (MatchScopes.empty()) { @@ -2956,7 +2891,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, MatchedMemRefs.resize(LastScope.NumMatchedMemRefs); MatcherIndex = LastScope.FailIndex; - DEBUG(errs() << " Continuing at " << MatcherIndex << "\n"); + DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n"); InputChain = LastScope.InputChain; InputGlue = LastScope.InputGlue; diff --git a/lib/CodeGen/SpillPlacement.cpp b/lib/CodeGen/SpillPlacement.cpp index 320128a..c5bbba3 100644 --- a/lib/CodeGen/SpillPlacement.cpp +++ b/lib/CodeGen/SpillPlacement.cpp @@ -29,6 +29,7 @@ #define DEBUG_TYPE "spillplacement" #include "SpillPlacement.h" +#include "llvm/ADT/BitVector.h" #include "llvm/CodeGen/EdgeBundles.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" #include "llvm/CodeGen/MachineBasicBlock.h" diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp index ec44b8c..a789a25 100644 --- a/lib/CodeGen/StackColoring.cpp +++ b/lib/CodeGen/StackColoring.cpp @@ -67,14 +67,14 @@ DisableColoring("no-stack-coloring", /// code. If this flag is enabled, we try to save the user. 
static cl::opt<bool> ProtectFromEscapedAllocas("protect-from-escaped-allocas", - cl::init(false), cl::Hidden, - cl::desc("Do not optimize lifetime zones that are broken")); + cl::init(false), cl::Hidden, + cl::desc("Do not optimize lifetime zones that " + "are broken")); STATISTIC(NumMarkerSeen, "Number of lifetime markers found."); STATISTIC(StackSpaceSaved, "Number of bytes saved due to merging slots."); STATISTIC(StackSlotMerged, "Number of stack slot merged."); -STATISTIC(EscapedAllocas, - "Number of allocas that escaped the lifetime region"); +STATISTIC(EscapedAllocas, "Number of allocas that escaped the lifetime region"); //===----------------------------------------------------------------------===// // StackColoring Pass @@ -577,7 +577,7 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) { SlotIndex Index = Indexes->getInstructionIndex(I); LiveInterval *Interval = Intervals[FromSlot]; assert(Interval->find(Index) != Interval->end() && - "Found instruction usage outside of live range."); + "Found instruction usage outside of live range."); } #endif @@ -741,9 +741,9 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) { std::stable_sort(SortedSlots.begin(), SortedSlots.end(), SlotSizeSorter(MFI)); - bool Chanded = true; - while (Chanded) { - Chanded = false; + bool Changed = true; + while (Changed) { + Changed = false; for (unsigned I = 0; I < NumSlots; ++I) { if (SortedSlots[I] == -1) continue; @@ -760,7 +760,7 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) { // Merge disjoint slots. if (!First->overlaps(*Second)) { - Chanded = true; + Changed = true; First->MergeRangesInAsValue(*Second, First->getValNumInfo(0)); SlotRemap[SecondSlot] = FirstSlot; SortedSlots[J] = -1; diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp index 3c34676..8074d16 100644 --- a/lib/CodeGen/TargetLoweringBase.cpp +++ b/lib/CodeGen/TargetLoweringBase.cpp @@ -620,12 +620,55 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) { TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm, const TargetLoweringObjectFile *tlof) : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) { + initActions(); + + // Perform these initializations only once. + IsLittleEndian = TD->isLittleEndian(); + PointerTy = MVT::getIntegerVT(8*TD->getPointerSize(0)); + MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8; + MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize + = MaxStoresPerMemmoveOptSize = 4; + UseUnderscoreSetJmp = false; + UseUnderscoreLongJmp = false; + SelectIsExpensive = false; + IntDivIsCheap = false; + Pow2DivIsCheap = false; + JumpIsExpensive = false; + PredictableSelectIsExpensive = false; + StackPointerRegisterToSaveRestore = 0; + ExceptionPointerRegister = 0; + ExceptionSelectorRegister = 0; + BooleanContents = UndefinedBooleanContent; + BooleanVectorContents = UndefinedBooleanContent; + SchedPreferenceInfo = Sched::ILP; + JumpBufSize = 0; + JumpBufAlignment = 0; + MinFunctionAlignment = 0; + PrefFunctionAlignment = 0; + PrefLoopAlignment = 0; + MinStackArgumentAlignment = 1; + InsertFencesForAtomic = false; + SupportJumpTables = true; + MinimumJumpTableEntries = 4; + + InitLibcallNames(LibcallRoutineNames, TM); + InitCmpLibcallCCs(CmpLibcallCCs); + InitLibcallCallingConvs(LibcallCallingConvs); +} + +TargetLoweringBase::~TargetLoweringBase() { + delete &TLOF; +} + +void TargetLoweringBase::initActions() { // All operations default to being supported. 
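A standalone sketch of the fixed-point merging loop in StackColoring::runOnMachineFunction above (the loop whose flag the patch renames from "Chanded" to "Changed"): keep merging pairs of slots whose live ranges do not overlap until a full pass makes no progress. Closed integer intervals stand in for LiveIntervals.

#include <cstdio>
#include <vector>

struct Slot {
  bool Dead = false;
  std::vector<std::pair<int, int>> Ranges;   // [start, end] segments
};

static bool overlaps(const Slot &A, const Slot &B) {
  for (auto &RA : A.Ranges)
    for (auto &RB : B.Ranges)
      if (RA.first <= RB.second && RB.first <= RA.second)
        return true;
  return false;
}

static unsigned colorSlots(std::vector<Slot> &Slots) {
  unsigned Merged = 0;
  bool Changed = true;
  while (Changed) {
    Changed = false;
    for (unsigned I = 0; I != Slots.size(); ++I) {
      if (Slots[I].Dead) continue;
      for (unsigned J = I + 1; J != Slots.size(); ++J) {
        if (Slots[J].Dead || overlaps(Slots[I], Slots[J])) continue;
        // Merge disjoint slots: fold J's ranges into I and retire J.
        Slots[I].Ranges.insert(Slots[I].Ranges.end(),
                               Slots[J].Ranges.begin(), Slots[J].Ranges.end());
        Slots[J].Dead = true;
        Changed = true;
        ++Merged;
      }
    }
  }
  return Merged;
}

int main() {
  std::vector<Slot> Slots(3);
  Slots[0].Ranges = {{0, 5}};
  Slots[1].Ranges = {{6, 9}};   // disjoint from slot 0: merged into it
  Slots[2].Ranges = {{3, 7}};   // overlaps the merged slot: kept separate
  std::printf("merged %u slots\n", colorSlots(Slots));   // merged 1 slots
}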
memset(OpActions, 0, sizeof(OpActions)); memset(LoadExtActions, 0, sizeof(LoadExtActions)); memset(TruncStoreActions, 0, sizeof(TruncStoreActions)); memset(IndexedModeActions, 0, sizeof(IndexedModeActions)); memset(CondCodeActions, 0, sizeof(CondCodeActions)); + memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); + memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); // Set default actions for various operations. for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) { @@ -702,46 +745,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm, // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP. // setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand); - - IsLittleEndian = TD->isLittleEndian(); - PointerTy = MVT::getIntegerVT(8*TD->getPointerSize(0)); - memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); - memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); - MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8; - MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize - = MaxStoresPerMemmoveOptSize = 4; - BenefitFromCodePlacementOpt = false; - UseUnderscoreSetJmp = false; - UseUnderscoreLongJmp = false; - SelectIsExpensive = false; - IntDivIsCheap = false; - Pow2DivIsCheap = false; - JumpIsExpensive = false; - PredictableSelectIsExpensive = false; - StackPointerRegisterToSaveRestore = 0; - ExceptionPointerRegister = 0; - ExceptionSelectorRegister = 0; - BooleanContents = UndefinedBooleanContent; - BooleanVectorContents = UndefinedBooleanContent; - SchedPreferenceInfo = Sched::ILP; - JumpBufSize = 0; - JumpBufAlignment = 0; - MinFunctionAlignment = 0; - PrefFunctionAlignment = 0; - PrefLoopAlignment = 0; - MinStackArgumentAlignment = 1; - ShouldFoldAtomicFences = false; - InsertFencesForAtomic = false; - SupportJumpTables = true; - MinimumJumpTableEntries = 4; - - InitLibcallNames(LibcallRoutineNames, TM); - InitCmpLibcallCCs(CmpLibcallCCs); - InitLibcallCallingConvs(LibcallCallingConvs); -} - -TargetLoweringBase::~TargetLoweringBase() { - delete &TLOF; } MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const { diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp index 3bdca4c..7e7359a 100644 --- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -523,11 +523,6 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind, const MCSection *TargetLoweringObjectFileMachO:: SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler *Mang, const TargetMachine &TM) const { - - // Handle thread local data. - if (Kind.isThreadBSS()) return TLSBSSSection; - if (Kind.isThreadData()) return TLSDataSection; - if (Kind.isText()) return GV->isWeakForLinker() ? TextCoalSection : TextSection; @@ -580,6 +575,10 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, if (Kind.isBSSLocal()) return DataBSSSection; + // Handle thread local data. + if (Kind.isThreadBSS()) return TLSBSSSection; + if (Kind.isThreadData()) return TLSDataSection; + // Otherwise, just drop the variable in the normal data section. 
return DataSection; } @@ -782,3 +781,49 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, return getDataSection(); } +void TargetLoweringObjectFileCOFF:: +emitModuleFlags(MCStreamer &Streamer, + ArrayRef<Module::ModuleFlagEntry> ModuleFlags, + Mangler *Mang, const TargetMachine &TM) const { + MDNode *LinkerOptions = 0; + + // Look for the "Linker Options" flag, since it's the only one we support. + for (ArrayRef<Module::ModuleFlagEntry>::iterator + i = ModuleFlags.begin(), e = ModuleFlags.end(); i != e; ++i) { + const Module::ModuleFlagEntry &MFE = *i; + StringRef Key = MFE.Key->getString(); + Value *Val = MFE.Val; + if (Key == "Linker Options") { + LinkerOptions = cast<MDNode>(Val); + break; + } + } + if (!LinkerOptions) + return; + + // Emit the linker options to the linker .drectve section. According to the + // spec, this section is a space-separated string containing flags for linker. + const MCSection *Sec = getDrectveSection(); + Streamer.SwitchSection(Sec); + for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) { + MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i)); + for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) { + MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii)); + StringRef Op = MDOption->getString(); + // Lead with a space for consistency with our dllexport implementation. + std::string Escaped(" "); + if (Op.find(" ") != StringRef::npos) { + // The PE-COFF spec says args with spaces must be quoted. It doesn't say + // how to escape quotes, but it probably uses this algorithm: + // http://msdn.microsoft.com/en-us/library/17w5ykft(v=vs.85).aspx + // FIXME: Reuse escaping code from Support/Windows/Program.inc + Escaped.push_back('\"'); + Escaped.append(Op); + Escaped.push_back('\"'); + } else { + Escaped.append(Op); + } + Streamer.EmitBytes(Escaped); + } + } +} diff --git a/lib/CodeGen/TargetOptionsImpl.cpp b/lib/CodeGen/TargetOptionsImpl.cpp index 0f59d01..435a5e7 100644 --- a/lib/CodeGen/TargetOptionsImpl.cpp +++ b/lib/CodeGen/TargetOptionsImpl.cpp @@ -50,3 +50,29 @@ StringRef TargetOptions::getTrapFunctionName() const { return TrapFuncName; } +bool TargetOptions::operator==(const TargetOptions &TO) { +#define ARE_EQUAL(X) X == TO.X + return + ARE_EQUAL(UnsafeFPMath) && + ARE_EQUAL(NoInfsFPMath) && + ARE_EQUAL(NoNaNsFPMath) && + ARE_EQUAL(HonorSignDependentRoundingFPMathOption) && + ARE_EQUAL(UseSoftFloat) && + ARE_EQUAL(NoZerosInBSS) && + ARE_EQUAL(JITExceptionHandling) && + ARE_EQUAL(JITEmitDebugInfo) && + ARE_EQUAL(JITEmitDebugInfoToDisk) && + ARE_EQUAL(GuaranteedTailCallOpt) && + ARE_EQUAL(DisableTailCalls) && + ARE_EQUAL(StackAlignmentOverride) && + ARE_EQUAL(RealignStack) && + ARE_EQUAL(SSPBufferSize) && + ARE_EQUAL(EnableFastISel) && + ARE_EQUAL(PositionIndependentExecutable) && + ARE_EQUAL(EnableSegmentedStacks) && + ARE_EQUAL(UseInitArray) && + ARE_EQUAL(TrapFuncName) && + ARE_EQUAL(FloatABIType) && + ARE_EQUAL(AllowFPOpFusion); +#undef ARE_EQUAL +} diff --git a/lib/CodeGen/TargetSchedule.cpp b/lib/CodeGen/TargetSchedule.cpp index 783bfa1..1bf14db 100644 --- a/lib/CodeGen/TargetSchedule.cpp +++ b/lib/CodeGen/TargetSchedule.cpp @@ -128,6 +128,8 @@ resolveSchedClass(const MachineInstr *MI) const { // Get the definition's scheduling class descriptor from this machine model. 
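A standalone sketch of the argument escaping performed by TargetLoweringObjectFileCOFF::emitModuleFlags above: each linker option goes into the .drectve section with a leading space, and options containing spaces are wrapped in quotes as the PE-COFF spec requires (quote escaping is left open, as the FIXME notes).

#include <cstdio>
#include <string>

// Mirrors the escaping loop above: prepend a space, quote if the option
// itself contains a space.
static std::string escapeDrectveOption(const std::string &Op) {
  std::string Escaped(" ");
  if (Op.find(' ') != std::string::npos) {
    Escaped.push_back('"');
    Escaped.append(Op);
    Escaped.push_back('"');
  } else {
    Escaped.append(Op);
  }
  return Escaped;
}

int main() {
  std::printf("[%s]\n", escapeDrectveOption("/DEFAULTLIB:msvcrt").c_str());
  std::printf("[%s]\n",
              escapeDrectveOption("/LIBPATH:C:\\Program Files\\Foo").c_str());
  // prints [ /DEFAULTLIB:msvcrt] and [ "/LIBPATH:C:\Program Files\Foo"]
}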
unsigned SchedClass = MI->getDesc().getSchedClass(); const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass); + if (!SCDesc->isValid()) + return SCDesc; #ifndef NDEBUG unsigned NIter = 0; diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp index e6dfe10..7ca2bee 100644 --- a/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -43,6 +43,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/IR/Function.h" #include "llvm/MC/MCInstrItineraries.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Target/TargetInstrInfo.h" @@ -58,6 +59,12 @@ STATISTIC(Num3AddrSunk, "Number of 3-address instructions sunk"); STATISTIC(NumReSchedUps, "Number of instructions re-scheduled up"); STATISTIC(NumReSchedDowns, "Number of instructions re-scheduled down"); +// Temporary flag to disable rescheduling. +static cl::opt<bool> +EnableRescheduling("twoaddr-reschedule", + cl::desc("Coalesce copies by rescheduling (default=true)"), + cl::init(true), cl::Hidden); + namespace { class TwoAddressInstructionPass : public MachineFunctionPass { MachineFunction *MF; @@ -426,10 +433,7 @@ static bool isKilled(MachineInstr &MI, unsigned Reg, /// isTwoAddrUse - Return true if the specified MI uses the specified register /// as a two-address use. If so, return the destination register by reference. static bool isTwoAddrUse(MachineInstr &MI, unsigned Reg, unsigned &DstReg) { - const MCInstrDesc &MCID = MI.getDesc(); - unsigned NumOps = MI.isInlineAsm() - ? MI.getNumOperands() : MCID.getNumOperands(); - for (unsigned i = 0; i != NumOps; ++i) { + for (unsigned i = 0, NumOps = MI.getNumOperands(); i != NumOps; ++i) { const MachineOperand &MO = MI.getOperand(i); if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg) continue; @@ -1144,7 +1148,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi, // If there is one more use of regB later in the same MBB, consider // re-schedule this MI below it. - if (rescheduleMIBelowKill(mi, nmi, regB)) { + if (EnableRescheduling && rescheduleMIBelowKill(mi, nmi, regB)) { ++NumReSchedDowns; return true; } @@ -1163,7 +1167,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi, // If there is one more use of regB later in the same MBB, consider // re-schedule it before this MI if it's legal. 
- if (rescheduleKillAboveMI(mi, nmi, regB)) { + if (EnableRescheduling && rescheduleKillAboveMI(mi, nmi, regB)) { ++NumReSchedUps; return true; } diff --git a/lib/DebugInfo/DWARFCompileUnit.cpp b/lib/DebugInfo/DWARFCompileUnit.cpp index e3e4ccd..4f0eed4 100644 --- a/lib/DebugInfo/DWARFCompileUnit.cpp +++ b/lib/DebugInfo/DWARFCompileUnit.cpp @@ -9,7 +9,7 @@ #include "DWARFCompileUnit.h" #include "DWARFContext.h" -#include "DWARFFormValue.h" +#include "llvm/DebugInfo/DWARFFormValue.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" @@ -165,7 +165,7 @@ size_t DWARFCompileUnit::extractDIEsIfNeeded(bool cu_die_only) { // we were told to parse const uint8_t *fixed_form_sizes = - DWARFFormValue::getFixedFormSizesForAddressSize(getAddressByteSize()); + DWARFFormValue::getFixedFormSizes(getAddressByteSize(), getVersion()); while (offset < next_cu_offset && die.extractFast(this, fixed_form_sizes, &offset)) { diff --git a/lib/DebugInfo/DWARFContext.cpp b/lib/DebugInfo/DWARFContext.cpp index 9e19310..e5daf55 100644 --- a/lib/DebugInfo/DWARFContext.cpp +++ b/lib/DebugInfo/DWARFContext.cpp @@ -9,6 +9,9 @@ #include "DWARFContext.h" #include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/Compression.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/Format.h" #include "llvm/Support/Path.h" @@ -482,6 +485,22 @@ DIInliningInfo DWARFContext::getInliningInfoForAddress(uint64_t Address, return InliningInfo; } +static bool consumeCompressedDebugSectionHeader(StringRef &data, + uint64_t &OriginalSize) { + // Consume "ZLIB" prefix. + if (!data.startswith("ZLIB")) + return false; + data = data.substr(4); + // Consume uncompressed section size (big-endian 8 bytes). + DataExtractor extractor(data, false, 8); + uint32_t Offset = 0; + OriginalSize = extractor.getU64(&Offset); + if (Offset == 0) + return false; + data = data.substr(Offset); + return true; +} + DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) : IsLittleEndian(Obj->isLittleEndian()), AddressSize(Obj->getBytesInAddress()) { @@ -495,49 +514,55 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) : i->getContents(data); name = name.substr(name.find_first_not_of("._")); // Skip . and _ prefixes. - if (name == "debug_info") - InfoSection = data; - else if (name == "debug_abbrev") - AbbrevSection = data; - else if (name == "debug_line") - LineSection = data; - else if (name == "debug_aranges") - ARangeSection = data; - else if (name == "debug_frame") - DebugFrameSection = data; - else if (name == "debug_str") - StringSection = data; - else if (name == "debug_ranges") { + + // Check if debug info section is compressed with zlib. + if (name.startswith("zdebug_")) { + uint64_t OriginalSize; + if (!zlib::isAvailable() || + !consumeCompressedDebugSectionHeader(data, OriginalSize)) + continue; + OwningPtr<MemoryBuffer> UncompressedSection; + if (zlib::uncompress(data, UncompressedSection, OriginalSize) != + zlib::StatusOK) + continue; + // Make data point to uncompressed section contents and save its contents. 
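Illustrative sketch, not part of the patch: consumeCompressedDebugSectionHeader above expects a zdebug_* section to start with the 4-byte magic "ZLIB" followed by the uncompressed size as a big-endian 64-bit integer, with the zlib stream immediately after. A standalone helper showing that assumed layout without the DataExtractor machinery:

#include <cstdint>
#include <cstring>

// Parses the assumed "ZLIB" + big-endian u64 header of a zdebug_* section.
static bool parseZDebugHeader(const unsigned char *Data, size_t Size,
                              uint64_t &OriginalSize, size_t &PayloadOffset) {
  if (Size < 12 || std::memcmp(Data, "ZLIB", 4) != 0)
    return false;
  OriginalSize = 0;
  for (unsigned i = 0; i < 8; ++i)           // big-endian byte order
    OriginalSize = (OriginalSize << 8) | Data[4 + i];
  PayloadOffset = 12;                        // compressed bytes start here
  return true;
}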
+ name = name.substr(1); + data = UncompressedSection->getBuffer(); + UncompressedSections.push_back(UncompressedSection.take()); + } + + StringRef *Section = StringSwitch<StringRef*>(name) + .Case("debug_info", &InfoSection) + .Case("debug_abbrev", &AbbrevSection) + .Case("debug_line", &LineSection) + .Case("debug_aranges", &ARangeSection) + .Case("debug_frame", &DebugFrameSection) + .Case("debug_str", &StringSection) + .Case("debug_ranges", &RangeSection) + .Case("debug_pubnames", &PubNamesSection) + .Case("debug_info.dwo", &InfoDWOSection) + .Case("debug_abbrev.dwo", &AbbrevDWOSection) + .Case("debug_str.dwo", &StringDWOSection) + .Case("debug_str_offsets.dwo", &StringOffsetDWOSection) + .Case("debug_addr", &AddrSection) + // Any more debug info sections go here. + .Default(0); + if (!Section) + continue; + *Section = data; + if (name == "debug_ranges") { // FIXME: Use the other dwo range section when we emit it. RangeDWOSection = data; - RangeSection = data; } - else if (name == "debug_pubnames") - PubNamesSection = data; - else if (name == "debug_info.dwo") - InfoDWOSection = data; - else if (name == "debug_abbrev.dwo") - AbbrevDWOSection = data; - else if (name == "debug_str.dwo") - StringDWOSection = data; - else if (name == "debug_str_offsets.dwo") - StringOffsetDWOSection = data; - else if (name == "debug_addr") - AddrSection = data; - // Any more debug info sections go here. - else - continue; // TODO: Add support for relocations in other sections as needed. // Record relocations for the debug_info and debug_line sections. - RelocAddrMap *Map; - if (name == "debug_info") - Map = &InfoRelocMap; - else if (name == "debug_info.dwo") - Map = &InfoDWORelocMap; - else if (name == "debug_line") - Map = &LineRelocMap; - else + RelocAddrMap *Map = StringSwitch<RelocAddrMap*>(name) + .Case("debug_info", &InfoRelocMap) + .Case("debug_info.dwo", &InfoDWORelocMap) + .Case("debug_line", &LineRelocMap) + .Default(0); + if (!Map) continue; if (i->begin_relocations() != i->end_relocations()) { @@ -547,7 +572,7 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) : reloc_e = i->end_relocations(); reloc_i != reloc_e; reloc_i.increment(ec)) { uint64_t Address; - reloc_i->getAddress(Address); + reloc_i->getOffset(Address); uint64_t Type; reloc_i->getType(Type); uint64_t SymAddr = 0; @@ -593,4 +618,8 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) : } } +DWARFContextInMemory::~DWARFContextInMemory() { + DeleteContainerPointers(UncompressedSections); +} + void DWARFContextInMemory::anchor() { } diff --git a/lib/DebugInfo/DWARFContext.h b/lib/DebugInfo/DWARFContext.h index 37b2729..78c18e6 100644 --- a/lib/DebugInfo/DWARFContext.h +++ b/lib/DebugInfo/DWARFContext.h @@ -161,8 +161,11 @@ class DWARFContextInMemory : public DWARFContext { StringRef RangeDWOSection; StringRef AddrSection; + SmallVector<MemoryBuffer*, 4> UncompressedSections; + public: DWARFContextInMemory(object::ObjectFile *); + ~DWARFContextInMemory(); virtual bool isLittleEndian() const { return IsLittleEndian; } virtual uint8_t getAddressSize() const { return AddressSize; } virtual const RelocAddrMap &infoRelocMap() const { return InfoRelocMap; } diff --git a/lib/DebugInfo/DWARFDebugArangeSet.cpp b/lib/DebugInfo/DWARFDebugArangeSet.cpp index 2efbfd1..7dff9ff 100644 --- a/lib/DebugInfo/DWARFDebugArangeSet.cpp +++ b/lib/DebugInfo/DWARFDebugArangeSet.cpp @@ -16,7 +16,7 @@ using namespace llvm; void DWARFDebugArangeSet::clear() { Offset = -1U; - std::memset(&Header, 0, sizeof(Header)); + 
std::memset(&HeaderData, 0, sizeof(Header)); ArangeDescriptors.clear(); } @@ -66,15 +66,15 @@ DWARFDebugArangeSet::extract(DataExtractor data, uint32_t *offset_ptr) { // descriptor on the target system. This header is followed by a series // of tuples. Each tuple consists of an address and a length, each in // the size appropriate for an address on the target architecture. - Header.Length = data.getU32(offset_ptr); - Header.Version = data.getU16(offset_ptr); - Header.CuOffset = data.getU32(offset_ptr); - Header.AddrSize = data.getU8(offset_ptr); - Header.SegSize = data.getU8(offset_ptr); + HeaderData.Length = data.getU32(offset_ptr); + HeaderData.Version = data.getU16(offset_ptr); + HeaderData.CuOffset = data.getU32(offset_ptr); + HeaderData.AddrSize = data.getU8(offset_ptr); + HeaderData.SegSize = data.getU8(offset_ptr); // Perform basic validation of the header fields. - if (!data.isValidOffsetForDataOfSize(Offset, Header.Length) || - (Header.AddrSize != 4 && Header.AddrSize != 8)) { + if (!data.isValidOffsetForDataOfSize(Offset, HeaderData.Length) || + (HeaderData.AddrSize != 4 && HeaderData.AddrSize != 8)) { clear(); return false; } @@ -84,7 +84,7 @@ DWARFDebugArangeSet::extract(DataExtractor data, uint32_t *offset_ptr) { // size of an address). The header is padded, if necessary, to the // appropriate boundary. const uint32_t header_size = *offset_ptr - Offset; - const uint32_t tuple_size = Header.AddrSize * 2; + const uint32_t tuple_size = HeaderData.AddrSize * 2; uint32_t first_tuple_offset = 0; while (first_tuple_offset < header_size) first_tuple_offset += tuple_size; @@ -94,11 +94,11 @@ DWARFDebugArangeSet::extract(DataExtractor data, uint32_t *offset_ptr) { Descriptor arangeDescriptor; assert(sizeof(arangeDescriptor.Address) == sizeof(arangeDescriptor.Length)); - assert(sizeof(arangeDescriptor.Address) >= Header.AddrSize); + assert(sizeof(arangeDescriptor.Address) >= HeaderData.AddrSize); while (data.isValidOffset(*offset_ptr)) { - arangeDescriptor.Address = data.getUnsigned(offset_ptr, Header.AddrSize); - arangeDescriptor.Length = data.getUnsigned(offset_ptr, Header.AddrSize); + arangeDescriptor.Address = data.getUnsigned(offset_ptr, HeaderData.AddrSize); + arangeDescriptor.Length = data.getUnsigned(offset_ptr, HeaderData.AddrSize); // Each set of tuples is terminated by a 0 for the address and 0 // for the length. 
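Illustrative sketch, not part of the patch: the extract() loop above rounds the start of the descriptor tuples up to the next multiple of the tuple size, so the header is followed by padding when the two don't line up. A small worked example of that rule:

#include <cstdint>
#include <iostream>

// First descriptor offset: the smallest multiple of the tuple size that
// does not fall inside the header, as in DWARFDebugArangeSet::extract above.
static uint32_t firstTupleOffset(uint32_t HeaderSize, uint32_t AddrSize) {
  const uint32_t TupleSize = AddrSize * 2;   // address + length
  uint32_t Offset = 0;
  while (Offset < HeaderSize)
    Offset += TupleSize;
  return Offset;
}

int main() {
  // A 32-bit DWARF arange header is 12 bytes; with 8-byte addresses the
  // first tuple lands at offset 16, i.e. 4 bytes of padding.
  std::cout << firstTupleOffset(12, 8) << '\n';   // prints 16
  return 0;
}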
@@ -115,11 +115,11 @@ DWARFDebugArangeSet::extract(DataExtractor data, uint32_t *offset_ptr) { void DWARFDebugArangeSet::dump(raw_ostream &OS) const { OS << format("Address Range Header: length = 0x%8.8x, version = 0x%4.4x, ", - Header.Length, Header.Version) + HeaderData.Length, HeaderData.Version) << format("cu_offset = 0x%8.8x, addr_size = 0x%2.2x, seg_size = 0x%2.2x\n", - Header.CuOffset, Header.AddrSize, Header.SegSize); + HeaderData.CuOffset, HeaderData.AddrSize, HeaderData.SegSize); - const uint32_t hex_width = Header.AddrSize * 2; + const uint32_t hex_width = HeaderData.AddrSize * 2; for (DescriptorConstIter pos = ArangeDescriptors.begin(), end = ArangeDescriptors.end(); pos != end; ++pos) OS << format("[0x%*.*" PRIx64 " -", hex_width, hex_width, pos->Address) @@ -145,7 +145,7 @@ uint32_t DWARFDebugArangeSet::findAddress(uint64_t address) const { std::find_if(ArangeDescriptors.begin(), end, // Range DescriptorContainsAddress(address)); // Predicate if (pos != end) - return Header.CuOffset; + return HeaderData.CuOffset; return -1U; } diff --git a/lib/DebugInfo/DWARFDebugArangeSet.h b/lib/DebugInfo/DWARFDebugArangeSet.h index 9a2a6d0..d768676 100644 --- a/lib/DebugInfo/DWARFDebugArangeSet.h +++ b/lib/DebugInfo/DWARFDebugArangeSet.h @@ -48,7 +48,7 @@ private: typedef DescriptorColl::const_iterator DescriptorConstIter; uint32_t Offset; - Header Header; + Header HeaderData; DescriptorColl ArangeDescriptors; public: @@ -58,11 +58,11 @@ public: bool extract(DataExtractor data, uint32_t *offset_ptr); void dump(raw_ostream &OS) const; - uint32_t getCompileUnitDIEOffset() const { return Header.CuOffset; } - uint32_t getOffsetOfNextEntry() const { return Offset + Header.Length + 4; } + uint32_t getCompileUnitDIEOffset() const { return HeaderData.CuOffset; } + uint32_t getOffsetOfNextEntry() const { return Offset + HeaderData.Length + 4; } uint32_t findAddress(uint64_t address) const; uint32_t getNumDescriptors() const { return ArangeDescriptors.size(); } - const struct Header &getHeader() const { return Header; } + const struct Header &getHeader() const { return HeaderData; } const Descriptor *getDescriptor(uint32_t i) const { if (i < ArangeDescriptors.size()) return &ArangeDescriptors[i]; diff --git a/lib/DebugInfo/DWARFDebugAranges.cpp b/lib/DebugInfo/DWARFDebugAranges.cpp index b077eb5..f79862d 100644 --- a/lib/DebugInfo/DWARFDebugAranges.cpp +++ b/lib/DebugInfo/DWARFDebugAranges.cpp @@ -186,7 +186,7 @@ uint32_t DWARFDebugAranges::findAddress(uint64_t address) const { Range range(address); RangeCollIterator begin = Aranges.begin(); RangeCollIterator end = Aranges.end(); - RangeCollIterator pos = lower_bound(begin, end, range, RangeLessThan); + RangeCollIterator pos = std::lower_bound(begin, end, range, RangeLessThan); if (pos != end && pos->LoPC <= address && address < pos->HiPC()) { return pos->Offset; diff --git a/lib/DebugInfo/DWARFDebugInfoEntry.cpp b/lib/DebugInfo/DWARFDebugInfoEntry.cpp index 02b15d6..10be7b4 100644 --- a/lib/DebugInfo/DWARFDebugInfoEntry.cpp +++ b/lib/DebugInfo/DWARFDebugInfoEntry.cpp @@ -11,7 +11,7 @@ #include "DWARFCompileUnit.h" #include "DWARFContext.h" #include "DWARFDebugAbbrev.h" -#include "DWARFFormValue.h" +#include "llvm/DebugInfo/DWARFFormValue.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/Format.h" @@ -94,279 +94,87 @@ void DWARFDebugInfoEntryMinimal::dumpAttribute(raw_ostream &OS, OS << ")\n"; } -bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu, - const uint8_t *fixed_form_sizes, - 
uint32_t *offset_ptr) { - Offset = *offset_ptr; - - DataExtractor debug_info_data = cu->getDebugInfoExtractor(); - uint64_t abbrCode = debug_info_data.getULEB128(offset_ptr); - - assert(fixed_form_sizes); // For best performance this should be specified! - - if (abbrCode) { - uint32_t offset = *offset_ptr; - - AbbrevDecl = cu->getAbbreviations()->getAbbreviationDeclaration(abbrCode); - - // Skip all data in the .debug_info for the attributes - const uint32_t numAttributes = AbbrevDecl->getNumAttributes(); - uint32_t i; - uint16_t form; - for (i=0; i<numAttributes; ++i) { - - form = AbbrevDecl->getFormByIndex(i); - - // FIXME: Currently we're checking if this is less than the last - // entry in the fixed_form_sizes table, but this should be changed - // to use dynamic dispatch. - const uint8_t fixed_skip_size = (form < DW_FORM_ref_sig8) ? - fixed_form_sizes[form] : 0; - if (fixed_skip_size) - offset += fixed_skip_size; - else { - bool form_is_indirect = false; - do { - form_is_indirect = false; - uint32_t form_size = 0; - switch (form) { - // Blocks if inlined data that have a length field and the data bytes - // inlined in the .debug_info. - case DW_FORM_exprloc: - case DW_FORM_block: - form_size = debug_info_data.getULEB128(&offset); - break; - case DW_FORM_block1: - form_size = debug_info_data.getU8(&offset); - break; - case DW_FORM_block2: - form_size = debug_info_data.getU16(&offset); - break; - case DW_FORM_block4: - form_size = debug_info_data.getU32(&offset); - break; - - // Inlined NULL terminated C-strings - case DW_FORM_string: - debug_info_data.getCStr(&offset); - break; - - // Compile unit address sized values - case DW_FORM_addr: - case DW_FORM_ref_addr: - form_size = cu->getAddressByteSize(); - break; - - // 0 sized form. - case DW_FORM_flag_present: - form_size = 0; - break; - - // 1 byte values - case DW_FORM_data1: - case DW_FORM_flag: - case DW_FORM_ref1: - form_size = 1; - break; - - // 2 byte values - case DW_FORM_data2: - case DW_FORM_ref2: - form_size = 2; - break; - - // 4 byte values - case DW_FORM_strp: - case DW_FORM_data4: - case DW_FORM_ref4: - form_size = 4; - break; - - // 8 byte values - case DW_FORM_data8: - case DW_FORM_ref8: - case DW_FORM_ref_sig8: - form_size = 8; - break; - - // signed or unsigned LEB 128 values - case DW_FORM_sdata: - case DW_FORM_udata: - case DW_FORM_ref_udata: - case DW_FORM_GNU_str_index: - case DW_FORM_GNU_addr_index: - debug_info_data.getULEB128(&offset); - break; - - case DW_FORM_indirect: - form_is_indirect = true; - form = debug_info_data.getULEB128(&offset); - break; - - // FIXME: 64-bit for DWARF64 - case DW_FORM_sec_offset: - debug_info_data.getU32(offset_ptr); - break; - - default: - *offset_ptr = Offset; - return false; - } - offset += form_size; - } while (form_is_indirect); - } - } - *offset_ptr = offset; - return true; - } else { +bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *CU, + const uint8_t *FixedFormSizes, + uint32_t *OffsetPtr) { + Offset = *OffsetPtr; + DataExtractor DebugInfoData = CU->getDebugInfoExtractor(); + uint64_t AbbrCode = DebugInfoData.getULEB128(OffsetPtr); + if (0 == AbbrCode) { + // NULL debug tag entry. AbbrevDecl = NULL; - return true; // NULL debug tag entry + return true; + } + AbbrevDecl = CU->getAbbreviations()->getAbbreviationDeclaration(AbbrCode); + assert(AbbrevDecl); + assert(FixedFormSizes); // For best performance this should be specified! 
+ + // Skip all data in the .debug_info for the attributes + for (uint32_t i = 0, n = AbbrevDecl->getNumAttributes(); i < n; ++i) { + uint16_t Form = AbbrevDecl->getFormByIndex(i); + + // FIXME: Currently we're checking if this is less than the last + // entry in the fixed_form_sizes table, but this should be changed + // to use dynamic dispatch. + uint8_t FixedFormSize = + (Form < DW_FORM_ref_sig8) ? FixedFormSizes[Form] : 0; + if (FixedFormSize) + *OffsetPtr += FixedFormSize; + else if (!DWARFFormValue::skipValue(Form, DebugInfoData, OffsetPtr, + CU)) { + // Restore the original offset. + *OffsetPtr = Offset; + return false; + } } + return true; } bool -DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu, - uint32_t *offset_ptr) { - DataExtractor debug_info_data = cu->getDebugInfoExtractor(); - const uint32_t cu_end_offset = cu->getNextCompileUnitOffset(); - const uint8_t cu_addr_size = cu->getAddressByteSize(); - uint32_t offset = *offset_ptr; - if ((offset < cu_end_offset) && debug_info_data.isValidOffset(offset)) { - Offset = offset; - - uint64_t abbrCode = debug_info_data.getULEB128(&offset); - - if (abbrCode) { - AbbrevDecl = cu->getAbbreviations()->getAbbreviationDeclaration(abbrCode); - - if (AbbrevDecl) { - uint16_t tag = AbbrevDecl->getTag(); - - bool isCompileUnitTag = tag == DW_TAG_compile_unit; - if(cu && isCompileUnitTag) - const_cast<DWARFCompileUnit*>(cu)->setBaseAddress(0); - - // Skip all data in the .debug_info for the attributes - const uint32_t numAttributes = AbbrevDecl->getNumAttributes(); - for (uint32_t i = 0; i != numAttributes; ++i) { - uint16_t attr = AbbrevDecl->getAttrByIndex(i); - uint16_t form = AbbrevDecl->getFormByIndex(i); - - if (isCompileUnitTag && - ((attr == DW_AT_entry_pc) || (attr == DW_AT_low_pc))) { - DWARFFormValue form_value(form); - if (form_value.extractValue(debug_info_data, &offset, cu)) { - if (attr == DW_AT_low_pc || attr == DW_AT_entry_pc) - const_cast<DWARFCompileUnit*>(cu) - ->setBaseAddress(form_value.getUnsigned()); - } - } else { - bool form_is_indirect = false; - do { - form_is_indirect = false; - register uint32_t form_size = 0; - switch (form) { - // Blocks if inlined data that have a length field and the data - // bytes // inlined in the .debug_info - case DW_FORM_exprloc: - case DW_FORM_block: - form_size = debug_info_data.getULEB128(&offset); - break; - case DW_FORM_block1: - form_size = debug_info_data.getU8(&offset); - break; - case DW_FORM_block2: - form_size = debug_info_data.getU16(&offset); - break; - case DW_FORM_block4: - form_size = debug_info_data.getU32(&offset); - break; - - // Inlined NULL terminated C-strings - case DW_FORM_string: - debug_info_data.getCStr(&offset); - break; - - // Compile unit address sized values - case DW_FORM_addr: - case DW_FORM_ref_addr: - form_size = cu_addr_size; - break; - - // 0 byte value - case DW_FORM_flag_present: - form_size = 0; - break; - - // 1 byte values - case DW_FORM_data1: - case DW_FORM_flag: - case DW_FORM_ref1: - form_size = 1; - break; - - // 2 byte values - case DW_FORM_data2: - case DW_FORM_ref2: - form_size = 2; - break; - - // 4 byte values - case DW_FORM_strp: - form_size = 4; - break; - - case DW_FORM_data4: - case DW_FORM_ref4: - form_size = 4; - break; - - // 8 byte values - case DW_FORM_data8: - case DW_FORM_ref8: - case DW_FORM_ref_sig8: - form_size = 8; - break; - - // signed or unsigned LEB 128 values - case DW_FORM_sdata: - case DW_FORM_udata: - case DW_FORM_ref_udata: - case DW_FORM_GNU_str_index: - case DW_FORM_GNU_addr_index: - 
debug_info_data.getULEB128(&offset); - break; - - case DW_FORM_indirect: - form = debug_info_data.getULEB128(&offset); - form_is_indirect = true; - break; - - // FIXME: 64-bit for DWARF64. - case DW_FORM_sec_offset: - debug_info_data.getU32(offset_ptr); - break; - - default: - *offset_ptr = offset; - return false; - } - - offset += form_size; - } while (form_is_indirect); - } - } - *offset_ptr = offset; - return true; +DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *CU, + uint32_t *OffsetPtr) { + DataExtractor DebugInfoData = CU->getDebugInfoExtractor(); + const uint32_t CUEndOffset = CU->getNextCompileUnitOffset(); + Offset = *OffsetPtr; + if ((Offset >= CUEndOffset) || !DebugInfoData.isValidOffset(Offset)) + return false; + uint64_t AbbrCode = DebugInfoData.getULEB128(OffsetPtr); + if (0 == AbbrCode) { + // NULL debug tag entry. + AbbrevDecl = NULL; + return true; + } + AbbrevDecl = CU->getAbbreviations()->getAbbreviationDeclaration(AbbrCode); + if (0 == AbbrevDecl) { + // Restore the original offset. + *OffsetPtr = Offset; + return false; + } + bool IsCompileUnitTag = (AbbrevDecl->getTag() == DW_TAG_compile_unit); + if (IsCompileUnitTag) + const_cast<DWARFCompileUnit*>(CU)->setBaseAddress(0); + + // Skip all data in the .debug_info for the attributes + for (uint32_t i = 0, n = AbbrevDecl->getNumAttributes(); i < n; ++i) { + uint16_t Attr = AbbrevDecl->getAttrByIndex(i); + uint16_t Form = AbbrevDecl->getFormByIndex(i); + + if (IsCompileUnitTag && + ((Attr == DW_AT_entry_pc) || (Attr == DW_AT_low_pc))) { + DWARFFormValue FormValue(Form); + if (FormValue.extractValue(DebugInfoData, OffsetPtr, CU)) { + if (Attr == DW_AT_low_pc || Attr == DW_AT_entry_pc) + const_cast<DWARFCompileUnit*>(CU) + ->setBaseAddress(FormValue.getUnsigned()); } - } else { - AbbrevDecl = NULL; - *offset_ptr = offset; - return true; // NULL debug tag entry + } else if (!DWARFFormValue::skipValue(Form, DebugInfoData, OffsetPtr, + CU)) { + // Restore the original offset. + *OffsetPtr = Offset; + return false; } } - - return false; + return true; } bool DWARFDebugInfoEntryMinimal::isSubprogramDIE() const { diff --git a/lib/DebugInfo/DWARFDebugInfoEntry.h b/lib/DebugInfo/DWARFDebugInfoEntry.h index 9c1b2be..9003591 100644 --- a/lib/DebugInfo/DWARFDebugInfoEntry.h +++ b/lib/DebugInfo/DWARFDebugInfoEntry.h @@ -45,12 +45,17 @@ public: uint32_t *offset_ptr, uint16_t attr, uint16_t form, unsigned indent = 0) const; - bool extractFast(const DWARFCompileUnit *cu, const uint8_t *fixed_form_sizes, - uint32_t *offset_ptr); + /// Extracts a debug info entry, which is a child of a given compile unit, + /// starting at a given offset. If DIE can't be extracted, returns false and + /// doesn't change OffsetPtr. + bool extractFast(const DWARFCompileUnit *CU, const uint8_t *FixedFormSizes, + uint32_t *OffsetPtr); /// Extract a debug info entry for a given compile unit from the /// .debug_info and .debug_abbrev data starting at the given offset. - bool extract(const DWARFCompileUnit *cu, uint32_t *offset_ptr); + /// If compile unit can't be parsed, returns false and doesn't change + /// OffsetPtr. + bool extract(const DWARFCompileUnit *CU, uint32_t *OffsetPtr); uint32_t getTag() const { return AbbrevDecl ? 
AbbrevDecl->getTag() : 0; } bool isNULL() const { return AbbrevDecl == 0; } diff --git a/lib/DebugInfo/DWARFFormValue.cpp b/lib/DebugInfo/DWARFFormValue.cpp index 9f807aa..c5583f9 100644 --- a/lib/DebugInfo/DWARFFormValue.cpp +++ b/lib/DebugInfo/DWARFFormValue.cpp @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// -#include "DWARFFormValue.h" +#include "llvm/DebugInfo/DWARFFormValue.h" #include "DWARFCompileUnit.h" #include "DWARFContext.h" #include "llvm/Support/Debug.h" @@ -18,39 +18,16 @@ using namespace llvm; using namespace dwarf; -static const uint8_t form_sizes_addr4[] = { - 0, // 0x00 unused - 4, // 0x01 DW_FORM_addr - 0, // 0x02 unused - 0, // 0x03 DW_FORM_block2 - 0, // 0x04 DW_FORM_block4 - 2, // 0x05 DW_FORM_data2 - 4, // 0x06 DW_FORM_data4 - 8, // 0x07 DW_FORM_data8 - 0, // 0x08 DW_FORM_string - 0, // 0x09 DW_FORM_block - 0, // 0x0a DW_FORM_block1 - 1, // 0x0b DW_FORM_data1 - 1, // 0x0c DW_FORM_flag - 0, // 0x0d DW_FORM_sdata - 4, // 0x0e DW_FORM_strp - 0, // 0x0f DW_FORM_udata - 4, // 0x10 DW_FORM_ref_addr - 1, // 0x11 DW_FORM_ref1 - 2, // 0x12 DW_FORM_ref2 - 4, // 0x13 DW_FORM_ref4 - 8, // 0x14 DW_FORM_ref8 - 0, // 0x15 DW_FORM_ref_udata - 0, // 0x16 DW_FORM_indirect - 4, // 0x17 DW_FORM_sec_offset - 0, // 0x18 DW_FORM_exprloc - 0, // 0x19 DW_FORM_flag_present - 8, // 0x20 DW_FORM_ref_sig8 +namespace { +template <uint8_t AddrSize, uint8_t RefAddrSize> struct FixedFormSizes { + static const uint8_t sizes[]; }; +} -static const uint8_t form_sizes_addr8[] = { +template <uint8_t AddrSize, uint8_t RefAddrSize> +const uint8_t FixedFormSizes<AddrSize, RefAddrSize>::sizes[] = { 0, // 0x00 unused - 8, // 0x01 DW_FORM_addr + AddrSize, // 0x01 DW_FORM_addr 0, // 0x02 unused 0, // 0x03 DW_FORM_block2 0, // 0x04 DW_FORM_block4 @@ -65,7 +42,7 @@ static const uint8_t form_sizes_addr8[] = { 0, // 0x0d DW_FORM_sdata 4, // 0x0e DW_FORM_strp 0, // 0x0f DW_FORM_udata - 8, // 0x10 DW_FORM_ref_addr + RefAddrSize, // 0x10 DW_FORM_ref_addr 1, // 0x11 DW_FORM_ref1 2, // 0x12 DW_FORM_ref2 4, // 0x13 DW_FORM_ref4 @@ -78,13 +55,23 @@ static const uint8_t form_sizes_addr8[] = { 8, // 0x20 DW_FORM_ref_sig8 }; +static uint8_t getRefAddrSize(uint8_t AddrSize, uint16_t Version) { + // FIXME: Support DWARF64. + return (Version == 2) ? AddrSize : 4; +} + const uint8_t * -DWARFFormValue::getFixedFormSizesForAddressSize(uint8_t addr_size) { - switch (addr_size) { - case 4: return form_sizes_addr4; - case 8: return form_sizes_addr8; - } - return NULL; +DWARFFormValue::getFixedFormSizes(uint8_t AddrSize, uint16_t Version) { + uint8_t RefAddrSize = getRefAddrSize(AddrSize, Version); + if (AddrSize == 4 && RefAddrSize == 4) + return FixedFormSizes<4, 4>::sizes; + if (AddrSize == 4 && RefAddrSize == 8) + return FixedFormSizes<4, 8>::sizes; + if (AddrSize == 8 && RefAddrSize == 4) + return FixedFormSizes<8, 4>::sizes; + if (AddrSize == 8 && RefAddrSize == 8) + return FixedFormSizes<8, 8>::sizes; + return 0; } bool @@ -100,14 +87,16 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr, switch (Form) { case DW_FORM_addr: case DW_FORM_ref_addr: { - RelocAddrMap::const_iterator AI - = cu->getRelocMap()->find(*offset_ptr); + uint16_t AddrSize = + (Form == DW_FORM_addr) + ? 
cu->getAddressByteSize() + : getRefAddrSize(cu->getAddressByteSize(), cu->getVersion()); + RelocAddrMap::const_iterator AI = cu->getRelocMap()->find(*offset_ptr); if (AI != cu->getRelocMap()->end()) { const std::pair<uint8_t, int64_t> &R = AI->second; - Value.uval = data.getUnsigned(offset_ptr, cu->getAddressByteSize()) + - R.second; + Value.uval = data.getUnsigned(offset_ptr, AddrSize) + R.second; } else - Value.uval = data.getUnsigned(offset_ptr, cu->getAddressByteSize()); + Value.uval = data.getUnsigned(offset_ptr, AddrSize); break; } case DW_FORM_exprloc: @@ -172,10 +161,17 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr, Form = data.getULEB128(offset_ptr); indirect = true; break; - case DW_FORM_sec_offset: + case DW_FORM_sec_offset: { // FIXME: This is 64-bit for DWARF64. - Value.uval = data.getU32(offset_ptr); + RelocAddrMap::const_iterator AI + = cu->getRelocMap()->find(*offset_ptr); + if (AI != cu->getRelocMap()->end()) { + const std::pair<uint8_t, int64_t> &R = AI->second; + Value.uval = data.getU32(offset_ptr) + R.second; + } else + Value.uval = data.getU32(offset_ptr); break; + } case DW_FORM_flag_present: Value.uval = 1; break; @@ -216,7 +212,6 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data, uint32_t *offset_ptr, const DWARFCompileUnit *cu) { bool indirect = false; do { - indirect = false; switch (form) { // Blocks if inlined data that have a length field and the data bytes // inlined in the .debug_info @@ -249,9 +244,11 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data, // Compile unit address sized values case DW_FORM_addr: - case DW_FORM_ref_addr: *offset_ptr += cu->getAddressByteSize(); return true; + case DW_FORM_ref_addr: + *offset_ptr += getRefAddrSize(cu->getAddressByteSize(), cu->getVersion()); + return true; // 0 byte values - implied from the form. case DW_FORM_flag_present: diff --git a/lib/DebugInfo/DWARFFormValue.h b/lib/DebugInfo/DWARFFormValue.h deleted file mode 100644 index b863001..0000000 --- a/lib/DebugInfo/DWARFFormValue.h +++ /dev/null @@ -1,82 +0,0 @@ -//===-- DWARFFormValue.h ----------------------------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_DEBUGINFO_DWARFFORMVALUE_H -#define LLVM_DEBUGINFO_DWARFFORMVALUE_H - -#include "llvm/Support/DataExtractor.h" - -namespace llvm { - -class DWARFCompileUnit; -class raw_ostream; - -class DWARFFormValue { -public: - struct ValueType { - ValueType() : data(NULL) { - uval = 0; - } - - union { - uint64_t uval; - int64_t sval; - const char* cstr; - }; - const uint8_t* data; - }; - - enum { - eValueTypeInvalid = 0, - eValueTypeUnsigned, - eValueTypeSigned, - eValueTypeCStr, - eValueTypeBlock - }; - -private: - uint16_t Form; // Form for this value. - ValueType Value; // Contains all data for the form. 
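Illustrative sketch, not part of the patch: the getRefAddrSize helper introduced above encodes the DWARF rule that DW_FORM_ref_addr is address-sized only in version 2; from version 3 on it is a section offset (4 bytes in DWARF32, with DWARF64 left as a FIXME). A standalone restatement of that rule:

#include <cassert>
#include <cstdint>

static uint8_t refAddrSize(uint8_t AddrSize, uint16_t Version) {
  // DWARF v2: same width as an address; v3+: a DWARF32 offset.
  return Version == 2 ? AddrSize : 4;
}

int main() {
  assert(refAddrSize(8, 2) == 8);   // DWARF v2 on a 64-bit target
  assert(refAddrSize(8, 3) == 4);   // DWARF v3+ uses 4-byte offsets
  return 0;
}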
- -public: - DWARFFormValue(uint16_t form = 0) : Form(form) {} - uint16_t getForm() const { return Form; } - const ValueType& value() const { return Value; } - void dump(raw_ostream &OS, const DWARFCompileUnit* cu) const; - bool extractValue(DataExtractor data, uint32_t *offset_ptr, - const DWARFCompileUnit *cu); - bool isInlinedCStr() const { - return Value.data != NULL && Value.data == (const uint8_t*)Value.cstr; - } - const uint8_t *BlockData() const; - uint64_t getReference(const DWARFCompileUnit* cu) const; - - /// Resolve any compile unit specific references so that we don't need - /// the compile unit at a later time in order to work with the form - /// value. - bool resolveCompileUnitReferences(const DWARFCompileUnit* cu); - uint64_t getUnsigned() const { return Value.uval; } - int64_t getSigned() const { return Value.sval; } - const char *getAsCString(const DataExtractor *debug_str_data_ptr) const; - const char *getIndirectCString(const DataExtractor *, - const DataExtractor *) const; - uint64_t getIndirectAddress(const DataExtractor *, - const DWARFCompileUnit *) const; - bool skipValue(DataExtractor debug_info_data, uint32_t *offset_ptr, - const DWARFCompileUnit *cu) const; - static bool skipValue(uint16_t form, DataExtractor debug_info_data, - uint32_t *offset_ptr, const DWARFCompileUnit *cu); - static bool isBlockForm(uint16_t form); - static bool isDataForm(uint16_t form); - static const uint8_t *getFixedFormSizesForAddressSize(uint8_t addr_size); -}; - -} - -#endif diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp index 3d59d25..e43ba4f 100644 --- a/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/lib/ExecutionEngine/ExecutionEngine.cpp @@ -535,6 +535,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) { if (isa<UndefValue>(C)) { GenericValue Result; switch (C->getType()->getTypeID()) { + default: + break; case Type::IntegerTyID: case Type::X86_FP80TyID: case Type::FP128TyID: @@ -543,7 +545,16 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) { // with the correct bit width. Result.IntVal = APInt(C->getType()->getPrimitiveSizeInBits(), 0); break; - default: + case Type::VectorTyID: + // if the whole vector is 'undef' just reserve memory for the value. + const VectorType* VTy = dyn_cast<VectorType>(C->getType()); + const Type *ElemTy = VTy->getElementType(); + unsigned int elemNum = VTy->getNumElements(); + Result.AggregateVal.resize(elemNum); + if (ElemTy->isIntegerTy()) + for (unsigned int i = 0; i < elemNum; ++i) + Result.AggregateVal[i].IntVal = + APInt(ElemTy->getPrimitiveSizeInBits(), 0); break; } return Result; @@ -825,6 +836,101 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) { else llvm_unreachable("Unknown constant pointer type!"); break; + case Type::VectorTyID: { + unsigned elemNum; + Type* ElemTy; + const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C); + const ConstantVector *CV = dyn_cast<ConstantVector>(C); + const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(C); + + if (CDV) { + elemNum = CDV->getNumElements(); + ElemTy = CDV->getElementType(); + } else if (CV || CAZ) { + VectorType* VTy = dyn_cast<VectorType>(C->getType()); + elemNum = VTy->getNumElements(); + ElemTy = VTy->getElementType(); + } else { + llvm_unreachable("Unknown constant vector type!"); + } + + Result.AggregateVal.resize(elemNum); + // Check if vector holds floats. 
+ if(ElemTy->isFloatTy()) { + if (CAZ) { + GenericValue floatZero; + floatZero.FloatVal = 0.f; + std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(), + floatZero); + break; + } + if(CV) { + for (unsigned i = 0; i < elemNum; ++i) + if (!isa<UndefValue>(CV->getOperand(i))) + Result.AggregateVal[i].FloatVal = cast<ConstantFP>( + CV->getOperand(i))->getValueAPF().convertToFloat(); + break; + } + if(CDV) + for (unsigned i = 0; i < elemNum; ++i) + Result.AggregateVal[i].FloatVal = CDV->getElementAsFloat(i); + + break; + } + // Check if vector holds doubles. + if (ElemTy->isDoubleTy()) { + if (CAZ) { + GenericValue doubleZero; + doubleZero.DoubleVal = 0.0; + std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(), + doubleZero); + break; + } + if(CV) { + for (unsigned i = 0; i < elemNum; ++i) + if (!isa<UndefValue>(CV->getOperand(i))) + Result.AggregateVal[i].DoubleVal = cast<ConstantFP>( + CV->getOperand(i))->getValueAPF().convertToDouble(); + break; + } + if(CDV) + for (unsigned i = 0; i < elemNum; ++i) + Result.AggregateVal[i].DoubleVal = CDV->getElementAsDouble(i); + + break; + } + // Check if vector holds integers. + if (ElemTy->isIntegerTy()) { + if (CAZ) { + GenericValue intZero; + intZero.IntVal = APInt(ElemTy->getScalarSizeInBits(), 0ull); + std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(), + intZero); + break; + } + if(CV) { + for (unsigned i = 0; i < elemNum; ++i) + if (!isa<UndefValue>(CV->getOperand(i))) + Result.AggregateVal[i].IntVal = cast<ConstantInt>( + CV->getOperand(i))->getValue(); + else { + Result.AggregateVal[i].IntVal = + APInt(CV->getOperand(i)->getType()->getPrimitiveSizeInBits(), 0); + } + break; + } + if(CDV) + for (unsigned i = 0; i < elemNum; ++i) + Result.AggregateVal[i].IntVal = APInt( + CDV->getElementType()->getPrimitiveSizeInBits(), + CDV->getElementAsInteger(i)); + + break; + } + llvm_unreachable("Unknown constant pointer type!"); + } + break; + default: SmallString<256> Msg; raw_svector_ostream OS(Msg); @@ -842,7 +948,7 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!"); const uint8_t *Src = (const uint8_t *)IntVal.getRawData(); - if (sys::isLittleEndianHost()) { + if (sys::IsLittleEndianHost) { // Little-endian host - the source is ordered from LSB to MSB. Order the // destination from LSB to MSB: Do a straight copy. 
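Illustrative sketch, not part of the patch: StoreIntToMemory above copies the low-order bytes straight through on a little-endian host and has to take them from the other end of the word otherwise. A simplified standalone version of that idea using a plain uint64_t instead of an APInt (names here are hypothetical):

#include <cstdint>
#include <cstring>

// Store the low StoreBytes bytes of Val (StoreBytes <= 8) to Dst, choosing
// the source range based on the host's byte order.
static void storeLowBytes(uint64_t Val, uint8_t *Dst, unsigned StoreBytes) {
  const uint8_t *Src = reinterpret_cast<const uint8_t *>(&Val);
  const uint16_t Probe = 1;
  const bool LittleEndianHost =
      *reinterpret_cast<const uint8_t *>(&Probe) == 1;
  if (LittleEndianHost)
    std::memcpy(Dst, Src, StoreBytes);                       // LSB first
  else
    std::memcpy(Dst, Src + sizeof(Val) - StoreBytes, StoreBytes);
}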
memcpy(Dst, Src, StoreBytes); @@ -866,6 +972,9 @@ void ExecutionEngine::StoreValueToMemory(const GenericValue &Val, const unsigned StoreBytes = getDataLayout()->getTypeStoreSize(Ty); switch (Ty->getTypeID()) { + default: + dbgs() << "Cannot store value of type " << *Ty << "!\n"; + break; case Type::IntegerTyID: StoreIntToMemory(Val.IntVal, (uint8_t*)Ptr, StoreBytes); break; @@ -885,11 +994,22 @@ void ExecutionEngine::StoreValueToMemory(const GenericValue &Val, *((PointerTy*)Ptr) = Val.PointerVal; break; - default: - dbgs() << "Cannot store value of type " << *Ty << "!\n"; + case Type::VectorTyID: + for (unsigned i = 0; i < Val.AggregateVal.size(); ++i) { + if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) + *(((double*)Ptr)+i) = Val.AggregateVal[i].DoubleVal; + if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) + *(((float*)Ptr)+i) = Val.AggregateVal[i].FloatVal; + if (cast<VectorType>(Ty)->getElementType()->isIntegerTy()) { + unsigned numOfBytes =(Val.AggregateVal[i].IntVal.getBitWidth()+7)/8; + StoreIntToMemory(Val.AggregateVal[i].IntVal, + (uint8_t*)Ptr + numOfBytes*i, numOfBytes); + } + } + break; } - if (sys::isLittleEndianHost() != getDataLayout()->isLittleEndian()) + if (sys::IsLittleEndianHost != getDataLayout()->isLittleEndian()) // Host and target are different endian - reverse the stored bytes. std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr); } @@ -901,7 +1021,7 @@ static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) { uint8_t *Dst = reinterpret_cast<uint8_t *>( const_cast<uint64_t *>(IntVal.getRawData())); - if (sys::isLittleEndianHost()) + if (sys::IsLittleEndianHost) // Little-endian host - the destination must be ordered from LSB to MSB. // The source is ordered from LSB to MSB: Do a straight copy. memcpy(Dst, Src, LoadBytes); @@ -951,6 +1071,31 @@ void ExecutionEngine::LoadValueFromMemory(GenericValue &Result, Result.IntVal = APInt(80, y); break; } + case Type::VectorTyID: { + const VectorType *VT = cast<VectorType>(Ty); + const Type *ElemT = VT->getElementType(); + const unsigned numElems = VT->getNumElements(); + if (ElemT->isFloatTy()) { + Result.AggregateVal.resize(numElems); + for (unsigned i = 0; i < numElems; ++i) + Result.AggregateVal[i].FloatVal = *((float*)Ptr+i); + } + if (ElemT->isDoubleTy()) { + Result.AggregateVal.resize(numElems); + for (unsigned i = 0; i < numElems; ++i) + Result.AggregateVal[i].DoubleVal = *((double*)Ptr+i); + } + if (ElemT->isIntegerTy()) { + GenericValue intZero; + const unsigned elemBitWidth = cast<IntegerType>(ElemT)->getBitWidth(); + intZero.IntVal = APInt(elemBitWidth, 0); + Result.AggregateVal.resize(numElems, intZero); + for (unsigned i = 0; i < numElems; ++i) + LoadIntFromMemory(Result.AggregateVal[i].IntVal, + (uint8_t*)Ptr+((elemBitWidth+7)/8)*i, (elemBitWidth+7)/8); + } + break; + } default: SmallString<256> Msg; raw_svector_ostream OS(Msg); diff --git a/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/lib/ExecutionEngine/ExecutionEngineBindings.cpp index f4e8246..f9b08a0 100644 --- a/lib/ExecutionEngine/ExecutionEngineBindings.cpp +++ b/lib/ExecutionEngine/ExecutionEngineBindings.cpp @@ -15,11 +15,33 @@ #include "llvm-c/ExecutionEngine.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/GenericValue.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Module.h" #include "llvm/Support/ErrorHandling.h" #include <cstring> using namespace llvm; +// Wrapping the C bindings types. 
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef) + +inline DataLayout *unwrap(LLVMTargetDataRef P) { + return reinterpret_cast<DataLayout*>(P); +} + +inline LLVMTargetDataRef wrap(const DataLayout *P) { + return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P)); +} + +inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) { + return reinterpret_cast<TargetLibraryInfo*>(P); +} + +inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) { + TargetLibraryInfo *X = const_cast<TargetLibraryInfo*>(P); + return reinterpret_cast<LLVMTargetLibraryInfoRef>(X); +} + /*===-- Operations on generic values --------------------------------------===*/ LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty, @@ -132,6 +154,59 @@ LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT, return 1; } +void LLVMInitializeMCJITCompilerOptions(LLVMMCJITCompilerOptions *PassedOptions, + size_t SizeOfPassedOptions) { + LLVMMCJITCompilerOptions options; + options.OptLevel = 0; + options.CodeModel = LLVMCodeModelJITDefault; + options.NoFramePointerElim = false; + options.EnableFastISel = false; + + memcpy(PassedOptions, &options, + std::min(sizeof(options), SizeOfPassedOptions)); +} + +LLVMBool LLVMCreateMCJITCompilerForModule( + LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M, + LLVMMCJITCompilerOptions *PassedOptions, size_t SizeOfPassedOptions, + char **OutError) { + LLVMMCJITCompilerOptions options; + // If the user passed a larger sized options struct, then they were compiled + // against a newer LLVM. Tell them that something is wrong. + if (SizeOfPassedOptions > sizeof(options)) { + *OutError = strdup( + "Refusing to use options struct that is larger than my own; assuming " + "LLVM library mismatch."); + return 1; + } + + // Defend against the user having an old version of the API by ensuring that + // any fields they didn't see are cleared. We must defend against fields being + // set to the bitwise equivalent of zero, and assume that this means "do the + // default" as if that option hadn't been available. 
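Illustrative sketch, not part of the patch: the struct-size argument of the new MCJIT C API entry points is the versioning mechanism; the library zero-initializes fields the caller never saw and rejects structs larger than it understands. A hypothetical caller, assuming the declarations live in llvm-c/ExecutionEngine.h and that MCJIT and the native target have been linked in and initialized elsewhere (e.g. via LLVMLinkInMCJIT()):

#include "llvm-c/Core.h"
#include "llvm-c/ExecutionEngine.h"

static LLVMExecutionEngineRef createJIT(LLVMModuleRef M) {
  LLVMMCJITCompilerOptions Options;
  LLVMInitializeMCJITCompilerOptions(&Options, sizeof(Options));
  Options.OptLevel = 2;                       // override only what you need
  LLVMExecutionEngineRef EE;
  char *Error = 0;
  if (LLVMCreateMCJITCompilerForModule(&EE, M, &Options, sizeof(Options),
                                       &Error)) {
    LLVMDisposeMessage(Error);                // Error was strdup'd on failure
    return 0;
  }
  return EE;
}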
+ LLVMInitializeMCJITCompilerOptions(&options, sizeof(options)); + memcpy(&options, PassedOptions, SizeOfPassedOptions); + + TargetOptions targetOptions; + targetOptions.NoFramePointerElim = options.NoFramePointerElim; + targetOptions.EnableFastISel = options.EnableFastISel; + + std::string Error; + EngineBuilder builder(unwrap(M)); + builder.setEngineKind(EngineKind::JIT) + .setErrorStr(&Error) + .setUseMCJIT(true) + .setOptLevel((CodeGenOpt::Level)options.OptLevel) + .setCodeModel(unwrap(options.CodeModel)) + .setTargetOptions(targetOptions); + if (ExecutionEngine *JIT = builder.create()) { + *OutJIT = wrap(JIT); + return 0; + } + *OutError = strdup(Error.c_str()); + return 1; +} + LLVMBool LLVMCreateExecutionEngine(LLVMExecutionEngineRef *OutEE, LLVMModuleProviderRef MP, char **OutError) { @@ -176,6 +251,8 @@ void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) { int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F, unsigned ArgC, const char * const *ArgV, const char * const *EnvP) { + unwrap(EE)->finalizeObject(); + std::vector<std::string> ArgVec; for (unsigned I = 0; I != ArgC; ++I) ArgVec.push_back(ArgV[I]); @@ -186,6 +263,8 @@ int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F, LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F, unsigned NumArgs, LLVMGenericValueRef *Args) { + unwrap(EE)->finalizeObject(); + std::vector<GenericValue> ArgVec; ArgVec.reserve(NumArgs); for (unsigned I = 0; I != NumArgs; ++I) @@ -234,7 +313,8 @@ LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name, return 1; } -void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, LLVMValueRef Fn) { +void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, + LLVMValueRef Fn) { return unwrap(EE)->recompileAndRelinkFunction(unwrap<Function>(Fn)); } @@ -248,5 +328,7 @@ void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global, } void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) { + unwrap(EE)->finalizeObject(); + return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global)); } diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp index ec4f7f6..b95a9e8 100644 --- a/lib/ExecutionEngine/Interpreter/Execution.cpp +++ b/lib/ExecutionEngine/Interpreter/Execution.cpp @@ -114,6 +114,15 @@ static void executeFRemInst(GenericValue &Dest, GenericValue Src1, Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \ break; +#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \ + case Type::VectorTyID: { \ + assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \ + Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \ + for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \ + Dest.AggregateVal[_i].IntVal = APInt(1, \ + Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\ + } break; + // Handle pointers specially because they must be compared with only as much // width as the host has. 
We _do not_ want to be comparing 64 bit values when // running on a 32-bit target, otherwise the upper 32 bits might mess up @@ -129,6 +138,7 @@ static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(eq,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty); IMPLEMENT_POINTER_ICMP(==); default: dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n"; @@ -142,6 +152,7 @@ static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(ne,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty); IMPLEMENT_POINTER_ICMP(!=); default: dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n"; @@ -155,6 +166,7 @@ static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(ult,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty); IMPLEMENT_POINTER_ICMP(<); default: dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n"; @@ -168,6 +180,7 @@ static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(slt,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty); IMPLEMENT_POINTER_ICMP(<); default: dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n"; @@ -181,6 +194,7 @@ static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(ugt,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty); IMPLEMENT_POINTER_ICMP(>); default: dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n"; @@ -194,6 +208,7 @@ static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(sgt,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty); IMPLEMENT_POINTER_ICMP(>); default: dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n"; @@ -207,6 +222,7 @@ static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(ule,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty); IMPLEMENT_POINTER_ICMP(<=); default: dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n"; @@ -220,6 +236,7 @@ static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(sle,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty); IMPLEMENT_POINTER_ICMP(<=); default: dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n"; @@ -233,6 +250,7 @@ static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(uge,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty); IMPLEMENT_POINTER_ICMP(>=); default: dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n"; @@ -246,6 +264,7 @@ static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2, GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_INTEGER_ICMP(sge,Ty); + IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty); IMPLEMENT_POINTER_ICMP(>=); default: dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n"; @@ -285,12 +304,29 @@ void Interpreter::visitICmpInst(ICmpInst &I) { Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \ break +#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \ + assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \ + 
Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \ + for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \ + Dest.AggregateVal[_i].IntVal = APInt(1, \ + Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\ + break; + +#define IMPLEMENT_VECTOR_FCMP(OP) \ + case Type::VectorTyID: \ + if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \ + IMPLEMENT_VECTOR_FCMP_T(OP, Float); \ + } else { \ + IMPLEMENT_VECTOR_FCMP_T(OP, Double); \ + } + static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; switch (Ty->getTypeID()) { IMPLEMENT_FCMP(==, Float); IMPLEMENT_FCMP(==, Double); + IMPLEMENT_VECTOR_FCMP(==); default: dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n"; llvm_unreachable(0); @@ -298,17 +334,65 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2, return Dest; } +#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \ + if (TY->isFloatTy()) { \ + if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \ + Dest.IntVal = APInt(1,false); \ + return Dest; \ + } \ + } else { \ + if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \ + Dest.IntVal = APInt(1,false); \ + return Dest; \ + } \ + } + +#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \ + assert(X.AggregateVal.size() == Y.AggregateVal.size()); \ + Dest.AggregateVal.resize( X.AggregateVal.size() ); \ + for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \ + if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \ + Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \ + Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \ + else { \ + Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \ + } \ + } + +#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \ + if (TY->isVectorTy()) { \ + if (dyn_cast<VectorType>(TY)->getElementType()->isFloatTy()) { \ + MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \ + } else { \ + MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \ + } \ + } \ + + + static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2, - Type *Ty) { + Type *Ty) +{ GenericValue Dest; + // if input is scalar value and Src1 or Src2 is NaN return false + IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2) + // if vector input detect NaNs and fill mask + MASK_VECTOR_NANS(Ty, Src1, Src2, false) + GenericValue DestMask = Dest; switch (Ty->getTypeID()) { IMPLEMENT_FCMP(!=, Float); IMPLEMENT_FCMP(!=, Double); - - default: - dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n"; - llvm_unreachable(0); + IMPLEMENT_VECTOR_FCMP(!=); + default: + dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n"; + llvm_unreachable(0); } + // in vector case mask out NaN elements + if (Ty->isVectorTy()) + for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++) + if (DestMask.AggregateVal[_i].IntVal == false) + Dest.AggregateVal[_i].IntVal = APInt(1,false); + return Dest; } @@ -318,6 +402,7 @@ static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2, switch (Ty->getTypeID()) { IMPLEMENT_FCMP(<=, Float); IMPLEMENT_FCMP(<=, Double); + IMPLEMENT_VECTOR_FCMP(<=); default: dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n"; llvm_unreachable(0); @@ -331,6 +416,7 @@ static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2, switch (Ty->getTypeID()) { IMPLEMENT_FCMP(>=, Float); IMPLEMENT_FCMP(>=, Double); + IMPLEMENT_VECTOR_FCMP(>=); default: dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n"; llvm_unreachable(0); @@ -344,6 +430,7 @@ static GenericValue executeFCMP_OLT(GenericValue Src1, 
GenericValue Src2, switch (Ty->getTypeID()) { IMPLEMENT_FCMP(<, Float); IMPLEMENT_FCMP(<, Double); + IMPLEMENT_VECTOR_FCMP(<); default: dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n"; llvm_unreachable(0); @@ -357,6 +444,7 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2, switch (Ty->getTypeID()) { IMPLEMENT_FCMP(>, Float); IMPLEMENT_FCMP(>, Double); + IMPLEMENT_VECTOR_FCMP(>); default: dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n"; llvm_unreachable(0); @@ -375,18 +463,32 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2, return Dest; \ } +#define IMPLEMENT_VECTOR_UNORDERED(TY, X,Y, _FUNC) \ + if (TY->isVectorTy()) { \ + GenericValue DestMask = Dest; \ + Dest = _FUNC(Src1, Src2, Ty); \ + for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++) \ + if (DestMask.AggregateVal[_i].IntVal == true) \ + Dest.AggregateVal[_i].IntVal = APInt(1,true); \ + return Dest; \ + } static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; IMPLEMENT_UNORDERED(Ty, Src1, Src2) + MASK_VECTOR_NANS(Ty, Src1, Src2, true) + IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ) return executeFCMP_OEQ(Src1, Src2, Ty); + } static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; IMPLEMENT_UNORDERED(Ty, Src1, Src2) + MASK_VECTOR_NANS(Ty, Src1, Src2, true) + IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE) return executeFCMP_ONE(Src1, Src2, Ty); } @@ -394,6 +496,8 @@ static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; IMPLEMENT_UNORDERED(Ty, Src1, Src2) + MASK_VECTOR_NANS(Ty, Src1, Src2, true) + IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE) return executeFCMP_OLE(Src1, Src2, Ty); } @@ -401,6 +505,8 @@ static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; IMPLEMENT_UNORDERED(Ty, Src1, Src2) + MASK_VECTOR_NANS(Ty, Src1, Src2, true) + IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE) return executeFCMP_OGE(Src1, Src2, Ty); } @@ -408,6 +514,8 @@ static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; IMPLEMENT_UNORDERED(Ty, Src1, Src2) + MASK_VECTOR_NANS(Ty, Src1, Src2, true) + IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT) return executeFCMP_OLT(Src1, Src2, Ty); } @@ -415,33 +523,88 @@ static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; IMPLEMENT_UNORDERED(Ty, Src1, Src2) + MASK_VECTOR_NANS(Ty, Src1, Src2, true) + IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT) return executeFCMP_OGT(Src1, Src2, Ty); } static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; - if (Ty->isFloatTy()) + if(Ty->isVectorTy()) { + assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); + Dest.AggregateVal.resize( Src1.AggregateVal.size() ); + if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) { + for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) + Dest.AggregateVal[_i].IntVal = APInt(1, + ( (Src1.AggregateVal[_i].FloatVal == + Src1.AggregateVal[_i].FloatVal) && + (Src2.AggregateVal[_i].FloatVal == + Src2.AggregateVal[_i].FloatVal))); + } else { + for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) + Dest.AggregateVal[_i].IntVal = APInt(1, + ( (Src1.AggregateVal[_i].DoubleVal == + Src1.AggregateVal[_i].DoubleVal) && + 
(Src2.AggregateVal[_i].DoubleVal == + Src2.AggregateVal[_i].DoubleVal))); + } + } else if (Ty->isFloatTy()) Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal && Src2.FloatVal == Src2.FloatVal)); - else + else { Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal && Src2.DoubleVal == Src2.DoubleVal)); + } return Dest; } static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2, Type *Ty) { GenericValue Dest; - if (Ty->isFloatTy()) + if(Ty->isVectorTy()) { + assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); + Dest.AggregateVal.resize( Src1.AggregateVal.size() ); + if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) { + for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) + Dest.AggregateVal[_i].IntVal = APInt(1, + ( (Src1.AggregateVal[_i].FloatVal != + Src1.AggregateVal[_i].FloatVal) || + (Src2.AggregateVal[_i].FloatVal != + Src2.AggregateVal[_i].FloatVal))); + } else { + for( size_t _i=0;_i<Src1.AggregateVal.size();_i++) + Dest.AggregateVal[_i].IntVal = APInt(1, + ( (Src1.AggregateVal[_i].DoubleVal != + Src1.AggregateVal[_i].DoubleVal) || + (Src2.AggregateVal[_i].DoubleVal != + Src2.AggregateVal[_i].DoubleVal))); + } + } else if (Ty->isFloatTy()) Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal || Src2.FloatVal != Src2.FloatVal)); - else + else { Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal || Src2.DoubleVal != Src2.DoubleVal)); + } return Dest; } +static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2, + const Type *Ty, const bool val) { + GenericValue Dest; + if(Ty->isVectorTy()) { + assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); + Dest.AggregateVal.resize( Src1.AggregateVal.size() ); + for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++) + Dest.AggregateVal[_i].IntVal = APInt(1,val); + } else { + Dest.IntVal = APInt(1, val); + } + + return Dest; +} + void Interpreter::visitFCmpInst(FCmpInst &I) { ExecutionContext &SF = ECStack.back(); Type *Ty = I.getOperand(0)->getType(); @@ -450,8 +613,14 @@ void Interpreter::visitFCmpInst(FCmpInst &I) { GenericValue R; // Result switch (I.getPredicate()) { - case FCmpInst::FCMP_FALSE: R.IntVal = APInt(1,false); break; - case FCmpInst::FCMP_TRUE: R.IntVal = APInt(1,true); break; + default: + dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I; + llvm_unreachable(0); + break; + case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false); + break; + case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true); + break; case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break; case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break; case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break; @@ -466,9 +635,6 @@ void Interpreter::visitFCmpInst(FCmpInst &I) { case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break; case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break; case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break; - default: - dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I; - llvm_unreachable(0); } SetValue(&I, R, SF); @@ -502,16 +668,8 @@ static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1, case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty); case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty); case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty); - case FCmpInst::FCMP_FALSE: { - GenericValue Result; - Result.IntVal = APInt(1, false); - return Result; - } - case 
FCmpInst::FCMP_TRUE: { - GenericValue Result; - Result.IntVal = APInt(1, true); - return Result; - } + case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false); + case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true); default: dbgs() << "Unhandled Cmp predicate\n"; llvm_unreachable(0); @@ -525,27 +683,105 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) { GenericValue Src2 = getOperandValue(I.getOperand(1), SF); GenericValue R; // Result - switch (I.getOpcode()) { - case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break; - case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break; - case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break; - case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break; - case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break; - case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break; - case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break; - case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break; - case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break; - case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break; - case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break; - case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break; - case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break; - case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break; - case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break; - default: - dbgs() << "Don't know how to handle this binary operator!\n-->" << I; - llvm_unreachable(0); + // First process vector operation + if (Ty->isVectorTy()) { + assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); + R.AggregateVal.resize(Src1.AggregateVal.size()); + + // Macros to execute binary operation 'OP' over integer vectors +#define INTEGER_VECTOR_OPERATION(OP) \ + for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \ + R.AggregateVal[i].IntVal = \ + Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal; + + // Additional macros to execute binary operations udiv/sdiv/urem/srem since + // they have different notation. 
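To make the vector path above concrete, here is a minimal expansion sketch of INTEGER_VECTOR_OPERATION(+) for an integer vector add; the INTEGER_VECTOR_FUNCTION variant announced in the comment above is defined next in the patch and differs only in calling the named APInt method (udiv, sdiv, urem or srem) instead of an operator:

// Expansion of INTEGER_VECTOR_OPERATION(+) inside visitBinaryOperator:
// one APInt addition per element of the operand vectors.
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
  R.AggregateVal[i].IntVal =
      Src1.AggregateVal[i].IntVal + Src2.AggregateVal[i].IntVal;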
+#define INTEGER_VECTOR_FUNCTION(OP) \ + for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \ + R.AggregateVal[i].IntVal = \ + Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal); + + // Macros to execute binary operation 'OP' over floating point type TY + // (float or double) vectors +#define FLOAT_VECTOR_FUNCTION(OP, TY) \ + for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \ + R.AggregateVal[i].TY = \ + Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY; + + // Macros to choose appropriate TY: float or double and run operation + // execution +#define FLOAT_VECTOR_OP(OP) { \ + if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) \ + FLOAT_VECTOR_FUNCTION(OP, FloatVal) \ + else { \ + if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \ + FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \ + else { \ + dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \ + llvm_unreachable(0); \ + } \ + } \ +} + + switch(I.getOpcode()){ + default: + dbgs() << "Don't know how to handle this binary operator!\n-->" << I; + llvm_unreachable(0); + break; + case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break; + case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break; + case Instruction::Mul: INTEGER_VECTOR_OPERATION(*) break; + case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break; + case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break; + case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break; + case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break; + case Instruction::And: INTEGER_VECTOR_OPERATION(&) break; + case Instruction::Or: INTEGER_VECTOR_OPERATION(|) break; + case Instruction::Xor: INTEGER_VECTOR_OPERATION(^) break; + case Instruction::FAdd: FLOAT_VECTOR_OP(+) break; + case Instruction::FSub: FLOAT_VECTOR_OP(-) break; + case Instruction::FMul: FLOAT_VECTOR_OP(*) break; + case Instruction::FDiv: FLOAT_VECTOR_OP(/) break; + case Instruction::FRem: + if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) + for (unsigned i = 0; i < R.AggregateVal.size(); ++i) + R.AggregateVal[i].FloatVal = + fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal); + else { + if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy()) + for (unsigned i = 0; i < R.AggregateVal.size(); ++i) + R.AggregateVal[i].DoubleVal = + fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal); + else { + dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n"; + llvm_unreachable(0); + } + } + break; + } + } else { + switch (I.getOpcode()) { + default: + dbgs() << "Don't know how to handle this binary operator!\n-->" << I; + llvm_unreachable(0); + break; + case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break; + case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break; + case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break; + case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break; + case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break; + case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break; + case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break; + case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break; + case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break; + case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break; + case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break; + case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break; + case Instruction::And: R.IntVal = Src1.IntVal & 
Src2.IntVal; break; + case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break; + case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break; + } } - SetValue(&I, R, SF); } @@ -1187,6 +1423,39 @@ void Interpreter::visitVAArgInst(VAArgInst &I) { ++VAList.UIntPairVal.second; } +void Interpreter::visitExtractElementInst(ExtractElementInst &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue Dest; + + Type *Ty = I.getType(); + const unsigned indx = unsigned(Src2.IntVal.getZExtValue()); + + if(Src1.AggregateVal.size() > indx) { + switch (Ty->getTypeID()) { + default: + dbgs() << "Unhandled destination type for extractelement instruction: " + << *Ty << "\n"; + llvm_unreachable(0); + break; + case Type::IntegerTyID: + Dest.IntVal = Src1.AggregateVal[indx].IntVal; + break; + case Type::FloatTyID: + Dest.FloatVal = Src1.AggregateVal[indx].FloatVal; + break; + case Type::DoubleTyID: + Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal; + break; + } + } else { + dbgs() << "Invalid index in extractelement instruction\n"; + } + + SetValue(&I, Dest, SF); +} + GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE, ExecutionContext &SF) { switch (CE->getOpcode()) { diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.h b/lib/ExecutionEngine/Interpreter/Interpreter.h index e95db2f..2952d7e 100644 --- a/lib/ExecutionEngine/Interpreter/Interpreter.h +++ b/lib/ExecutionEngine/Interpreter/Interpreter.h @@ -178,6 +178,7 @@ public: void visitAShr(BinaryOperator &I); void visitVAArgInst(VAArgInst &I); + void visitExtractElementInst(ExtractElementInst &I); void visitInstruction(Instruction &I) { errs() << I << "\n"; llvm_unreachable("Instruction not interpretable yet!"); diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/lib/ExecutionEngine/MCJIT/MCJIT.cpp index fee10e1..ccaa72a 100644 --- a/lib/ExecutionEngine/MCJIT/MCJIT.cpp +++ b/lib/ExecutionEngine/MCJIT/MCJIT.cpp @@ -14,6 +14,7 @@ #include "llvm/ExecutionEngine/MCJIT.h" #include "llvm/ExecutionEngine/ObjectBuffer.h" #include "llvm/ExecutionEngine/ObjectImage.h" +#include "llvm/ExecutionEngine/SectionMemoryManager.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" @@ -46,13 +47,14 @@ ExecutionEngine *MCJIT::createJIT(Module *M, // FIXME: Don't do this here. sys::DynamicLibrary::LoadLibraryPermanently(0, NULL); - return new MCJIT(M, TM, JMM, GVsWithCode); + return new MCJIT(M, TM, JMM ? JMM : new SectionMemoryManager(), GVsWithCode); } MCJIT::MCJIT(Module *m, TargetMachine *tm, RTDyldMemoryManager *MM, bool AllocateGVsWithCode) - : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM), - isCompiled(false), M(m) { + : ExecutionEngine(m), TM(tm), Ctx(0), + MemMgr(MM ? MM : new SectionMemoryManager()), Dyld(MemMgr), + IsLoaded(false), M(m), ObjCache(0) { setDataLayout(TM->getDataLayout()); } @@ -64,7 +66,11 @@ MCJIT::~MCJIT() { delete TM; } -void MCJIT::emitObject(Module *m) { +void MCJIT::setObjectCache(ObjectCache* NewCache) { + ObjCache = NewCache; +} + +ObjectBufferStream* MCJIT::emitObject(Module *m) { /// Currently, MCJIT only supports a single module and the module passed to /// this function call is expected to be the contained module. 
The module /// is passed as a parameter here to prepare for multiple module support in @@ -77,30 +83,66 @@ void MCJIT::emitObject(Module *m) { // FIXME: Track compilation state on a per-module basis when multiple modules // are supported. // Re-compilation is not supported - if (isCompiled) - return; + assert(!IsLoaded); PassManager PM; PM.add(new DataLayout(*TM->getDataLayout())); // The RuntimeDyld will take ownership of this shortly - OwningPtr<ObjectBufferStream> Buffer(new ObjectBufferStream()); + OwningPtr<ObjectBufferStream> CompiledObject(new ObjectBufferStream()); // Turn the machine code intermediate representation into bytes in memory // that may be executed. - if (TM->addPassesToEmitMC(PM, Ctx, Buffer->getOStream(), false)) { + if (TM->addPassesToEmitMC(PM, Ctx, CompiledObject->getOStream(), false)) { report_fatal_error("Target does not support MC emission!"); } // Initialize passes. PM.run(*m); // Flush the output buffer to get the generated code into memory - Buffer->flush(); + CompiledObject->flush(); + + // If we have an object cache, tell it about the new object. + // Note that we're using the compiled image, not the loaded image (as below). + if (ObjCache) { + // MemoryBuffer is a thin wrapper around the actual memory, so it's OK + // to create a temporary object here and delete it after the call. + OwningPtr<MemoryBuffer> MB(CompiledObject->getMemBuffer()); + ObjCache->notifyObjectCompiled(m, MB.get()); + } + + return CompiledObject.take(); +} + +void MCJIT::loadObject(Module *M) { + + // Get a thread lock to make sure we aren't trying to load multiple times + MutexGuard locked(lock); + + // FIXME: Track compilation state on a per-module basis when multiple modules + // are supported. + // Re-compilation is not supported + if (IsLoaded) + return; + + OwningPtr<ObjectBuffer> ObjectToLoad; + // Try to load the pre-compiled object from cache if possible + if (0 != ObjCache) { + OwningPtr<MemoryBuffer> PreCompiledObject(ObjCache->getObjectCopy(M)); + if (0 != PreCompiledObject.get()) + ObjectToLoad.reset(new ObjectBuffer(PreCompiledObject.take())); + } + + // If the cache did not contain a suitable object, compile the object + if (!ObjectToLoad) { + ObjectToLoad.reset(emitObject(M)); + assert(ObjectToLoad.get() && "Compilation did not produce an object."); + } // Load the object into the dynamic linker. // handing off ownership of the buffer - LoadedObject.reset(Dyld.loadObject(Buffer.take())); + LoadedObject.reset(Dyld.loadObject(ObjectToLoad.take())); if (!LoadedObject) report_fatal_error(Dyld.getErrorString()); @@ -113,7 +155,7 @@ void MCJIT::emitObject(Module *m) { NotifyObjectEmitted(*LoadedObject); // FIXME: Add support for per-module compilation state - isCompiled = true; + IsLoaded = true; } // FIXME: Add a parameter to identify which object is being finalized when @@ -122,10 +164,10 @@ void MCJIT::emitObject(Module *m) { // protection in the interface. void MCJIT::finalizeObject() { // If the module hasn't been compiled, just do that. - if (!isCompiled) { - // If the call to Dyld.resolveRelocations() is removed from emitObject() + if (!IsLoaded) { + // If the call to Dyld.resolveRelocations() is removed from loadObject() // we'll need to do that here. - emitObject(M); + loadObject(M); // Set page permissions. MemMgr->applyPermissions(); @@ -151,8 +193,8 @@ void *MCJIT::getPointerToFunction(Function *F) { // dies. 
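Taken together, setObjectCache and the cache-aware loadObject above give embedders a way to skip recompilation across runs. A minimal wiring sketch, assuming some concrete ObjectCache implementation supplied by the client, the usual MCJIT EngineBuilder setup, and the matching setObjectCache entry point on ExecutionEngine (none of which are shown in this hunk):

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
using namespace llvm;

// Cache is any concrete ObjectCache; its getObjectCopy()/notifyObjectCompiled()
// hooks are the ones exercised by loadObject()/emitObject() above.
static void *compileWithCache(Module *M, Function *F, ObjectCache &Cache) {
  ExecutionEngine *EE = EngineBuilder(M).setUseMCJIT(true).create();
  EE->setObjectCache(&Cache);          // must happen before the first lookup
  return EE->getPointerToFunction(F);  // cache hit loads bytes, miss compiles
}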
// FIXME: Add support for per-module compilation state - if (!isCompiled) - emitObject(M); + if (!IsLoaded) + loadObject(M); if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) { bool AbortOnFailure = !F->hasExternalWeakLinkage(); @@ -284,8 +326,8 @@ GenericValue MCJIT::runFunction(Function *F, void *MCJIT::getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure) { // FIXME: Add support for per-module compilation state - if (!isCompiled) - emitObject(M); + if (!IsLoaded) + loadObject(M); if (!isSymbolSearchingDisabled() && MemMgr) { void *ptr = MemMgr->getPointerToNamedFunction(Name, false); diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.h b/lib/ExecutionEngine/MCJIT/MCJIT.h index 283a8e5..8c4bf6e 100644 --- a/lib/ExecutionEngine/MCJIT/MCJIT.h +++ b/lib/ExecutionEngine/MCJIT/MCJIT.h @@ -12,6 +12,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" +#include "llvm/ExecutionEngine/ObjectCache.h" #include "llvm/ExecutionEngine/RuntimeDyld.h" #include "llvm/PassManager.h" @@ -34,16 +35,23 @@ class MCJIT : public ExecutionEngine { SmallVector<JITEventListener*, 2> EventListeners; // FIXME: Add support for multiple modules - bool isCompiled; + bool IsLoaded; Module *M; OwningPtr<ObjectImage> LoadedObject; + // An optional ObjectCache to be notified of compiled objects and used to + // perform lookup of pre-compiled code to avoid re-compilation. + ObjectCache *ObjCache; + public: ~MCJIT(); /// @name ExecutionEngine interface implementation /// @{ + /// Sets the object manager that MCJIT should use to avoid compilation. + virtual void setObjectCache(ObjectCache *manager); + virtual void finalizeObject(); virtual void *getPointerToBasicBlock(BasicBlock *BB); @@ -102,7 +110,9 @@ protected: /// this function call is expected to be the contained module. The module /// is passed as a parameter here to prepare for multiple module support in /// the future. - void emitObject(Module *M); + ObjectBufferStream* emitObject(Module *M); + + void loadObject(Module *M); void NotifyObjectEmitted(const ObjectImage& Obj); void NotifyFreeingObject(const ObjectImage& Obj); diff --git a/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp b/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp index fa35acd..da93124 100644 --- a/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp +++ b/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp @@ -138,6 +138,11 @@ bool SectionMemoryManager::applyPermissions(std::string *ErrMsg) // Read-write data memory already has the correct permissions + // Some platforms with separate data cache and instruction cache require + // explicit cache flush, otherwise JIT code manipulations (like resolved + // relocations) will get to the data cache but not to the instruction cache. + invalidateInstructionCache(); + return false; } diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp index 409b25f..7b32db7 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp @@ -96,7 +96,8 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) { bool isCommon = flags & SymbolRef::SF_Common; if (isCommon) { // Add the common symbols to a list. We'll allocate them all below. 
- uint64_t Align = getCommonSymbolAlignment(*i); + uint32_t Align; + Check(i->getAlignment(Align)); uint64_t Size = 0; Check(i->getSize(Size)); CommonSize += Size + Align; @@ -154,18 +155,8 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) { isFirstRelocation = false; } - ObjRelocationInfo RI; - RI.SectionID = SectionID; - Check(i->getAdditionalInfo(RI.AdditionalInfo)); - Check(i->getOffset(RI.Offset)); - Check(i->getSymbol(RI.Symbol)); - Check(i->getType(RI.Type)); - - DEBUG(dbgs() << "\t\tAddend: " << RI.AdditionalInfo - << " Offset: " << format("%p", (uintptr_t)RI.Offset) - << " Type: " << (uint32_t)(RI.Type & 0xffffffffL) - << "\n"); - processRelocationRef(RI, *obj, LocalSections, LocalSymbols, Stubs); + processRelocationRef(SectionID, *i, *obj, LocalSections, LocalSymbols, + Stubs); } } @@ -401,26 +392,14 @@ void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID, Sections[SectionID].LoadAddress = Addr; } -void RuntimeDyldImpl::resolveRelocationEntry(const RelocationEntry &RE, - uint64_t Value) { - // Ignore relocations for sections that were not loaded - if (Sections[RE.SectionID].Address != 0) { - DEBUG(dbgs() << "\tSectionID: " << RE.SectionID - << " + " << RE.Offset << " (" - << format("%p", Sections[RE.SectionID].Address + RE.Offset) << ")" - << " RelType: " << RE.RelType - << " Addend: " << RE.Addend - << "\n"); - - resolveRelocation(Sections[RE.SectionID], RE.Offset, - Value, RE.RelType, RE.Addend); - } -} - void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs, uint64_t Value) { for (unsigned i = 0, e = Relocs.size(); i != e; ++i) { - resolveRelocationEntry(Relocs[i], Value); + const RelocationEntry &RE = Relocs[i]; + // Ignore relocations for sections that were not loaded + if (Sections[RE.SectionID].Address == 0) + continue; + resolveRelocation(RE, Value); } } diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp index b8537b1..c5bad8e 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp @@ -41,7 +41,7 @@ error_code check(error_code Err) { template<class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> { - LLVM_ELF_IMPORT_TYPES(ELFT) + LLVM_ELF_IMPORT_TYPES_ELFT(ELFT) typedef Elf_Shdr_Impl<ELFT> Elf_Shdr; typedef Elf_Sym_Impl<ELFT> Elf_Sym; @@ -560,6 +560,12 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section, } } +void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE, + uint64_t Value) { + const SectionEntry &Section = Sections[RE.SectionID]; + return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend); +} + void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section, uint64_t Offset, uint64_t Value, @@ -593,15 +599,18 @@ void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section, } } -void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, +void RuntimeDyldELF::processRelocationRef(unsigned SectionID, + RelocationRef RelI, ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, const SymbolTableMap &Symbols, StubMap &Stubs) { - - uint32_t RelType = (uint32_t)(Rel.Type & 0xffffffffL); - intptr_t Addend = (intptr_t)Rel.AdditionalInfo; - const SymbolRef &Symbol = Rel.Symbol; + uint64_t RelType; + Check(RelI.getType(RelType)); + int64_t Addend; + Check(RelI.getAdditionalInfo(Addend)); + SymbolRef Symbol; + Check(RelI.getSymbol(Symbol)); // Obtain the symbol name which is referenced in the relocation StringRef 
TargetName; @@ -617,14 +626,14 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, Symbol.getType(SymType); if (lsi != Symbols.end()) { Value.SectionID = lsi->second.first; - Value.Addend = lsi->second.second; + Value.Addend = lsi->second.second + Addend; } else { // Search for the symbol in the global symbol table SymbolTableMap::const_iterator gsi = GlobalSymbolTable.find(TargetName.data()); if (gsi != GlobalSymbolTable.end()) { Value.SectionID = gsi->second.first; - Value.Addend = gsi->second.second; + Value.Addend = gsi->second.second + Addend; } else { switch (SymType) { case SymbolRef::ST_Debug: { @@ -657,8 +666,11 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, } } } - DEBUG(dbgs() << "\t\tRel.SectionID: " << Rel.SectionID - << " Rel.Offset: " << Rel.Offset + uint64_t Offset; + Check(RelI.getOffset(Offset)); + + DEBUG(dbgs() << "\t\tSectionID: " << SectionID + << " Offset: " << Offset << "\n"); if (Arch == Triple::arm && (RelType == ELF::R_ARM_PC24 || @@ -666,12 +678,12 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, RelType == ELF::R_ARM_JUMP24)) { // This is an ARM branch relocation, need to use a stub function. DEBUG(dbgs() << "\t\tThis is an ARM branch relocation."); - SectionEntry &Section = Sections[Rel.SectionID]; + SectionEntry &Section = Sections[SectionID]; // Look for an existing stub. StubMap::const_iterator i = Stubs.find(Value); if (i != Stubs.end()) { - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second, RelType, 0); DEBUG(dbgs() << " Stub function found\n"); } else { @@ -680,14 +692,14 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, Stubs[Value] = Section.StubOffset; uint8_t *StubTargetAddr = createStubFunction(Section.Address + Section.StubOffset); - RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address, + RelocationEntry RE(SectionID, StubTargetAddr - Section.Address, ELF::R_ARM_ABS32, Value.Addend); if (Value.SymbolName) addRelocationForSymbol(RE, Value.SymbolName); else addRelocationForSection(RE, Value.SectionID); - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + Section.StubOffset, RelType, 0); Section.StubOffset += getMaxStubSize(); @@ -696,8 +708,8 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, RelType == ELF::R_MIPS_26) { // This is an Mips branch relocation, need to use a stub function. DEBUG(dbgs() << "\t\tThis is a Mips branch relocation."); - SectionEntry &Section = Sections[Rel.SectionID]; - uint8_t *Target = Section.Address + Rel.Offset; + SectionEntry &Section = Sections[SectionID]; + uint8_t *Target = Section.Address + Offset; uint32_t *TargetAddress = (uint32_t *)Target; // Extract the addend from the instruction. @@ -708,7 +720,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, // Look up for existing stub. StubMap::const_iterator i = Stubs.find(Value); if (i != Stubs.end()) { - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second, RelType, 0); DEBUG(dbgs() << " Stub function found\n"); } else { @@ -719,10 +731,10 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, Section.StubOffset); // Creating Hi and Lo relocations for the filled stub instructions. 
- RelocationEntry REHi(Rel.SectionID, + RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address, ELF::R_MIPS_HI16, Value.Addend); - RelocationEntry RELo(Rel.SectionID, + RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4, ELF::R_MIPS_LO16, Value.Addend); @@ -734,7 +746,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, addRelocationForSection(RELo, Value.SectionID); } - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + Section.StubOffset, RelType, 0); Section.StubOffset += getMaxStubSize(); @@ -744,8 +756,8 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, // A PPC branch relocation will need a stub function if the target is // an external symbol (Symbol::ST_Unknown) or if the target address // is not within the signed 24-bits branch address. - SectionEntry &Section = Sections[Rel.SectionID]; - uint8_t *Target = Section.Address + Rel.Offset; + SectionEntry &Section = Sections[SectionID]; + uint8_t *Target = Section.Address + Offset; bool RangeOverflow = false; if (SymType != SymbolRef::ST_Unknown) { // A function call may points to the .opd entry, so the final symbol value @@ -755,7 +767,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, int32_t delta = static_cast<int32_t>(Target - RelocTarget); // If it is within 24-bits branch range, just set the branch target if (SignExtend32<24>(delta) == delta) { - RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend); + RelocationEntry RE(SectionID, Offset, RelType, Value.Addend); if (Value.SymbolName) addRelocationForSymbol(RE, Value.SymbolName); else @@ -770,7 +782,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, StubMap::const_iterator i = Stubs.find(Value); if (i != Stubs.end()) { // Symbol function stub already created, just relocate to it - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second, RelType, 0); DEBUG(dbgs() << " Stub function found\n"); } else { @@ -779,21 +791,21 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, Stubs[Value] = Section.StubOffset; uint8_t *StubTargetAddr = createStubFunction(Section.Address + Section.StubOffset); - RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address, + RelocationEntry RE(SectionID, StubTargetAddr - Section.Address, ELF::R_PPC64_ADDR64, Value.Addend); // Generates the 64-bits address loads as exemplified in section // 4.5.1 in PPC64 ELF ABI. 
- RelocationEntry REhst(Rel.SectionID, + RelocationEntry REhst(SectionID, StubTargetAddr - Section.Address + 2, ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend); - RelocationEntry REhr(Rel.SectionID, + RelocationEntry REhr(SectionID, StubTargetAddr - Section.Address + 6, ELF::R_PPC64_ADDR16_HIGHER, Value.Addend); - RelocationEntry REh(Rel.SectionID, + RelocationEntry REh(SectionID, StubTargetAddr - Section.Address + 14, ELF::R_PPC64_ADDR16_HI, Value.Addend); - RelocationEntry REl(Rel.SectionID, + RelocationEntry REl(SectionID, StubTargetAddr - Section.Address + 18, ELF::R_PPC64_ADDR16_LO, Value.Addend); @@ -809,7 +821,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, addRelocationForSection(REl, Value.SectionID); } - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + Section.StubOffset, RelType, 0); if (SymType == SymbolRef::ST_Unknown) @@ -819,7 +831,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, } } } else { - RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend); + RelocationEntry RE(SectionID, Offset, RelType, Value.Addend); // Extra check to avoid relocation againt empty symbols (usually // the R_PPC64_TOC). if (Value.SymbolName && !TargetName.empty()) @@ -828,7 +840,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, addRelocationForSection(RE, Value.SectionID); } } else { - RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend); + RelocationEntry RE(SectionID, Offset, RelType, Value.Addend); if (Value.SymbolName) addRelocationForSymbol(RE, Value.SymbolName); else @@ -836,13 +848,6 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, } } -unsigned RuntimeDyldELF::getCommonSymbolAlignment(const SymbolRef &Sym) { - // In ELF, the value of an SHN_COMMON symbol is its alignment requirement. 
- uint64_t Align; - Check(Sym.getValue(Align)); - return Align; -} - bool RuntimeDyldELF::isCompatibleFormat(const ObjectBuffer *Buffer) const { if (Buffer->getBufferSize() < strlen(ELF::ElfMagic)) return false; diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h index 07e704b..102b1c6 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h @@ -31,7 +31,12 @@ namespace { } // end anonymous namespace class RuntimeDyldELF : public RuntimeDyldImpl { -protected: + void resolveRelocation(const SectionEntry &Section, + uint64_t Offset, + uint64_t Value, + uint32_t Type, + int64_t Addend); + void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset, uint64_t Value, @@ -62,21 +67,6 @@ protected: uint32_t Type, int64_t Addend); - virtual void resolveRelocation(const SectionEntry &Section, - uint64_t Offset, - uint64_t Value, - uint32_t Type, - int64_t Addend); - - virtual void processRelocationRef(const ObjRelocationInfo &Rel, - ObjectImage &Obj, - ObjSectionToIDMap &ObjSectionToID, - const SymbolTableMap &Symbols, - StubMap &Stubs); - - unsigned getCommonSymbolAlignment(const SymbolRef &Sym); - - virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer); uint64_t findPPC64TOC() const; void findOPDEntrySection(ObjectImage &Obj, @@ -84,12 +74,18 @@ protected: RelocationValueRef &Rel); public: - RuntimeDyldELF(RTDyldMemoryManager *mm) - : RuntimeDyldImpl(mm) {} + RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {} + virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value); + virtual void processRelocationRef(unsigned SectionID, + RelocationRef RelI, + ObjectImage &Obj, + ObjSectionToIDMap &ObjSectionToID, + const SymbolTableMap &Symbols, + StubMap &Stubs); + virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const; + virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer); virtual ~RuntimeDyldELF(); - - bool isCompatibleFormat(const ObjectBuffer *Buffer) const; }; } // end namespace llvm diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h index f100994..51873b1 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h @@ -89,20 +89,20 @@ public: /// used to make a relocation section relative instead of symbol relative. intptr_t Addend; + /// True if this is a PCRel relocation (MachO specific). + bool IsPCRel; + + /// The size of this relocation (MachO specific). + unsigned Size; + RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend) - : SectionID(id), Offset(offset), RelType(type), Addend(addend) {} -}; + : SectionID(id), Offset(offset), RelType(type), Addend(addend), + IsPCRel(false), Size(0) {} -/// ObjRelocationInfo - relocation information as read from the object file. -/// Used to pass around data taken from object::RelocationRef, together with -/// the section to which the relocation points (represented by a SectionID). 
-class ObjRelocationInfo { -public: - unsigned SectionID; - uint64_t Offset; - SymbolRef Symbol; - uint64_t Type; - int64_t AdditionalInfo; + RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend, + bool IsPCRel, unsigned Size) + : SectionID(id), Offset(offset), RelType(type), Addend(addend), + IsPCRel(IsPCRel), Size(Size) {} }; class RelocationValueRef { @@ -194,22 +194,15 @@ protected: return (uint8_t*)Sections[SectionID].Address; } - // Subclasses can override this method to get the alignment requirement of - // a common symbol. Returns no alignment requirement if not implemented. - virtual unsigned getCommonSymbolAlignment(const SymbolRef &Sym) { - return 0; - } - - void writeInt16BE(uint8_t *Addr, uint16_t Value) { - if (sys::isLittleEndianHost()) + if (sys::IsLittleEndianHost) Value = sys::SwapByteOrder(Value); *Addr = (Value >> 8) & 0xFF; *(Addr+1) = Value & 0xFF; } void writeInt32BE(uint8_t *Addr, uint32_t Value) { - if (sys::isLittleEndianHost()) + if (sys::IsLittleEndianHost) Value = sys::SwapByteOrder(Value); *Addr = (Value >> 24) & 0xFF; *(Addr+1) = (Value >> 16) & 0xFF; @@ -218,7 +211,7 @@ protected: } void writeInt64BE(uint8_t *Addr, uint64_t Value) { - if (sys::isLittleEndianHost()) + if (sys::IsLittleEndianHost) Value = sys::SwapByteOrder(Value); *Addr = (Value >> 56) & 0xFF; *(Addr+1) = (Value >> 48) & 0xFF; @@ -269,24 +262,16 @@ protected: /// \brief Resolves relocations from Relocs list with address from Value. void resolveRelocationList(const RelocationList &Relocs, uint64_t Value); - void resolveRelocationEntry(const RelocationEntry &RE, uint64_t Value); /// \brief A object file specific relocation resolver - /// \param Section The section where the relocation is being applied - /// \param Offset The offset into the section for this relocation + /// \param RE The relocation to be resolved /// \param Value Target symbol address to apply the relocation action - /// \param Type object file specific relocation type - /// \param Addend A constant addend used to compute the value to be stored - /// into the relocatable field - virtual void resolveRelocation(const SectionEntry &Section, - uint64_t Offset, - uint64_t Value, - uint32_t Type, - int64_t Addend) = 0; + virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0; /// \brief Parses the object file relocation and stores it to Relocations /// or SymbolRelocations (this depends on the object file type). 
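The extended RelocationEntry constructor above lets the MachO path record the PC-relative flag and the log2 field size when the relocation is first parsed, instead of re-deriving them from the packed r_type word at resolve time. A short usage sketch, where SectionID, Offset, Addend and TargetSectionID stand for whatever processRelocationRef has already computed:

// Size is log2 of the field width, so 2 means a 4-byte (1 << 2) field.
RelocationEntry RE(SectionID, Offset, macho::RIT_Vanilla, Addend,
                   /*IsPCRel=*/true, /*Size=*/2);
addRelocationForSection(RE, TargetSectionID);  // later: resolveRelocation(RE, Value)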
- virtual void processRelocationRef(const ObjRelocationInfo &Rel, + virtual void processRelocationRef(unsigned SectionID, + RelocationRef RelI, ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, const SymbolTableMap &Symbols, diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp index bcc3df1..b3467a9 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp @@ -21,16 +21,24 @@ using namespace llvm::object; namespace llvm { +void RuntimeDyldMachO::resolveRelocation(const RelocationEntry &RE, + uint64_t Value) { + const SectionEntry &Section = Sections[RE.SectionID]; + return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend, + RE.IsPCRel, RE.Size); +} + void RuntimeDyldMachO::resolveRelocation(const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type, - int64_t Addend) { + int64_t Addend, + bool isPCRel, + unsigned LogSize) { uint8_t *LocalAddress = Section.Address + Offset; uint64_t FinalAddress = Section.LoadAddress + Offset; - bool isPCRel = (Type >> 24) & 1; - unsigned MachoType = (Type >> 28) & 0xf; - unsigned Size = 1 << ((Type >> 25) & 3); + unsigned MachoType = Type; + unsigned Size = 1 << LogSize; DEBUG(dbgs() << "resolveRelocation LocalAddress: " << format("%p", LocalAddress) @@ -205,60 +213,58 @@ bool RuntimeDyldMachO::resolveARMRelocation(uint8_t *LocalAddress, return false; } -void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, +void RuntimeDyldMachO::processRelocationRef(unsigned SectionID, + RelocationRef RelI, ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, const SymbolTableMap &Symbols, StubMap &Stubs) { + const ObjectFile *OF = Obj.getObjectFile(); + const MachOObjectFile *MachO = static_cast<const MachOObjectFile*>(OF); + macho::RelocationEntry RE = MachO->getRelocation(RelI.getRawDataRefImpl()); - uint32_t RelType = (uint32_t) (Rel.Type & 0xffffffffL); + uint32_t RelType = MachO->getAnyRelocationType(RE); RelocationValueRef Value; - SectionEntry &Section = Sections[Rel.SectionID]; + SectionEntry &Section = Sections[SectionID]; + + bool isExtern = MachO->getPlainRelocationExternal(RE); + bool IsPCRel = MachO->getAnyRelocationPCRel(RE); + unsigned Size = MachO->getAnyRelocationLength(RE); + uint64_t Offset; + RelI.getOffset(Offset); + uint8_t *LocalAddress = Section.Address + Offset; + unsigned NumBytes = 1 << Size; + uint64_t Addend = 0; + memcpy(&Addend, LocalAddress, NumBytes); - bool isExtern = (RelType >> 27) & 1; if (isExtern) { // Obtain the symbol name which is referenced in the relocation + SymbolRef Symbol; + RelI.getSymbol(Symbol); StringRef TargetName; - const SymbolRef &Symbol = Rel.Symbol; Symbol.getName(TargetName); // First search for the symbol in the local symbol table SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data()); if (lsi != Symbols.end()) { Value.SectionID = lsi->second.first; - Value.Addend = lsi->second.second; + Value.Addend = lsi->second.second + Addend; } else { // Search for the symbol in the global symbol table SymbolTableMap::const_iterator gsi = GlobalSymbolTable.find(TargetName.data()); if (gsi != GlobalSymbolTable.end()) { Value.SectionID = gsi->second.first; - Value.Addend = gsi->second.second; - } else + Value.Addend = gsi->second.second + Addend; + } else { Value.SymbolName = TargetName.data(); + Value.Addend = Addend; + } } } else { - error_code err; - uint8_t sectionIndex = static_cast<uint8_t>(RelType & 0xFF); - 
section_iterator si = Obj.begin_sections(), - se = Obj.end_sections(); - for (uint8_t i = 1; i < sectionIndex; i++) { - error_code err; - si.increment(err); - if (si == se) - break; - } - assert(si != se && "No section containing relocation!"); - Value.SectionID = findOrEmitSection(Obj, *si, true, ObjSectionToID); - Value.Addend = 0; - // FIXME: The size and type of the relocation determines if we can - // encode an Addend in the target location itself, and if so, how many - // bytes we should read in order to get it. We don't yet support doing - // that, and just assuming it's sizeof(intptr_t) is blatantly wrong. - //Value.Addend = *(const intptr_t *)Target; - if (Value.Addend) { - // The MachO addend is an offset from the current section. We need it - // to be an offset from the destination section - Value.Addend += Section.ObjAddress - Sections[Value.SectionID].ObjAddress; - } + SectionRef Sec = MachO->getRelocationSection(RE); + Value.SectionID = findOrEmitSection(Obj, Sec, true, ObjSectionToID); + uint64_t Addr; + Sec.getAddress(Addr); + Value.Addend = Addend - Addr; } if (Arch == Triple::arm && (RelType & 0xf) == macho::RIT_ARM_Branch24Bit) { @@ -267,27 +273,28 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, // Look up for existing stub. StubMap::const_iterator i = Stubs.find(Value); if (i != Stubs.end()) - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second, - RelType, 0); + RelType, 0, IsPCRel, Size); else { // Create a new stub function. Stubs[Value] = Section.StubOffset; uint8_t *StubTargetAddr = createStubFunction(Section.Address + Section.StubOffset); - RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address, + RelocationEntry RE(SectionID, StubTargetAddr - Section.Address, macho::RIT_Vanilla, Value.Addend); if (Value.SymbolName) addRelocationForSymbol(RE, Value.SymbolName); else addRelocationForSection(RE, Value.SectionID); - resolveRelocation(Section, Rel.Offset, + resolveRelocation(Section, Offset, (uint64_t)Section.Address + Section.StubOffset, - RelType, 0); + RelType, 0, IsPCRel, Size); Section.StubOffset += getMaxStubSize(); } } else { - RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend); + RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, + IsPCRel, Size); if (Value.SymbolName) addRelocationForSymbol(RE, Value.SymbolName); else diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h index 62d8487..8da6e35 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h @@ -16,7 +16,7 @@ #include "RuntimeDyldImpl.h" #include "llvm/ADT/IndexedMap.h" -#include "llvm/Object/MachOObject.h" +#include "llvm/Object/MachO.h" #include "llvm/Support/Format.h" using namespace llvm; @@ -25,7 +25,6 @@ using namespace llvm::object; namespace llvm { class RuntimeDyldMachO : public RuntimeDyldImpl { -protected: bool resolveI386Relocation(uint8_t *LocalAddress, uint64_t FinalAddress, uint64_t Value, @@ -48,22 +47,24 @@ protected: unsigned Size, int64_t Addend); - virtual void processRelocationRef(const ObjRelocationInfo &Rel, + void resolveRelocation(const SectionEntry &Section, + uint64_t Offset, + uint64_t Value, + uint32_t Type, + int64_t Addend, + bool isPCRel, + unsigned Size); +public: + RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {} + + virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value); + 
virtual void processRelocationRef(unsigned SectionID, + RelocationRef RelI, ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, const SymbolTableMap &Symbols, StubMap &Stubs); - -public: - virtual void resolveRelocation(const SectionEntry &Section, - uint64_t Offset, - uint64_t Value, - uint32_t Type, - int64_t Addend); - - RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {} - - bool isCompatibleFormat(const ObjectBuffer *Buffer) const; + virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const; }; } // end namespace llvm diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp index fb591a8..7761127 100644 --- a/lib/IR/AsmWriter.cpp +++ b/lib/IR/AsmWriter.cpp @@ -1605,6 +1605,29 @@ void AssemblyWriter::printFunction(const Function *F) { if (F->isMaterializable()) Out << "; Materializable\n"; + const AttributeSet &Attrs = F->getAttributes(); + if (Attrs.hasAttributes(AttributeSet::FunctionIndex)) { + AttributeSet AS = Attrs.getFnAttributes(); + std::string AttrStr; + + unsigned Idx = 0; + for (unsigned E = AS.getNumSlots(); Idx != E; ++Idx) + if (AS.getSlotIndex(Idx) == AttributeSet::FunctionIndex) + break; + + for (AttributeSet::iterator I = AS.begin(Idx), E = AS.end(Idx); + I != E; ++I) { + Attribute Attr = *I; + if (!Attr.isStringAttribute()) { + if (!AttrStr.empty()) AttrStr += ' '; + AttrStr += Attr.getAsString(); + } + } + + if (!AttrStr.empty()) + Out << "; Function Attrs: " << AttrStr << '\n'; + } + if (F->isDeclaration()) Out << "declare "; else @@ -1620,7 +1643,6 @@ void AssemblyWriter::printFunction(const Function *F) { } FunctionType *FT = F->getFunctionType(); - const AttributeSet &Attrs = F->getAttributes(); if (Attrs.hasAttributes(AttributeSet::ReturnIndex)) Out << Attrs.getAsString(AttributeSet::ReturnIndex) << ' '; TypePrinter.print(F->getReturnType(), Out); @@ -1761,10 +1783,8 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) { /// which slot it occupies. /// void AssemblyWriter::printInfoComment(const Value &V) { - if (AnnotationWriter) { + if (AnnotationWriter) AnnotationWriter->printInfoComment(V, Out); - return; - } } // This member is called for each Instruction in a function.. diff --git a/lib/IR/AttributeImpl.h b/lib/IR/AttributeImpl.h index ad2670d..0b6228b 100644 --- a/lib/IR/AttributeImpl.h +++ b/lib/IR/AttributeImpl.h @@ -228,7 +228,7 @@ public: /// is the index of the return, parameter, or function object that the /// attributes are applied to, not the index into the AttrNodes list where the /// attributes reside. 
- uint64_t getSlotIndex(unsigned Slot) const { + unsigned getSlotIndex(unsigned Slot) const { return AttrNodes[Slot].first; } @@ -248,15 +248,15 @@ public: typedef AttributeSetNode::iterator iterator; typedef AttributeSetNode::const_iterator const_iterator; - iterator begin(unsigned Idx) - { return AttrNodes[Idx].second->begin(); } - iterator end(unsigned Idx) - { return AttrNodes[Idx].second->end(); } + iterator begin(unsigned Slot) + { return AttrNodes[Slot].second->begin(); } + iterator end(unsigned Slot) + { return AttrNodes[Slot].second->end(); } - const_iterator begin(unsigned Idx) const - { return AttrNodes[Idx].second->begin(); } - const_iterator end(unsigned Idx) const - { return AttrNodes[Idx].second->end(); } + const_iterator begin(unsigned Slot) const + { return AttrNodes[Slot].second->begin(); } + const_iterator end(unsigned Slot) const + { return AttrNodes[Slot].second->end(); } void Profile(FoldingSetNodeID &ID) const { Profile(ID, AttrNodes); @@ -270,7 +270,7 @@ public: } // FIXME: This atrocity is temporary. - uint64_t Raw(uint64_t Index) const; + uint64_t Raw(unsigned Index) const; }; } // end llvm namespace diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp index 2d82891..4fe6f9d 100644 --- a/lib/IR/Attributes.cpp +++ b/lib/IR/Attributes.cpp @@ -195,6 +195,8 @@ std::string Attribute::getAsString(bool InAttrGrp) const { return "readnone"; if (hasAttribute(Attribute::ReadOnly)) return "readonly"; + if (hasAttribute(Attribute::Returned)) + return "returned"; if (hasAttribute(Attribute::ReturnsTwice)) return "returns_twice"; if (hasAttribute(Attribute::SExt)) @@ -393,6 +395,7 @@ uint64_t AttributeImpl::getAttrMask(Attribute::AttrKind Val) { case Attribute::SanitizeThread: return 1ULL << 36; case Attribute::SanitizeMemory: return 1ULL << 37; case Attribute::NoBuiltin: return 1ULL << 38; + case Attribute::Returned: return 1ULL << 39; } llvm_unreachable("Unsupported attribute type"); } @@ -481,11 +484,12 @@ unsigned AttributeSetNode::getStackAlignment() const { } std::string AttributeSetNode::getAsString(bool InAttrGrp) const { - std::string Str = ""; + std::string Str; for (SmallVectorImpl<Attribute>::const_iterator I = AttrList.begin(), - E = AttrList.end(); I != E; ) { + E = AttrList.end(); I != E; ++I) { + if (I != AttrList.begin()) + Str += ' '; Str += I->getAsString(InAttrGrp); - if (++I != E) Str += " "; } return Str; } @@ -494,7 +498,7 @@ std::string AttributeSetNode::getAsString(bool InAttrGrp) const { // AttributeSetImpl Definition //===----------------------------------------------------------------------===// -uint64_t AttributeSetImpl::Raw(uint64_t Index) const { +uint64_t AttributeSetImpl::Raw(unsigned Index) const { for (unsigned I = 0, E = getNumAttributes(); I != E; ++I) { if (getSlotIndex(I) != Index) continue; const AttributeSetNode *ASN = AttrNodes[I].second; @@ -592,7 +596,7 @@ AttributeSet AttributeSet::get(LLVMContext &C, return getImpl(C, Attrs); } -AttributeSet AttributeSet::get(LLVMContext &C, unsigned Idx, AttrBuilder &B) { +AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index, AttrBuilder &B) { if (!B.hasAttributes()) return AttributeSet(); @@ -604,29 +608,29 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Idx, AttrBuilder &B) { continue; if (Kind == Attribute::Alignment) - Attrs.push_back(std::make_pair(Idx, Attribute:: + Attrs.push_back(std::make_pair(Index, Attribute:: getWithAlignment(C, B.getAlignment()))); else if (Kind == Attribute::StackAlignment) - Attrs.push_back(std::make_pair(Idx, Attribute:: + 
Attrs.push_back(std::make_pair(Index, Attribute:: getWithStackAlignment(C, B.getStackAlignment()))); else - Attrs.push_back(std::make_pair(Idx, Attribute::get(C, Kind))); + Attrs.push_back(std::make_pair(Index, Attribute::get(C, Kind))); } // Add target-dependent (string) attributes. for (AttrBuilder::td_iterator I = B.td_begin(), E = B.td_end(); I != E; ++I) - Attrs.push_back(std::make_pair(Idx, Attribute::get(C, I->first,I->second))); + Attrs.push_back(std::make_pair(Index, Attribute::get(C, I->first,I->second))); return get(C, Attrs); } -AttributeSet AttributeSet::get(LLVMContext &C, unsigned Idx, +AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index, ArrayRef<Attribute::AttrKind> Kind) { SmallVector<std::pair<unsigned, Attribute>, 8> Attrs; for (ArrayRef<Attribute::AttrKind>::iterator I = Kind.begin(), E = Kind.end(); I != E; ++I) - Attrs.push_back(std::make_pair(Idx, Attribute::get(C, *I))); + Attrs.push_back(std::make_pair(Index, Attribute::get(C, *I))); return get(C, Attrs); } @@ -643,20 +647,20 @@ AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<AttributeSet> Attrs) { return getImpl(C, AttrNodeVec); } -AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Idx, +AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index, Attribute::AttrKind Attr) const { - if (hasAttribute(Idx, Attr)) return *this; - return addAttributes(C, Idx, AttributeSet::get(C, Idx, Attr)); + if (hasAttribute(Index, Attr)) return *this; + return addAttributes(C, Index, AttributeSet::get(C, Index, Attr)); } -AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Idx, +AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index, StringRef Kind) const { llvm::AttrBuilder B; B.addAttribute(Kind); - return addAttributes(C, Idx, AttributeSet::get(C, Idx, B)); + return addAttributes(C, Index, AttributeSet::get(C, Index, B)); } -AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Idx, +AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Index, AttributeSet Attrs) const { if (!pImpl) return Attrs; if (!Attrs.pImpl) return *this; @@ -664,8 +668,8 @@ AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Idx, #ifndef NDEBUG // FIXME it is not obvious how this should work for alignment. For now, say // we can't change a known alignment. - unsigned OldAlign = getParamAlignment(Idx); - unsigned NewAlign = Attrs.getParamAlignment(Idx); + unsigned OldAlign = getParamAlignment(Index); + unsigned NewAlign = Attrs.getParamAlignment(Index); assert((!OldAlign || !NewAlign || OldAlign == NewAlign) && "Attempt to change alignment!"); #endif @@ -676,8 +680,8 @@ AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Idx, AttributeSet AS; uint64_t LastIndex = 0; for (unsigned I = 0, E = NumAttrs; I != E; ++I) { - if (getSlotIndex(I) >= Idx) { - if (getSlotIndex(I) == Idx) AS = getSlotAttributes(LastIndex++); + if (getSlotIndex(I) >= Index) { + if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++); break; } LastIndex = I + 1; @@ -686,17 +690,17 @@ AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Idx, // Now add the attribute into the correct slot. There may already be an // AttributeSet there. 
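Seen from the client side, the Index-based overloads above are normally driven through an AttrBuilder; a minimal sketch (names illustrative) that ends up in the slot-merging code continued below:

#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
using namespace llvm;

static void markNoUnwindReadOnly(Function *F) {
  AttrBuilder B;
  B.addAttribute(Attribute::NoUnwind);
  B.addAttribute(Attribute::ReadOnly);
  // FunctionIndex selects the function slot; this is the Index parameter
  // threaded through AttributeSet::get() and addAttributes() above.
  AttributeSet AS =
      AttributeSet::get(F->getContext(), AttributeSet::FunctionIndex, B);
  F->addAttributes(AttributeSet::FunctionIndex, AS);
  // With the printFunction change earlier in the patch, the assembly writer
  // renders these as a comment like "; Function Attrs: nounwind readonly".
}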
- AttrBuilder B(AS, Idx); + AttrBuilder B(AS, Index); for (unsigned I = 0, E = Attrs.pImpl->getNumAttributes(); I != E; ++I) - if (Attrs.getSlotIndex(I) == Idx) { + if (Attrs.getSlotIndex(I) == Index) { for (AttributeSetImpl::const_iterator II = Attrs.pImpl->begin(I), IE = Attrs.pImpl->end(I); II != IE; ++II) B.addAttribute(*II); break; } - AttrSet.push_back(AttributeSet::get(C, Idx, B)); + AttrSet.push_back(AttributeSet::get(C, Index, B)); // Add the remaining attribute slots. for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I) @@ -705,13 +709,13 @@ AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Idx, return get(C, AttrSet); } -AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Idx, +AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Index, Attribute::AttrKind Attr) const { - if (!hasAttribute(Idx, Attr)) return *this; - return removeAttributes(C, Idx, AttributeSet::get(C, Idx, Attr)); + if (!hasAttribute(Index, Attr)) return *this; + return removeAttributes(C, Index, AttributeSet::get(C, Index, Attr)); } -AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Idx, +AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index, AttributeSet Attrs) const { if (!pImpl) return AttributeSet(); if (!Attrs.pImpl) return *this; @@ -719,7 +723,7 @@ AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Idx, #ifndef NDEBUG // FIXME it is not obvious how this should work for alignment. // For now, say we can't pass in alignment, which no current use does. - assert(!Attrs.hasAttribute(Idx, Attribute::Alignment) && + assert(!Attrs.hasAttribute(Index, Attribute::Alignment) && "Attempt to change alignment!"); #endif @@ -729,8 +733,8 @@ AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Idx, AttributeSet AS; uint64_t LastIndex = 0; for (unsigned I = 0, E = NumAttrs; I != E; ++I) { - if (getSlotIndex(I) >= Idx) { - if (getSlotIndex(I) == Idx) AS = getSlotAttributes(LastIndex++); + if (getSlotIndex(I) >= Index) { + if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++); break; } LastIndex = I + 1; @@ -739,15 +743,15 @@ AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Idx, // Now remove the attribute from the correct slot. There may already be an // AttributeSet there. - AttrBuilder B(AS, Idx); + AttrBuilder B(AS, Index); for (unsigned I = 0, E = Attrs.pImpl->getNumAttributes(); I != E; ++I) - if (Attrs.getSlotIndex(I) == Idx) { - B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Idx); + if (Attrs.getSlotIndex(I) == Index) { + B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Index); break; } - AttrSet.push_back(AttributeSet::get(C, Idx, B)); + AttrSet.push_back(AttributeSet::get(C, Index, B)); // Add the remaining attribute slots. for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I) @@ -764,11 +768,11 @@ LLVMContext &AttributeSet::getContext() const { return pImpl->getContext(); } -AttributeSet AttributeSet::getParamAttributes(unsigned Idx) const { - return pImpl && hasAttributes(Idx) ? +AttributeSet AttributeSet::getParamAttributes(unsigned Index) const { + return pImpl && hasAttributes(Index) ? AttributeSet::get(pImpl->getContext(), ArrayRef<std::pair<unsigned, AttributeSetNode*> >( - std::make_pair(Idx, getAttributes(Idx)))) : + std::make_pair(Index, getAttributes(Index)))) : AttributeSet(); } @@ -848,27 +852,27 @@ std::string AttributeSet::getAsString(unsigned Index, } /// \brief The attributes for the specified index are returned. 
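The renamed accessors make the slot/index distinction explicit: getNumSlots(), getSlotIndex(Slot), begin(Slot) and end(Slot) take a slot number, while hasAttributes() and the other queries take an attribute index. A small sketch of walking every slot:

#include "llvm/IR/Attributes.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void printAllAttributes(const AttributeSet &AS) {
  for (unsigned Slot = 0, E = AS.getNumSlots(); Slot != E; ++Slot) {
    errs() << "index " << AS.getSlotIndex(Slot) << ":";  // return/param/function index
    for (AttributeSet::iterator I = AS.begin(Slot), IE = AS.end(Slot); I != IE; ++I)
      errs() << ' ' << I->getAsString(false);
    errs() << '\n';
  }
}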
-AttributeSetNode *AttributeSet::getAttributes(unsigned Idx) const { +AttributeSetNode *AttributeSet::getAttributes(unsigned Index) const { if (!pImpl) return 0; // Loop through to find the attribute node we want. for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I) - if (pImpl->getSlotIndex(I) == Idx) + if (pImpl->getSlotIndex(I) == Index) return pImpl->getSlotNode(I); return 0; } -AttributeSet::iterator AttributeSet::begin(unsigned Idx) const { +AttributeSet::iterator AttributeSet::begin(unsigned Slot) const { if (!pImpl) return ArrayRef<Attribute>().begin(); - return pImpl->begin(Idx); + return pImpl->begin(Slot); } -AttributeSet::iterator AttributeSet::end(unsigned Idx) const { +AttributeSet::iterator AttributeSet::end(unsigned Slot) const { if (!pImpl) return ArrayRef<Attribute>().end(); - return pImpl->end(Idx); + return pImpl->end(Slot); } //===----------------------------------------------------------------------===// @@ -882,7 +886,7 @@ unsigned AttributeSet::getNumSlots() const { return pImpl ? pImpl->getNumAttributes() : 0; } -uint64_t AttributeSet::getSlotIndex(unsigned Slot) const { +unsigned AttributeSet::getSlotIndex(unsigned Slot) const { assert(pImpl && Slot < pImpl->getNumAttributes() && "Slot # out of range!"); return pImpl->getSlotIndex(Slot); @@ -919,13 +923,13 @@ void AttributeSet::dump() const { // AttrBuilder Method Implementations //===----------------------------------------------------------------------===// -AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Idx) +AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Index) : Attrs(0), Alignment(0), StackAlignment(0) { AttributeSetImpl *pImpl = AS.pImpl; if (!pImpl) return; for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I) { - if (pImpl->getSlotIndex(I) != Idx) continue; + if (pImpl->getSlotIndex(I) != Index) continue; for (AttributeSetImpl::const_iterator II = pImpl->begin(I), IE = pImpl->end(I); II != IE; ++II) @@ -982,16 +986,16 @@ AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) { } AttrBuilder &AttrBuilder::removeAttributes(AttributeSet A, uint64_t Index) { - unsigned Idx = ~0U; + unsigned Slot = ~0U; for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I) if (A.getSlotIndex(I) == Index) { - Idx = I; + Slot = I; break; } - assert(Idx != ~0U && "Couldn't find index in AttributeSet!"); + assert(Slot != ~0U && "Couldn't find index in AttributeSet!"); - for (AttributeSet::iterator I = A.begin(Idx), E = A.end(Idx); I != E; ++I) { + for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) { Attribute Attr = *I; if (Attr.isEnumAttribute() || Attr.isAlignAttribute()) { Attribute::AttrKind Kind = I->getKindAsEnum(); @@ -1069,16 +1073,16 @@ bool AttrBuilder::hasAttributes() const { } bool AttrBuilder::hasAttributes(AttributeSet A, uint64_t Index) const { - unsigned Idx = ~0U; + unsigned Slot = ~0U; for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I) if (A.getSlotIndex(I) == Index) { - Idx = I; + Slot = I; break; } - assert(Idx != ~0U && "Couldn't find the index!"); + assert(Slot != ~0U && "Couldn't find the index!"); - for (AttributeSet::iterator I = A.begin(Idx), E = A.end(Idx); + for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) { Attribute Attr = *I; if (Attr.isEnumAttribute() || Attr.isAlignAttribute()) { @@ -1109,33 +1113,6 @@ bool AttrBuilder::operator==(const AttrBuilder &B) { return Alignment == B.Alignment && StackAlignment == B.StackAlignment; } -void AttrBuilder::removeFunctionOnlyAttrs() { - 
removeAttribute(Attribute::NoReturn) - .removeAttribute(Attribute::NoUnwind) - .removeAttribute(Attribute::ReadNone) - .removeAttribute(Attribute::ReadOnly) - .removeAttribute(Attribute::NoInline) - .removeAttribute(Attribute::AlwaysInline) - .removeAttribute(Attribute::OptimizeForSize) - .removeAttribute(Attribute::StackProtect) - .removeAttribute(Attribute::StackProtectReq) - .removeAttribute(Attribute::StackProtectStrong) - .removeAttribute(Attribute::NoRedZone) - .removeAttribute(Attribute::NoImplicitFloat) - .removeAttribute(Attribute::Naked) - .removeAttribute(Attribute::InlineHint) - .removeAttribute(Attribute::StackAlignment) - .removeAttribute(Attribute::UWTable) - .removeAttribute(Attribute::NonLazyBind) - .removeAttribute(Attribute::ReturnsTwice) - .removeAttribute(Attribute::SanitizeAddress) - .removeAttribute(Attribute::SanitizeThread) - .removeAttribute(Attribute::SanitizeMemory) - .removeAttribute(Attribute::MinSize) - .removeAttribute(Attribute::NoDuplicate) - .removeAttribute(Attribute::NoBuiltin); -} - AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) { // FIXME: Remove this in 4.0. if (!Val) return *this; diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp index 70f7e01..2c6971c 100644 --- a/lib/IR/Constants.cpp +++ b/lib/IR/Constants.cpp @@ -53,12 +53,9 @@ bool Constant::isNegativeZeroValue() const { if (SplatCFP && SplatCFP->isZero() && SplatCFP->isNegative()) return true; - // However, vectors of zeroes which are floating point represent +0.0's. - if (const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(this)) - if (const VectorType *VT = dyn_cast<VectorType>(CAZ->getType())) - if (VT->getElementType()->isFloatingPointTy()) - // As it's a CAZ, we know it's the zero bit-pattern (ie, +0.0) in each element. - return false; + // We've already handled true FP case; any other FP vectors can't represent -0.0. + if (getType()->isFPOrFPVectorTy()) + return false; // Otherwise, just use +0.0. return isNullValue(); @@ -240,18 +237,21 @@ void Constant::destroyConstantImpl() { delete this; } -/// canTrap - Return true if evaluation of this constant could trap. This is -/// true for things like constant expressions that could divide by zero. -bool Constant::canTrap() const { - assert(getType()->isFirstClassType() && "Cannot evaluate aggregate vals!"); +static bool canTrapImpl(const Constant *C, + SmallPtrSet<const ConstantExpr *, 4> &NonTrappingOps) { + assert(C->getType()->isFirstClassType() && "Cannot evaluate aggregate vals!"); // The only thing that could possibly trap are constant exprs. - const ConstantExpr *CE = dyn_cast<ConstantExpr>(this); - if (!CE) return false; + const ConstantExpr *CE = dyn_cast<ConstantExpr>(C); + if (!CE) + return false; // ConstantExpr traps if any operands can trap. - for (unsigned i = 0, e = getNumOperands(); i != e; ++i) - if (CE->getOperand(i)->canTrap()) - return true; + for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { + if (ConstantExpr *Op = dyn_cast<ConstantExpr>(CE->getOperand(i))) { + if (NonTrappingOps.insert(Op) && canTrapImpl(Op, NonTrappingOps)) + return true; + } + } // Otherwise, only specific operations can trap. switch (CE->getOpcode()) { @@ -270,6 +270,13 @@ bool Constant::canTrap() const { } } +/// canTrap - Return true if evaluation of this constant could trap. This is +/// true for things like constant expressions that could divide by zero. 
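The NonTrappingOps set matters because constant expressions form DAGs with shared operands: the old recursion revisited a shared node once per use, which blows up on deeply shared chains, whereas canTrapImpl (wrapped by the canTrap entry point defined next) looks at each distinct ConstantExpr once. A minimal sketch of such a DAG, with A and B standing for arbitrary i32 constants:

#include "llvm/IR/Constants.h"
using namespace llvm;

static bool dagMayTrap(Constant *A, Constant *B) {
  Constant *X = ConstantExpr::getUDiv(A, B); // can trap when B is zero
  Constant *Y = ConstantExpr::getAdd(X, X);  // X is shared
  Constant *Z = ConstantExpr::getAdd(Y, Y);  // Y is shared too
  return Z->canTrap();                       // each distinct expr visited once
}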
+bool Constant::canTrap() const { + SmallPtrSet<const ConstantExpr *, 4> NonTrappingOps; + return canTrapImpl(this, NonTrappingOps); +} + /// isThreadDependent - Return true if the value can vary between threads. bool Constant::isThreadDependent() const { SmallPtrSet<const Constant*, 64> Visited; diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp index 983b49c..889d574 100644 --- a/lib/IR/Core.cpp +++ b/lib/IR/Core.cpp @@ -21,7 +21,9 @@ #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" #include "llvm/PassManager.h" #include "llvm/Support/CallSite.h" #include "llvm/Support/Debug.h" @@ -1301,6 +1303,53 @@ void LLVMSetGlobalConstant(LLVMValueRef GlobalVar, LLVMBool IsConstant) { unwrap<GlobalVariable>(GlobalVar)->setConstant(IsConstant != 0); } +LLVMThreadLocalMode LLVMGetThreadLocalMode(LLVMValueRef GlobalVar) { + switch (unwrap<GlobalVariable>(GlobalVar)->getThreadLocalMode()) { + case GlobalVariable::NotThreadLocal: + return LLVMNotThreadLocal; + case GlobalVariable::GeneralDynamicTLSModel: + return LLVMGeneralDynamicTLSModel; + case GlobalVariable::LocalDynamicTLSModel: + return LLVMLocalDynamicTLSModel; + case GlobalVariable::InitialExecTLSModel: + return LLVMInitialExecTLSModel; + case GlobalVariable::LocalExecTLSModel: + return LLVMLocalExecTLSModel; + } + + llvm_unreachable("Invalid GlobalVariable thread local mode"); +} + +void LLVMSetThreadLocalMode(LLVMValueRef GlobalVar, LLVMThreadLocalMode Mode) { + GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar); + + switch (Mode) { + case LLVMNotThreadLocal: + GV->setThreadLocalMode(GlobalVariable::NotThreadLocal); + break; + case LLVMGeneralDynamicTLSModel: + GV->setThreadLocalMode(GlobalVariable::GeneralDynamicTLSModel); + break; + case LLVMLocalDynamicTLSModel: + GV->setThreadLocalMode(GlobalVariable::LocalDynamicTLSModel); + break; + case LLVMInitialExecTLSModel: + GV->setThreadLocalMode(GlobalVariable::InitialExecTLSModel); + break; + case LLVMLocalExecTLSModel: + GV->setThreadLocalMode(GlobalVariable::LocalExecTLSModel); + break; + } +} + +LLVMBool LLVMIsExternallyInitialized(LLVMValueRef GlobalVar) { + return unwrap<GlobalVariable>(GlobalVar)->isExternallyInitialized(); +} + +void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit) { + unwrap<GlobalVariable>(GlobalVar)->setExternallyInitialized(IsExtInit); +} + /*--.. 
Operations on aliases ......................................--*/ LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee, @@ -1396,6 +1445,18 @@ void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { Func->setAttributes(PALnew); } +void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A, + const char *V) { + Function *Func = unwrap<Function>(Fn); + AttributeSet::AttrIndex Idx = + AttributeSet::AttrIndex(AttributeSet::FunctionIndex); + AttrBuilder B; + + B.addAttribute(A, V); + AttributeSet Set = AttributeSet::get(Func->getContext(), Idx, B); + Func->addAttributes(Idx, Set); +} + void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { Function *Func = unwrap<Function>(Fn); const AttributeSet PAL = Func->getAttributes(); @@ -2331,6 +2392,42 @@ LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef B, LLVMValueRef LHS, return wrap(unwrap(B)->CreatePtrDiff(unwrap(LHS), unwrap(RHS), Name)); } +LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op, + LLVMValueRef PTR, LLVMValueRef Val, + LLVMAtomicOrdering ordering, + LLVMBool singleThread) { + AtomicRMWInst::BinOp intop; + switch (op) { + case LLVMAtomicRMWBinOpXchg: intop = AtomicRMWInst::Xchg; break; + case LLVMAtomicRMWBinOpAdd: intop = AtomicRMWInst::Add; break; + case LLVMAtomicRMWBinOpSub: intop = AtomicRMWInst::Sub; break; + case LLVMAtomicRMWBinOpAnd: intop = AtomicRMWInst::And; break; + case LLVMAtomicRMWBinOpNand: intop = AtomicRMWInst::Nand; break; + case LLVMAtomicRMWBinOpOr: intop = AtomicRMWInst::Or; break; + case LLVMAtomicRMWBinOpXor: intop = AtomicRMWInst::Xor; break; + case LLVMAtomicRMWBinOpMax: intop = AtomicRMWInst::Max; break; + case LLVMAtomicRMWBinOpMin: intop = AtomicRMWInst::Min; break; + case LLVMAtomicRMWBinOpUMax: intop = AtomicRMWInst::UMax; break; + case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break; + } + AtomicOrdering intordering; + switch (ordering) { + case LLVMAtomicOrderingNotAtomic: intordering = NotAtomic; break; + case LLVMAtomicOrderingUnordered: intordering = Unordered; break; + case LLVMAtomicOrderingMonotonic: intordering = Monotonic; break; + case LLVMAtomicOrderingAcquire: intordering = Acquire; break; + case LLVMAtomicOrderingRelease: intordering = Release; break; + case LLVMAtomicOrderingAcquireRelease: + intordering = AcquireRelease; + break; + case LLVMAtomicOrderingSequentiallyConsistent: + intordering = SequentiallyConsistent; + break; + } + return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val), + intordering, singleThread ? 
SingleThread : CrossThread)); +} + /*===-- Module providers --------------------------------------------------===*/ @@ -2397,6 +2494,13 @@ LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRangeCopy( StringRef(BufferName))); } +const char *LLVMGetBufferStart(LLVMMemoryBufferRef MemBuf) { + return unwrap(MemBuf)->getBufferStart(); +} + +size_t LLVMGetBufferSize(LLVMMemoryBufferRef MemBuf) { + return unwrap(MemBuf)->getBufferSize(); +} void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) { delete unwrap(MemBuf); diff --git a/lib/IR/DIBuilder.cpp b/lib/IR/DIBuilder.cpp index 34921af..0980e80 100644 --- a/lib/IR/DIBuilder.cpp +++ b/lib/IR/DIBuilder.cpp @@ -61,6 +61,9 @@ void DIBuilder::finalize() { DIArray GVs = getOrCreateArray(AllGVs); DIType(TempGVs).replaceAllUsesWith(GVs); + + DIArray IMs = getOrCreateArray(AllImportedModules); + DIType(TempImportedModules).replaceAllUsesWith(IMs); } /// getNonCompileUnitScope - If N is compile unit return NULL otherwise return @@ -71,6 +74,16 @@ static MDNode *getNonCompileUnitScope(MDNode *N) { return N; } +static MDNode *createFilePathPair(LLVMContext &VMContext, StringRef Filename, + StringRef Directory) { + assert(!Filename.empty() && "Unable to create file without name"); + Value *Pair[] = { + MDString::get(VMContext, Filename), + MDString::get(VMContext, Directory), + }; + return MDNode::get(VMContext, Pair); +} + /// createCompileUnit - A CompileUnit provides an anchor for all debugging /// information generated during this instance of compilation. void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename, @@ -91,11 +104,12 @@ void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename, TempGVs = MDNode::getTemporary(VMContext, TElts); + TempImportedModules = MDNode::getTemporary(VMContext, TElts); + Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_compile_unit), - Constant::getNullValue(Type::getInt32Ty(VMContext)), + createFilePathPair(VMContext, Filename, Directory), ConstantInt::get(Type::getInt32Ty(VMContext), Lang), - createFile(Filename, Directory), MDString::get(VMContext, Producer), ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized), MDString::get(VMContext, Flags), @@ -104,6 +118,7 @@ void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename, TempRetainTypes, TempSubprograms, TempGVs, + TempImportedModules, MDString::get(VMContext, SplitName) }; TheCU = DICompileUnit(MDNode::get(VMContext, Elts)); @@ -113,17 +128,27 @@ void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename, NMD->addOperand(TheCU); } +DIImportedModule DIBuilder::createImportedModule(DIScope Context, + DINameSpace NS, + unsigned Line) { + Value *Elts[] = { + GetTagConstant(VMContext, dwarf::DW_TAG_imported_module), + Context, + NS, + ConstantInt::get(Type::getInt32Ty(VMContext), Line), + }; + DIImportedModule M(MDNode::get(VMContext, Elts)); + assert(M.Verify() && "Imported module should be valid"); + AllImportedModules.push_back(M); + return M; +} + /// createFile - Create a file descriptor to hold debugging information /// for a file. 
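A hedged usage sketch for the C API entry points Core.cpp gains above; only the functions added by this patch (LLVMSetThreadLocalMode, LLVMSetExternallyInitialized, LLVMAddTargetDependentFunctionAttr, LLVMBuildAtomicRMW, LLVMGetBufferStart, LLVMGetBufferSize) are taken from the diff, the surrounding calls are the long-standing C API:

// (Illustrative sketch, not part of the patch; G, Fn, Ptr and Buf are assumed
//  to be an existing global, function, i32* value and memory buffer.)
#include "llvm-c/Core.h"

void useNewBindings(LLVMBuilderRef B, LLVMValueRef G, LLVMValueRef Fn,
                    LLVMValueRef Ptr, LLVMMemoryBufferRef Buf) {
  // Thread-local mode and the externally_initialized flag on globals.
  LLVMSetThreadLocalMode(G, LLVMGeneralDynamicTLSModel);
  LLVMSetExternallyInitialized(G, 1);

  // String-valued ("target-dependent") function attributes.
  LLVMAddTargetDependentFunctionAttr(Fn, "no-frame-pointer-elim", "true");

  // atomicrmw: *Ptr += 1, sequentially consistent, cross-thread.
  LLVMValueRef One =
      LLVMConstInt(LLVMGetElementType(LLVMTypeOf(Ptr)), 1, /*SignExtend=*/0);
  LLVMBuildAtomicRMW(B, LLVMAtomicRMWBinOpAdd, Ptr, One,
                     LLVMAtomicOrderingSequentiallyConsistent,
                     /*singleThread=*/0);

  // Raw access to a memory buffer's contents.
  const char *Data = LLVMGetBufferStart(Buf);
  size_t Size = LLVMGetBufferSize(Buf);
  (void)Data; (void)Size;
}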
DIFile DIBuilder::createFile(StringRef Filename, StringRef Directory) { - assert(!Filename.empty() && "Unable to create file without name"); - Value *Pair[] = { - MDString::get(VMContext, Filename), - MDString::get(VMContext, Directory), - }; Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_file_type), - MDNode::get(VMContext, Pair) + createFilePathPair(VMContext, Filename, Directory) }; return DIFile(MDNode::get(VMContext, Elts)); } @@ -146,9 +171,9 @@ DIType DIBuilder::createNullPtrType(StringRef Name) { // ,size, alignment, offset and flags are always empty here. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_unspecified_type), + NULL, // Filename NULL, //TheCU, MDString::get(VMContext, Name), - NULL, // Filename ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align @@ -169,9 +194,9 @@ DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits, // offset and flags are always empty here. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_base_type), + NULL, // File/directory name NULL, //TheCU, MDString::get(VMContext, Name), - NULL, // Filename ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -188,9 +213,9 @@ DIDerivedType DIBuilder::createQualifiedType(unsigned Tag, DIType FromTy) { // Qualified types are encoded in DIDerivedType format. Value *Elts[] = { GetTagConstant(VMContext, Tag), + NULL, // Filename NULL, //TheCU, MDString::get(VMContext, StringRef()), // Empty name. - NULL, // Filename ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align @@ -208,9 +233,9 @@ DIBuilder::createPointerType(DIType PointeeTy, uint64_t SizeInBits, // Pointer types are encoded in DIDerivedType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_pointer_type), + NULL, // Filename NULL, //TheCU, MDString::get(VMContext, Name), - NULL, // Filename ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -221,13 +246,14 @@ DIBuilder::createPointerType(DIType PointeeTy, uint64_t SizeInBits, return DIDerivedType(MDNode::get(VMContext, Elts)); } -DIDerivedType DIBuilder::createMemberPointerType(DIType PointeeTy, DIType Base) { +DIDerivedType DIBuilder::createMemberPointerType(DIType PointeeTy, + DIType Base) { // Pointer types are encoded in DIDerivedType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_ptr_to_member_type), + NULL, // Filename NULL, //TheCU, NULL, - NULL, // Filename ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), 0), ConstantInt::get(Type::getInt64Ty(VMContext), 0), @@ -246,9 +272,9 @@ DIDerivedType DIBuilder::createReferenceType(unsigned Tag, DIType RTy) { // References are encoded in DIDerivedType format. 
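The DIBuilder hunks above move the file/directory pair into a fixed operand of each descriptor and add DW_TAG_imported_module support. A sketch of how a front end might emit an imported-module entry for `using namespace N;`; the header paths and the createCompileUnit parameter list are assumptions about this tree, not spelled out in the patch:

// (Illustrative sketch, not part of the patch.)
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Dwarf.h"
using namespace llvm;

// Emit debug info for 'using namespace N;' with the new imported-module node.
void emitUsingDirective(Module &M) {
  DIBuilder DIB(M);
  DIB.createCompileUnit(dwarf::DW_LANG_C_plus_plus, "t.cpp", "/tmp",
                        "example-frontend", /*isOptimized=*/false,
                        /*Flags=*/"", /*RunTimeVer=*/0, /*SplitName=*/"");
  DIFile File = DIB.createFile("t.cpp", "/tmp");
  DINameSpace NS = DIB.createNameSpace(File, "N", File, /*LineNo=*/1);
  DIB.createImportedModule(File, NS, /*Line=*/3);
  DIB.finalize(); // finalize() now also registers AllImportedModules
}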
Value *Elts[] = { GetTagConstant(VMContext, Tag), + NULL, // Filename NULL, // TheCU, NULL, // Name - NULL, // Filename ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align @@ -266,9 +292,9 @@ DIDerivedType DIBuilder::createTypedef(DIType Ty, StringRef Name, DIFile File, assert(Ty.Verify() && "Invalid typedef type!"); Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_typedef), + File.getFileNode(), getNonCompileUnitScope(Context), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align @@ -286,9 +312,9 @@ DIType DIBuilder::createFriend(DIType Ty, DIType FriendTy) { assert(FriendTy.Verify() && "Invalid friend type!"); Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_friend), + NULL, Ty, NULL, // Name - Ty.getFile(), ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align @@ -307,9 +333,9 @@ DIDerivedType DIBuilder::createInheritance( // TAG_inheritance is encoded in DIDerivedType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_inheritance), + NULL, Ty, NULL, // Name - Ty.getFile(), ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align @@ -328,9 +354,9 @@ DIDerivedType DIBuilder::createMemberType( // TAG_member is encoded in DIDerivedType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_member), + File.getFileNode(), getNonCompileUnitScope(Scope), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -351,9 +377,9 @@ DIType DIBuilder::createStaticMemberType(DIDescriptor Scope, StringRef Name, Flags |= DIDescriptor::FlagStaticMember; Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_member), + File.getFileNode(), getNonCompileUnitScope(Scope), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), 0/*SizeInBits*/), ConstantInt::get(Type::getInt64Ty(VMContext), 0/*AlignInBits*/), @@ -377,9 +403,9 @@ DIType DIBuilder::createObjCIVar(StringRef Name, // TAG_member is encoded in DIDerivedType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_member), + File.getFileNode(), getNonCompileUnitScope(File), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -404,9 +430,9 @@ DIType DIBuilder::createObjCIVar(StringRef Name, // TAG_member is encoded in DIDerivedType format. 
Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_member), + File.getFileNode(), getNonCompileUnitScope(File), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -423,7 +449,7 @@ DIType DIBuilder::createObjCIVar(StringRef Name, DIObjCProperty DIBuilder::createObjCProperty(StringRef Name, DIFile File, unsigned LineNumber, StringRef GetterName, - StringRef SetterName, + StringRef SetterName, unsigned PropertyAttributes, DIType Ty) { Value *Elts[] = { @@ -478,21 +504,23 @@ DIBuilder::createTemplateValueParameter(DIDescriptor Context, StringRef Name, } /// createClassType - Create debugging information entry for a class. -DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name, - DIFile File, unsigned LineNumber, - uint64_t SizeInBits, uint64_t AlignInBits, - uint64_t OffsetInBits, unsigned Flags, - DIType DerivedFrom, DIArray Elements, - MDNode *VTableHolder, - MDNode *TemplateParams) { +DICompositeType DIBuilder::createClassType(DIDescriptor Context, StringRef Name, + DIFile File, unsigned LineNumber, + uint64_t SizeInBits, + uint64_t AlignInBits, + uint64_t OffsetInBits, + unsigned Flags, DIType DerivedFrom, + DIArray Elements, + MDNode *VTableHolder, + MDNode *TemplateParams) { assert((!Context || Context.Verify()) && "createClassType should be called with a valid Context"); // TAG_class_type is encoded in DICompositeType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_class_type), + File.getFileNode(), getNonCompileUnitScope(Context), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -504,7 +532,7 @@ DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name, VTableHolder, TemplateParams }; - DIType R(MDNode::get(VMContext, Elts)); + DICompositeType R(MDNode::get(VMContext, Elts)); assert(R.Verify() && "createClassType should return a verifiable DIType"); return R; } @@ -522,9 +550,9 @@ DICompositeType DIBuilder::createStructType(DIDescriptor Context, // TAG_structure_type is encoded in DICompositeType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_structure_type), + File.getFileNode(), getNonCompileUnitScope(Context), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -542,16 +570,18 @@ DICompositeType DIBuilder::createStructType(DIDescriptor Context, } /// createUnionType - Create debugging information entry for an union. -DICompositeType DIBuilder::createUnionType( - DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNumber, - uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags, DIArray Elements, - unsigned RunTimeLang) { +DICompositeType DIBuilder::createUnionType(DIDescriptor Scope, StringRef Name, + DIFile File, unsigned LineNumber, + uint64_t SizeInBits, + uint64_t AlignInBits, unsigned Flags, + DIArray Elements, + unsigned RunTimeLang) { // TAG_union_type is encoded in DICompositeType format. 
Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_union_type), + File.getFileNode(), getNonCompileUnitScope(Scope), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -560,7 +590,8 @@ DICompositeType DIBuilder::createUnionType( NULL, Elements, ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang), - Constant::getNullValue(Type::getInt32Ty(VMContext)) + Constant::getNullValue(Type::getInt32Ty(VMContext)), + NULL }; return DICompositeType(MDNode::get(VMContext, Elts)); } @@ -572,8 +603,8 @@ DIBuilder::createSubroutineType(DIFile File, DIArray ParameterTypes) { Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_subroutine_type), Constant::getNullValue(Type::getInt32Ty(VMContext)), - MDString::get(VMContext, ""), Constant::getNullValue(Type::getInt32Ty(VMContext)), + MDString::get(VMContext, ""), ConstantInt::get(Type::getInt32Ty(VMContext), 0), ConstantInt::get(Type::getInt64Ty(VMContext), 0), ConstantInt::get(Type::getInt64Ty(VMContext), 0), @@ -592,19 +623,19 @@ DIBuilder::createSubroutineType(DIFile File, DIArray ParameterTypes) { DICompositeType DIBuilder::createEnumerationType( DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, DIArray Elements, - DIType ClassType) { + DIType UnderlyingType) { // TAG_enumeration_type is encoded in DICompositeType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type), + File.getFileNode(), getNonCompileUnitScope(Scope), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), ConstantInt::get(Type::getInt32Ty(VMContext), 0), ConstantInt::get(Type::getInt32Ty(VMContext), 0), - ClassType, + UnderlyingType, Elements, ConstantInt::get(Type::getInt32Ty(VMContext), 0), Constant::getNullValue(Type::getInt32Ty(VMContext)) @@ -620,9 +651,9 @@ DICompositeType DIBuilder::createArrayType(uint64_t Size, uint64_t AlignInBits, // TAG_array_type is encoded in DICompositeType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_array_type), + NULL, // Filename/Directory, NULL, //TheCU, MDString::get(VMContext, ""), - NULL, //TheCU, ConstantInt::get(Type::getInt32Ty(VMContext), 0), ConstantInt::get(Type::getInt64Ty(VMContext), Size), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -643,9 +674,9 @@ DIType DIBuilder::createVectorType(uint64_t Size, uint64_t AlignInBits, // A vector is an array type with the FlagVector flag applied. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_array_type), + NULL, // Filename/Directory, NULL, //TheCU, MDString::get(VMContext, ""), - NULL, //TheCU, ConstantInt::get(Type::getInt32Ty(VMContext), 0), ConstantInt::get(Type::getInt64Ty(VMContext), Size), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -723,29 +754,6 @@ DIDescriptor DIBuilder::createUnspecifiedParameter() { return DIDescriptor(MDNode::get(VMContext, Elts)); } -/// createTemporaryType - Create a temporary forward-declared type. -DIType DIBuilder::createTemporaryType() { - // Give the temporary MDNode a tag. It doesn't matter what tag we - // use here as long as DIType accepts it. 
- Value *Elts[] = { GetTagConstant(VMContext, DW_TAG_base_type) }; - MDNode *Node = MDNode::getTemporary(VMContext, Elts); - return DIType(Node); -} - -/// createTemporaryType - Create a temporary forward-declared type. -DIType DIBuilder::createTemporaryType(DIFile F) { - // Give the temporary MDNode a tag. It doesn't matter what tag we - // use here as long as DIType accepts it. - Value *Elts[] = { - GetTagConstant(VMContext, DW_TAG_base_type), - TheCU, - NULL, - F - }; - MDNode *Node = MDNode::getTemporary(VMContext, Elts); - return DIType(Node); -} - /// createForwardDecl - Create a temporary forward-declared type that /// can be RAUW'd if the full type is seen. DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, @@ -756,9 +764,9 @@ DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, // Create a temporary MDNode. Value *Elts[] = { GetTagConstant(VMContext, Tag), + F.getFileNode(), getNonCompileUnitScope(Scope), MDString::get(VMContext, Name), - F, ConstantInt::get(Type::getInt32Ty(VMContext), Line), ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), @@ -796,17 +804,18 @@ DISubrange DIBuilder::getOrCreateSubrange(int64_t Lo, int64_t Count) { return DISubrange(MDNode::get(VMContext, Elts)); } -/// createGlobalVariable - Create a new descriptor for the specified global. +/// \brief Create a new descriptor for the specified global. DIGlobalVariable DIBuilder:: -createGlobalVariable(StringRef Name, DIFile F, unsigned LineNumber, - DIType Ty, bool isLocalToUnit, Value *Val) { +createGlobalVariable(StringRef Name, StringRef LinkageName, DIFile F, + unsigned LineNumber, DIType Ty, bool isLocalToUnit, + Value *Val) { Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_variable), Constant::getNullValue(Type::getInt32Ty(VMContext)), NULL, // TheCU, MDString::get(VMContext, Name), MDString::get(VMContext, Name), - MDString::get(VMContext, Name), + MDString::get(VMContext, LinkageName), F, ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), Ty, @@ -820,6 +829,14 @@ createGlobalVariable(StringRef Name, DIFile F, unsigned LineNumber, return DIGlobalVariable(Node); } +/// \brief Create a new descriptor for the specified global. +DIGlobalVariable DIBuilder:: +createGlobalVariable(StringRef Name, DIFile F, unsigned LineNumber, + DIType Ty, bool isLocalToUnit, Value *Val) { + return createGlobalVariable(Name, Name, F, LineNumber, Ty, isLocalToUnit, + Val); +} + /// createStaticVariable - Create a new descriptor for the specified static /// variable. DIGlobalVariable DIBuilder:: @@ -918,12 +935,11 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context, Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) }; Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_subprogram), - Constant::getNullValue(Type::getInt32Ty(VMContext)), + File.getFileNode(), getNonCompileUnitScope(Context), MDString::get(VMContext, Name), MDString::get(VMContext, Name), MDString::get(VMContext, LinkageName), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), Ty, ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit), @@ -944,7 +960,9 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context, // Create a named metadata so that we do not lose this mdnode. 
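createGlobalVariable above gains an overload that records a linkage name separately from the display name (the old signature now forwards with Name for both). A fragment showing the new overload; DIB, File, IntTy and TheGlobal are assumed to already exist:

// (Illustrative fragment, not part of the patch; DIB, File, IntTy and
//  TheGlobal are an existing DIBuilder, DIFile, DIType and GlobalVariable*.)
// 'Counter' is the source-level name, '_ZN1N7CounterE' the mangled linkage
// name that is now stored separately.
DIGlobalVariable DGV =
    DIB.createGlobalVariable("Counter", "_ZN1N7CounterE", File,
                             /*LineNumber=*/12, IntTy,
                             /*isLocalToUnit=*/false, TheGlobal);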
if (isDefinition) AllSubprograms.push_back(Node); - return DISubprogram(Node); + DISubprogram S(Node); + assert(S.Verify() && "createFunction should return a valid DISubprogram"); + return S; } /// createMethod - Create a new descriptor for the specified C++ method. @@ -964,12 +982,11 @@ DISubprogram DIBuilder::createMethod(DIDescriptor Context, Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) }; Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_subprogram), - Constant::getNullValue(Type::getInt32Ty(VMContext)), + F.getFileNode(), getNonCompileUnitScope(Context), MDString::get(VMContext, Name), MDString::get(VMContext, Name), MDString::get(VMContext, LinkageName), - F, ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), Ty, ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit), @@ -989,7 +1006,9 @@ DISubprogram DIBuilder::createMethod(DIDescriptor Context, MDNode *Node = MDNode::get(VMContext, Elts); if (isDefinition) AllSubprograms.push_back(Node); - return DISubprogram(Node); + DISubprogram S(Node); + assert(S.Verify() && "createMethod should return a valid DISubprogram"); + return S; } /// createNameSpace - This creates new descriptor for a namespace @@ -998,9 +1017,9 @@ DINameSpace DIBuilder::createNameSpace(DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNo) { Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_namespace), + File.getFileNode(), getNonCompileUnitScope(Scope), MDString::get(VMContext, Name), - File, ConstantInt::get(Type::getInt32Ty(VMContext), LineNo) }; DINameSpace R(MDNode::get(VMContext, Elts)); @@ -1015,8 +1034,8 @@ DILexicalBlockFile DIBuilder::createLexicalBlockFile(DIDescriptor Scope, DIFile File) { Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_lexical_block), - Scope, - File + File.getFileNode(), + Scope }; DILexicalBlockFile R(MDNode::get(VMContext, Elts)); assert( @@ -1031,10 +1050,10 @@ DILexicalBlock DIBuilder::createLexicalBlock(DIDescriptor Scope, DIFile File, static unsigned int unique_id = 0; Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_lexical_block), + File.getFileNode(), getNonCompileUnitScope(Scope), ConstantInt::get(Type::getInt32Ty(VMContext), Line), ConstantInt::get(Type::getInt32Ty(VMContext), Col), - File, ConstantInt::get(Type::getInt32Ty(VMContext), unique_id++) }; DILexicalBlock R(MDNode::get(VMContext, Elts)); diff --git a/lib/IR/DataLayout.cpp b/lib/IR/DataLayout.cpp index 4100c4f..5658f561 100644 --- a/lib/IR/DataLayout.cpp +++ b/lib/IR/DataLayout.cpp @@ -41,7 +41,7 @@ char DataLayout::ID = 0; // Support for StructLayout //===----------------------------------------------------------------------===// -StructLayout::StructLayout(StructType *ST, const DataLayout &TD) { +StructLayout::StructLayout(StructType *ST, const DataLayout &DL) { assert(!ST->isOpaque() && "Cannot get layout of opaque structs"); StructAlignment = 0; StructSize = 0; @@ -50,7 +50,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &TD) { // Loop over each of the elements, placing them in memory. for (unsigned i = 0, e = NumElements; i != e; ++i) { Type *Ty = ST->getElementType(i); - unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty); + unsigned TyAlign = ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty); // Add padding if necessary to align the data element properly. 
if ((StructSize & (TyAlign-1)) != 0) @@ -60,7 +60,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &TD) { StructAlignment = std::max(TyAlign, StructAlignment); MemberOffsets[i] = StructSize; - StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item + StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item } // Empty structures have alignment of 1 byte. @@ -510,47 +510,6 @@ std::string DataLayout::getStringRepresentation() const { } -uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const { - assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); - switch (Ty->getTypeID()) { - case Type::LabelTyID: - return getPointerSizeInBits(0); - case Type::PointerTyID: { - unsigned AS = dyn_cast<PointerType>(Ty)->getAddressSpace(); - return getPointerSizeInBits(AS); - } - case Type::ArrayTyID: { - ArrayType *ATy = cast<ArrayType>(Ty); - return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements(); - } - case Type::StructTyID: - // Get the layout annotation... which is lazily created on demand. - return getStructLayout(cast<StructType>(Ty))->getSizeInBits(); - case Type::IntegerTyID: - return cast<IntegerType>(Ty)->getBitWidth(); - case Type::HalfTyID: - return 16; - case Type::FloatTyID: - return 32; - case Type::DoubleTyID: - case Type::X86_MMXTyID: - return 64; - case Type::PPC_FP128TyID: - case Type::FP128TyID: - return 128; - // In memory objects this is always aligned to a higher boundary, but - // only 80 bits contain information. - case Type::X86_FP80TyID: - return 80; - case Type::VectorTyID: { - VectorType *VTy = cast<VectorType>(Ty); - return VTy->getNumElements()*getTypeSizeInBits(VTy->getElementType()); - } - default: - llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type"); - } -} - /*! \param abi_or_pref Flag that determines which alignment is returned. true returns the ABI alignment, false returns the preferred alignment. @@ -662,6 +621,13 @@ Type *DataLayout::getIntPtrType(Type *Ty) const { return IntTy; } +Type *DataLayout::getSmallestLegalIntType(LLVMContext &C, unsigned Width) const { + for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i) + if (Width <= LegalIntWidths[i]) + return Type::getIntNTy(C, LegalIntWidths[i]); + return 0; +} + uint64_t DataLayout::getIndexedOffset(Type *ptrTy, ArrayRef<Value *> Indices) const { Type *Ty = ptrTy; diff --git a/lib/IR/DebugInfo.cpp b/lib/IR/DebugInfo.cpp index 1a5454e..ec83dca 100644 --- a/lib/IR/DebugInfo.cpp +++ b/lib/IR/DebugInfo.cpp @@ -25,6 +25,7 @@ #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Dwarf.h" +#include "llvm/Support/ValueHandle.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; using namespace llvm::dwarf; @@ -63,7 +64,8 @@ bool DIDescriptor::Verify() const { DISubrange(DbgNode).Verify() || DIEnumerator(DbgNode).Verify() || DIObjCProperty(DbgNode).Verify() || DITemplateTypeParameter(DbgNode).Verify() || - DITemplateValueParameter(DbgNode).Verify()); + DITemplateValueParameter(DbgNode).Verify() || + DIImportedModule(DbgNode).Verify()); } static Value *getField(const MDNode *DbgNode, unsigned Elt) { @@ -331,10 +333,16 @@ bool DIDescriptor::isEnumerator() const { return DbgNode && getTag() == dwarf::DW_TAG_enumerator; } -/// isObjCProperty - Return true if the specified tag is DW_TAG +/// isObjCProperty - Return true if the specified tag is DW_TAG_APPLE_property. 
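Among the DataLayout.cpp changes above, getSmallestLegalIntType is new: it returns the narrowest target-legal integer type of at least Width bits, or null when no legal type is wide enough. A usage sketch:

// (Illustrative sketch, not part of the patch.)
// Pick the narrowest target-legal integer type of at least Width bits,
// falling back to an exact-width type when the target has none.
llvm::Type *pickLegalIntTy(const llvm::DataLayout &DL, llvm::LLVMContext &Ctx,
                           unsigned Width) {
  if (llvm::Type *Ty = DL.getSmallestLegalIntType(Ctx, Width))
    return Ty;                              // e.g. i32 for Width == 17 on x86
  return llvm::Type::getIntNTy(Ctx, Width); // no legal type is wide enough
}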
bool DIDescriptor::isObjCProperty() const { return DbgNode && getTag() == dwarf::DW_TAG_APPLE_property; } + +/// \brief Return true if the specified tag is DW_TAG_imported_module. +bool DIDescriptor::isImportedModule() const { + return DbgNode && getTag() == dwarf::DW_TAG_imported_module; +} + //===----------------------------------------------------------------------===// // Simple Descriptor Constructors and other Methods //===----------------------------------------------------------------------===// @@ -485,7 +493,7 @@ bool DISubprogram::Verify() const { DICompositeType Ty = getType(); if (!Ty.Verify()) return false; - return DbgNode->getNumOperands() == 21; + return DbgNode->getNumOperands() == 20; } /// Verify - Verify that a global variable descriptor is well formed. @@ -539,6 +547,11 @@ bool DINameSpace::Verify() const { return DbgNode->getNumOperands() == 5; } +/// \brief Retrieve the MDNode for the directory/file pair. +MDNode *DIFile::getFileNode() const { + return const_cast<MDNode*>(getNodeField(DbgNode, 1)); +} + /// \brief Verify that the file descriptor is well formed. bool DIFile::Verify() const { return isFile() && DbgNode->getNumOperands() == 2; @@ -574,6 +587,11 @@ bool DITemplateValueParameter::Verify() const { return isTemplateValueParameter() && DbgNode->getNumOperands() == 8; } +/// \brief Verify that the imported module descriptor is well formed. +bool DIImportedModule::Verify() const { + return isImportedModule() && DbgNode->getNumOperands() == 4; +} + /// getOriginalTypeSize - If this type is derived from a base type then /// return base type size. uint64_t DIDerivedType::getOriginalTypeSize() const { @@ -610,6 +628,25 @@ MDNode *DIDerivedType::getObjCProperty() const { return dyn_cast_or_null<MDNode>(DbgNode->getOperand(10)); } +/// \brief Set the array of member DITypes. +void DICompositeType::setTypeArray(DIArray Elements, DIArray TParams) { + assert((!TParams || DbgNode->getNumOperands() == 14) && + "If you're setting the template parameters this should include a slot " + "for that!"); + TrackingVH<MDNode> N(*this); + N->replaceOperandWith(10, Elements); + if (TParams) + N->replaceOperandWith(13, TParams); + DbgNode = N; +} + +/// \brief Set the containing type. +void DICompositeType::setContainingType(DICompositeType ContainingType) { + TrackingVH<MDNode> N(*this); + N->replaceOperandWith(12, ContainingType); + DbgNode = N; +} + /// isInlinedFnArgument - Return true if this variable provides debugging /// information for an inlined function arguments. 
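DebugInfo.cpp above adds mutators on DICompositeType (setTypeArray, setContainingType), and createClassType now returns DICompositeType, so a caller can emit the composite node first and patch in its members later, which suits self-referential types. A fragment under stated assumptions (DIB, File and MemberElts are assumed; the defaulted trailing parameters are passed explicitly):

// (Illustrative fragment, not part of the patch; DIB, File and MemberElts are
//  an existing DIBuilder, DIFile and SmallVector<Value*, 8> of member nodes.)
DICompositeType NodeTy =
    DIB.createClassType(File, "Node", File, /*LineNumber=*/1,
                        /*SizeInBits=*/64, /*AlignInBits=*/64,
                        /*OffsetInBits=*/0, /*Flags=*/0,
                        /*DerivedFrom=*/DIType(),
                        DIB.getOrCreateArray(ArrayRef<Value *>()),
                        /*VTableHolder=*/0, /*TemplateParams=*/0);
// ... build MemberElts, whose entries may refer back to NodeTy ...
NodeTy.setTypeArray(DIB.getOrCreateArray(MemberElts), /*TParams=*/DIArray());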
bool DIVariable::isInlinedFnArgument(const Function *CurFn) { @@ -637,21 +674,21 @@ bool DISubprogram::describes(const Function *F) { unsigned DISubprogram::isOptimized() const { assert (DbgNode && "Invalid subprogram descriptor!"); - if (DbgNode->getNumOperands() == 16) - return getUnsignedField(15); + if (DbgNode->getNumOperands() == 15) + return getUnsignedField(14); return 0; } MDNode *DISubprogram::getVariablesNodes() const { - if (!DbgNode || DbgNode->getNumOperands() <= 19) + if (!DbgNode || DbgNode->getNumOperands() <= 18) return NULL; - return dyn_cast_or_null<MDNode>(DbgNode->getOperand(19)); + return dyn_cast_or_null<MDNode>(DbgNode->getOperand(18)); } DIArray DISubprogram::getVariables() const { - if (!DbgNode || DbgNode->getNumOperands() <= 19) + if (!DbgNode || DbgNode->getNumOperands() <= 18) return DIArray(); - if (MDNode *T = dyn_cast_or_null<MDNode>(DbgNode->getOperand(19))) + if (MDNode *T = dyn_cast_or_null<MDNode>(DbgNode->getOperand(18))) return DIArray(T); return DIArray(); } @@ -659,36 +696,12 @@ DIArray DISubprogram::getVariables() const { StringRef DIScope::getFilename() const { if (!DbgNode) return StringRef(); - if (isLexicalBlockFile()) - return DILexicalBlockFile(DbgNode).getFilename(); - if (isLexicalBlock()) - return DILexicalBlock(DbgNode).getFilename(); - if (isSubprogram()) - return DISubprogram(DbgNode).getFilename(); - if (isCompileUnit()) - return DICompileUnit(DbgNode).getFilename(); - if (isNameSpace()) - return DINameSpace(DbgNode).getFilename(); - if (isType()) - return DIType(DbgNode).getFilename(); return ::getStringField(getNodeField(DbgNode, 1), 0); } StringRef DIScope::getDirectory() const { if (!DbgNode) return StringRef(); - if (isLexicalBlockFile()) - return DILexicalBlockFile(DbgNode).getDirectory(); - if (isLexicalBlock()) - return DILexicalBlock(DbgNode).getDirectory(); - if (isSubprogram()) - return DISubprogram(DbgNode).getDirectory(); - if (isCompileUnit()) - return DICompileUnit(DbgNode).getDirectory(); - if (isNameSpace()) - return DINameSpace(DbgNode).getDirectory(); - if (isType()) - return DIType(DbgNode).getDirectory(); return ::getStringField(getNodeField(DbgNode, 1), 1); } @@ -696,7 +709,7 @@ DIArray DICompileUnit::getEnumTypes() const { if (!DbgNode || DbgNode->getNumOperands() < 13) return DIArray(); - if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(8))) + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(7))) return DIArray(N); return DIArray(); } @@ -705,7 +718,7 @@ DIArray DICompileUnit::getRetainedTypes() const { if (!DbgNode || DbgNode->getNumOperands() < 13) return DIArray(); - if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(9))) + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(8))) return DIArray(N); return DIArray(); } @@ -714,7 +727,7 @@ DIArray DICompileUnit::getSubprograms() const { if (!DbgNode || DbgNode->getNumOperands() < 13) return DIArray(); - if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(10))) + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(9))) return DIArray(N); return DIArray(); } @@ -724,6 +737,15 @@ DIArray DICompileUnit::getGlobalVariables() const { if (!DbgNode || DbgNode->getNumOperands() < 13) return DIArray(); + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(10))) + return DIArray(N); + return DIArray(); +} + +DIArray DICompileUnit::getImportedModules() const { + if (!DbgNode || DbgNode->getNumOperands() < 13) + return DIArray(); + if (MDNode *N = dyn_cast_or_null<MDNode>(DbgNode->getOperand(11))) return 
DIArray(N); return DIArray(); @@ -1032,6 +1054,8 @@ void DIDescriptor::print(raw_ostream &OS) const { DIVariable(DbgNode).printInternal(OS); } else if (this->isObjCProperty()) { DIObjCProperty(DbgNode).printInternal(OS); + } else if (this->isNameSpace()) { + DINameSpace(DbgNode).printInternal(OS); } else if (this->isScope()) { DIScope(DbgNode).printInternal(OS); } @@ -1051,8 +1075,13 @@ void DIScope::printInternal(raw_ostream &OS) const { void DICompileUnit::printInternal(raw_ostream &OS) const { DIScope::printInternal(OS); - if (const char *Lang = dwarf::LanguageString(getLanguage())) - OS << " [" << Lang << ']'; + OS << " ["; + unsigned Lang = getLanguage(); + if (const char *LangStr = dwarf::LanguageString(Lang)) + OS << LangStr; + else + (OS << "lang 0x").write_hex(Lang); + OS << ']'; } void DIEnumerator::printInternal(raw_ostream &OS) const { @@ -1105,6 +1134,14 @@ void DICompositeType::printInternal(raw_ostream &OS) const { OS << " [" << A.getNumElements() << " elements]"; } +void DINameSpace::printInternal(raw_ostream &OS) const { + StringRef Name = getName(); + if (!Name.empty()) + OS << " [" << Name << ']'; + + OS << " [line " << getLineNumber() << ']'; +} + void DISubprogram::printInternal(raw_ostream &OS) const { // TODO : Print context OS << " [line " << getLineNumber() << ']'; diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp index 5559a6c..7f7efabf 100644 --- a/lib/IR/Function.cpp +++ b/lib/IR/Function.cpp @@ -124,6 +124,13 @@ bool Argument::hasStructRetAttr() const { hasAttribute(1, Attribute::StructRet); } +/// hasReturnedAttr - Return true if this argument has the returned attribute on +/// it in its containing function. +bool Argument::hasReturnedAttr() const { + return getParent()->getAttributes(). + hasAttribute(getArgNo()+1, Attribute::Returned); +} + /// addAttr - Add attributes to an argument. void Argument::addAttr(AttributeSet AS) { assert(AS.getNumSlots() <= 1 && @@ -211,7 +218,7 @@ Function::~Function() { clearGC(); // Remove the intrinsicID from the Cache. - if(getValueName() && isIntrinsic()) + if (getValueName() && isIntrinsic()) getContext().pImpl->IntrinsicIDCache.erase(this); } @@ -352,7 +359,7 @@ unsigned Function::getIntrinsicID() const { LLVMContextImpl::IntrinsicIDCacheTy &IntrinsicIDCache = getContext().pImpl->IntrinsicIDCache; - if(!IntrinsicIDCache.count(this)) { + if (!IntrinsicIDCache.count(this)) { unsigned Id = lookupIntrinsicID(); IntrinsicIDCache[this]=Id; return Id; diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp index 2e3a525..d58877e 100644 --- a/lib/IR/Instructions.cpp +++ b/lib/IR/Instructions.cpp @@ -3000,8 +3000,8 @@ ICmpInst::makeConstantRange(Predicate pred, const APInt &C) { uint32_t BitWidth = C.getBitWidth(); switch (pred) { default: llvm_unreachable("Invalid ICmp opcode to ConstantRange ctor!"); - case ICmpInst::ICMP_EQ: Upper++; break; - case ICmpInst::ICMP_NE: Lower++; break; + case ICmpInst::ICMP_EQ: ++Upper; break; + case ICmpInst::ICMP_NE: ++Lower; break; case ICmpInst::ICMP_ULT: Lower = APInt::getMinValue(BitWidth); // Check for an empty-set condition. @@ -3015,25 +3015,25 @@ ICmpInst::makeConstantRange(Predicate pred, const APInt &C) { return ConstantRange(BitWidth, /*isFullSet=*/false); break; case ICmpInst::ICMP_UGT: - Lower++; Upper = APInt::getMinValue(BitWidth); // Min = Next(Max) + ++Lower; Upper = APInt::getMinValue(BitWidth); // Min = Next(Max) // Check for an empty-set condition. 
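The Instructions.cpp hunk above and below only swaps post- for pre-increment, but the surrounding logic is easy to misread, so here is what makeConstantRange actually produces for a couple of predicates (an illustrative check, not from the patch):

// (Illustrative example, not part of the patch.)
// makeConstantRange builds half-open ranges; for an i8 constant 8:
APInt C(8, 8);
ConstantRange ULT = ICmpInst::makeConstantRange(ICmpInst::ICMP_ULT, C);
// ULT == [0, 8)      i.e. the unsigned values 0..7
ConstantRange SGT = ICmpInst::makeConstantRange(ICmpInst::ICMP_SGT, C);
// SGT == [9, -128)   i.e. the signed values 9..127 (Upper wraps to INT8_MIN)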
if (Lower == Upper) return ConstantRange(BitWidth, /*isFullSet=*/false); break; case ICmpInst::ICMP_SGT: - Lower++; Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max) + ++Lower; Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max) // Check for an empty-set condition. if (Lower == Upper) return ConstantRange(BitWidth, /*isFullSet=*/false); break; case ICmpInst::ICMP_ULE: - Lower = APInt::getMinValue(BitWidth); Upper++; + Lower = APInt::getMinValue(BitWidth); ++Upper; // Check for a full-set condition. if (Lower == Upper) return ConstantRange(BitWidth, /*isFullSet=*/true); break; case ICmpInst::ICMP_SLE: - Lower = APInt::getSignedMinValue(BitWidth); Upper++; + Lower = APInt::getSignedMinValue(BitWidth); ++Upper; // Check for a full-set condition. if (Lower == Upper) return ConstantRange(BitWidth, /*isFullSet=*/true); diff --git a/lib/IR/Metadata.cpp b/lib/IR/Metadata.cpp index 0228aeb..6a6b7af5 100644 --- a/lib/IR/Metadata.cpp +++ b/lib/IR/Metadata.cpp @@ -403,42 +403,6 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) { } } -MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) { - if (!A || !B) - return NULL; - - if (A == B) - return A; - - SmallVector<MDNode *, 4> PathA; - MDNode *T = A; - while (T) { - PathA.push_back(T); - T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0; - } - - SmallVector<MDNode *, 4> PathB; - T = B; - while (T) { - PathB.push_back(T); - T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0; - } - - int IA = PathA.size() - 1; - int IB = PathB.size() - 1; - - MDNode *Ret = 0; - while (IA >= 0 && IB >=0) { - if (PathA[IA] == PathB[IB]) - Ret = PathA[IA]; - else - break; - --IA; - --IB; - } - return Ret; -} - MDNode *MDNode::getMostGenericFPMath(MDNode *A, MDNode *B) { if (!A || !B) return NULL; diff --git a/lib/IR/PassManager.cpp b/lib/IR/PassManager.cpp index 5a8df70..3c968aa 100644 --- a/lib/IR/PassManager.cpp +++ b/lib/IR/PassManager.cpp @@ -1739,10 +1739,8 @@ bool PassManager::run(Module &M) { } //===----------------------------------------------------------------------===// -// TimingInfo Class - This class is used to calculate information about the -// amount of time each pass takes to execute. This only happens with -// -time-passes is enabled on the command line. 
-// +// TimingInfo implementation + bool llvm::TimePassesIsEnabled = false; static cl::opt<bool,true> EnableTiming("time-passes", cl::location(TimePassesIsEnabled), diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp index adc702e..e9eb012 100644 --- a/lib/IR/Value.cpp +++ b/lib/IR/Value.cpp @@ -118,7 +118,7 @@ bool Value::isUsedInBasicBlock(const BasicBlock *BB) const { for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { if (std::find(I->op_begin(), I->op_end(), this) != I->op_end()) return true; - if (MaxBlockSize-- == 0) // If the block is larger fall back to use_iterator + if (--MaxBlockSize == 0) // If the block is larger fall back to use_iterator break; } diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp index 8bfbb32..d106173 100644 --- a/lib/IR/Verifier.cpp +++ b/lib/IR/Verifier.cpp @@ -301,9 +301,12 @@ namespace { bool VerifyIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos, SmallVectorImpl<Type*> &ArgTys); - void VerifyParameterAttrs(AttributeSet Attrs, uint64_t Idx, Type *Ty, + bool VerifyAttributeCount(AttributeSet Attrs, unsigned Params); + void VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx, + bool isFunction, const Value *V); + void VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty, bool isReturnValue, const Value *V); - void VerifyFunctionAttrs(FunctionType *FT, const AttributeSet &Attrs, + void VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs, const Value *V); void WriteValue(const Value *V) { @@ -446,6 +449,30 @@ void Verifier::visitGlobalVariable(GlobalVariable &GV) { } } + if (GV.hasName() && (GV.getName() == "llvm.used" || + GV.getName() == "llvm.compiler_used")) { + Assert1(!GV.hasInitializer() || GV.hasAppendingLinkage(), + "invalid linkage for intrinsic global variable", &GV); + Type *GVType = GV.getType()->getElementType(); + if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) { + PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType()); + Assert1(PTy, "wrong type for intrinsic global variable", &GV); + if (GV.hasInitializer()) { + Constant *Init = GV.getInitializer(); + ConstantArray *InitArray = dyn_cast<ConstantArray>(Init); + Assert1(InitArray, "wrong initalizer for intrinsic global variable", + Init); + for (unsigned i = 0, e = InitArray->getNumOperands(); i != e; ++i) { + Value *V = Init->getOperand(i)->stripPointerCasts(); + // stripPointerCasts strips aliases, so we only need to check for + // variables and functions. 
+ Assert1(isa<GlobalVariable>(V) || isa<Function>(V), + "invalid llvm.used member", V); + } + } + } + } + visitGlobalValue(GV); } @@ -626,44 +653,74 @@ void Verifier::visitModuleFlag(MDNode *Op, DenseMap<MDString*, MDNode*>&SeenIDs, } } +void Verifier::VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx, + bool isFunction, const Value* V) { + unsigned Slot = ~0U; + for (unsigned I = 0, E = Attrs.getNumSlots(); I != E; ++I) + if (Attrs.getSlotIndex(I) == Idx) { + Slot = I; + break; + } + + assert(Slot != ~0U && "Attribute set inconsistency!"); + + for (AttributeSet::iterator I = Attrs.begin(Slot), E = Attrs.end(Slot); + I != E; ++I) { + if (I->isStringAttribute()) + continue; + + if (I->getKindAsEnum() == Attribute::NoReturn || + I->getKindAsEnum() == Attribute::NoUnwind || + I->getKindAsEnum() == Attribute::ReadNone || + I->getKindAsEnum() == Attribute::ReadOnly || + I->getKindAsEnum() == Attribute::NoInline || + I->getKindAsEnum() == Attribute::AlwaysInline || + I->getKindAsEnum() == Attribute::OptimizeForSize || + I->getKindAsEnum() == Attribute::StackProtect || + I->getKindAsEnum() == Attribute::StackProtectReq || + I->getKindAsEnum() == Attribute::StackProtectStrong || + I->getKindAsEnum() == Attribute::NoRedZone || + I->getKindAsEnum() == Attribute::NoImplicitFloat || + I->getKindAsEnum() == Attribute::Naked || + I->getKindAsEnum() == Attribute::InlineHint || + I->getKindAsEnum() == Attribute::StackAlignment || + I->getKindAsEnum() == Attribute::UWTable || + I->getKindAsEnum() == Attribute::NonLazyBind || + I->getKindAsEnum() == Attribute::ReturnsTwice || + I->getKindAsEnum() == Attribute::SanitizeAddress || + I->getKindAsEnum() == Attribute::SanitizeThread || + I->getKindAsEnum() == Attribute::SanitizeMemory || + I->getKindAsEnum() == Attribute::MinSize || + I->getKindAsEnum() == Attribute::NoDuplicate || + I->getKindAsEnum() == Attribute::NoBuiltin) { + if (!isFunction) + CheckFailed("Attribute '" + I->getKindAsString() + + "' only applies to functions!", V); + return; + } else if (isFunction) { + CheckFailed("Attribute '" + I->getKindAsString() + + "' does not apply to functions!", V); + return; + } + } +} + // VerifyParameterAttrs - Check the given attributes for an argument or return // value of the specified type. The value V is printed in error messages. 
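The Verifier now checks llvm.used and llvm.compiler_used: appending linkage, an array-of-pointer type, and members that are global variables or functions once casts are stripped. A sketch of building one conforming llvm.used entry (only standard IR-construction calls are used; the section/metadata details real front ends add are omitted):

// (Illustrative sketch, not part of the patch.)
void markUsed(llvm::Module &M, llvm::Function *F) {
  using namespace llvm;
  LLVMContext &Ctx = M.getContext();
  Type *I8Ptr = Type::getInt8PtrTy(Ctx);
  ArrayType *ATy = ArrayType::get(I8Ptr, 1);
  Constant *Elt = ConstantExpr::getBitCast(F, I8Ptr); // member after cast-strip
  new GlobalVariable(M, ATy, /*isConstant=*/false,
                     GlobalValue::AppendingLinkage,
                     ConstantArray::get(ATy, Elt), "llvm.used");
}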
-void Verifier::VerifyParameterAttrs(AttributeSet Attrs, uint64_t Idx, Type *Ty, +void Verifier::VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty, bool isReturnValue, const Value *V) { if (!Attrs.hasAttributes(Idx)) return; - Assert1(!Attrs.hasAttribute(Idx, Attribute::NoReturn) && - !Attrs.hasAttribute(Idx, Attribute::NoUnwind) && - !Attrs.hasAttribute(Idx, Attribute::ReadNone) && - !Attrs.hasAttribute(Idx, Attribute::ReadOnly) && - !Attrs.hasAttribute(Idx, Attribute::NoInline) && - !Attrs.hasAttribute(Idx, Attribute::AlwaysInline) && - !Attrs.hasAttribute(Idx, Attribute::OptimizeForSize) && - !Attrs.hasAttribute(Idx, Attribute::StackProtect) && - !Attrs.hasAttribute(Idx, Attribute::StackProtectReq) && - !Attrs.hasAttribute(Idx, Attribute::NoRedZone) && - !Attrs.hasAttribute(Idx, Attribute::NoImplicitFloat) && - !Attrs.hasAttribute(Idx, Attribute::Naked) && - !Attrs.hasAttribute(Idx, Attribute::InlineHint) && - !Attrs.hasAttribute(Idx, Attribute::StackAlignment) && - !Attrs.hasAttribute(Idx, Attribute::UWTable) && - !Attrs.hasAttribute(Idx, Attribute::NonLazyBind) && - !Attrs.hasAttribute(Idx, Attribute::ReturnsTwice) && - !Attrs.hasAttribute(Idx, Attribute::SanitizeAddress) && - !Attrs.hasAttribute(Idx, Attribute::SanitizeThread) && - !Attrs.hasAttribute(Idx, Attribute::SanitizeMemory) && - !Attrs.hasAttribute(Idx, Attribute::MinSize) && - !Attrs.hasAttribute(Idx, Attribute::NoBuiltin), - "Some attributes in '" + Attrs.getAsString(Idx) + - "' only apply to functions!", V); + VerifyAttributeTypes(Attrs, Idx, false, V); if (isReturnValue) Assert1(!Attrs.hasAttribute(Idx, Attribute::ByVal) && !Attrs.hasAttribute(Idx, Attribute::Nest) && !Attrs.hasAttribute(Idx, Attribute::StructRet) && - !Attrs.hasAttribute(Idx, Attribute::NoCapture), - "Attribute 'byval', 'nest', 'sret', and 'nocapture' " + !Attrs.hasAttribute(Idx, Attribute::NoCapture) && + !Attrs.hasAttribute(Idx, Attribute::Returned), + "Attribute 'byval', 'nest', 'sret', 'nocapture', and 'returned' " "do not apply to return values!", V); // Check for mutually incompatible attributes. @@ -683,6 +740,10 @@ void Verifier::VerifyParameterAttrs(AttributeSet Attrs, uint64_t Idx, Type *Ty, Attrs.hasAttribute(Idx, Attribute::InReg))), "Attributes " "'byval, nest, and inreg' are incompatible!", V); + Assert1(!(Attrs.hasAttribute(Idx, Attribute::StructRet) && + Attrs.hasAttribute(Idx, Attribute::Returned)), "Attributes " + "'sret and returned' are incompatible!", V); + Assert1(!(Attrs.hasAttribute(Idx, Attribute::ZExt) && Attrs.hasAttribute(Idx, Attribute::SExt)), "Attributes " "'zeroext and signext' are incompatible!", V); @@ -712,81 +773,51 @@ void Verifier::VerifyParameterAttrs(AttributeSet Attrs, uint64_t Idx, Type *Ty, // VerifyFunctionAttrs - Check parameter attributes against a function type. // The value V is printed in error messages. -void Verifier::VerifyFunctionAttrs(FunctionType *FT, - const AttributeSet &Attrs, +void Verifier::VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs, const Value *V) { if (Attrs.isEmpty()) return; bool SawNest = false; + bool SawReturned = false; for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { - unsigned Index = Attrs.getSlotIndex(i); + unsigned Idx = Attrs.getSlotIndex(i); Type *Ty; - if (Index == 0) + if (Idx == 0) Ty = FT->getReturnType(); - else if (Index-1 < FT->getNumParams()) - Ty = FT->getParamType(Index-1); + else if (Idx-1 < FT->getNumParams()) + Ty = FT->getParamType(Idx-1); else break; // VarArgs attributes, verified elsewhere. 
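The same hunk introduces verification for the new 'returned' parameter attribute: at most one parameter may carry it, its type must losslessly bitcast to the return type, and it is incompatible with sret. A sketch of tagging a parameter through the AttrBuilder path, paired with the Argument::hasReturnedAttr query added in Function.cpp earlier in this patch:

// (Illustrative sketch, not part of the patch; F is assumed to have at least
//  one parameter whose type matches the return type.)
// Index 0 is the return value, so the first parameter is index 1.
void markFirstArgReturned(llvm::Function &F) {
  llvm::AttrBuilder B;
  B.addAttribute(llvm::Attribute::Returned);
  llvm::Argument &A = *F.arg_begin();
  A.addAttr(llvm::AttributeSet::get(F.getContext(), /*Index=*/1, B));
  bool IsRet = A.hasReturnedAttr(); // new query added in Function.cpp above
  (void)IsRet;
}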
- VerifyParameterAttrs(Attrs, Index, Ty, Index == 0, V); + VerifyParameterAttrs(Attrs, Idx, Ty, Idx == 0, V); - if (Attrs.hasAttribute(i, Attribute::Nest)) { + if (Idx == 0) + continue; + + if (Attrs.hasAttribute(Idx, Attribute::Nest)) { Assert1(!SawNest, "More than one parameter has attribute nest!", V); SawNest = true; } - if (Attrs.hasAttribute(Index, Attribute::StructRet)) - Assert1(Index == 1, "Attribute sret is not on first parameter!", V); + if (Attrs.hasAttribute(Idx, Attribute::Returned)) { + Assert1(!SawReturned, "More than one parameter has attribute returned!", + V); + Assert1(Ty->canLosslesslyBitCastTo(FT->getReturnType()), "Incompatible " + "argument and return types for 'returned' attribute", V); + SawReturned = true; + } + + if (Attrs.hasAttribute(Idx, Attribute::StructRet)) + Assert1(Idx == 1, "Attribute sret is not on first parameter!", V); } if (!Attrs.hasAttributes(AttributeSet::FunctionIndex)) return; - AttrBuilder NotFn(Attrs, AttributeSet::FunctionIndex); - NotFn.removeFunctionOnlyAttrs(); - Assert1(NotFn.empty(), "Attributes '" + - AttributeSet::get(V->getContext(), - AttributeSet::FunctionIndex, - NotFn).getAsString(AttributeSet::FunctionIndex) + - "' do not apply to the function!", V); - - // Check for mutually incompatible attributes. - Assert1(!((Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::ByVal) && - Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::Nest)) || - (Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::ByVal) && - Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::StructRet)) || - (Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::Nest) && - Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::StructRet))), - "Attributes 'byval, nest, and sret' are incompatible!", V); - - Assert1(!((Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::ByVal) && - Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::Nest)) || - (Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::ByVal) && - Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::InReg)) || - (Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::Nest) && - Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::InReg))), - "Attributes 'byval, nest, and inreg' are incompatible!", V); - - Assert1(!(Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::ZExt) && - Attrs.hasAttribute(AttributeSet::FunctionIndex, - Attribute::SExt)), - "Attributes 'zeroext and signext' are incompatible!", V); + VerifyAttributeTypes(Attrs, AttributeSet::FunctionIndex, true, V); Assert1(!(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && @@ -801,7 +832,7 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT, "Attributes 'noinline and alwaysinline' are incompatible!", V); } -static bool VerifyAttributeCount(const AttributeSet &Attrs, unsigned Params) { +bool Verifier::VerifyAttributeCount(AttributeSet Attrs, unsigned Params) { if (Attrs.getNumSlots() == 0) return true; @@ -837,7 +868,7 @@ void Verifier::visitFunction(Function &F) { Assert1(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(), "Invalid struct return type!", &F); - const AttributeSet &Attrs = F.getAttributes(); + AttributeSet Attrs = F.getAttributes(); Assert1(VerifyAttributeCount(Attrs, FT->getNumParams()), "Attribute after last parameter!", &F); @@ -1350,7 +1381,7 @@ void Verifier::VerifyCallSite(CallSite CS) { "Call parameter type does not match function signature!", CS.getArgument(i), FTy->getParamType(i), I); - const 
AttributeSet &Attrs = CS.getAttributes(); + AttributeSet Attrs = CS.getAttributes(); Assert1(VerifyAttributeCount(Attrs, CS.arg_size()), "Attribute after last parameter!", I); @@ -1358,15 +1389,41 @@ void Verifier::VerifyCallSite(CallSite CS) { // Verify call attributes. VerifyFunctionAttrs(FTy, Attrs, I); - if (FTy->isVarArg()) + if (FTy->isVarArg()) { + // FIXME? is 'nest' even legal here? + bool SawNest = false; + bool SawReturned = false; + + for (unsigned Idx = 1; Idx < 1 + FTy->getNumParams(); ++Idx) { + if (Attrs.hasAttribute(Idx, Attribute::Nest)) + SawNest = true; + if (Attrs.hasAttribute(Idx, Attribute::Returned)) + SawReturned = true; + } + // Check attributes on the varargs part. for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) { - VerifyParameterAttrs(Attrs, Idx, CS.getArgument(Idx-1)->getType(), - false, I); + Type *Ty = CS.getArgument(Idx-1)->getType(); + VerifyParameterAttrs(Attrs, Idx, Ty, false, I); + + if (Attrs.hasAttribute(Idx, Attribute::Nest)) { + Assert1(!SawNest, "More than one parameter has attribute nest!", I); + SawNest = true; + } + + if (Attrs.hasAttribute(Idx, Attribute::Returned)) { + Assert1(!SawReturned, "More than one parameter has attribute returned!", + I); + Assert1(Ty->canLosslesslyBitCastTo(FTy->getReturnType()), + "Incompatible argument and return types for 'returned' " + "attribute", I); + SawReturned = true; + } Assert1(!Attrs.hasAttribute(Idx, Attribute::StructRet), "Attribute 'sret' cannot be used for vararg call arguments!", I); } + } // Verify that there's no metadata unless it's a direct call to an intrinsic. if (CS.getCalledFunction() == 0 || diff --git a/lib/IRReader/CMakeLists.txt b/lib/IRReader/CMakeLists.txt new file mode 100644 index 0000000..cf10d8b --- /dev/null +++ b/lib/IRReader/CMakeLists.txt @@ -0,0 +1,3 @@ +add_llvm_library(LLVMIRReader + IRReader.cpp + ) diff --git a/lib/IRReader/IRReader.cpp b/lib/IRReader/IRReader.cpp new file mode 100644 index 0000000..eeec14e --- /dev/null +++ b/lib/IRReader/IRReader.cpp @@ -0,0 +1,89 @@ +//===---- IRReader.cpp - Reader for LLVM IR files -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IRReader/IRReader.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/Assembly/Parser.h" +#include "llvm/Bitcode/ReaderWriter.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Support/system_error.h" +#include "llvm/Support/Timer.h" + +using namespace llvm; + +namespace llvm { + extern bool TimePassesIsEnabled; +} + +static const char *TimeIRParsingGroupName = "LLVM IR Parsing"; +static const char *TimeIRParsingName = "Parse IR"; + + +Module *llvm::getLazyIRModule(MemoryBuffer *Buffer, SMDiagnostic &Err, + LLVMContext &Context) { + if (isBitcode((const unsigned char *)Buffer->getBufferStart(), + (const unsigned char *)Buffer->getBufferEnd())) { + std::string ErrMsg; + Module *M = getLazyBitcodeModule(Buffer, Context, &ErrMsg); + if (M == 0) { + Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error, + ErrMsg); + // ParseBitcodeFile does not take ownership of the Buffer in the + // case of an error. 
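The new lib/IRReader component added below wraps bitcode and textual IR parsing behind a single entry point. A minimal driver sketch; only ParseIRFile and its header come from this patch, the rest is the usual support API:

// (Illustrative sketch, not part of the patch.)
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// ParseIRFile accepts both textual IR (.ll) and bitcode (.bc).
int main(int argc, char **argv) {
  if (argc < 2)
    return 1;
  LLVMContext Ctx;
  SMDiagnostic Err;
  Module *M = ParseIRFile(argv[1], Err, Ctx);
  if (!M) {
    Err.print(argv[0], errs());
    return 1;
  }
  M->dump();
  delete M;
  return 0;
}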
+ delete Buffer; + } + return M; + } + + return ParseAssembly(Buffer, 0, Err, Context); +} + +Module *llvm::getLazyIRFileModule(const std::string &Filename, SMDiagnostic &Err, + LLVMContext &Context) { + OwningPtr<MemoryBuffer> File; + if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) { + Err = SMDiagnostic(Filename, SourceMgr::DK_Error, + "Could not open input file: " + ec.message()); + return 0; + } + + return getLazyIRModule(File.take(), Err, Context); +} + +Module *llvm::ParseIR(MemoryBuffer *Buffer, SMDiagnostic &Err, + LLVMContext &Context) { + NamedRegionTimer T(TimeIRParsingName, TimeIRParsingGroupName, + TimePassesIsEnabled); + if (isBitcode((const unsigned char *)Buffer->getBufferStart(), + (const unsigned char *)Buffer->getBufferEnd())) { + std::string ErrMsg; + Module *M = ParseBitcodeFile(Buffer, Context, &ErrMsg); + if (M == 0) + Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error, + ErrMsg); + // ParseBitcodeFile does not take ownership of the Buffer. + delete Buffer; + return M; + } + + return ParseAssembly(Buffer, 0, Err, Context); +} + +Module *llvm::ParseIRFile(const std::string &Filename, SMDiagnostic &Err, + LLVMContext &Context) { + OwningPtr<MemoryBuffer> File; + if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) { + Err = SMDiagnostic(Filename, SourceMgr::DK_Error, + "Could not open input file: " + ec.message()); + return 0; + } + + return ParseIR(File.take(), Err, Context); +} diff --git a/lib/IRReader/LLVMBuild.txt b/lib/IRReader/LLVMBuild.txt new file mode 100644 index 0000000..b7bc74d --- /dev/null +++ b/lib/IRReader/LLVMBuild.txt @@ -0,0 +1,22 @@ +;===- ./lib/IRReader/LLVMBuild.txt -----------------------------*- Conf -*--===; +; +; The LLVM Compiler Infrastructure +; +; This file is distributed under the University of Illinois Open Source +; License. See LICENSE.TXT for details. +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = IRReader +parent = Libraries +required_libraries = AsmParser BitReader Core Support diff --git a/lib/IRReader/Makefile b/lib/IRReader/Makefile new file mode 100644 index 0000000..cf6bc11 --- /dev/null +++ b/lib/IRReader/Makefile @@ -0,0 +1,14 @@ +##===- lib/IRReader/Makefile -------------------------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## + +LEVEL = ../.. 
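The new IRReader library collects the bitcode-or-assembly loading helpers declared above (ParseIR, ParseIRFile, getLazyIRModule, getLazyIRFileModule). A minimal consumer sketch, assuming the 3.3-era signatures shown in IRReader.cpp above; the helper name loadModule and the diagnostic prefix are illustrative only:

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

// Load a module from either bitcode or textual IR. ParseIRFile sniffs the
// magic bytes itself, so the caller does not need to know which form it has;
// it returns 0 and fills in Err on failure.
static llvm::Module *loadModule(const std::string &Path,
                                llvm::LLVMContext &Context) {
  llvm::SMDiagnostic Err;
  llvm::Module *M = llvm::ParseIRFile(Path, Err, Context);
  if (M == 0)
    Err.print("loadModule", llvm::errs());
  return M;
}

Linking against the new component uses the LLVMBuild and Makefile pieces added here (required_libraries = AsmParser BitReader Core Support).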
+LIBRARYNAME := LLVMIRReader +BUILD_ARCHIVE = 1 + +include $(LEVEL)/Makefile.common diff --git a/lib/LLVMBuild.txt b/lib/LLVMBuild.txt index a31793c..0565443 100644 --- a/lib/LLVMBuild.txt +++ b/lib/LLVMBuild.txt @@ -16,7 +16,7 @@ ;===------------------------------------------------------------------------===; [common] -subdirectories = Analysis Archive AsmParser Bitcode CodeGen DebugInfo ExecutionEngine Linker IR MC Object Option Support TableGen Target Transforms +subdirectories = Analysis Archive AsmParser Bitcode CodeGen DebugInfo ExecutionEngine Linker IR IRReader MC Object Option Support TableGen Target Transforms [component_0] type = Group diff --git a/lib/Linker/LinkModules.cpp b/lib/Linker/LinkModules.cpp index 0acbcfa..74cbdad 100644 --- a/lib/Linker/LinkModules.cpp +++ b/lib/Linker/LinkModules.cpp @@ -17,13 +17,13 @@ #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallString.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/TypeFinder.h" #include "llvm/Support/Debug.h" -#include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/ValueMapper.h" diff --git a/lib/Linker/Linker.cpp b/lib/Linker/Linker.cpp index c8ea8ff..74d24f2 100644 --- a/lib/Linker/Linker.cpp +++ b/lib/Linker/Linker.cpp @@ -15,7 +15,6 @@ #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/Module.h" #include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/system_error.h" using namespace llvm; @@ -24,7 +23,6 @@ Linker::Linker(StringRef progname, StringRef modname, LLVMContext& C, unsigned flags): Context(C), Composite(new Module(modname, C)), - LibPaths(), Flags(flags), Error(), ProgramName(progname) { } @@ -32,7 +30,6 @@ Linker::Linker(StringRef progname, StringRef modname, Linker::Linker(StringRef progname, Module* aModule, unsigned flags) : Context(aModule->getContext()), Composite(aModule), - LibPaths(), Flags(flags), Error(), ProgramName(progname) { } @@ -63,27 +60,9 @@ Linker::verbose(StringRef message) { errs() << " " << message << "\n"; } -void -Linker::addPath(const sys::Path& path) { - LibPaths.push_back(path); -} - -void -Linker::addPaths(const std::vector<std::string>& paths) { - for (unsigned i = 0, e = paths.size(); i != e; ++i) - LibPaths.push_back(sys::Path(paths[i])); -} - -void -Linker::addSystemPaths() { - sys::Path::GetBitcodeLibraryPaths(LibPaths); - LibPaths.insert(LibPaths.begin(),sys::Path("./")); -} - Module* Linker::releaseModule() { Module* result = Composite; - LibPaths.clear(); Error.clear(); Composite = 0; Flags = 0; diff --git a/lib/MC/MCAsmInfo.cpp b/lib/MC/MCAsmInfo.cpp index 51bb435..9e60884 100644 --- a/lib/MC/MCAsmInfo.cpp +++ b/lib/MC/MCAsmInfo.cpp @@ -87,10 +87,10 @@ MCAsmInfo::MCAsmInfo() { SupportsDebugInformation = false; ExceptionsType = ExceptionHandling::None; DwarfUsesInlineInfoSection = false; - DwarfSectionOffsetDirective = 0; DwarfUsesRelocationsAcrossSections = true; DwarfRegNumForCFI = false; HasMicrosoftFastStdCallMangling = false; + NeedsDwarfSectionOffsetDirective = false; } MCAsmInfo::~MCAsmInfo() { diff --git a/lib/MC/MCAsmInfoCOFF.cpp b/lib/MC/MCAsmInfoCOFF.cpp index fd79193..33350d9 100644 --- a/lib/MC/MCAsmInfoCOFF.cpp +++ b/lib/MC/MCAsmInfoCOFF.cpp @@ -36,8 +36,8 @@ MCAsmInfoCOFF::MCAsmInfoCOFF() { // Set up DWARF directives 
HasLEB128 = true; // Target asm supports leb128 directives (little-endian) SupportsDebugInformation = true; - DwarfSectionOffsetDirective = "\t.secrel32\t"; HasMicrosoftFastStdCallMangling = true; + NeedsDwarfSectionOffsetDirective = true; } void MCAsmInfoMicrosoft::anchor() { } diff --git a/lib/MC/MCAsmStreamer.cpp b/lib/MC/MCAsmStreamer.cpp index 35613b4..9e86785 100644 --- a/lib/MC/MCAsmStreamer.cpp +++ b/lib/MC/MCAsmStreamer.cpp @@ -124,19 +124,15 @@ public: /// @name MCStreamer Interface /// @{ - virtual void ChangeSection(const MCSection *Section); + virtual void ChangeSection(const MCSection *Section, + const MCExpr *Subsection); virtual void InitSections() { InitToTextSection(); } virtual void InitToTextSection() { - // FIXME, this is MachO specific, but the testsuite - // expects this. - SwitchSection(getContext().getMachOSection( - "__TEXT", "__text", - MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS, - 0, SectionKind::getText())); + SwitchSection(getContext().getObjectFileInfo()->getTextSection()); } virtual void EmitLabel(MCSymbol *Symbol); @@ -333,9 +329,10 @@ static inline int64_t truncateToSize(int64_t Value, unsigned Bytes) { return Value & ((uint64_t) (int64_t) -1 >> (64 - Bytes * 8)); } -void MCAsmStreamer::ChangeSection(const MCSection *Section) { +void MCAsmStreamer::ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { assert(Section && "Cannot switch to a null section!"); - Section->PrintSwitchToSection(MAI, OS); + Section->PrintSwitchToSection(MAI, OS, Subsection); } void MCAsmStreamer::EmitEHSymAttributes(const MCSymbol *Symbol, @@ -642,7 +639,8 @@ static void PrintQuotedString(StringRef Data, raw_ostream &OS) { void MCAsmStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) { - assert(getCurrentSection() && "Cannot emit contents before setting section!"); + assert(getCurrentSection().first && + "Cannot emit contents before setting section!"); if (Data.empty()) return; if (Data.size() == 1) { @@ -673,7 +671,8 @@ void MCAsmStreamer::EmitIntValue(uint64_t Value, unsigned Size, void MCAsmStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size, unsigned AddrSpace) { - assert(getCurrentSection() && "Cannot emit contents before setting section!"); + assert(getCurrentSection().first && + "Cannot emit contents before setting section!"); const char *Directive = 0; switch (Size) { default: break; @@ -1368,7 +1367,8 @@ void MCAsmStreamer::EmitTCEntry(const MCSymbol &S) { } void MCAsmStreamer::EmitInstruction(const MCInst &Inst) { - assert(getCurrentSection() && "Cannot emit contents before setting section!"); + assert(getCurrentSection().first && + "Cannot emit contents before setting section!"); // Show the encoding in a comment if we have a code emitter. 
if (Emitter) diff --git a/lib/MC/MCAssembler.cpp b/lib/MC/MCAssembler.cpp index 208a4d5..a6a1a88 100644 --- a/lib/MC/MCAssembler.cpp +++ b/lib/MC/MCAssembler.cpp @@ -244,6 +244,36 @@ MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A) A->getSectionList().push_back(this); } +MCSectionData::iterator +MCSectionData::getSubsectionInsertionPoint(unsigned Subsection) { + if (Subsection == 0 && SubsectionFragmentMap.empty()) + return end(); + + SmallVectorImpl<std::pair<unsigned, MCFragment *> >::iterator MI = + std::lower_bound(SubsectionFragmentMap.begin(), SubsectionFragmentMap.end(), + std::make_pair(Subsection, (MCFragment *)0)); + bool ExactMatch = false; + if (MI != SubsectionFragmentMap.end()) { + ExactMatch = MI->first == Subsection; + if (ExactMatch) + ++MI; + } + iterator IP; + if (MI == SubsectionFragmentMap.end()) + IP = end(); + else + IP = MI->second; + if (!ExactMatch && Subsection != 0) { + // The GNU as documentation claims that subsections have an alignment of 4, + // although this appears not to be the case. + MCFragment *F = new MCDataFragment(); + SubsectionFragmentMap.insert(MI, std::make_pair(Subsection, F)); + getFragmentList().insert(IP, F); + F->setParent(this); + } + return IP; +} + /* *** */ MCSymbolData::MCSymbolData() : Symbol(0) {} diff --git a/lib/MC/MCDwarf.cpp b/lib/MC/MCDwarf.cpp index 0f8f074..18982e9 100644 --- a/lib/MC/MCDwarf.cpp +++ b/lib/MC/MCDwarf.cpp @@ -197,6 +197,8 @@ static inline void EmitDwarfLineTable(MCStreamer *MCOS, // actually a DW_LNE_end_sequence. // Switch to the section to be able to create a symbol at its end. + // TODO: keep track of the last subsection so that this symbol appears in the + // correct place. MCOS->SwitchSection(Section); MCContext &context = MCOS->getContext(); @@ -787,7 +789,7 @@ void MCGenDwarfLabelEntry::Make(MCSymbol *Symbol, MCStreamer *MCOS, if (Symbol->isTemporary()) return; MCContext &context = MCOS->getContext(); - if (context.getGenDwarfSection() != MCOS->getCurrentSection()) + if (context.getGenDwarfSection() != MCOS->getCurrentSection().first) return; // The dwarf label's name does not have the symbol name's leading @@ -899,7 +901,7 @@ namespace { /// EmitCompactUnwind - Emit the unwind information in a compact way. If /// we're successful, return 'true'. Otherwise, return 'false' and it will /// emit the normal CIE and FDE. - bool EmitCompactUnwind(MCStreamer &streamer, + void EmitCompactUnwind(MCStreamer &streamer, const MCDwarfFrameInfo &frame); const MCSymbol &EmitCIE(MCStreamer &streamer, @@ -1139,7 +1141,7 @@ void FrameEmitterImpl::EmitCFIInstructions(MCStreamer &streamer, /// EmitCompactUnwind - Emit the unwind information in a compact way. If we're /// successful, return 'true'. Otherwise, return 'false' and it will emit the /// normal CIE and FDE. -bool FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer, +void FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer, const MCDwarfFrameInfo &Frame) { MCContext &Context = Streamer.getContext(); const MCObjectFileInfo *MOFI = Context.getObjectFileInfo(); @@ -1168,14 +1170,13 @@ bool FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer, // .quad except_tab1 uint32_t Encoding = Frame.CompactUnwindEncoding; - if (!Encoding) return false; + if (!Encoding) return; + bool DwarfEHFrameOnly = (Encoding == MOFI->getCompactUnwindDwarfEHFrameOnly()); // The encoding needs to know we have an LSDA. 
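The getSubsectionInsertionPoint logic added in MCAssembler.cpp above keeps one empty sentinel MCDataFragment per non-zero subsection and returns the position just before the next higher subsection's sentinel (or end() for the highest), so fragments come out grouped by subsection number regardless of emission order. A small hypothetical trace of the resulting fragment list:

// Hypothetical emission order: subsection 2, then 0, then 1, then 2 again.
//   emit to 2: [S2, d2]                   S2 = sentinel created for subsection 2
//   emit to 0: [d0, S2, d2]               subsection 0 data goes before the lowest sentinel
//   emit to 1: [d0, S1, d1, S2, d2]
//   emit to 2: [d0, S1, d1, S2, d2, d2']  the highest subsection appends at the end
// Net effect: fragments are ordered by subsection number (0 < 1 < 2)
// independent of the order in which the subsections were written.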
- if (Frame.Lsda) + if (!DwarfEHFrameOnly && Frame.Lsda) Encoding |= 0x40000000; - Streamer.SwitchSection(MOFI->getCompactUnwindSection()); - // Range Start unsigned FDEEncoding = MOFI->getFDEEncoding(UsingCFI); unsigned Size = getSizeForEncoding(Streamer, FDEEncoding); @@ -1194,11 +1195,10 @@ bool FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer, Twine::utohexstr(Encoding)); Streamer.EmitIntValue(Encoding, Size); - // Personality Function Size = getSizeForEncoding(Streamer, dwarf::DW_EH_PE_absptr); if (VerboseAsm) Streamer.AddComment("Personality Function"); - if (Frame.Personality) + if (!DwarfEHFrameOnly && Frame.Personality) Streamer.EmitSymbolValue(Frame.Personality, Size); else Streamer.EmitIntValue(0, Size); // No personality fn @@ -1206,12 +1206,10 @@ bool FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer, // LSDA Size = getSizeForEncoding(Streamer, Frame.LsdaEncoding); if (VerboseAsm) Streamer.AddComment("LSDA"); - if (Frame.Lsda) + if (!DwarfEHFrameOnly && Frame.Lsda) Streamer.EmitSymbolValue(Frame.Lsda, Size); else Streamer.EmitIntValue(0, Size); // No LSDA - - return true; } const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer, @@ -1421,7 +1419,6 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer, } // Call Frame Instructions - EmitCFIInstructions(streamer, frame.Instructions, frame.Begin); // Padding @@ -1482,12 +1479,23 @@ void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer, ArrayRef<MCDwarfFrameInfo> FrameArray = Streamer.getFrameInfos(); // Emit the compact unwind info if available. - if (IsEH && MOFI->getCompactUnwindSection()) - for (unsigned i = 0, n = Streamer.getNumFrameInfos(); i < n; ++i) { - const MCDwarfFrameInfo &Frame = Streamer.getFrameInfo(i); - if (Frame.CompactUnwindEncoding) + if (IsEH && MOFI->getCompactUnwindSection()) { + unsigned NumFrameInfos = Streamer.getNumFrameInfos(); + bool SectionEmitted = false; + + if (NumFrameInfos) { + for (unsigned i = 0; i < NumFrameInfos; ++i) { + const MCDwarfFrameInfo &Frame = Streamer.getFrameInfo(i); + if (Frame.CompactUnwindEncoding == 0) continue; + if (!SectionEmitted) { + Streamer.SwitchSection(MOFI->getCompactUnwindSection()); + Streamer.EmitValueToAlignment(Context.getAsmInfo().getPointerSize()); + SectionEmitted = true; + } Emitter.EmitCompactUnwind(Streamer, Frame); + } } + } const MCSection &Section = IsEH ? 
*MOFI->getEHFrameSection() : *MOFI->getDwarfFrameSection(); diff --git a/lib/MC/MCELFStreamer.cpp b/lib/MC/MCELFStreamer.cpp index 7f5f1b6..116f86f 100644 --- a/lib/MC/MCELFStreamer.cpp +++ b/lib/MC/MCELFStreamer.cpp @@ -13,6 +13,7 @@ #include "llvm/MC/MCELFStreamer.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/MC/MCAssembler.h" #include "llvm/MC/MCCodeEmitter.h" #include "llvm/MC/MCContext.h" @@ -108,14 +109,15 @@ void MCELFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) { llvm_unreachable("invalid assembler flag!"); } -void MCELFStreamer::ChangeSection(const MCSection *Section) { +void MCELFStreamer::ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { MCSectionData *CurSection = getCurrentSectionData(); if (CurSection && CurSection->isBundleLocked()) report_fatal_error("Unterminated .bundle_lock when changing a section"); const MCSymbol *Grp = static_cast<const MCSectionELF *>(Section)->getGroup(); if (Grp) getAssembler().getOrCreateSymbolData(*Grp); - this->MCObjectStreamer::ChangeSection(Section); + this->MCObjectStreamer::ChangeSection(Section, Subsection); } void MCELFStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) { @@ -126,6 +128,26 @@ void MCELFStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) { Alias->setVariableValue(Value); } +// When GNU as encounters more than one .type declaration for an object it seems +// to use a mechanism similar to the one below to decide which type is actually +// used in the object file. The greater of T1 and T2 is selected based on the +// following ordering: +// STT_NOTYPE < STT_OBJECT < STT_FUNC < STT_GNU_IFUNC < STT_TLS < anything else +// If neither T1 < T2 nor T2 < T1 according to this ordering, use T2 (the user +// provided type). +static unsigned CombineSymbolTypes(unsigned T1, unsigned T2) { + unsigned TypeOrdering[] = {ELF::STT_NOTYPE, ELF::STT_OBJECT, ELF::STT_FUNC, + ELF::STT_GNU_IFUNC, ELF::STT_TLS}; + for (unsigned i = 0; i != array_lengthof(TypeOrdering); ++i) { + if (T1 == TypeOrdering[i]) + return T2; + if (T2 == TypeOrdering[i]) + return T1; + } + + return T2; +} + void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) { // Indirect symbols are handled differently, to match how 'as' handles @@ -187,27 +209,34 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol, break; case MCSA_ELF_TypeFunction: - MCELF::SetType(SD, ELF::STT_FUNC); + MCELF::SetType(SD, CombineSymbolTypes(MCELF::GetType(SD), + ELF::STT_FUNC)); break; case MCSA_ELF_TypeIndFunction: - MCELF::SetType(SD, ELF::STT_GNU_IFUNC); + MCELF::SetType(SD, CombineSymbolTypes(MCELF::GetType(SD), + ELF::STT_GNU_IFUNC)); break; case MCSA_ELF_TypeObject: - MCELF::SetType(SD, ELF::STT_OBJECT); + MCELF::SetType(SD, CombineSymbolTypes(MCELF::GetType(SD), + ELF::STT_OBJECT)); break; case MCSA_ELF_TypeTLS: - MCELF::SetType(SD, ELF::STT_TLS); + MCELF::SetType(SD, CombineSymbolTypes(MCELF::GetType(SD), + ELF::STT_TLS)); break; case MCSA_ELF_TypeCommon: - MCELF::SetType(SD, ELF::STT_COMMON); + // TODO: Emit these as a common symbol. + MCELF::SetType(SD, CombineSymbolTypes(MCELF::GetType(SD), + ELF::STT_OBJECT)); break; case MCSA_ELF_TypeNoType: - MCELF::SetType(SD, ELF::STT_NOTYPE); + MCELF::SetType(SD, CombineSymbolTypes(MCELF::GetType(SD), + ELF::STT_NOTYPE)); break; case MCSA_Protected: @@ -290,7 +319,7 @@ void MCELFStreamer::EmitValueToAlignment(unsigned ByteAlignment, // entry in the module's symbol table (the first being the null symbol). 
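The effect of CombineSymbolTypes above is to keep the "stronger" of two .type declarations, and to let any type outside the listed ordering win outright. A few illustrative results, assuming the ELF::STT_* constants from llvm/Support/ELF.h:

// CombineSymbolTypes(ELF::STT_NOTYPE,    ELF::STT_FUNC)   == ELF::STT_FUNC
// CombineSymbolTypes(ELF::STT_GNU_IFUNC, ELF::STT_FUNC)   == ELF::STT_GNU_IFUNC
// CombineSymbolTypes(ELF::STT_OBJECT,    ELF::STT_NOTYPE) == ELF::STT_OBJECT
// When neither argument appears in the ordering table, the user-supplied
// second argument (T2) is returned unchanged.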
void MCELFStreamer::EmitFileDirective(StringRef Filename) { MCSymbol *Symbol = getAssembler().getContext().GetOrCreateSymbol(Filename); - Symbol->setSection(*getCurrentSection()); + Symbol->setSection(*getCurrentSection().first); Symbol->setAbsolute(); MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol); @@ -406,11 +435,13 @@ void MCELFStreamer::EmitInstToData(const MCInst &Inst) { // Optimize memory usage by emitting the instruction to a // MCCompactEncodedInstFragment when not in a bundle-locked group and // there are no fixups registered. - MCCompactEncodedInstFragment *CEIF = new MCCompactEncodedInstFragment(SD); + MCCompactEncodedInstFragment *CEIF = new MCCompactEncodedInstFragment(); + insert(CEIF); CEIF->getContents().append(Code.begin(), Code.end()); return; } else { - DF = new MCDataFragment(SD); + DF = new MCDataFragment(); + insert(DF); if (SD->getBundleLockState() == MCSectionData::BundleLockedAlignToEnd) { // If this is a new fragment created for a bundle-locked group, and the // group was marked as "align_to_end", set a flag in the fragment. diff --git a/lib/MC/MCExpr.cpp b/lib/MC/MCExpr.cpp index 1a53934..6cde26c 100644 --- a/lib/MC/MCExpr.cpp +++ b/lib/MC/MCExpr.cpp @@ -194,7 +194,7 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) { case VK_TPOFF: return "TPOFF"; case VK_DTPOFF: return "DTPOFF"; case VK_TLVP: return "TLVP"; - case VK_SECREL: return "SECREL"; + case VK_SECREL: return "SECREL32"; case VK_ARM_NONE: return "(NONE)"; case VK_ARM_PLT: return "(PLT)"; case VK_ARM_GOT: return "(GOT)"; @@ -250,6 +250,7 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) { case VK_Mips_GOT_LO16: return "GOT_LO16"; case VK_Mips_CALL_HI16: return "CALL_HI16"; case VK_Mips_CALL_LO16: return "CALL_LO16"; + case VK_COFF_IMGREL32: return "IMGREL32"; } llvm_unreachable("Invalid variant kind"); } @@ -285,6 +286,10 @@ MCSymbolRefExpr::getVariantKindForName(StringRef Name) { .Case("dtpoff", VK_DTPOFF) .Case("TLVP", VK_TLVP) .Case("tlvp", VK_TLVP) + .Case("IMGREL", VK_COFF_IMGREL32) + .Case("imgrel", VK_COFF_IMGREL32) + .Case("SECREL32", VK_SECREL) + .Case("secrel32", VK_SECREL) .Default(VK_Invalid); } diff --git a/lib/MC/MCMachOStreamer.cpp b/lib/MC/MCMachOStreamer.cpp index 7d08d0e..e08b01b 100644 --- a/lib/MC/MCMachOStreamer.cpp +++ b/lib/MC/MCMachOStreamer.cpp @@ -122,11 +122,11 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) { assert(Symbol->isUndefined() && "Cannot define a symbol twice!"); // isSymbolLinkerVisible uses the section. - Symbol->setSection(*getCurrentSection()); + Symbol->setSection(*getCurrentSection().first); // We have to create a new fragment if this is an atom defining symbol, // fragments cannot span atoms. 
if (getAssembler().isSymbolLinkerVisible(*Symbol)) - new MCDataFragment(getCurrentSectionData()); + insert(new MCDataFragment()); MCObjectStreamer::EmitLabel(Symbol); diff --git a/lib/MC/MCNullStreamer.cpp b/lib/MC/MCNullStreamer.cpp index c872b22..659706a 100644 --- a/lib/MC/MCNullStreamer.cpp +++ b/lib/MC/MCNullStreamer.cpp @@ -30,13 +30,14 @@ namespace { virtual void InitSections() { } - virtual void ChangeSection(const MCSection *Section) { + virtual void ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { } virtual void EmitLabel(MCSymbol *Symbol) { assert(Symbol->isUndefined() && "Cannot define a symbol twice!"); - assert(getCurrentSection() && "Cannot emit before setting section!"); - Symbol->setSection(*getCurrentSection()); + assert(getCurrentSection().first &&"Cannot emit before setting section!"); + Symbol->setSection(*getCurrentSection().first); } virtual void EmitDebugLabel(MCSymbol *Symbol) { EmitLabel(Symbol); diff --git a/lib/MC/MCObjectFileInfo.cpp b/lib/MC/MCObjectFileInfo.cpp index bafa002..79ebad1 100644 --- a/lib/MC/MCObjectFileInfo.cpp +++ b/lib/MC/MCObjectFileInfo.cpp @@ -145,12 +145,16 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) { LSDASection = Ctx->getMachOSection("__TEXT", "__gcc_except_tab", 0, SectionKind::getReadOnlyWithRel()); - if (T.isMacOSX() && !T.isMacOSXVersionLT(10, 6)) + if (T.isMacOSX() && !T.isMacOSXVersionLT(10, 6)) { CompactUnwindSection = Ctx->getMachOSection("__LD", "__compact_unwind", MCSectionMachO::S_ATTR_DEBUG, SectionKind::getReadOnly()); + if (T.getArch() == Triple::x86_64 || T.getArch() == Triple::x86) + CompactUnwindDwarfEHFrameOnly = 0x04000000; + } + // Debug Information. DwarfAccelNamesSection = Ctx->getMachOSection("__DWARF", "__apple_names", @@ -223,9 +227,13 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) { } void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) { - // FIXME: Check this. Mips64el is using the base values, which is most likely - // incorrect. - if (T.getArch() != Triple::mips64el) + if (T.getArch() == Triple::mips || + T.getArch() == Triple::mipsel) + FDECFIEncoding = dwarf::DW_EH_PE_sdata4; + else if (T.getArch() == Triple::mips64 || + T.getArch() == Triple::mips64el) + FDECFIEncoding = dwarf::DW_EH_PE_sdata8; + else FDECFIEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4; if (T.getArch() == Triple::x86) { @@ -625,6 +633,8 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm, PersonalityEncoding = LSDAEncoding = FDEEncoding = FDECFIEncoding = TTypeEncoding = dwarf::DW_EH_PE_absptr; + CompactUnwindDwarfEHFrameOnly = 0; + EHFrameSection = 0; // Created on demand. CompactUnwindSection = 0; // Used only by selected targets. DwarfAccelNamesSection = 0; // Used only by selected targets. 
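The repeated getCurrentSection().first rewrites in the streamer hunks above and below follow from the streamer now tracking (section, subsection) pairs instead of bare MCSection pointers; the pair type MCSectionSubPair is used by name in the parser hunks further down. A sketch of the assumed caller pattern (the typedef itself lives in the MCStreamer header, which is not part of this excerpt):

// Assumed shape, for illustration only:
//   typedef std::pair<const MCSection *, const MCExpr *> MCSectionSubPair;
MCSectionSubPair Cur = Streamer.getCurrentSection();
if (Cur.first)                      // the section proper
  Symbol->setSection(*Cur.first);   // Cur.second is the optional subsection expr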
diff --git a/lib/MC/MCObjectStreamer.cpp b/lib/MC/MCObjectStreamer.cpp index 0d2ce83..d21ce8d 100644 --- a/lib/MC/MCObjectStreamer.cpp +++ b/lib/MC/MCObjectStreamer.cpp @@ -8,6 +8,7 @@ //===----------------------------------------------------------------------===// #include "llvm/MC/MCObjectStreamer.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/MC/MCAsmBackend.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCAssembler.h" @@ -45,14 +46,15 @@ void MCObjectStreamer::reset() { if (Assembler) Assembler->reset(); CurSectionData = 0; + CurInsertionPoint = MCSectionData::iterator(); MCStreamer::reset(); } MCFragment *MCObjectStreamer::getCurrentFragment() const { assert(getCurrentSectionData() && "No current section!"); - if (!getCurrentSectionData()->empty()) - return &getCurrentSectionData()->getFragmentList().back(); + if (CurInsertionPoint != getCurrentSectionData()->getFragmentList().begin()) + return prior(CurInsertionPoint); return 0; } @@ -61,8 +63,10 @@ MCDataFragment *MCObjectStreamer::getOrCreateDataFragment() const { MCDataFragment *F = dyn_cast_or_null<MCDataFragment>(getCurrentFragment()); // When bundling is enabled, we don't want to add data to a fragment that // already has instructions (see MCELFStreamer::EmitInstToData for details) - if (!F || (Assembler->isBundlingEnabled() && F->hasInstructions())) - F = new MCDataFragment(getCurrentSectionData()); + if (!F || (Assembler->isBundlingEnabled() && F->hasInstructions())) { + F = new MCDataFragment(); + insert(F); + } return F; } @@ -145,7 +149,7 @@ void MCObjectStreamer::EmitULEB128Value(const MCExpr *Value) { return; } Value = ForceExpAbs(Value); - new MCLEBFragment(*Value, false, getCurrentSectionData()); + insert(new MCLEBFragment(*Value, false)); } void MCObjectStreamer::EmitSLEB128Value(const MCExpr *Value) { @@ -155,7 +159,7 @@ void MCObjectStreamer::EmitSLEB128Value(const MCExpr *Value) { return; } Value = ForceExpAbs(Value); - new MCLEBFragment(*Value, true, getCurrentSectionData()); + insert(new MCLEBFragment(*Value, true)); } void MCObjectStreamer::EmitWeakReference(MCSymbol *Alias, @@ -163,10 +167,20 @@ void MCObjectStreamer::EmitWeakReference(MCSymbol *Alias, report_fatal_error("This file format doesn't support weak aliases."); } -void MCObjectStreamer::ChangeSection(const MCSection *Section) { +void MCObjectStreamer::ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { assert(Section && "Cannot switch to a null section!"); CurSectionData = &getAssembler().getOrCreateSectionData(*Section); + + int64_t IntSubsection = 0; + if (Subsection && + !Subsection->EvaluateAsAbsolute(IntSubsection, getAssembler())) + report_fatal_error("Cannot evaluate subsection number"); + if (IntSubsection < 0 || IntSubsection > 8192) + report_fatal_error("Subsection number out of range"); + CurInsertionPoint = + CurSectionData->getSubsectionInsertionPoint(unsigned(IntSubsection)); } void MCObjectStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) { @@ -185,7 +199,7 @@ void MCObjectStreamer::EmitInstruction(const MCInst &Inst) { // Now that a machine instruction has been assembled into this section, make // a line entry for any .loc directive that has been seen. - MCLineEntry::Make(this, getCurrentSection()); + MCLineEntry::Make(this, getCurrentSection().first); // If this instruction doesn't need relaxation, just emit it as data. 
MCAssembler &Assembler = getAssembler(); @@ -216,8 +230,8 @@ void MCObjectStreamer::EmitInstruction(const MCInst &Inst) { void MCObjectStreamer::EmitInstToFragment(const MCInst &Inst) { // Always create a new, separate fragment here, because its size can change // during relaxation. - MCRelaxableFragment *IF = - new MCRelaxableFragment(Inst, getCurrentSectionData()); + MCRelaxableFragment *IF = new MCRelaxableFragment(Inst); + insert(IF); SmallString<128> Code; raw_svector_ostream VecOS(Code); @@ -258,7 +272,7 @@ void MCObjectStreamer::EmitDwarfAdvanceLineAddr(int64_t LineDelta, return; } AddrDelta = ForceExpAbs(AddrDelta); - new MCDwarfLineAddrFragment(LineDelta, *AddrDelta, getCurrentSectionData()); + insert(new MCDwarfLineAddrFragment(LineDelta, *AddrDelta)); } void MCObjectStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel, @@ -270,7 +284,7 @@ void MCObjectStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel, return; } AddrDelta = ForceExpAbs(AddrDelta); - new MCDwarfCallFrameFragment(*AddrDelta, getCurrentSectionData()); + insert(new MCDwarfCallFrameFragment(*AddrDelta)); } void MCObjectStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) { @@ -284,8 +298,7 @@ void MCObjectStreamer::EmitValueToAlignment(unsigned ByteAlignment, unsigned MaxBytesToEmit) { if (MaxBytesToEmit == 0) MaxBytesToEmit = ByteAlignment; - new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit, - getCurrentSectionData()); + insert(new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit)); // Update the maximum alignment on the current section if necessary. if (ByteAlignment > getCurrentSectionData()->getAlignment()) @@ -302,7 +315,7 @@ bool MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset, unsigned char Value) { int64_t Res; if (Offset->EvaluateAsAbsolute(Res, getAssembler())) { - new MCOrgFragment(*Offset, Value, getCurrentSectionData()); + insert(new MCOrgFragment(*Offset, Value)); return false; } diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp index 9d52377..a903771 100644 --- a/lib/MC/MCParser/AsmParser.cpp +++ b/lib/MC/MCParser/AsmParser.cpp @@ -221,6 +221,7 @@ public: bool parseExpression(const MCExpr *&Res); virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc); + virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc); virtual bool parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc); virtual bool parseAbsoluteExpression(int64_t &Res); @@ -601,7 +602,7 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) { // If we are generating dwarf for assembly source files save the initial text // section and generate a .file directive. 
if (getContext().getGenDwarfForAssembly()) { - getContext().setGenDwarfSection(getStreamer().getCurrentSection()); + getContext().setGenDwarfSection(getStreamer().getCurrentSection().first); MCSymbol *SectionStartSym = getContext().CreateTempSymbol(); getStreamer().EmitLabel(SectionStartSym); getContext().setGenDwarfSectionStartSym(SectionStartSym); @@ -666,7 +667,7 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) { } void AsmParser::checkForValidSection() { - if (!ParsingInlineAsm && !getStreamer().getCurrentSection()) { + if (!ParsingInlineAsm && !getStreamer().getCurrentSection().first) { TokError("expected section directive before assembly directive"); Out.InitToTextSection(); } @@ -869,6 +870,10 @@ bool AsmParser::parseExpression(const MCExpr *&Res) { return parseExpression(Res, EndLoc); } +bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) { + return ParsePrimaryExpr(Res, EndLoc); +} + const MCExpr * AsmParser::ApplyModifierToExpr(const MCExpr *E, MCSymbolRefExpr::VariantKind Variant) { @@ -1488,7 +1493,8 @@ bool AsmParser::ParseStatement(ParseStatementInfo &Info) { // section is the initial text section then generate a .loc directive for // the instruction. if (!HadError && getContext().getGenDwarfForAssembly() && - getContext().getGenDwarfSection() == getStreamer().getCurrentSection()) { + getContext().getGenDwarfSection() == + getStreamer().getCurrentSection().first) { unsigned Line = SrcMgr.FindLineNumber(IDLoc, CurBuffer); @@ -1978,7 +1984,6 @@ static bool IsUsedIn(const MCSymbol *Sym, const MCExpr *Value) { case MCExpr::Binary: { const MCBinaryExpr *BE = static_cast<const MCBinaryExpr*>(Value); return IsUsedIn(Sym, BE->getLHS()) || IsUsedIn(Sym, BE->getRHS()); - break; } case MCExpr::Target: case MCExpr::Constant: @@ -2479,7 +2484,7 @@ bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) { // Check whether we should use optimal code alignment for this .align // directive. 
- bool UseCodeAlign = getStreamer().getCurrentSection()->UseCodeAlign(); + bool UseCodeAlign = getStreamer().getCurrentSection().first->UseCodeAlign(); if ((!HasFillExpr || Lexer.getMAI().getTextAlignFillValue() == FillExpr) && ValueSize == 1 && UseCodeAlign) { getStreamer().EmitCodeAlignment(Alignment, MaxBytesToFill); @@ -2631,12 +2636,10 @@ bool AsmParser::ParseDirectiveLoc() { Flags |= DWARF2_FLAG_IS_STMT; else return Error(Loc, "is_stmt value not 0 or 1"); - } - else { + } else { return Error(Loc, "is_stmt value not the constant value of 0 or 1"); } - } - else if (Name == "isa") { + } else if (Name == "isa") { Loc = getTok().getLoc(); const MCExpr *Value; if (parseExpression(Value)) @@ -2647,16 +2650,13 @@ bool AsmParser::ParseDirectiveLoc() { if (Value < 0) return Error(Loc, "isa number less than zero"); Isa = Value; - } - else { + } else { return Error(Loc, "isa number not a constant value"); } - } - else if (Name == "discriminator") { + } else if (Name == "discriminator") { if (parseAbsoluteExpression(Discriminator)) return true; - } - else { + } else { return Error(Loc, "unknown sub-directive in '.loc' directive"); } @@ -3615,18 +3615,17 @@ bool AsmParser::ParseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined) { bool AsmParser::ParseDirectiveElseIf(SMLoc DirectiveLoc) { if (TheCondState.TheCond != AsmCond::IfCond && TheCondState.TheCond != AsmCond::ElseIfCond) - Error(DirectiveLoc, "Encountered a .elseif that doesn't follow a .if or " - " an .elseif"); + Error(DirectiveLoc, "Encountered a .elseif that doesn't follow a .if or " + " an .elseif"); TheCondState.TheCond = AsmCond::ElseIfCond; bool LastIgnoreState = false; if (!TheCondStack.empty()) - LastIgnoreState = TheCondStack.back().Ignore; + LastIgnoreState = TheCondStack.back().Ignore; if (LastIgnoreState || TheCondState.CondMet) { TheCondState.Ignore = true; eatToEndOfStatement(); - } - else { + } else { int64_t ExprValue; if (parseAbsoluteExpression(ExprValue)) return true; @@ -3652,8 +3651,8 @@ bool AsmParser::ParseDirectiveElse(SMLoc DirectiveLoc) { if (TheCondState.TheCond != AsmCond::IfCond && TheCondState.TheCond != AsmCond::ElseIfCond) - Error(DirectiveLoc, "Encountered a .else that doesn't follow a .if or an " - ".elseif"); + Error(DirectiveLoc, "Encountered a .else that doesn't follow a .if or an " + ".elseif"); TheCondState.TheCond = AsmCond::ElseCond; bool LastIgnoreState = false; if (!TheCondStack.empty()) @@ -4046,19 +4045,17 @@ static int RewritesSort(const void *A, const void *B) { if (AsmRewriteB->Loc.getPointer() < AsmRewriteA->Loc.getPointer()) return 1; - // It's possible to have a SizeDirective rewrite and an Input/Output rewrite - // to the same location. Make sure the SizeDirective rewrite is performed - // first. This also ensure the sort algorithm is stable. - if (AsmRewriteA->Kind == AOK_SizeDirective) { - assert ((AsmRewriteB->Kind == AOK_Input || AsmRewriteB->Kind == AOK_Output) && - "Expected an Input/Output rewrite!"); + // It's possible to have a SizeDirective, Imm/ImmPrefix and an Input/Output + // rewrite to the same location. Make sure the SizeDirective rewrite is + // performed first, then the Imm/ImmPrefix and finally the Input/Output. This + // ensures the sort algorithm is stable. 
+ if (AsmRewritePrecedence [AsmRewriteA->Kind] > + AsmRewritePrecedence [AsmRewriteB->Kind]) return -1; - } - if (AsmRewriteB->Kind == AOK_SizeDirective) { - assert ((AsmRewriteA->Kind == AOK_Input || AsmRewriteA->Kind == AOK_Output) && - "Expected an Input/Output rewrite!"); + + if (AsmRewritePrecedence [AsmRewriteA->Kind] < + AsmRewritePrecedence [AsmRewriteB->Kind]) return 1; - } llvm_unreachable ("Unstable rewrite sort."); } @@ -4105,12 +4102,8 @@ AsmParser::parseMSInlineAsm(void *AsmLoc, std::string &AsmString, MCParsedAsmOperand *Operand = Info.ParsedOperands[i]; // Immediate. - if (Operand->isImm()) { - if (Operand->needAsmRewrite()) - AsmStrRewrites.push_back(AsmRewrite(AOK_ImmPrefix, - Operand->getStartLoc())); + if (Operand->isImm()) continue; - } // Register operand. if (Operand->isReg() && !Operand->needAddressOf()) { @@ -4122,33 +4115,27 @@ AsmParser::parseMSInlineAsm(void *AsmLoc, std::string &AsmString, } // Expr/Input or Output. - bool IsVarDecl; - unsigned Length, Size, Type; - void *OpDecl = SI.LookupInlineAsmIdentifier(Operand->getName(), AsmLoc, - Length, Size, Type, - IsVarDecl); + StringRef SymName = Operand->getSymName(); + if (SymName.empty()) + continue; + + void *OpDecl = Operand->getOpDecl(); if (!OpDecl) continue; bool isOutput = (i == 1) && Desc.mayStore(); - if (Operand->isMem() && Operand->needSizeDirective()) - AsmStrRewrites.push_back(AsmRewrite(AOK_SizeDirective, - Operand->getStartLoc(), /*Len*/0, - Operand->getMemSize())); - + SMLoc Start = SMLoc::getFromPointer(SymName.data()); if (isOutput) { ++InputIdx; OutputDecls.push_back(OpDecl); OutputDeclsAddressOf.push_back(Operand->needAddressOf()); OutputConstraints.push_back('=' + Operand->getConstraint().str()); - AsmStrRewrites.push_back(AsmRewrite(AOK_Output, Operand->getStartLoc(), - Operand->getNameLen())); + AsmStrRewrites.push_back(AsmRewrite(AOK_Output, Start, SymName.size())); } else { InputDecls.push_back(OpDecl); InputDeclsAddressOf.push_back(Operand->needAddressOf()); InputConstraints.push_back(Operand->getConstraint().str()); - AsmStrRewrites.push_back(AsmRewrite(AOK_Input, Operand->getStartLoc(), - Operand->getNameLen())); + AsmStrRewrites.push_back(AsmRewrite(AOK_Input, Start, SymName.size())); } } } @@ -4184,31 +4171,32 @@ AsmParser::parseMSInlineAsm(void *AsmLoc, std::string &AsmString, // Build the IR assembly string. std::string AsmStringIR; - AsmRewriteKind PrevKind = AOK_Imm; raw_string_ostream OS(AsmStringIR); - const char *Start = SrcMgr.getMemoryBuffer(0)->getBufferStart(); + const char *AsmStart = SrcMgr.getMemoryBuffer(0)->getBufferStart(); + const char *AsmEnd = SrcMgr.getMemoryBuffer(0)->getBufferEnd(); array_pod_sort(AsmStrRewrites.begin(), AsmStrRewrites.end(), RewritesSort); for (SmallVectorImpl<AsmRewrite>::iterator I = AsmStrRewrites.begin(), E = AsmStrRewrites.end(); I != E; ++I) { - const char *Loc = (*I).Loc.getPointer(); - assert(Loc >= Start && "Expected Loc to be after Start!"); - - unsigned AdditionalSkip = 0; AsmRewriteKind Kind = (*I).Kind; + if (Kind == AOK_Delete) + continue; - // Emit everything up to the immediate/expression. If the previous rewrite - // was a size directive, then this has already been done. - if (PrevKind != AOK_SizeDirective) - OS << StringRef(Start, Loc - Start); - PrevKind = Kind; + const char *Loc = (*I).Loc.getPointer(); + assert(Loc >= AsmStart && "Expected Loc to be at or after Start!"); + + // Emit everything up to the immediate/expression. 
+ unsigned Len = Loc - AsmStart; + if (Len) + OS << StringRef(AsmStart, Len); // Skip the original expression. if (Kind == AOK_Skip) { - Start = Loc + (*I).Len; + AsmStart = Loc + (*I).Len; continue; } + unsigned AdditionalSkip = 0; // Rewrite expressions in $N notation. switch (Kind) { default: break; @@ -4254,14 +4242,12 @@ AsmParser::parseMSInlineAsm(void *AsmLoc, std::string &AsmString, } // Skip the original expression. - if (Kind != AOK_SizeDirective) - Start = Loc + (*I).Len + AdditionalSkip; + AsmStart = Loc + (*I).Len + AdditionalSkip; } // Emit the remainder of the asm string. - const char *AsmEnd = SrcMgr.getMemoryBuffer(0)->getBufferEnd(); - if (Start != AsmEnd) - OS << StringRef(Start, AsmEnd - Start); + if (AsmStart != AsmEnd) + OS << StringRef(AsmStart, AsmEnd - AsmStart); AsmString = OS.str(); return false; diff --git a/lib/MC/MCParser/DarwinAsmParser.cpp b/lib/MC/MCParser/DarwinAsmParser.cpp index 6d6409f..7eb8b74 100644 --- a/lib/MC/MCParser/DarwinAsmParser.cpp +++ b/lib/MC/MCParser/DarwinAsmParser.cpp @@ -566,10 +566,10 @@ bool DarwinAsmParser::ParseDirectivePopSection(StringRef, SMLoc) { /// ParseDirectivePrevious: /// ::= .previous bool DarwinAsmParser::ParseDirectivePrevious(StringRef DirName, SMLoc) { - const MCSection *PreviousSection = getStreamer().getPreviousSection(); - if (PreviousSection == NULL) + MCSectionSubPair PreviousSection = getStreamer().getPreviousSection(); + if (PreviousSection.first == NULL) return TokError(".previous without corresponding .section"); - getStreamer().SwitchSection(PreviousSection); + getStreamer().SwitchSection(PreviousSection.first, PreviousSection.second); return false; } diff --git a/lib/MC/MCParser/ELFAsmParser.cpp b/lib/MC/MCParser/ELFAsmParser.cpp index 4c45e08..3134fc3 100644 --- a/lib/MC/MCParser/ELFAsmParser.cpp +++ b/lib/MC/MCParser/ELFAsmParser.cpp @@ -76,6 +76,7 @@ public: &ELFAsmParser::ParseDirectiveSymbolAttribute>(".internal"); addDirectiveHandler< &ELFAsmParser::ParseDirectiveSymbolAttribute>(".hidden"); + addDirectiveHandler<&ELFAsmParser::ParseDirectiveSubsection>(".subsection"); } // FIXME: Part of this logic is duplicated in the MCELFStreamer. 
What is @@ -147,9 +148,11 @@ public: bool ParseDirectiveVersion(StringRef, SMLoc); bool ParseDirectiveWeakref(StringRef, SMLoc); bool ParseDirectiveSymbolAttribute(StringRef, SMLoc); + bool ParseDirectiveSubsection(StringRef, SMLoc); private: bool ParseSectionName(StringRef &SectionName); + bool ParseSectionArguments(bool IsPush); }; } @@ -191,12 +194,15 @@ bool ELFAsmParser::ParseDirectiveSymbolAttribute(StringRef Directive, SMLoc) { bool ELFAsmParser::ParseSectionSwitch(StringRef Section, unsigned Type, unsigned Flags, SectionKind Kind) { - if (getLexer().isNot(AsmToken::EndOfStatement)) - return TokError("unexpected token in section switching directive"); - Lex(); + const MCExpr *Subsection = 0; + if (getLexer().isNot(AsmToken::EndOfStatement)) { + if (getParser().parseExpression(Subsection)) + return true; + } getStreamer().SwitchSection(getContext().getELFSection( - Section, Type, Flags, Kind)); + Section, Type, Flags, Kind), + Subsection); return false; } @@ -316,7 +322,7 @@ static int parseSectionFlags(StringRef flagsStr) { bool ELFAsmParser::ParseDirectivePushSection(StringRef s, SMLoc loc) { getStreamer().PushSection(); - if (ParseDirectiveSection(s, loc)) { + if (ParseSectionArguments(/*IsPush=*/true)) { getStreamer().PopSection(); return true; } @@ -332,6 +338,10 @@ bool ELFAsmParser::ParseDirectivePopSection(StringRef, SMLoc) { // FIXME: This is a work in progress. bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) { + return ParseSectionArguments(/*IsPush=*/false); +} + +bool ELFAsmParser::ParseSectionArguments(bool IsPush) { StringRef SectionName; if (ParseSectionName(SectionName)) @@ -341,6 +351,7 @@ bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) { int64_t Size = 0; StringRef GroupName; unsigned Flags = 0; + const MCExpr *Subsection = 0; // Set the defaults first. 
if (SectionName == ".fini" || SectionName == ".init" || @@ -352,6 +363,14 @@ bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) { if (getLexer().is(AsmToken::Comma)) { Lex(); + if (IsPush && getLexer().isNot(AsmToken::String)) { + if (getParser().parseExpression(Subsection)) + return true; + if (getLexer().isNot(AsmToken::Comma)) + goto EndStmt; + Lex(); + } + if (getLexer().isNot(AsmToken::String)) return TokError("expected string in directive"); @@ -408,6 +427,7 @@ bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) { } } +EndStmt: if (getLexer().isNot(AsmToken::EndOfStatement)) return TokError("unexpected token in directive"); @@ -444,15 +464,16 @@ bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) { SectionKind Kind = computeSectionKind(Flags); getStreamer().SwitchSection(getContext().getELFSection(SectionName, Type, Flags, Kind, Size, - GroupName)); + GroupName), + Subsection); return false; } bool ELFAsmParser::ParseDirectivePrevious(StringRef DirName, SMLoc) { - const MCSection *PreviousSection = getStreamer().getPreviousSection(); - if (PreviousSection == NULL) + MCSectionSubPair PreviousSection = getStreamer().getPreviousSection(); + if (PreviousSection.first == NULL) return TokError(".previous without corresponding .section"); - getStreamer().SwitchSection(PreviousSection); + getStreamer().SwitchSection(PreviousSection.first, PreviousSection.second); return false; } @@ -613,6 +634,20 @@ bool ELFAsmParser::ParseDirectiveWeakref(StringRef, SMLoc) { return false; } +bool ELFAsmParser::ParseDirectiveSubsection(StringRef, SMLoc) { + const MCExpr *Subsection = 0; + if (getLexer().isNot(AsmToken::EndOfStatement)) { + if (getParser().parseExpression(Subsection)) + return true; + } + + if (getLexer().isNot(AsmToken::EndOfStatement)) + return TokError("unexpected token in directive"); + + getStreamer().SubSection(Subsection); + return false; +} + namespace llvm { MCAsmParserExtension *createELFAsmParser() { diff --git a/lib/MC/MCPureStreamer.cpp b/lib/MC/MCPureStreamer.cpp index 0e04c55..8ae724f 100644 --- a/lib/MC/MCPureStreamer.cpp +++ b/lib/MC/MCPureStreamer.cpp @@ -12,9 +12,8 @@ #include "llvm/MC/MCCodeEmitter.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCObjectFileInfo.h" #include "llvm/MC/MCObjectStreamer.h" -// FIXME: Remove this. -#include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/ErrorHandling.h" @@ -113,25 +112,22 @@ void MCPureStreamer::InitSections() { } void MCPureStreamer::InitToTextSection() { - // FIMXE: To what!? - SwitchSection(getContext().getMachOSection("__TEXT", "__text", - MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS, - 0, SectionKind::getText())); + SwitchSection(getContext().getObjectFileInfo()->getTextSection()); } void MCPureStreamer::EmitLabel(MCSymbol *Symbol) { assert(Symbol->isUndefined() && "Cannot define a symbol twice!"); assert(!Symbol->isVariable() && "Cannot emit a variable symbol!"); - assert(getCurrentSection() && "Cannot emit before setting section!"); + assert(getCurrentSection().first && "Cannot emit before setting section!"); - Symbol->setSection(*getCurrentSection()); + Symbol->setSection(*getCurrentSection().first); MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol); // We have to create a new fragment if this is an atom defining symbol, // fragments cannot span atoms. 
if (getAssembler().isSymbolLinkerVisible(SD.getSymbol())) - new MCDataFragment(getCurrentSectionData()); + insert(new MCDataFragment()); // FIXME: This is wasteful, we don't necessarily need to create a data // fragment. Instead, we should mark the symbol as pointing into the data @@ -166,8 +162,7 @@ void MCPureStreamer::EmitValueToAlignment(unsigned ByteAlignment, // MCObjectStreamer. if (MaxBytesToEmit == 0) MaxBytesToEmit = ByteAlignment; - new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit, - getCurrentSectionData()); + insert(new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit)); // Update the maximum alignment on the current section if necessary. if (ByteAlignment > getCurrentSectionData()->getAlignment()) @@ -180,8 +175,8 @@ void MCPureStreamer::EmitCodeAlignment(unsigned ByteAlignment, // MCObjectStreamer. if (MaxBytesToEmit == 0) MaxBytesToEmit = ByteAlignment; - MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit, - getCurrentSectionData()); + MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit); + insert(F); F->setEmitNops(true); // Update the maximum alignment on the current section if necessary. @@ -191,13 +186,13 @@ void MCPureStreamer::EmitCodeAlignment(unsigned ByteAlignment, bool MCPureStreamer::EmitValueToOffset(const MCExpr *Offset, unsigned char Value) { - new MCOrgFragment(*Offset, Value, getCurrentSectionData()); + insert(new MCOrgFragment(*Offset, Value)); return false; } void MCPureStreamer::EmitInstToFragment(const MCInst &Inst) { - MCRelaxableFragment *IF = - new MCRelaxableFragment(Inst, getCurrentSectionData()); + MCRelaxableFragment *IF = new MCRelaxableFragment(Inst); + insert(IF); // Add the fixups and data. // diff --git a/lib/MC/MCSectionCOFF.cpp b/lib/MC/MCSectionCOFF.cpp index aac9377..6cedf06 100644 --- a/lib/MC/MCSectionCOFF.cpp +++ b/lib/MC/MCSectionCOFF.cpp @@ -29,7 +29,8 @@ bool MCSectionCOFF::ShouldOmitSectionDirective(StringRef Name, } void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI, - raw_ostream &OS) const { + raw_ostream &OS, + const MCExpr *Subsection) const { // standard sections don't require the '.section' if (ShouldOmitSectionDirective(SectionName, MAI)) { diff --git a/lib/MC/MCSectionELF.cpp b/lib/MC/MCSectionELF.cpp index 0775cfa..bf1a984 100644 --- a/lib/MC/MCSectionELF.cpp +++ b/lib/MC/MCSectionELF.cpp @@ -10,6 +10,7 @@ #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/ELF.h" #include "llvm/Support/raw_ostream.h" @@ -32,10 +33,14 @@ bool MCSectionELF::ShouldOmitSectionDirective(StringRef Name, } void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI, - raw_ostream &OS) const { + raw_ostream &OS, + const MCExpr *Subsection) const { if (ShouldOmitSectionDirective(SectionName, MAI)) { - OS << '\t' << getSectionName() << '\n'; + OS << '\t' << getSectionName(); + if (Subsection) + OS << '\t' << *Subsection; + OS << '\n'; return; } @@ -129,6 +134,9 @@ void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI, if (Flags & ELF::SHF_GROUP) OS << "," << Group->getName() << ",comdat"; OS << '\n'; + + if (Subsection) + OS << "\t.subsection\t" << *Subsection << '\n'; } bool MCSectionELF::UseCodeAlign() const { diff --git a/lib/MC/MCSectionMachO.cpp b/lib/MC/MCSectionMachO.cpp index fc32315..8704513 100644 --- a/lib/MC/MCSectionMachO.cpp +++ b/lib/MC/MCSectionMachO.cpp @@ -91,7 +91,8 @@ 
MCSectionMachO::MCSectionMachO(StringRef Segment, StringRef Section, } void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI, - raw_ostream &OS) const { + raw_ostream &OS, + const MCExpr *Subsection) const { OS << "\t.section\t" << getSegmentName() << ',' << getSectionName(); // Get the section type and attributes. diff --git a/lib/MC/MCStreamer.cpp b/lib/MC/MCStreamer.cpp index d02e553..4839c34 100644 --- a/lib/MC/MCStreamer.cpp +++ b/lib/MC/MCStreamer.cpp @@ -24,8 +24,7 @@ using namespace llvm; MCStreamer::MCStreamer(StreamerKind Kind, MCContext &Ctx) : Kind(Kind), Context(Ctx), EmitEHFrame(true), EmitDebugFrame(false), CurrentW64UnwindInfo(0), LastSymbol(0), AutoInitSections(false) { - const MCSection *section = 0; - SectionStack.push_back(std::make_pair(section, section)); + SectionStack.push_back(std::pair<MCSectionSubPair, MCSectionSubPair>()); } MCStreamer::~MCStreamer() { @@ -40,9 +39,8 @@ void MCStreamer::reset() { EmitDebugFrame = false; CurrentW64UnwindInfo = 0; LastSymbol = 0; - const MCSection *section = 0; SectionStack.clear(); - SectionStack.push_back(std::make_pair(section, section)); + SectionStack.push_back(std::pair<MCSectionSubPair, MCSectionSubPair>()); } const MCExpr *MCStreamer::BuildSymbolDiff(MCContext &Context, @@ -188,15 +186,15 @@ void MCStreamer::EmitEHSymAttributes(const MCSymbol *Symbol, void MCStreamer::EmitLabel(MCSymbol *Symbol) { assert(!Symbol->isVariable() && "Cannot emit a variable symbol!"); - assert(getCurrentSection() && "Cannot emit before setting section!"); - Symbol->setSection(*getCurrentSection()); + assert(getCurrentSection().first && "Cannot emit before setting section!"); + Symbol->setSection(*getCurrentSection().first); LastSymbol = Symbol; } void MCStreamer::EmitDebugLabel(MCSymbol *Symbol) { assert(!Symbol->isVariable() && "Cannot emit a variable symbol!"); - assert(getCurrentSection() && "Cannot emit before setting section!"); - Symbol->setSection(*getCurrentSection()); + assert(getCurrentSection().first && "Cannot emit before setting section!"); + Symbol->setSection(*getCurrentSection().first); LastSymbol = Symbol; } diff --git a/lib/MC/WinCOFFObjectWriter.cpp b/lib/MC/WinCOFFObjectWriter.cpp index 6dffed7..518b59e 100644 --- a/lib/MC/WinCOFFObjectWriter.cpp +++ b/lib/MC/WinCOFFObjectWriter.cpp @@ -147,8 +147,7 @@ public: object_t *createCOFFEntity(StringRef Name, list_t &List); void DefineSection(MCSectionData const &SectionData); - void DefineSymbol(MCSymbol const &Symbol, - MCSymbolData const &SymbolData, + void DefineSymbol(MCSymbolData const &SymbolData, MCAssembler &Assembler); void MakeSymbolReal(COFFSymbol &S, size_t Index); @@ -410,25 +409,23 @@ void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) { /// This function takes a section data object from the assembler /// and creates the associated COFF symbol staging object. 
-void WinCOFFObjectWriter::DefineSymbol(MCSymbol const &Symbol, - MCSymbolData const &SymbolData, +void WinCOFFObjectWriter::DefineSymbol(MCSymbolData const &SymbolData, MCAssembler &Assembler) { + MCSymbol const &Symbol = SymbolData.getSymbol(); COFFSymbol *coff_symbol = GetOrCreateCOFFSymbol(&Symbol); - - coff_symbol->Data.Type = (SymbolData.getFlags() & 0x0000FFFF) >> 0; - coff_symbol->Data.StorageClass = (SymbolData.getFlags() & 0x00FF0000) >> 16; + SymbolMap[&Symbol] = coff_symbol; if (SymbolData.getFlags() & COFF::SF_WeakExternal) { coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL; if (Symbol.isVariable()) { - coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL; + const MCSymbolRefExpr *SymRef = + dyn_cast<MCSymbolRefExpr>(Symbol.getVariableValue()); - // FIXME: This assert message isn't very good. - assert(Symbol.getVariableValue()->getKind() == MCExpr::SymbolRef && - "Value must be a SymbolRef!"); + if (!SymRef) + report_fatal_error("Weak externals may only alias symbols"); - coff_symbol->Other = GetOrCreateCOFFSymbol(&Symbol); + coff_symbol->Other = GetOrCreateCOFFSymbol(&SymRef->getSymbol()); } else { std::string WeakName = std::string(".weak.") + Symbol.getName().str() @@ -448,23 +445,29 @@ void WinCOFFObjectWriter::DefineSymbol(MCSymbol const &Symbol, coff_symbol->Aux[0].Aux.WeakExternal.TagIndex = 0; coff_symbol->Aux[0].Aux.WeakExternal.Characteristics = COFF::IMAGE_WEAK_EXTERN_SEARCH_LIBRARY; - } - // If no storage class was specified in the streamer, define it here. - if (coff_symbol->Data.StorageClass == 0) { - bool external = SymbolData.isExternal() || (SymbolData.Fragment == NULL); + coff_symbol->MCData = &SymbolData; + } else { + const MCSymbolData &ResSymData = + Assembler.getSymbolData(Symbol.AliasedSymbol()); - coff_symbol->Data.StorageClass = - external ? COFF::IMAGE_SYM_CLASS_EXTERNAL : COFF::IMAGE_SYM_CLASS_STATIC; - } + coff_symbol->Data.Type = (ResSymData.getFlags() & 0x0000FFFF) >> 0; + coff_symbol->Data.StorageClass = (ResSymData.getFlags() & 0x00FF0000) >> 16; - if (SymbolData.Fragment != NULL) - coff_symbol->Section = - SectionMap[&SymbolData.Fragment->getParent()->getSection()]; + // If no storage class was specified in the streamer, define it here. + if (coff_symbol->Data.StorageClass == 0) { + bool external = ResSymData.isExternal() || (ResSymData.Fragment == NULL); - // Bind internal COFF symbol to MC symbol. - coff_symbol->MCData = &SymbolData; - SymbolMap[&Symbol] = coff_symbol; + coff_symbol->Data.StorageClass = + external ? 
COFF::IMAGE_SYM_CLASS_EXTERNAL : COFF::IMAGE_SYM_CLASS_STATIC; + } + + if (ResSymData.Fragment != NULL) + coff_symbol->Section = + SectionMap[&ResSymData.Fragment->getParent()->getSection()]; + + coff_symbol->MCData = &ResSymData; + } } /// making a section real involves assigned it a number and putting @@ -620,9 +623,7 @@ void WinCOFFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm, for (MCAssembler::const_symbol_iterator i = Asm.symbol_begin(), e = Asm.symbol_end(); i != e; i++) { if (ExportSymbol(*i, Asm)) { - const MCSymbol &Alias = i->getSymbol(); - const MCSymbol &Symbol = Alias.AliasedSymbol(); - DefineSymbol(Alias, Asm.getSymbolData(Symbol), Asm); + DefineSymbol(*i, Asm); } } } @@ -689,13 +690,8 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm, ++Reloc.Symb->Relocations; Reloc.Data.VirtualAddress += Fixup.getOffset(); - - unsigned FixupKind = Fixup.getKind(); - - if (CrossSection) - FixupKind = FK_PCRel_4; - - Reloc.Data.Type = TargetObjectWriter->getRelocType(FixupKind); + Reloc.Data.Type = TargetObjectWriter->getRelocType(Target, Fixup, + CrossSection); // FIXME: Can anyone explain what this does other than adjust for the size // of the offset? diff --git a/lib/Makefile b/lib/Makefile index 043eda6..57f016b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,7 +11,8 @@ LEVEL = .. include $(LEVEL)/Makefile.config PARALLEL_DIRS := IR AsmParser Bitcode Archive Analysis Transforms CodeGen \ - Target ExecutionEngine Linker MC Object Option DebugInfo + Target ExecutionEngine Linker MC Object Option DebugInfo \ + IRReader include $(LEVEL)/Makefile.common diff --git a/lib/Object/CMakeLists.txt b/lib/Object/CMakeLists.txt index c20fc0c..4ed129f 100644 --- a/lib/Object/CMakeLists.txt +++ b/lib/Object/CMakeLists.txt @@ -4,7 +4,6 @@ add_llvm_library(LLVMObject COFFObjectFile.cpp ELFObjectFile.cpp Error.cpp - MachOObject.cpp MachOObjectFile.cpp Object.cpp ObjectFile.cpp diff --git a/lib/Object/COFFObjectFile.cpp b/lib/Object/COFFObjectFile.cpp index b0b84fc..6967a00 100644 --- a/lib/Object/COFFObjectFile.cpp +++ b/lib/Object/COFFObjectFile.cpp @@ -431,7 +431,7 @@ relocation_iterator COFFObjectFile::getSectionRelEnd(DataRefImpl Sec) const { } COFFObjectFile::COFFObjectFile(MemoryBuffer *Object, error_code &ec) - : ObjectFile(Binary::ID_COFF, Object, ec) + : ObjectFile(Binary::ID_COFF, Object) , Header(0) , SectionTable(0) , SymbolTable(0) @@ -707,8 +707,7 @@ error_code COFFObjectFile::getRelocationNext(DataRefImpl Rel, } error_code COFFObjectFile::getRelocationAddress(DataRefImpl Rel, uint64_t &Res) const { - Res = toRel(Rel)->VirtualAddress; - return object_error::success; + report_fatal_error("getRelocationAddress not implemented in COFFObjectFile"); } error_code COFFObjectFile::getRelocationOffset(DataRefImpl Rel, uint64_t &Res) const { diff --git a/lib/Object/MachOObject.cpp b/lib/Object/MachOObject.cpp deleted file mode 100644 index c9c341a..0000000 --- a/lib/Object/MachOObject.cpp +++ /dev/null @@ -1,422 +0,0 @@ -//===- MachOObject.cpp - Mach-O Object File Wrapper -----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -#include "llvm/Object/MachOObject.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/Support/DataExtractor.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/Host.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/SwapByteOrder.h" -#include "llvm/Support/raw_ostream.h" - -using namespace llvm; -using namespace llvm::object; - -/* Translation Utilities */ - -template<typename T> -static void SwapValue(T &Value) { - Value = sys::SwapByteOrder(Value); -} - -template<typename T> -static void SwapStruct(T &Value); - -template<typename T> -static void ReadInMemoryStruct(const MachOObject &MOO, - StringRef Buffer, uint64_t Base, - InMemoryStruct<T> &Res) { - typedef T struct_type; - uint64_t Size = sizeof(struct_type); - - // Check that the buffer contains the expected data. - if (Base + Size > Buffer.size()) { - Res = 0; - return; - } - - // Check whether we can return a direct pointer. - struct_type *Ptr = reinterpret_cast<struct_type *>( - const_cast<char *>(Buffer.data() + Base)); - if (!MOO.isSwappedEndian()) { - Res = Ptr; - return; - } - - // Otherwise, copy the struct and translate the values. - Res = *Ptr; - SwapStruct(*Res); -} - -/* *** */ - -MachOObject::MachOObject(MemoryBuffer *Buffer_, bool IsLittleEndian_, - bool Is64Bit_) - : Buffer(Buffer_), IsLittleEndian(IsLittleEndian_), Is64Bit(Is64Bit_), - IsSwappedEndian(IsLittleEndian != sys::isLittleEndianHost()), - HasStringTable(false), LoadCommands(0), NumLoadedCommands(0) { - // Load the common header. - memcpy(&Header, Buffer->getBuffer().data(), sizeof(Header)); - if (IsSwappedEndian) { - SwapValue(Header.Magic); - SwapValue(Header.CPUType); - SwapValue(Header.CPUSubtype); - SwapValue(Header.FileType); - SwapValue(Header.NumLoadCommands); - SwapValue(Header.SizeOfLoadCommands); - SwapValue(Header.Flags); - } - - if (is64Bit()) { - memcpy(&Header64Ext, Buffer->getBuffer().data() + sizeof(Header), - sizeof(Header64Ext)); - if (IsSwappedEndian) { - SwapValue(Header64Ext.Reserved); - } - } - - // Create the load command array if sane. - if (getHeader().NumLoadCommands < (1 << 20)) - LoadCommands = new LoadCommandInfo[getHeader().NumLoadCommands]; -} - -MachOObject::~MachOObject() { - delete [] LoadCommands; -} - -MachOObject *MachOObject::LoadFromBuffer(MemoryBuffer *Buffer, - std::string *ErrorStr) { - // First, check the magic value and initialize the basic object info. - bool IsLittleEndian = false, Is64Bit = false; - StringRef Magic = Buffer->getBuffer().slice(0, 4); - if (Magic == "\xFE\xED\xFA\xCE") { - } else if (Magic == "\xCE\xFA\xED\xFE") { - IsLittleEndian = true; - } else if (Magic == "\xFE\xED\xFA\xCF") { - Is64Bit = true; - } else if (Magic == "\xCF\xFA\xED\xFE") { - IsLittleEndian = true; - Is64Bit = true; - } else { - if (ErrorStr) *ErrorStr = "not a Mach object file (invalid magic)"; - return 0; - } - - // Ensure that the at least the full header is present. - unsigned HeaderSize = Is64Bit ? macho::Header64Size : macho::Header32Size; - if (Buffer->getBufferSize() < HeaderSize) { - if (ErrorStr) *ErrorStr = "not a Mach object file (invalid header)"; - return 0; - } - - OwningPtr<MachOObject> Object(new MachOObject(Buffer, IsLittleEndian, - Is64Bit)); - - // Check for bogus number of load commands. 
- if (Object->getHeader().NumLoadCommands >= (1 << 20)) { - if (ErrorStr) *ErrorStr = "not a Mach object file (unreasonable header)"; - return 0; - } - - if (ErrorStr) *ErrorStr = ""; - return Object.take(); -} - -StringRef MachOObject::getData(size_t Offset, size_t Size) const { - return Buffer->getBuffer().substr(Offset,Size); -} - -void MachOObject::RegisterStringTable(macho::SymtabLoadCommand &SLC) { - HasStringTable = true; - StringTable = Buffer->getBuffer().substr(SLC.StringTableOffset, - SLC.StringTableSize); -} - -const MachOObject::LoadCommandInfo & -MachOObject::getLoadCommandInfo(unsigned Index) const { - assert(Index < getHeader().NumLoadCommands && "Invalid index!"); - - // Load the command, if necessary. - if (Index >= NumLoadedCommands) { - uint64_t Offset; - if (Index == 0) { - Offset = getHeaderSize(); - } else { - const LoadCommandInfo &Prev = getLoadCommandInfo(Index - 1); - Offset = Prev.Offset + Prev.Command.Size; - } - - LoadCommandInfo &Info = LoadCommands[Index]; - memcpy(&Info.Command, Buffer->getBuffer().data() + Offset, - sizeof(macho::LoadCommand)); - if (IsSwappedEndian) { - SwapValue(Info.Command.Type); - SwapValue(Info.Command.Size); - } - Info.Offset = Offset; - NumLoadedCommands = Index + 1; - } - - return LoadCommands[Index]; -} - -template<> -void SwapStruct(macho::SegmentLoadCommand &Value) { - SwapValue(Value.Type); - SwapValue(Value.Size); - SwapValue(Value.VMAddress); - SwapValue(Value.VMSize); - SwapValue(Value.FileOffset); - SwapValue(Value.FileSize); - SwapValue(Value.MaxVMProtection); - SwapValue(Value.InitialVMProtection); - SwapValue(Value.NumSections); - SwapValue(Value.Flags); -} -void MachOObject::ReadSegmentLoadCommand(const LoadCommandInfo &LCI, - InMemoryStruct<macho::SegmentLoadCommand> &Res) const { - ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res); -} - -template<> -void SwapStruct(macho::Segment64LoadCommand &Value) { - SwapValue(Value.Type); - SwapValue(Value.Size); - SwapValue(Value.VMAddress); - SwapValue(Value.VMSize); - SwapValue(Value.FileOffset); - SwapValue(Value.FileSize); - SwapValue(Value.MaxVMProtection); - SwapValue(Value.InitialVMProtection); - SwapValue(Value.NumSections); - SwapValue(Value.Flags); -} -void MachOObject::ReadSegment64LoadCommand(const LoadCommandInfo &LCI, - InMemoryStruct<macho::Segment64LoadCommand> &Res) const { - ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res); -} - -template<> -void SwapStruct(macho::SymtabLoadCommand &Value) { - SwapValue(Value.Type); - SwapValue(Value.Size); - SwapValue(Value.SymbolTableOffset); - SwapValue(Value.NumSymbolTableEntries); - SwapValue(Value.StringTableOffset); - SwapValue(Value.StringTableSize); -} -void MachOObject::ReadSymtabLoadCommand(const LoadCommandInfo &LCI, - InMemoryStruct<macho::SymtabLoadCommand> &Res) const { - ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res); -} - -template<> -void SwapStruct(macho::DysymtabLoadCommand &Value) { - SwapValue(Value.Type); - SwapValue(Value.Size); - SwapValue(Value.LocalSymbolsIndex); - SwapValue(Value.NumLocalSymbols); - SwapValue(Value.ExternalSymbolsIndex); - SwapValue(Value.NumExternalSymbols); - SwapValue(Value.UndefinedSymbolsIndex); - SwapValue(Value.NumUndefinedSymbols); - SwapValue(Value.TOCOffset); - SwapValue(Value.NumTOCEntries); - SwapValue(Value.ModuleTableOffset); - SwapValue(Value.NumModuleTableEntries); - SwapValue(Value.ReferenceSymbolTableOffset); - SwapValue(Value.NumReferencedSymbolTableEntries); - SwapValue(Value.IndirectSymbolTableOffset); - 
SwapValue(Value.NumIndirectSymbolTableEntries); - SwapValue(Value.ExternalRelocationTableOffset); - SwapValue(Value.NumExternalRelocationTableEntries); - SwapValue(Value.LocalRelocationTableOffset); - SwapValue(Value.NumLocalRelocationTableEntries); -} -void MachOObject::ReadDysymtabLoadCommand(const LoadCommandInfo &LCI, - InMemoryStruct<macho::DysymtabLoadCommand> &Res) const { - ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res); -} - -template<> -void SwapStruct(macho::LinkeditDataLoadCommand &Value) { - SwapValue(Value.Type); - SwapValue(Value.Size); - SwapValue(Value.DataOffset); - SwapValue(Value.DataSize); -} -void MachOObject::ReadLinkeditDataLoadCommand(const LoadCommandInfo &LCI, - InMemoryStruct<macho::LinkeditDataLoadCommand> &Res) const { - ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res); -} - -template<> -void SwapStruct(macho::LinkerOptionsLoadCommand &Value) { - SwapValue(Value.Type); - SwapValue(Value.Size); - SwapValue(Value.Count); -} -void MachOObject::ReadLinkerOptionsLoadCommand(const LoadCommandInfo &LCI, - InMemoryStruct<macho::LinkerOptionsLoadCommand> &Res) const { - ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res); -} - -template<> -void SwapStruct(macho::IndirectSymbolTableEntry &Value) { - SwapValue(Value.Index); -} -void -MachOObject::ReadIndirectSymbolTableEntry(const macho::DysymtabLoadCommand &DLC, - unsigned Index, - InMemoryStruct<macho::IndirectSymbolTableEntry> &Res) const { - uint64_t Offset = (DLC.IndirectSymbolTableOffset + - Index * sizeof(macho::IndirectSymbolTableEntry)); - ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res); -} - - -template<> -void SwapStruct(macho::Section &Value) { - SwapValue(Value.Address); - SwapValue(Value.Size); - SwapValue(Value.Offset); - SwapValue(Value.Align); - SwapValue(Value.RelocationTableOffset); - SwapValue(Value.NumRelocationTableEntries); - SwapValue(Value.Flags); - SwapValue(Value.Reserved1); - SwapValue(Value.Reserved2); -} -void MachOObject::ReadSection(const LoadCommandInfo &LCI, - unsigned Index, - InMemoryStruct<macho::Section> &Res) const { - assert(LCI.Command.Type == macho::LCT_Segment && - "Unexpected load command info!"); - uint64_t Offset = (LCI.Offset + sizeof(macho::SegmentLoadCommand) + - Index * sizeof(macho::Section)); - ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res); -} - -template<> -void SwapStruct(macho::Section64 &Value) { - SwapValue(Value.Address); - SwapValue(Value.Size); - SwapValue(Value.Offset); - SwapValue(Value.Align); - SwapValue(Value.RelocationTableOffset); - SwapValue(Value.NumRelocationTableEntries); - SwapValue(Value.Flags); - SwapValue(Value.Reserved1); - SwapValue(Value.Reserved2); - SwapValue(Value.Reserved3); -} -void MachOObject::ReadSection64(const LoadCommandInfo &LCI, - unsigned Index, - InMemoryStruct<macho::Section64> &Res) const { - assert(LCI.Command.Type == macho::LCT_Segment64 && - "Unexpected load command info!"); - uint64_t Offset = (LCI.Offset + sizeof(macho::Segment64LoadCommand) + - Index * sizeof(macho::Section64)); - ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res); -} - -template<> -void SwapStruct(macho::RelocationEntry &Value) { - SwapValue(Value.Word0); - SwapValue(Value.Word1); -} -void MachOObject::ReadRelocationEntry(uint64_t RelocationTableOffset, - unsigned Index, - InMemoryStruct<macho::RelocationEntry> &Res) const { - uint64_t Offset = (RelocationTableOffset + - Index * sizeof(macho::RelocationEntry)); - ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res); -} 
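// Illustrative sketch, not part of the commit: the deleted ReadInMemoryStruct
// readers above and the getStruct<T> helper introduced further down both do
// the same thing -- copy a fixed-size record out of the file buffer at a
// computed offset and byte-swap its fields when the file's endianness differs
// from the host's. A minimal, self-contained version of that pattern follows;
// the Record type and readRecord name are hypothetical stand-ins, and a plain
// std::vector<uint8_t> replaces LLVM's MemoryBuffer.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <vector>

namespace sketch {

struct Record {      // hypothetical fixed-layout on-disk record
  uint32_t Type;
  uint32_t Size;
};

// Portable byte swap for an unsigned integer field.
template <typename T> T byteSwap(T V) {
  T R = 0;
  for (std::size_t I = 0; I < sizeof(T); ++I)
    R = static_cast<T>((R << 8) | ((V >> (8 * I)) & 0xff));
  return R;
}

inline bool hostIsLittleEndian() {
  uint16_t Probe = 1;
  uint8_t FirstByte;
  std::memcpy(&FirstByte, &Probe, 1);
  return FirstByte == 1;
}

// Read one Record at Offset, swapping to host byte order when the file's
// byte order does not match the host's.
inline Record readRecord(const std::vector<uint8_t> &Buf, std::size_t Offset,
                         bool FileIsLittleEndian) {
  if (Offset + sizeof(Record) > Buf.size())
    throw std::runtime_error("record extends past end of buffer");
  Record R;
  std::memcpy(&R, Buf.data() + Offset, sizeof(Record));
  if (FileIsLittleEndian != hostIsLittleEndian()) {
    R.Type = byteSwap(R.Type);
    R.Size = byteSwap(R.Size);
  }
  return R;
}

} // namespace sketch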
- -template<> -void SwapStruct(macho::SymbolTableEntry &Value) { - SwapValue(Value.StringIndex); - SwapValue(Value.Flags); - SwapValue(Value.Value); -} -void MachOObject::ReadSymbolTableEntry(uint64_t SymbolTableOffset, - unsigned Index, - InMemoryStruct<macho::SymbolTableEntry> &Res) const { - uint64_t Offset = (SymbolTableOffset + - Index * sizeof(macho::SymbolTableEntry)); - ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res); -} - -template<> -void SwapStruct(macho::Symbol64TableEntry &Value) { - SwapValue(Value.StringIndex); - SwapValue(Value.Flags); - SwapValue(Value.Value); -} -void MachOObject::ReadSymbol64TableEntry(uint64_t SymbolTableOffset, - unsigned Index, - InMemoryStruct<macho::Symbol64TableEntry> &Res) const { - uint64_t Offset = (SymbolTableOffset + - Index * sizeof(macho::Symbol64TableEntry)); - ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res); -} - -template<> -void SwapStruct(macho::DataInCodeTableEntry &Value) { - SwapValue(Value.Offset); - SwapValue(Value.Length); - SwapValue(Value.Kind); -} -void MachOObject::ReadDataInCodeTableEntry(uint64_t TableOffset, - unsigned Index, - InMemoryStruct<macho::DataInCodeTableEntry> &Res) const { - uint64_t Offset = (TableOffset + - Index * sizeof(macho::DataInCodeTableEntry)); - ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res); -} - -void MachOObject::ReadULEB128s(uint64_t Index, - SmallVectorImpl<uint64_t> &Out) const { - DataExtractor extractor(Buffer->getBuffer(), true, 0); - - uint32_t offset = Index; - uint64_t data = 0; - while (uint64_t delta = extractor.getULEB128(&offset)) { - data += delta; - Out.push_back(data); - } -} - -/* ** */ -// Object Dumping Facilities -void MachOObject::dump() const { print(dbgs()); dbgs() << '\n'; } -void MachOObject::dumpHeader() const { printHeader(dbgs()); dbgs() << '\n'; } - -void MachOObject::printHeader(raw_ostream &O) const { - O << "('cputype', " << Header.CPUType << ")\n"; - O << "('cpusubtype', " << Header.CPUSubtype << ")\n"; - O << "('filetype', " << Header.FileType << ")\n"; - O << "('num_load_commands', " << Header.NumLoadCommands << ")\n"; - O << "('load_commands_size', " << Header.SizeOfLoadCommands << ")\n"; - O << "('flag', " << Header.Flags << ")\n"; - - // Print extended header if 64-bit. 
- if (is64Bit()) - O << "('reserved', " << Header64Ext.Reserved << ")\n"; -} - -void MachOObject::print(raw_ostream &O) const { - O << "Header:\n"; - printHeader(O); - O << "Load Commands:\n"; - - O << "Buffer:\n"; -} diff --git a/lib/Object/MachOObjectFile.cpp b/lib/Object/MachOObjectFile.cpp index 6501df9..dfd8d3d 100644 --- a/lib/Object/MachOObjectFile.cpp +++ b/lib/Object/MachOObjectFile.cpp @@ -15,7 +15,9 @@ #include "llvm/Object/MachO.h" #include "llvm/ADT/Triple.h" #include "llvm/Object/MachOFormat.h" +#include "llvm/Support/DataExtractor.h" #include "llvm/Support/Format.h" +#include "llvm/Support/Host.h" #include "llvm/Support/MemoryBuffer.h" #include <cctype> #include <cstring> @@ -27,236 +29,560 @@ using namespace object; namespace llvm { namespace object { -MachOObjectFile::MachOObjectFile(MemoryBuffer *Object, MachOObject *MOO, - error_code &ec) - : ObjectFile(Binary::ID_MachO, Object, ec), - MachOObj(MOO), - RegisteredStringTable(std::numeric_limits<uint32_t>::max()) { - DataRefImpl DRI; - moveToNextSection(DRI); - uint32_t LoadCommandCount = MachOObj->getHeader().NumLoadCommands; - while (DRI.d.a < LoadCommandCount) { - Sections.push_back(DRI); - DRI.d.b++; - moveToNextSection(DRI); +struct SymbolTableEntryBase { + uint32_t StringIndex; + uint8_t Type; + uint8_t SectionIndex; + uint16_t Flags; +}; + +struct SectionBase { + char Name[16]; + char SegmentName[16]; +}; + +template<typename T> +static void SwapValue(T &Value) { + Value = sys::SwapByteOrder(Value); +} + +template<typename T> +static void SwapStruct(T &Value); + +template<> +void SwapStruct(macho::RelocationEntry &H) { + SwapValue(H.Word0); + SwapValue(H.Word1); +} + +template<> +void SwapStruct(macho::LoadCommand &L) { + SwapValue(L.Type); + SwapValue(L.Size); +} + +template<> +void SwapStruct(SymbolTableEntryBase &S) { + SwapValue(S.StringIndex); + SwapValue(S.Flags); +} + +template<> +void SwapStruct(macho::Section &S) { + SwapValue(S.Address); + SwapValue(S.Size); + SwapValue(S.Offset); + SwapValue(S.Align); + SwapValue(S.RelocationTableOffset); + SwapValue(S.NumRelocationTableEntries); + SwapValue(S.Flags); + SwapValue(S.Reserved1); + SwapValue(S.Reserved2); +} + +template<> +void SwapStruct(macho::Section64 &S) { + SwapValue(S.Address); + SwapValue(S.Size); + SwapValue(S.Offset); + SwapValue(S.Align); + SwapValue(S.RelocationTableOffset); + SwapValue(S.NumRelocationTableEntries); + SwapValue(S.Flags); + SwapValue(S.Reserved1); + SwapValue(S.Reserved2); + SwapValue(S.Reserved3); +} + +template<> +void SwapStruct(macho::SymbolTableEntry &S) { + SwapValue(S.StringIndex); + SwapValue(S.Flags); + SwapValue(S.Value); +} + +template<> +void SwapStruct(macho::Symbol64TableEntry &S) { + SwapValue(S.StringIndex); + SwapValue(S.Flags); + SwapValue(S.Value); +} + +template<> +void SwapStruct(macho::Header &H) { + SwapValue(H.Magic); + SwapValue(H.CPUType); + SwapValue(H.CPUSubtype); + SwapValue(H.FileType); + SwapValue(H.NumLoadCommands); + SwapValue(H.SizeOfLoadCommands); + SwapValue(H.Flags); +} + +template<> +void SwapStruct(macho::Header64Ext &E) { + SwapValue(E.Reserved); +} + +template<> +void SwapStruct(macho::SymtabLoadCommand &C) { + SwapValue(C.Type); + SwapValue(C.Size); + SwapValue(C.SymbolTableOffset); + SwapValue(C.NumSymbolTableEntries); + SwapValue(C.StringTableOffset); + SwapValue(C.StringTableSize); +} + +template<> +void SwapStruct(macho::DysymtabLoadCommand &C) { + SwapValue(C.Type); + SwapValue(C.Size); + SwapValue(C.LocalSymbolsIndex); + SwapValue(C.NumLocalSymbols); + 
SwapValue(C.ExternalSymbolsIndex); + SwapValue(C.NumExternalSymbols); + SwapValue(C.UndefinedSymbolsIndex); + SwapValue(C.NumUndefinedSymbols); + SwapValue(C.TOCOffset); + SwapValue(C.NumTOCEntries); + SwapValue(C.ModuleTableOffset); + SwapValue(C.NumModuleTableEntries); + SwapValue(C.ReferenceSymbolTableOffset); + SwapValue(C.NumReferencedSymbolTableEntries); + SwapValue(C.IndirectSymbolTableOffset); + SwapValue(C.NumIndirectSymbolTableEntries); + SwapValue(C.ExternalRelocationTableOffset); + SwapValue(C.NumExternalRelocationTableEntries); + SwapValue(C.LocalRelocationTableOffset); + SwapValue(C.NumLocalRelocationTableEntries); +} + +template<> +void SwapStruct(macho::LinkeditDataLoadCommand &C) { + SwapValue(C.Type); + SwapValue(C.Size); + SwapValue(C.DataOffset); + SwapValue(C.DataSize); +} + +template<> +void SwapStruct(macho::SegmentLoadCommand &C) { + SwapValue(C.Type); + SwapValue(C.Size); + SwapValue(C.VMAddress); + SwapValue(C.VMSize); + SwapValue(C.FileOffset); + SwapValue(C.FileSize); + SwapValue(C.MaxVMProtection); + SwapValue(C.InitialVMProtection); + SwapValue(C.NumSections); + SwapValue(C.Flags); +} + +template<> +void SwapStruct(macho::Segment64LoadCommand &C) { + SwapValue(C.Type); + SwapValue(C.Size); + SwapValue(C.VMAddress); + SwapValue(C.VMSize); + SwapValue(C.FileOffset); + SwapValue(C.FileSize); + SwapValue(C.MaxVMProtection); + SwapValue(C.InitialVMProtection); + SwapValue(C.NumSections); + SwapValue(C.Flags); +} + +template<> +void SwapStruct(macho::IndirectSymbolTableEntry &C) { + SwapValue(C.Index); +} + +template<> +void SwapStruct(macho::LinkerOptionsLoadCommand &C) { + SwapValue(C.Type); + SwapValue(C.Size); + SwapValue(C.Count); +} + +template<> +void SwapStruct(macho::DataInCodeTableEntry &C) { + SwapValue(C.Offset); + SwapValue(C.Length); + SwapValue(C.Kind); +} + +template<typename T> +T getStruct(const MachOObjectFile *O, const char *P) { + T Cmd; + memcpy(&Cmd, P, sizeof(T)); + if (O->isLittleEndian() != sys::IsLittleEndianHost) + SwapStruct(Cmd); + return Cmd; +} + +static uint32_t +getSegmentLoadCommandNumSections(const MachOObjectFile *O, + const MachOObjectFile::LoadCommandInfo &L) { + if (O->is64Bit()) { + macho::Segment64LoadCommand S = O->getSegment64LoadCommand(L); + return S.NumSections; } + macho::SegmentLoadCommand S = O->getSegmentLoadCommand(L); + return S.NumSections; } +static const char * +getSectionPtr(const MachOObjectFile *O, MachOObjectFile::LoadCommandInfo L, + unsigned Sec) { + uintptr_t CommandAddr = reinterpret_cast<uintptr_t>(L.Ptr); -ObjectFile *ObjectFile::createMachOObjectFile(MemoryBuffer *Buffer) { + bool Is64 = O->is64Bit(); + unsigned SegmentLoadSize = Is64 ? sizeof(macho::Segment64LoadCommand) : + sizeof(macho::SegmentLoadCommand); + unsigned SectionSize = Is64 ? sizeof(macho::Section64) : + sizeof(macho::Section); + + uintptr_t SectionAddr = CommandAddr + SegmentLoadSize + Sec * SectionSize; + return reinterpret_cast<const char*>(SectionAddr); +} + +static const char *getPtr(const MachOObjectFile *O, size_t Offset) { + return O->getData().substr(Offset, 1).data(); +} + +static SymbolTableEntryBase +getSymbolTableEntryBase(const MachOObjectFile *O, DataRefImpl DRI) { + const char *P = reinterpret_cast<const char *>(DRI.p); + return getStruct<SymbolTableEntryBase>(O, P); +} + +static StringRef parseSegmentOrSectionName(const char *P) { + if (P[15] == 0) + // Null terminated. + return P; + // Not null terminated, so this is a 16 char string. 
+ return StringRef(P, 16); +} + +// Helper to advance a section or symbol iterator multiple increments at a time. +template<class T> +static error_code advance(T &it, size_t Val) { error_code ec; - std::string Err; - MachOObject *MachOObj = MachOObject::LoadFromBuffer(Buffer, &Err); - if (!MachOObj) - return NULL; - // MachOObject takes ownership of the Buffer we passed to it, and - // MachOObjectFile does, too, so we need to make sure they don't get the - // same object. A MemoryBuffer is cheap (it's just a reference to memory, - // not a copy of the memory itself), so just make a new copy here for - // the MachOObjectFile. - MemoryBuffer *NewBuffer = - MemoryBuffer::getMemBuffer(Buffer->getBuffer(), - Buffer->getBufferIdentifier(), false); - return new MachOObjectFile(NewBuffer, MachOObj, ec); -} - -/*===-- Symbols -----------------------------------------------------------===*/ - -void MachOObjectFile::moveToNextSymbol(DataRefImpl &DRI) const { - uint32_t LoadCommandCount = MachOObj->getHeader().NumLoadCommands; - while (DRI.d.a < LoadCommandCount) { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - if (LCI.Command.Type == macho::LCT_Symtab) { - InMemoryStruct<macho::SymtabLoadCommand> SymtabLoadCmd; - MachOObj->ReadSymtabLoadCommand(LCI, SymtabLoadCmd); - if (DRI.d.b < SymtabLoadCmd->NumSymbolTableEntries) - return; + while (Val--) { + it.increment(ec); + } + return ec; +} + +template<class T> +static void advanceTo(T &it, size_t Val) { + if (error_code ec = advance(it, Val)) + report_fatal_error(ec.message()); +} + +static unsigned getCPUType(const MachOObjectFile *O) { + return O->getHeader().CPUType; +} + +static void printRelocationTargetName(const MachOObjectFile *O, + const macho::RelocationEntry &RE, + raw_string_ostream &fmt) { + bool IsScattered = O->isRelocationScattered(RE); + + // Target of a scattered relocation is an address. In the interest of + // generating pretty output, scan through the symbol table looking for a + // symbol that aligns with that address. If we find one, print it. + // Otherwise, we just print the hex address of the target. + if (IsScattered) { + uint32_t Val = O->getPlainRelocationSymbolNum(RE); + + error_code ec; + for (symbol_iterator SI = O->begin_symbols(), SE = O->end_symbols(); + SI != SE; SI.increment(ec)) { + if (ec) report_fatal_error(ec.message()); + + uint64_t Addr; + StringRef Name; + + if ((ec = SI->getAddress(Addr))) + report_fatal_error(ec.message()); + if (Addr != Val) continue; + if ((ec = SI->getName(Name))) + report_fatal_error(ec.message()); + fmt << Name; + return; + } + + // If we couldn't find a symbol that this relocation refers to, try + // to find a section beginning instead. 
+ for (section_iterator SI = O->begin_sections(), SE = O->end_sections(); + SI != SE; SI.increment(ec)) { + if (ec) report_fatal_error(ec.message()); + + uint64_t Addr; + StringRef Name; + + if ((ec = SI->getAddress(Addr))) + report_fatal_error(ec.message()); + if (Addr != Val) continue; + if ((ec = SI->getName(Name))) + report_fatal_error(ec.message()); + fmt << Name; + return; } - DRI.d.a++; - DRI.d.b = 0; + fmt << format("0x%x", Val); + return; } -} -void MachOObjectFile::getSymbolTableEntry(DataRefImpl DRI, - InMemoryStruct<macho::SymbolTableEntry> &Res) const { - InMemoryStruct<macho::SymtabLoadCommand> SymtabLoadCmd; - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - MachOObj->ReadSymtabLoadCommand(LCI, SymtabLoadCmd); + StringRef S; + bool isExtern = O->getPlainRelocationExternal(RE); + uint64_t Val = O->getAnyRelocationAddress(RE); - if (RegisteredStringTable != DRI.d.a) { - MachOObj->RegisterStringTable(*SymtabLoadCmd); - RegisteredStringTable = DRI.d.a; + if (isExtern) { + symbol_iterator SI = O->begin_symbols(); + advanceTo(SI, Val); + SI->getName(S); + } else { + section_iterator SI = O->begin_sections(); + advanceTo(SI, Val); + SI->getName(S); } - MachOObj->ReadSymbolTableEntry(SymtabLoadCmd->SymbolTableOffset, DRI.d.b, - Res); + fmt << S; +} + +static uint32_t getPlainRelocationAddress(const macho::RelocationEntry &RE) { + return RE.Word0; +} + +static unsigned +getScatteredRelocationAddress(const macho::RelocationEntry &RE) { + return RE.Word0 & 0xffffff; +} + +static bool getPlainRelocationPCRel(const MachOObjectFile *O, + const macho::RelocationEntry &RE) { + if (O->isLittleEndian()) + return (RE.Word1 >> 24) & 1; + return (RE.Word1 >> 7) & 1; +} + +static bool +getScatteredRelocationPCRel(const MachOObjectFile *O, + const macho::RelocationEntry &RE) { + return (RE.Word0 >> 30) & 1; +} + +static unsigned getPlainRelocationLength(const MachOObjectFile *O, + const macho::RelocationEntry &RE) { + if (O->isLittleEndian()) + return (RE.Word1 >> 25) & 3; + return (RE.Word1 >> 5) & 3; +} + +static unsigned +getScatteredRelocationLength(const macho::RelocationEntry &RE) { + return (RE.Word0 >> 28) & 3; } -void MachOObjectFile::getSymbol64TableEntry(DataRefImpl DRI, - InMemoryStruct<macho::Symbol64TableEntry> &Res) const { - InMemoryStruct<macho::SymtabLoadCommand> SymtabLoadCmd; - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - MachOObj->ReadSymtabLoadCommand(LCI, SymtabLoadCmd); +static unsigned getPlainRelocationType(const MachOObjectFile *O, + const macho::RelocationEntry &RE) { + if (O->isLittleEndian()) + return RE.Word1 >> 28; + return RE.Word1 & 0xf; +} - if (RegisteredStringTable != DRI.d.a) { - MachOObj->RegisterStringTable(*SymtabLoadCmd); - RegisteredStringTable = DRI.d.a; +static unsigned getScatteredRelocationType(const macho::RelocationEntry &RE) { + return (RE.Word0 >> 24) & 0xf; +} + +static uint32_t getSectionFlags(const MachOObjectFile *O, + DataRefImpl Sec) { + if (O->is64Bit()) { + macho::Section64 Sect = O->getSection64(Sec); + return Sect.Flags; } + macho::Section Sect = O->getSection(Sec); + return Sect.Flags; +} + +MachOObjectFile::MachOObjectFile(MemoryBuffer *Object, + bool IsLittleEndian, bool Is64bits, + error_code &ec) + : ObjectFile(getMachOType(IsLittleEndian, Is64bits), Object), + SymtabLoadCmd(NULL), DysymtabLoadCmd(NULL) { + uint32_t LoadCommandCount = this->getHeader().NumLoadCommands; + macho::LoadCommandType SegmentLoadType = is64Bit() ? 
+ macho::LCT_Segment64 : macho::LCT_Segment; + + MachOObjectFile::LoadCommandInfo Load = getFirstLoadCommandInfo(); + for (unsigned I = 0; ; ++I) { + if (Load.C.Type == macho::LCT_Symtab) { + assert(!SymtabLoadCmd && "Multiple symbol tables"); + SymtabLoadCmd = Load.Ptr; + } else if (Load.C.Type == macho::LCT_Dysymtab) { + assert(!DysymtabLoadCmd && "Multiple dynamic symbol tables"); + DysymtabLoadCmd = Load.Ptr; + } else if (Load.C.Type == SegmentLoadType) { + uint32_t NumSections = getSegmentLoadCommandNumSections(this, Load); + for (unsigned J = 0; J < NumSections; ++J) { + const char *Sec = getSectionPtr(this, Load, J); + Sections.push_back(Sec); + } + } - MachOObj->ReadSymbol64TableEntry(SymtabLoadCmd->SymbolTableOffset, DRI.d.b, - Res); + if (I == LoadCommandCount - 1) + break; + else + Load = getNextLoadCommandInfo(Load); + } } +error_code MachOObjectFile::getSymbolNext(DataRefImpl Symb, + SymbolRef &Res) const { + unsigned SymbolTableEntrySize = is64Bit() ? + sizeof(macho::Symbol64TableEntry) : + sizeof(macho::SymbolTableEntry); + Symb.p += SymbolTableEntrySize; + Res = SymbolRef(Symb, this); + return object_error::success; +} -error_code MachOObjectFile::getSymbolNext(DataRefImpl DRI, - SymbolRef &Result) const { - DRI.d.b++; - moveToNextSymbol(DRI); - Result = SymbolRef(DRI, this); +error_code MachOObjectFile::getSymbolName(DataRefImpl Symb, + StringRef &Res) const { + StringRef StringTable = getStringTableData(); + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, Symb); + const char *Start = &StringTable.data()[Entry.StringIndex]; + Res = StringRef(Start); return object_error::success; } -error_code MachOObjectFile::getSymbolName(DataRefImpl DRI, - StringRef &Result) const { - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(DRI, Entry); - Result = MachOObj->getStringAtIndex(Entry->StringIndex); +error_code MachOObjectFile::getSymbolAddress(DataRefImpl Symb, + uint64_t &Res) const { + if (is64Bit()) { + macho::Symbol64TableEntry Entry = getSymbol64TableEntry(Symb); + Res = Entry.Value; } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(DRI, Entry); - Result = MachOObj->getStringAtIndex(Entry->StringIndex); + macho::SymbolTableEntry Entry = getSymbolTableEntry(Symb); + Res = Entry.Value; } return object_error::success; } -error_code MachOObjectFile::getSymbolFileOffset(DataRefImpl DRI, - uint64_t &Result) const { - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(DRI, Entry); - Result = Entry->Value; - if (Entry->SectionIndex) { - InMemoryStruct<macho::Section64> Section; - getSection64(Sections[Entry->SectionIndex-1], Section); - Result += Section->Offset - Section->Address; - } - } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(DRI, Entry); - Result = Entry->Value; - if (Entry->SectionIndex) { - InMemoryStruct<macho::Section> Section; - getSection(Sections[Entry->SectionIndex-1], Section); - Result += Section->Offset - Section->Address; +error_code +MachOObjectFile::getSymbolFileOffset(DataRefImpl Symb, + uint64_t &Res) const { + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, Symb); + getSymbolAddress(Symb, Res); + if (Entry.SectionIndex) { + uint64_t Delta; + DataRefImpl SecRel; + SecRel.d.a = Entry.SectionIndex-1; + if (is64Bit()) { + macho::Section64 Sec = getSection64(SecRel); + Delta = Sec.Offset - Sec.Address; + } else { + macho::Section Sec = getSection(SecRel); + Delta = Sec.Offset - 
Sec.Address; } + + Res += Delta; } return object_error::success; } -error_code MachOObjectFile::getSymbolAddress(DataRefImpl DRI, - uint64_t &Result) const { - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(DRI, Entry); - Result = Entry->Value; +error_code MachOObjectFile::getSymbolAlignment(DataRefImpl DRI, + uint32_t &Result) const { + uint32_t flags; + this->getSymbolFlags(DRI, flags); + if (flags & SymbolRef::SF_Common) { + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, DRI); + Result = 1 << MachO::GET_COMM_ALIGN(Entry.Flags); } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(DRI, Entry); - Result = Entry->Value; + Result = 0; } return object_error::success; } error_code MachOObjectFile::getSymbolSize(DataRefImpl DRI, uint64_t &Result) const { - uint32_t LoadCommandCount = MachOObj->getHeader().NumLoadCommands; uint64_t BeginOffset; uint64_t EndOffset = 0; uint8_t SectionIndex; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(DRI, Entry); - BeginOffset = Entry->Value; - SectionIndex = Entry->SectionIndex; - if (!SectionIndex) { - uint32_t flags = SymbolRef::SF_None; - getSymbolFlags(DRI, flags); - if (flags & SymbolRef::SF_Common) - Result = Entry->Value; - else - Result = UnknownAddressOrSize; - return object_error::success; - } - // Unfortunately symbols are unsorted so we need to touch all - // symbols from load command - DRI.d.b = 0; - uint32_t Command = DRI.d.a; - while (Command == DRI.d.a) { - moveToNextSymbol(DRI); - if (DRI.d.a < LoadCommandCount) { - getSymbol64TableEntry(DRI, Entry); - if (Entry->SectionIndex == SectionIndex && Entry->Value > BeginOffset) - if (!EndOffset || Entry->Value < EndOffset) - EndOffset = Entry->Value; - } - DRI.d.b++; - } - } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(DRI, Entry); - BeginOffset = Entry->Value; - SectionIndex = Entry->SectionIndex; - if (!SectionIndex) { - uint32_t flags = SymbolRef::SF_None; - getSymbolFlags(DRI, flags); - if (flags & SymbolRef::SF_Common) - Result = Entry->Value; - else - Result = UnknownAddressOrSize; - return object_error::success; - } - // Unfortunately symbols are unsorted so we need to touch all - // symbols from load command - DRI.d.b = 0; - uint32_t Command = DRI.d.a; - while (Command == DRI.d.a) { - moveToNextSymbol(DRI); - if (DRI.d.a < LoadCommandCount) { - getSymbolTableEntry(DRI, Entry); - if (Entry->SectionIndex == SectionIndex && Entry->Value > BeginOffset) - if (!EndOffset || Entry->Value < EndOffset) - EndOffset = Entry->Value; - } - DRI.d.b++; - } + + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, DRI); + uint64_t Value; + getSymbolAddress(DRI, Value); + + BeginOffset = Value; + + SectionIndex = Entry.SectionIndex; + if (!SectionIndex) { + uint32_t flags = SymbolRef::SF_None; + this->getSymbolFlags(DRI, flags); + if (flags & SymbolRef::SF_Common) + Result = Value; + else + Result = UnknownAddressOrSize; + return object_error::success; + } + // Unfortunately symbols are unsorted so we need to touch all + // symbols from load command + error_code ec; + for (symbol_iterator I = begin_symbols(), E = end_symbols(); I != E; + I.increment(ec)) { + DataRefImpl DRI = I->getRawDataRefImpl(); + Entry = getSymbolTableEntryBase(this, DRI); + getSymbolAddress(DRI, Value); + if (Entry.SectionIndex == SectionIndex && Value > BeginOffset) + if (!EndOffset || Value < EndOffset) + EndOffset = Value; } if (!EndOffset) { 
uint64_t Size; - getSectionSize(Sections[SectionIndex-1], Size); - getSectionAddress(Sections[SectionIndex-1], EndOffset); + DataRefImpl Sec; + Sec.d.a = SectionIndex-1; + getSectionSize(Sec, Size); + getSectionAddress(Sec, EndOffset); EndOffset += Size; } Result = EndOffset - BeginOffset; return object_error::success; } -error_code MachOObjectFile::getSymbolNMTypeChar(DataRefImpl DRI, - char &Result) const { - uint8_t Type, Flags; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(DRI, Entry); - Type = Entry->Type; - Flags = Entry->Flags; - } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(DRI, Entry); - Type = Entry->Type; - Flags = Entry->Flags; +error_code MachOObjectFile::getSymbolType(DataRefImpl Symb, + SymbolRef::Type &Res) const { + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, Symb); + uint8_t n_type = Entry.Type; + + Res = SymbolRef::ST_Other; + + // If this is a STAB debugging symbol, we can do nothing more. + if (n_type & MachO::NlistMaskStab) { + Res = SymbolRef::ST_Debug; + return object_error::success; + } + + switch (n_type & MachO::NlistMaskType) { + case MachO::NListTypeUndefined : + Res = SymbolRef::ST_Unknown; + break; + case MachO::NListTypeSection : + Res = SymbolRef::ST_Function; + break; } + return object_error::success; +} + +error_code MachOObjectFile::getSymbolNMTypeChar(DataRefImpl Symb, + char &Res) const { + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, Symb); + uint8_t Type = Entry.Type; + uint16_t Flags = Entry.Flags; char Char; switch (Type & macho::STF_TypeMask) { @@ -274,25 +600,16 @@ error_code MachOObjectFile::getSymbolNMTypeChar(DataRefImpl DRI, if (Flags & (macho::STF_External | macho::STF_PrivateExtern)) Char = toupper(static_cast<unsigned char>(Char)); - Result = Char; + Res = Char; return object_error::success; } error_code MachOObjectFile::getSymbolFlags(DataRefImpl DRI, uint32_t &Result) const { - uint16_t MachOFlags; - uint8_t MachOType; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(DRI, Entry); - MachOFlags = Entry->Flags; - MachOType = Entry->Type; - } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(DRI, Entry); - MachOFlags = Entry->Flags; - MachOType = Entry->Type; - } + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, DRI); + + uint8_t MachOType = Entry.Type; + uint16_t MachOFlags = Entry.Flags; // TODO: Correctly set SF_ThreadLocal Result = SymbolRef::SF_None; @@ -305,8 +622,12 @@ error_code MachOObjectFile::getSymbolFlags(DataRefImpl DRI, if (MachOType & MachO::NlistMaskExternal) { Result |= SymbolRef::SF_Global; - if ((MachOType & MachO::NlistMaskType) == MachO::NListTypeUndefined) - Result |= SymbolRef::SF_Common; + if ((MachOType & MachO::NlistMaskType) == MachO::NListTypeUndefined) { + uint64_t Value; + getSymbolAddress(DRI, Value); + if (Value) + Result |= SymbolRef::SF_Common; + } } if (MachOFlags & (MachO::NListDescWeakRef | MachO::NListDescWeakDef)) @@ -318,55 +639,20 @@ error_code MachOObjectFile::getSymbolFlags(DataRefImpl DRI, return object_error::success; } -error_code MachOObjectFile::getSymbolSection(DataRefImpl Symb, - section_iterator &Res) const { - uint8_t index; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(Symb, Entry); - index = Entry->SectionIndex; - } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(Symb, Entry); - index = 
Entry->SectionIndex; - } +error_code +MachOObjectFile::getSymbolSection(DataRefImpl Symb, + section_iterator &Res) const { + SymbolTableEntryBase Entry = getSymbolTableEntryBase(this, Symb); + uint8_t index = Entry.SectionIndex; - if (index == 0) + if (index == 0) { Res = end_sections(); - else - Res = section_iterator(SectionRef(Sections[index-1], this)); - - return object_error::success; -} - -error_code MachOObjectFile::getSymbolType(DataRefImpl Symb, - SymbolRef::Type &Res) const { - uint8_t n_type; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(Symb, Entry); - n_type = Entry->Type; } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(Symb, Entry); - n_type = Entry->Type; - } - Res = SymbolRef::ST_Other; - - // If this is a STAB debugging symbol, we can do nothing more. - if (n_type & MachO::NlistMaskStab) { - Res = SymbolRef::ST_Debug; - return object_error::success; + DataRefImpl DRI; + DRI.d.a = index - 1; + Res = section_iterator(SectionRef(DRI, this)); } - switch (n_type & MachO::NlistMaskType) { - case MachO::NListTypeUndefined : - Res = SymbolRef::ST_Unknown; - break; - case MachO::NListTypeSection : - Res = SymbolRef::ST_Function; - break; - } return object_error::success; } @@ -375,242 +661,101 @@ error_code MachOObjectFile::getSymbolValue(DataRefImpl Symb, report_fatal_error("getSymbolValue unimplemented in MachOObjectFile"); } -symbol_iterator MachOObjectFile::begin_symbols() const { - // DRI.d.a = segment number; DRI.d.b = symbol index. - DataRefImpl DRI; - moveToNextSymbol(DRI); - return symbol_iterator(SymbolRef(DRI, this)); -} - -symbol_iterator MachOObjectFile::end_symbols() const { - DataRefImpl DRI; - DRI.d.a = MachOObj->getHeader().NumLoadCommands; - return symbol_iterator(SymbolRef(DRI, this)); -} - -symbol_iterator MachOObjectFile::begin_dynamic_symbols() const { - // TODO: implement - report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile"); -} - -symbol_iterator MachOObjectFile::end_dynamic_symbols() const { - // TODO: implement - report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile"); -} - -library_iterator MachOObjectFile::begin_libraries_needed() const { - // TODO: implement - report_fatal_error("Needed libraries unimplemented in MachOObjectFile"); -} - -library_iterator MachOObjectFile::end_libraries_needed() const { - // TODO: implement - report_fatal_error("Needed libraries unimplemented in MachOObjectFile"); -} - -StringRef MachOObjectFile::getLoadName() const { - // TODO: Implement - report_fatal_error("get_load_name() unimplemented in MachOObjectFile"); -} - -/*===-- Sections ----------------------------------------------------------===*/ - -void MachOObjectFile::moveToNextSection(DataRefImpl &DRI) const { - uint32_t LoadCommandCount = MachOObj->getHeader().NumLoadCommands; - while (DRI.d.a < LoadCommandCount) { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - if (LCI.Command.Type == macho::LCT_Segment) { - InMemoryStruct<macho::SegmentLoadCommand> SegmentLoadCmd; - MachOObj->ReadSegmentLoadCommand(LCI, SegmentLoadCmd); - if (DRI.d.b < SegmentLoadCmd->NumSections) - return; - } else if (LCI.Command.Type == macho::LCT_Segment64) { - InMemoryStruct<macho::Segment64LoadCommand> Segment64LoadCmd; - MachOObj->ReadSegment64LoadCommand(LCI, Segment64LoadCmd); - if (DRI.d.b < Segment64LoadCmd->NumSections) - return; - } - - DRI.d.a++; - DRI.d.b = 0; - } -} - -error_code MachOObjectFile::getSectionNext(DataRefImpl DRI, - SectionRef 
&Result) const { - DRI.d.b++; - moveToNextSection(DRI); - Result = SectionRef(DRI, this); +error_code MachOObjectFile::getSectionNext(DataRefImpl Sec, + SectionRef &Res) const { + Sec.d.a++; + Res = SectionRef(Sec, this); return object_error::success; } -void -MachOObjectFile::getSection(DataRefImpl DRI, - InMemoryStruct<macho::Section> &Res) const { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - MachOObj->ReadSection(LCI, DRI.d.b, Res); -} - -std::size_t MachOObjectFile::getSectionIndex(DataRefImpl Sec) const { - SectionList::const_iterator loc = - std::find(Sections.begin(), Sections.end(), Sec); - assert(loc != Sections.end() && "Sec is not a valid section!"); - return std::distance(Sections.begin(), loc); -} - -void -MachOObjectFile::getSection64(DataRefImpl DRI, - InMemoryStruct<macho::Section64> &Res) const { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - MachOObj->ReadSection64(LCI, DRI.d.b, Res); -} - -static bool is64BitLoadCommand(const MachOObject *MachOObj, DataRefImpl DRI) { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - if (LCI.Command.Type == macho::LCT_Segment64) - return true; - assert(LCI.Command.Type == macho::LCT_Segment && "Unexpected Type."); - return false; -} - -static StringRef parseSegmentOrSectionName(const char *P) { - if (P[15] == 0) - // Null terminated. - return P; - // Not null terminated, so this is a 16 char string. - return StringRef(P, 16); -} - -error_code MachOObjectFile::getSectionName(DataRefImpl DRI, - StringRef &Result) const { - if (is64BitLoadCommand(MachOObj.get(), DRI)) { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - unsigned SectionOffset = LCI.Offset + sizeof(macho::Segment64LoadCommand) + - DRI.d.b * sizeof(macho::Section64); - StringRef Data = MachOObj->getData(SectionOffset, sizeof(macho::Section64)); - const macho::Section64 *sec = - reinterpret_cast<const macho::Section64*>(Data.data()); - Result = parseSegmentOrSectionName(sec->Name); - } else { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a); - unsigned SectionOffset = LCI.Offset + sizeof(macho::SegmentLoadCommand) + - DRI.d.b * sizeof(macho::Section); - StringRef Data = MachOObj->getData(SectionOffset, sizeof(macho::Section)); - const macho::Section *sec = - reinterpret_cast<const macho::Section*>(Data.data()); - Result = parseSegmentOrSectionName(sec->Name); - } +error_code +MachOObjectFile::getSectionName(DataRefImpl Sec, StringRef &Result) const { + ArrayRef<char> Raw = getSectionRawName(Sec); + Result = parseSegmentOrSectionName(Raw.data()); return object_error::success; } -error_code MachOObjectFile::getSectionFinalSegmentName(DataRefImpl Sec, - StringRef &Res) const { - if (is64BitLoadCommand(MachOObj.get(), Sec)) { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(Sec.d.a); - unsigned SectionOffset = LCI.Offset + sizeof(macho::Segment64LoadCommand) + - Sec.d.b * sizeof(macho::Section64); - StringRef Data = MachOObj->getData(SectionOffset, sizeof(macho::Section64)); - const macho::Section64 *sec = - reinterpret_cast<const macho::Section64*>(Data.data()); - Res = parseSegmentOrSectionName(sec->SegmentName); +error_code +MachOObjectFile::getSectionAddress(DataRefImpl Sec, uint64_t &Res) const { + if (is64Bit()) { + macho::Section64 Sect = getSection64(Sec); + Res = Sect.Address; } else { - LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(Sec.d.a); - unsigned SectionOffset = LCI.Offset + sizeof(macho::SegmentLoadCommand) + - Sec.d.b * sizeof(macho::Section); - StringRef Data = 
MachOObj->getData(SectionOffset, sizeof(macho::Section)); - const macho::Section *sec = - reinterpret_cast<const macho::Section*>(Data.data()); - Res = parseSegmentOrSectionName(sec->SegmentName); + macho::Section Sect = getSection(Sec); + Res = Sect.Address; } return object_error::success; } -error_code MachOObjectFile::getSectionAddress(DataRefImpl DRI, - uint64_t &Result) const { - if (is64BitLoadCommand(MachOObj.get(), DRI)) { - InMemoryStruct<macho::Section64> Sect; - getSection64(DRI, Sect); - Result = Sect->Address; +error_code +MachOObjectFile::getSectionSize(DataRefImpl Sec, uint64_t &Res) const { + if (is64Bit()) { + macho::Section64 Sect = getSection64(Sec); + Res = Sect.Size; } else { - InMemoryStruct<macho::Section> Sect; - getSection(DRI, Sect); - Result = Sect->Address; + macho::Section Sect = getSection(Sec); + Res = Sect.Size; } - return object_error::success; -} -error_code MachOObjectFile::getSectionSize(DataRefImpl DRI, - uint64_t &Result) const { - if (is64BitLoadCommand(MachOObj.get(), DRI)) { - InMemoryStruct<macho::Section64> Sect; - getSection64(DRI, Sect); - Result = Sect->Size; - } else { - InMemoryStruct<macho::Section> Sect; - getSection(DRI, Sect); - Result = Sect->Size; - } return object_error::success; } -error_code MachOObjectFile::getSectionContents(DataRefImpl DRI, - StringRef &Result) const { - if (is64BitLoadCommand(MachOObj.get(), DRI)) { - InMemoryStruct<macho::Section64> Sect; - getSection64(DRI, Sect); - Result = MachOObj->getData(Sect->Offset, Sect->Size); +error_code +MachOObjectFile::getSectionContents(DataRefImpl Sec, StringRef &Res) const { + uint32_t Offset; + uint64_t Size; + + if (is64Bit()) { + macho::Section64 Sect = getSection64(Sec); + Offset = Sect.Offset; + Size = Sect.Size; } else { - InMemoryStruct<macho::Section> Sect; - getSection(DRI, Sect); - Result = MachOObj->getData(Sect->Offset, Sect->Size); + macho::Section Sect =getSection(Sec); + Offset = Sect.Offset; + Size = Sect.Size; } + + Res = this->getData().substr(Offset, Size); return object_error::success; } -error_code MachOObjectFile::getSectionAlignment(DataRefImpl DRI, - uint64_t &Result) const { - if (is64BitLoadCommand(MachOObj.get(), DRI)) { - InMemoryStruct<macho::Section64> Sect; - getSection64(DRI, Sect); - Result = uint64_t(1) << Sect->Align; +error_code +MachOObjectFile::getSectionAlignment(DataRefImpl Sec, uint64_t &Res) const { + uint32_t Align; + if (is64Bit()) { + macho::Section64 Sect = getSection64(Sec); + Align = Sect.Align; } else { - InMemoryStruct<macho::Section> Sect; - getSection(DRI, Sect); - Result = uint64_t(1) << Sect->Align; + macho::Section Sect = getSection(Sec); + Align = Sect.Align; } + + Res = uint64_t(1) << Align; return object_error::success; } -error_code MachOObjectFile::isSectionText(DataRefImpl DRI, - bool &Result) const { - if (is64BitLoadCommand(MachOObj.get(), DRI)) { - InMemoryStruct<macho::Section64> Sect; - getSection64(DRI, Sect); - Result = Sect->Flags & macho::SF_PureInstructions; - } else { - InMemoryStruct<macho::Section> Sect; - getSection(DRI, Sect); - Result = Sect->Flags & macho::SF_PureInstructions; - } +error_code +MachOObjectFile::isSectionText(DataRefImpl Sec, bool &Res) const { + uint32_t Flags = getSectionFlags(this, Sec); + Res = Flags & macho::SF_PureInstructions; return object_error::success; } -error_code MachOObjectFile::isSectionData(DataRefImpl DRI, - bool &Result) const { +error_code MachOObjectFile::isSectionData(DataRefImpl DRI, bool &Result) const { // FIXME: Unimplemented. 
Result = false; return object_error::success; } -error_code MachOObjectFile::isSectionBSS(DataRefImpl DRI, - bool &Result) const { +error_code MachOObjectFile::isSectionBSS(DataRefImpl DRI, bool &Result) const { // FIXME: Unimplemented. Result = false; return object_error::success; } -error_code MachOObjectFile::isSectionRequiredForExecution(DataRefImpl Sec, - bool &Result) const { +error_code +MachOObjectFile::isSectionRequiredForExecution(DataRefImpl Sec, + bool &Result) const { // FIXME: Unimplemented. Result = true; return object_error::success; @@ -623,22 +768,12 @@ error_code MachOObjectFile::isSectionVirtual(DataRefImpl Sec, return object_error::success; } -error_code MachOObjectFile::isSectionZeroInit(DataRefImpl DRI, - bool &Result) const { - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Section64> Sect; - getSection64(DRI, Sect); - unsigned SectionType = Sect->Flags & MachO::SectionFlagMaskSectionType; - Result = (SectionType == MachO::SectionTypeZeroFill || - SectionType == MachO::SectionTypeZeroFillLarge); - } else { - InMemoryStruct<macho::Section> Sect; - getSection(DRI, Sect); - unsigned SectionType = Sect->Flags & MachO::SectionFlagMaskSectionType; - Result = (SectionType == MachO::SectionTypeZeroFill || - SectionType == MachO::SectionTypeZeroFillLarge); - } - +error_code +MachOObjectFile::isSectionZeroInit(DataRefImpl Sec, bool &Res) const { + uint32_t Flags = getSectionFlags(this, Sec); + unsigned SectionType = Flags & MachO::SectionFlagMaskSectionType; + Res = SectionType == MachO::SectionTypeZeroFill || + SectionType == MachO::SectionTypeZeroFillLarge; return object_error::success; } @@ -653,11 +788,11 @@ error_code MachOObjectFile::isSectionReadOnlyData(DataRefImpl Sec, return object_error::success; } -error_code MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec, - DataRefImpl Symb, - bool &Result) const { +error_code +MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb, + bool &Result) const { SymbolRef::Type ST; - getSymbolType(Symb, ST); + this->getSymbolType(Symb, ST); if (ST == SymbolRef::ST_Unknown) { Result = false; return object_error::success; @@ -668,164 +803,107 @@ error_code MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec, getSectionSize(Sec, SectEnd); SectEnd += SectBegin; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Symbol64TableEntry> Entry; - getSymbol64TableEntry(Symb, Entry); - uint64_t SymAddr= Entry->Value; - Result = (SymAddr >= SectBegin) && (SymAddr < SectEnd); - } else { - InMemoryStruct<macho::SymbolTableEntry> Entry; - getSymbolTableEntry(Symb, Entry); - uint64_t SymAddr= Entry->Value; - Result = (SymAddr >= SectBegin) && (SymAddr < SectEnd); - } + uint64_t SymAddr; + getSymbolAddress(Symb, SymAddr); + Result = (SymAddr >= SectBegin) && (SymAddr < SectEnd); return object_error::success; } relocation_iterator MachOObjectFile::getSectionRelBegin(DataRefImpl Sec) const { - DataRefImpl ret; - ret.d.b = getSectionIndex(Sec); - return relocation_iterator(RelocationRef(ret, this)); -} -relocation_iterator MachOObjectFile::getSectionRelEnd(DataRefImpl Sec) const { - uint32_t last_reloc; - if (is64BitLoadCommand(MachOObj.get(), Sec)) { - InMemoryStruct<macho::Section64> Sect; - getSection64(Sec, Sect); - last_reloc = Sect->NumRelocationTableEntries; + uint32_t Offset; + if (is64Bit()) { + macho::Section64 Sect = getSection64(Sec); + Offset = Sect.RelocationTableOffset; } else { - InMemoryStruct<macho::Section> Sect; - getSection(Sec, Sect); - last_reloc = Sect->NumRelocationTableEntries; + macho::Section 
Sect = getSection(Sec); + Offset = Sect.RelocationTableOffset; } - DataRefImpl ret; - ret.d.a = last_reloc; - ret.d.b = getSectionIndex(Sec); - return relocation_iterator(RelocationRef(ret, this)); -} - -section_iterator MachOObjectFile::begin_sections() const { - DataRefImpl DRI; - moveToNextSection(DRI); - return section_iterator(SectionRef(DRI, this)); -} -section_iterator MachOObjectFile::end_sections() const { - DataRefImpl DRI; - DRI.d.a = MachOObj->getHeader().NumLoadCommands; - return section_iterator(SectionRef(DRI, this)); + DataRefImpl Ret; + Ret.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset)); + return relocation_iterator(RelocationRef(Ret, this)); } -/*===-- Relocations -------------------------------------------------------===*/ - -void MachOObjectFile:: -getRelocation(DataRefImpl Rel, - InMemoryStruct<macho::RelocationEntry> &Res) const { - uint32_t relOffset; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Section64> Sect; - getSection64(Sections[Rel.d.b], Sect); - relOffset = Sect->RelocationTableOffset; +relocation_iterator +MachOObjectFile::getSectionRelEnd(DataRefImpl Sec) const { + uint32_t Offset; + uint32_t Num; + if (is64Bit()) { + macho::Section64 Sect = getSection64(Sec); + Offset = Sect.RelocationTableOffset; + Num = Sect.NumRelocationTableEntries; } else { - InMemoryStruct<macho::Section> Sect; - getSection(Sections[Rel.d.b], Sect); - relOffset = Sect->RelocationTableOffset; + macho::Section Sect = getSection(Sec); + Offset = Sect.RelocationTableOffset; + Num = Sect.NumRelocationTableEntries; } - MachOObj->ReadRelocationEntry(relOffset, Rel.d.a, Res); + + const macho::RelocationEntry *P = + reinterpret_cast<const macho::RelocationEntry*>(getPtr(this, Offset)); + + DataRefImpl Ret; + Ret.p = reinterpret_cast<uintptr_t>(P + Num); + return relocation_iterator(RelocationRef(Ret, this)); } + error_code MachOObjectFile::getRelocationNext(DataRefImpl Rel, RelocationRef &Res) const { - ++Rel.d.a; + const macho::RelocationEntry *P = + reinterpret_cast<const macho::RelocationEntry *>(Rel.p); + Rel.p = reinterpret_cast<uintptr_t>(P + 1); Res = RelocationRef(Rel, this); return object_error::success; } -error_code MachOObjectFile::getRelocationAddress(DataRefImpl Rel, - uint64_t &Res) const { - const uint8_t* sectAddress = 0; - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Section64> Sect; - getSection64(Sections[Rel.d.b], Sect); - sectAddress += Sect->Address; - } else { - InMemoryStruct<macho::Section> Sect; - getSection(Sections[Rel.d.b], Sect); - sectAddress += Sect->Address; - } - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); - - unsigned Arch = getArch(); - bool isScattered = (Arch != Triple::x86_64) && - (RE->Word0 & macho::RF_Scattered); - uint64_t RelAddr = 0; - if (isScattered) - RelAddr = RE->Word0 & 0xFFFFFF; - else - RelAddr = RE->Word0; - Res = reinterpret_cast<uintptr_t>(sectAddress + RelAddr); - return object_error::success; +error_code +MachOObjectFile::getRelocationAddress(DataRefImpl Rel, uint64_t &Res) const { + report_fatal_error("getRelocationAddress not implemented in MachOObjectFile"); } + error_code MachOObjectFile::getRelocationOffset(DataRefImpl Rel, uint64_t &Res) const { - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); - - unsigned Arch = getArch(); - bool isScattered = (Arch != Triple::x86_64) && - (RE->Word0 & macho::RF_Scattered); - if (isScattered) - Res = RE->Word0 & 0xFFFFFF; - else - Res = RE->Word0; + macho::RelocationEntry RE = getRelocation(Rel); + Res = 
getAnyRelocationAddress(RE); return object_error::success; } -error_code MachOObjectFile::getRelocationSymbol(DataRefImpl Rel, - SymbolRef &Res) const { - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); - uint32_t SymbolIdx = RE->Word1 & 0xffffff; - bool isExtern = (RE->Word1 >> 27) & 1; - DataRefImpl Sym; - moveToNextSymbol(Sym); - if (isExtern) { - for (unsigned i = 0; i < SymbolIdx; i++) { - Sym.d.b++; - moveToNextSymbol(Sym); - assert(Sym.d.a < MachOObj->getHeader().NumLoadCommands && - "Relocation symbol index out of range!"); - } +error_code +MachOObjectFile::getRelocationSymbol(DataRefImpl Rel, SymbolRef &Res) const { + macho::RelocationEntry RE = getRelocation(Rel); + uint32_t SymbolIdx = getPlainRelocationSymbolNum(RE); + bool isExtern = getPlainRelocationExternal(RE); + if (!isExtern) { + Res = *end_symbols(); + return object_error::success; } + + macho::SymtabLoadCommand S = getSymtabLoadCommand(); + unsigned SymbolTableEntrySize = is64Bit() ? + sizeof(macho::Symbol64TableEntry) : + sizeof(macho::SymbolTableEntry); + uint64_t Offset = S.SymbolTableOffset + SymbolIdx * SymbolTableEntrySize; + DataRefImpl Sym; + Sym.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset)); Res = SymbolRef(Sym, this); return object_error::success; } + error_code MachOObjectFile::getRelocationType(DataRefImpl Rel, uint64_t &Res) const { - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); - Res = RE->Word0; - Res <<= 32; - Res |= RE->Word1; + macho::RelocationEntry RE = getRelocation(Rel); + Res = getAnyRelocationType(RE); return object_error::success; } -error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel, - SmallVectorImpl<char> &Result) const { - // TODO: Support scattered relocations. - StringRef res; - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); - unsigned Arch = getArch(); - bool isScattered = (Arch != Triple::x86_64) && - (RE->Word0 & macho::RF_Scattered); +error_code +MachOObjectFile::getRelocationTypeName(DataRefImpl Rel, + SmallVectorImpl<char> &Result) const { + StringRef res; + uint64_t RType; + getRelocationType(Rel, RType); - unsigned r_type; - if (isScattered) - r_type = (RE->Word0 >> 24) & 0xF; - else - r_type = (RE->Word1 >> 28) & 0xF; + unsigned Arch = this->getArch(); switch (Arch) { case Triple::x86: { @@ -837,10 +915,10 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel, "GENERIC_RELOC_LOCAL_SECTDIFF", "GENERIC_RELOC_TLV" }; - if (r_type > 6) + if (RType > 6) res = "Unknown"; else - res = Table[r_type]; + res = Table[RType]; break; } case Triple::x86_64: { @@ -856,10 +934,10 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel, "X86_64_RELOC_SIGNED_4", "X86_64_RELOC_TLV" }; - if (r_type > 9) + if (RType > 9) res = "Unknown"; else - res = Table[r_type]; + res = Table[RType]; break; } case Triple::arm: { @@ -875,10 +953,10 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel, "ARM_RELOC_HALF", "ARM_RELOC_HALF_SECTDIFF" }; - if (r_type > 9) + if (RType > 9) res = "Unknown"; else - res = Table[r_type]; + res = Table[RType]; break; } case Triple::ppc: { @@ -900,7 +978,7 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel, "PPC_RELOC_LO14_SECTDIFF", "PPC_RELOC_LOCAL_SECTDIFF" }; - res = Table[r_type]; + res = Table[RType]; break; } case Triple::UnknownArch: @@ -910,193 +988,79 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel, Result.append(res.begin(), res.end()); return object_error::success; } + error_code 
MachOObjectFile::getRelocationAdditionalInfo(DataRefImpl Rel, int64_t &Res) const { - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); - bool isExtern = (RE->Word1 >> 27) & 1; Res = 0; - if (!isExtern) { - const uint8_t* sectAddress = base(); - if (MachOObj->is64Bit()) { - InMemoryStruct<macho::Section64> Sect; - getSection64(Sections[Rel.d.b], Sect); - sectAddress += Sect->Offset; - } else { - InMemoryStruct<macho::Section> Sect; - getSection(Sections[Rel.d.b], Sect); - sectAddress += Sect->Offset; - } - Res = reinterpret_cast<uintptr_t>(sectAddress); - } return object_error::success; } -// Helper to advance a section or symbol iterator multiple increments at a time. -template<class T> -error_code advance(T &it, size_t Val) { - error_code ec; - while (Val--) { - it.increment(ec); - } - return ec; -} - -template<class T> -void advanceTo(T &it, size_t Val) { - if (error_code ec = advance(it, Val)) - report_fatal_error(ec.message()); -} - -void MachOObjectFile::printRelocationTargetName( - InMemoryStruct<macho::RelocationEntry>& RE, - raw_string_ostream &fmt) const { - unsigned Arch = getArch(); - bool isScattered = (Arch != Triple::x86_64) && - (RE->Word0 & macho::RF_Scattered); - - // Target of a scattered relocation is an address. In the interest of - // generating pretty output, scan through the symbol table looking for a - // symbol that aligns with that address. If we find one, print it. - // Otherwise, we just print the hex address of the target. - if (isScattered) { - uint32_t Val = RE->Word1; - - error_code ec; - for (symbol_iterator SI = begin_symbols(), SE = end_symbols(); SI != SE; - SI.increment(ec)) { - if (ec) report_fatal_error(ec.message()); - - uint64_t Addr; - StringRef Name; - - if ((ec = SI->getAddress(Addr))) - report_fatal_error(ec.message()); - if (Addr != Val) continue; - if ((ec = SI->getName(Name))) - report_fatal_error(ec.message()); - fmt << Name; - return; - } - - // If we couldn't find a symbol that this relocation refers to, try - // to find a section beginning instead. 
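For reference, the two 32-bit words of a Mach-O relocation entry that both the old inline masks and the new getPlainRelocation*/getScatteredRelocation* helpers pick apart look roughly like this (standalone sketch for a little-endian object; the struct and function names are illustrative, not the patch's own):

    #include <cstdint>

    // A Mach-O relocation entry is two 32-bit words. Plain entries keep the
    // symbol/section number, pcrel, length, extern and type bits in Word1;
    // scattered entries keep address, type, length, pcrel and the scattered
    // flag in Word0 and the target value in Word1.
    struct RelocWords { uint32_t Word0, Word1; };

    static bool isScattered(const RelocWords &RE) {
      return RE.Word0 >> 31;                     // RF_Scattered (never set on x86_64)
    }
    static unsigned relocType(const RelocWords &RE) {
      return isScattered(RE) ? (RE.Word0 >> 24) & 0xF
                             : (RE.Word1 >> 28) & 0xF;
    }
    static bool relocPCRel(const RelocWords &RE) {
      return isScattered(RE) ? (RE.Word0 >> 30) & 1
                             : (RE.Word1 >> 24) & 1;
    }
    static unsigned plainSymbolNum(const RelocWords &RE) {
      return RE.Word1 & 0xFFFFFF;                // plain entries only
    }
    static bool plainIsExternal(const RelocWords &RE) {
      return (RE.Word1 >> 27) & 1;               // plain entries only
    }

Big-endian objects pack the same plain-entry fields from the other end of Word1, which is why the new accessors consult the object's endianness instead of hard-coding these shifts.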
- for (section_iterator SI = begin_sections(), SE = end_sections(); SI != SE; - SI.increment(ec)) { - if (ec) report_fatal_error(ec.message()); - - uint64_t Addr; - StringRef Name; - - if ((ec = SI->getAddress(Addr))) - report_fatal_error(ec.message()); - if (Addr != Val) continue; - if ((ec = SI->getName(Name))) - report_fatal_error(ec.message()); - fmt << Name; - return; - } - - fmt << format("0x%x", Val); - return; - } - - StringRef S; - bool isExtern = (RE->Word1 >> 27) & 1; - uint32_t Val = RE->Word1 & 0xFFFFFF; - - if (isExtern) { - symbol_iterator SI = begin_symbols(); - advanceTo(SI, Val); - SI->getName(S); - } else { - section_iterator SI = begin_sections(); - advanceTo(SI, Val); - SI->getName(S); - } - - fmt << S; -} - -error_code MachOObjectFile::getRelocationValueString(DataRefImpl Rel, +error_code +MachOObjectFile::getRelocationValueString(DataRefImpl Rel, SmallVectorImpl<char> &Result) const { - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); + macho::RelocationEntry RE = getRelocation(Rel); - unsigned Arch = getArch(); - bool isScattered = (Arch != Triple::x86_64) && - (RE->Word0 & macho::RF_Scattered); + unsigned Arch = this->getArch(); std::string fmtbuf; raw_string_ostream fmt(fmtbuf); - - unsigned Type; - if (isScattered) - Type = (RE->Word0 >> 24) & 0xF; - else - Type = (RE->Word1 >> 28) & 0xF; - - bool isPCRel; - if (isScattered) - isPCRel = ((RE->Word0 >> 30) & 1); - else - isPCRel = ((RE->Word1 >> 24) & 1); + unsigned Type = this->getAnyRelocationType(RE); + bool IsPCRel = this->getAnyRelocationPCRel(RE); // Determine any addends that should be displayed with the relocation. // These require decoding the relocation type, which is triple-specific. // X86_64 has entirely custom relocation types. if (Arch == Triple::x86_64) { - bool isPCRel = ((RE->Word1 >> 24) & 1); + bool isPCRel = getAnyRelocationPCRel(RE); switch (Type) { case macho::RIT_X86_64_GOTLoad: // X86_64_RELOC_GOT_LOAD case macho::RIT_X86_64_GOT: { // X86_64_RELOC_GOT - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "@GOT"; if (isPCRel) fmt << "PCREL"; break; } case macho::RIT_X86_64_Subtractor: { // X86_64_RELOC_SUBTRACTOR - InMemoryStruct<macho::RelocationEntry> RENext; DataRefImpl RelNext = Rel; RelNext.d.a++; - getRelocation(RelNext, RENext); + macho::RelocationEntry RENext = getRelocation(RelNext); // X86_64_SUBTRACTOR must be followed by a relocation of type // X86_64_RELOC_UNSIGNED. // NOTE: Scattered relocations don't exist on x86_64. - unsigned RType = (RENext->Word1 >> 28) & 0xF; + unsigned RType = getAnyRelocationType(RENext); if (RType != 0) report_fatal_error("Expected X86_64_RELOC_UNSIGNED after " "X86_64_RELOC_SUBTRACTOR."); // The X86_64_RELOC_UNSIGNED contains the minuend symbol, // X86_64_SUBTRACTOR contains to the subtrahend. 
- printRelocationTargetName(RENext, fmt); + printRelocationTargetName(this, RENext, fmt); fmt << "-"; - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); break; } case macho::RIT_X86_64_TLV: - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "@TLV"; if (isPCRel) fmt << "P"; break; case macho::RIT_X86_64_Signed1: // X86_64_RELOC_SIGNED1 - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "-1"; break; case macho::RIT_X86_64_Signed2: // X86_64_RELOC_SIGNED2 - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "-2"; break; case macho::RIT_X86_64_Signed4: // X86_64_RELOC_SIGNED4 - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "-4"; break; default: - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); break; } // X86 and ARM share some relocation types in common. @@ -1106,27 +1070,21 @@ error_code MachOObjectFile::getRelocationValueString(DataRefImpl Rel, case macho::RIT_Pair: // GENERIC_RELOC_PAIR - prints no info return object_error::success; case macho::RIT_Difference: { // GENERIC_RELOC_SECTDIFF - InMemoryStruct<macho::RelocationEntry> RENext; DataRefImpl RelNext = Rel; RelNext.d.a++; - getRelocation(RelNext, RENext); + macho::RelocationEntry RENext = getRelocation(RelNext); // X86 sect diff's must be followed by a relocation of type // GENERIC_RELOC_PAIR. - bool isNextScattered = (Arch != Triple::x86_64) && - (RENext->Word0 & macho::RF_Scattered); - unsigned RType; - if (isNextScattered) - RType = (RENext->Word0 >> 24) & 0xF; - else - RType = (RENext->Word1 >> 28) & 0xF; + unsigned RType = getAnyRelocationType(RENext); + if (RType != 1) report_fatal_error("Expected GENERIC_RELOC_PAIR after " "GENERIC_RELOC_SECTDIFF."); - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "-"; - printRelocationTargetName(RENext, fmt); + printRelocationTargetName(this, RENext, fmt); break; } } @@ -1136,37 +1094,30 @@ error_code MachOObjectFile::getRelocationValueString(DataRefImpl Rel, // handled in the generic code. switch (Type) { case macho::RIT_Generic_LocalDifference:{// GENERIC_RELOC_LOCAL_SECTDIFF - InMemoryStruct<macho::RelocationEntry> RENext; DataRefImpl RelNext = Rel; RelNext.d.a++; - getRelocation(RelNext, RENext); + macho::RelocationEntry RENext = getRelocation(RelNext); // X86 sect diff's must be followed by a relocation of type // GENERIC_RELOC_PAIR. 
- bool isNextScattered = (Arch != Triple::x86_64) && - (RENext->Word0 & macho::RF_Scattered); - unsigned RType; - if (isNextScattered) - RType = (RENext->Word0 >> 24) & 0xF; - else - RType = (RENext->Word1 >> 28) & 0xF; + unsigned RType = getAnyRelocationType(RENext); if (RType != 1) report_fatal_error("Expected GENERIC_RELOC_PAIR after " "GENERIC_RELOC_LOCAL_SECTDIFF."); - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "-"; - printRelocationTargetName(RENext, fmt); + printRelocationTargetName(this, RENext, fmt); break; } case macho::RIT_Generic_TLV: { - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt << "@TLV"; - if (isPCRel) fmt << "P"; + if (IsPCRel) fmt << "P"; break; } default: - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); } } else { // ARM-specific relocations switch (Type) { @@ -1174,33 +1125,21 @@ error_code MachOObjectFile::getRelocationValueString(DataRefImpl Rel, case macho::RIT_ARM_HalfDifference: { // ARM_RELOC_HALF_SECTDIFF // Half relocations steal a bit from the length field to encode // whether this is an upper16 or a lower16 relocation. - bool isUpper; - if (isScattered) - isUpper = (RE->Word0 >> 28) & 1; - else - isUpper = (RE->Word1 >> 25) & 1; + bool isUpper = getAnyRelocationLength(RE) >> 1; if (isUpper) fmt << ":upper16:("; else fmt << ":lower16:("; - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); - InMemoryStruct<macho::RelocationEntry> RENext; DataRefImpl RelNext = Rel; RelNext.d.a++; - getRelocation(RelNext, RENext); + macho::RelocationEntry RENext = getRelocation(RelNext); // ARM half relocs must be followed by a relocation of type // ARM_RELOC_PAIR. - bool isNextScattered = (Arch != Triple::x86_64) && - (RENext->Word0 & macho::RF_Scattered); - unsigned RType; - if (isNextScattered) - RType = (RENext->Word0 >> 24) & 0xF; - else - RType = (RENext->Word1 >> 28) & 0xF; - + unsigned RType = getAnyRelocationType(RENext); if (RType != 1) report_fatal_error("Expected ARM_RELOC_PAIR after " "GENERIC_RELOC_HALF"); @@ -1214,38 +1153,30 @@ error_code MachOObjectFile::getRelocationValueString(DataRefImpl Rel, // symbol/section pointer of the follow-on relocation. 
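The ARM half relocations handled here use one bit of the length field to say whether the fixup covers the upper or lower 16 bits of the final value (typically the movw/movt idiom); as a standalone illustration of the :upper16:/:lower16: split:

    #include <cstdint>
    #include <cstdio>

    // Split a 32-bit target value into the half selected by the relocation.
    static uint16_t relocatedHalf(uint32_t Target, bool IsUpper) {
      return IsUpper ? uint16_t(Target >> 16)       // :upper16:
                     : uint16_t(Target & 0xFFFF);   // :lower16:
    }

    int main() {
      uint32_t Target = 0x12345678;
      std::printf(":upper16: = 0x%04x  :lower16: = 0x%04x\n",
                  relocatedHalf(Target, true), relocatedHalf(Target, false));
      return 0;
    }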
if (Type == macho::RIT_ARM_HalfDifference) { fmt << "-"; - printRelocationTargetName(RENext, fmt); + printRelocationTargetName(this, RENext, fmt); } fmt << ")"; break; } default: { - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); } } } } else - printRelocationTargetName(RE, fmt); + printRelocationTargetName(this, RE, fmt); fmt.flush(); Result.append(fmtbuf.begin(), fmtbuf.end()); return object_error::success; } -error_code MachOObjectFile::getRelocationHidden(DataRefImpl Rel, - bool &Result) const { - InMemoryStruct<macho::RelocationEntry> RE; - getRelocation(Rel, RE); - +error_code +MachOObjectFile::getRelocationHidden(DataRefImpl Rel, bool &Result) const { unsigned Arch = getArch(); - bool isScattered = (Arch != Triple::x86_64) && - (RE->Word0 & macho::RF_Scattered); - unsigned Type; - if (isScattered) - Type = (RE->Word0 >> 24) & 0xF; - else - Type = (RE->Word1 >> 28) & 0xF; + uint64_t Type; + getRelocationType(Rel, Type); Result = false; @@ -1259,12 +1190,10 @@ error_code MachOObjectFile::getRelocationHidden(DataRefImpl Rel, if (Type == macho::RIT_X86_64_Unsigned && Rel.d.a > 0) { DataRefImpl RelPrev = Rel; RelPrev.d.a--; - InMemoryStruct<macho::RelocationEntry> REPrev; - getRelocation(RelPrev, REPrev); - - unsigned PrevType = (REPrev->Word1 >> 28) & 0xF; - - if (PrevType == macho::RIT_X86_64_Subtractor) Result = true; + uint64_t PrevType; + getRelocationType(RelPrev, PrevType); + if (PrevType == macho::RIT_X86_64_Subtractor) + Result = true; } } @@ -1281,16 +1210,70 @@ error_code MachOObjectFile::getLibraryPath(DataRefImpl LibData, report_fatal_error("Needed libraries unimplemented in MachOObjectFile"); } +symbol_iterator MachOObjectFile::begin_symbols() const { + DataRefImpl DRI; + if (!SymtabLoadCmd) + return symbol_iterator(SymbolRef(DRI, this)); + + macho::SymtabLoadCommand Symtab = getSymtabLoadCommand(); + DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Symtab.SymbolTableOffset)); + return symbol_iterator(SymbolRef(DRI, this)); +} + +symbol_iterator MachOObjectFile::end_symbols() const { + DataRefImpl DRI; + if (!SymtabLoadCmd) + return symbol_iterator(SymbolRef(DRI, this)); + + macho::SymtabLoadCommand Symtab = getSymtabLoadCommand(); + unsigned SymbolTableEntrySize = is64Bit() ? 
+ sizeof(macho::Symbol64TableEntry) : + sizeof(macho::SymbolTableEntry); + unsigned Offset = Symtab.SymbolTableOffset + + Symtab.NumSymbolTableEntries * SymbolTableEntrySize; + DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset)); + return symbol_iterator(SymbolRef(DRI, this)); +} + +symbol_iterator MachOObjectFile::begin_dynamic_symbols() const { + // TODO: implement + report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile"); +} + +symbol_iterator MachOObjectFile::end_dynamic_symbols() const { + // TODO: implement + report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile"); +} + +section_iterator MachOObjectFile::begin_sections() const { + DataRefImpl DRI; + return section_iterator(SectionRef(DRI, this)); +} + +section_iterator MachOObjectFile::end_sections() const { + DataRefImpl DRI; + DRI.d.a = Sections.size(); + return section_iterator(SectionRef(DRI, this)); +} -/*===-- Miscellaneous -----------------------------------------------------===*/ +library_iterator MachOObjectFile::begin_libraries_needed() const { + // TODO: implement + report_fatal_error("Needed libraries unimplemented in MachOObjectFile"); +} + +library_iterator MachOObjectFile::end_libraries_needed() const { + // TODO: implement + report_fatal_error("Needed libraries unimplemented in MachOObjectFile"); +} uint8_t MachOObjectFile::getBytesInAddress() const { - return MachOObj->is64Bit() ? 8 : 4; + return is64Bit() ? 8 : 4; } StringRef MachOObjectFile::getFileFormatName() const { - if (!MachOObj->is64Bit()) { - switch (MachOObj->getHeader().CPUType) { + unsigned CPUType = getCPUType(this); + if (!is64Bit()) { + switch (CPUType) { case llvm::MachO::CPUTypeI386: return "Mach-O 32-bit i386"; case llvm::MachO::CPUTypeARM: @@ -1298,18 +1281,18 @@ StringRef MachOObjectFile::getFileFormatName() const { case llvm::MachO::CPUTypePowerPC: return "Mach-O 32-bit ppc"; default: - assert((MachOObj->getHeader().CPUType & llvm::MachO::CPUArchABI64) == 0 && + assert((CPUType & llvm::MachO::CPUArchABI64) == 0 && "64-bit object file when we're not 64-bit?"); return "Mach-O 32-bit unknown"; } } // Make sure the cpu type has the correct mask. 
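The begin_symbols/end_symbols above compute the iterator range with plain pointer arithmetic over fixed-size symbol table entries; the same calculation in isolation (the struct layouts below mirror nlist/nlist_64 and are assumptions for the sketch, not the patch's types):

    #include <cstddef>
    #include <cstdint>

    // Abbreviated stand-ins for macho::SymbolTableEntry / Symbol64TableEntry.
    struct Nlist32 { uint32_t StringIndex; uint8_t Type, SectionIndex; uint16_t Flags; uint32_t Value; };
    struct Nlist64 { uint32_t StringIndex; uint8_t Type, SectionIndex; uint16_t Flags; uint64_t Value; };

    // [Begin, End) byte range of the symbol table inside the mapped file.
    static void symbolTableRange(const char *FileStart, uint32_t SymTabOffset,
                                 uint32_t NumSymbols, bool Is64Bit,
                                 const char *&Begin, const char *&End) {
      std::size_t EntrySize = Is64Bit ? sizeof(Nlist64) : sizeof(Nlist32);
      Begin = FileStart + SymTabOffset;
      End = Begin + NumSymbols * EntrySize;
    }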
- assert((MachOObj->getHeader().CPUType & llvm::MachO::CPUArchABI64) + assert((CPUType & llvm::MachO::CPUArchABI64) == llvm::MachO::CPUArchABI64 && "32-bit object file when we're 64-bit?"); - switch (MachOObj->getHeader().CPUType) { + switch (CPUType) { case llvm::MachO::CPUTypeX86_64: return "Mach-O 64-bit x86-64"; case llvm::MachO::CPUTypePowerPC64: @@ -1320,7 +1303,7 @@ StringRef MachOObjectFile::getFileFormatName() const { } unsigned MachOObjectFile::getArch() const { - switch (MachOObj->getHeader().CPUType) { + switch (getCPUType(this)) { case llvm::MachO::CPUTypeI386: return Triple::x86; case llvm::MachO::CPUTypeX86_64: @@ -1336,5 +1319,260 @@ unsigned MachOObjectFile::getArch() const { } } +StringRef MachOObjectFile::getLoadName() const { + // TODO: Implement + report_fatal_error("get_load_name() unimplemented in MachOObjectFile"); +} + +relocation_iterator MachOObjectFile::getSectionRelBegin(unsigned Index) const { + DataRefImpl DRI; + DRI.d.a = Index; + return getSectionRelBegin(DRI); +} + +relocation_iterator MachOObjectFile::getSectionRelEnd(unsigned Index) const { + DataRefImpl DRI; + DRI.d.a = Index; + return getSectionRelEnd(DRI); +} + +StringRef +MachOObjectFile::getSectionFinalSegmentName(DataRefImpl Sec) const { + ArrayRef<char> Raw = getSectionRawFinalSegmentName(Sec); + return parseSegmentOrSectionName(Raw.data()); +} + +ArrayRef<char> +MachOObjectFile::getSectionRawName(DataRefImpl Sec) const { + const SectionBase *Base = + reinterpret_cast<const SectionBase*>(Sections[Sec.d.a]); + return ArrayRef<char>(Base->Name); +} + +ArrayRef<char> +MachOObjectFile::getSectionRawFinalSegmentName(DataRefImpl Sec) const { + const SectionBase *Base = + reinterpret_cast<const SectionBase*>(Sections[Sec.d.a]); + return ArrayRef<char>(Base->SegmentName); +} + +bool +MachOObjectFile::isRelocationScattered(const macho::RelocationEntry &RE) + const { + if (getCPUType(this) == llvm::MachO::CPUTypeX86_64) + return false; + return getPlainRelocationAddress(RE) & macho::RF_Scattered; +} + +unsigned MachOObjectFile::getPlainRelocationSymbolNum(const macho::RelocationEntry &RE) const { + if (isLittleEndian()) + return RE.Word1 & 0xffffff; + return RE.Word1 >> 8; +} + +bool MachOObjectFile::getPlainRelocationExternal(const macho::RelocationEntry &RE) const { + if (isLittleEndian()) + return (RE.Word1 >> 27) & 1; + return (RE.Word1 >> 4) & 1; +} + +bool +MachOObjectFile::getScatteredRelocationScattered(const macho::RelocationEntry &RE) const { + return RE.Word0 >> 31; +} + +uint32_t +MachOObjectFile::getScatteredRelocationValue(const macho::RelocationEntry &RE) const { + return RE.Word1; +} + +unsigned +MachOObjectFile::getAnyRelocationAddress(const macho::RelocationEntry &RE) const { + if (isRelocationScattered(RE)) + return getScatteredRelocationAddress(RE); + return getPlainRelocationAddress(RE); +} + +unsigned +MachOObjectFile::getAnyRelocationPCRel(const macho::RelocationEntry &RE) const { + if (isRelocationScattered(RE)) + return getScatteredRelocationPCRel(this, RE); + return getPlainRelocationPCRel(this, RE); +} + +unsigned +MachOObjectFile::getAnyRelocationLength(const macho::RelocationEntry &RE) const { + if (isRelocationScattered(RE)) + return getScatteredRelocationLength(RE); + return getPlainRelocationLength(this, RE); +} + +unsigned +MachOObjectFile::getAnyRelocationType(const macho::RelocationEntry &RE) const { + if (isRelocationScattered(RE)) + return getScatteredRelocationType(RE); + return getPlainRelocationType(this, RE); +} + +SectionRef 
+MachOObjectFile::getRelocationSection(const macho::RelocationEntry &RE) const { + if (isRelocationScattered(RE) || getPlainRelocationExternal(RE)) + return *end_sections(); + unsigned SecNum = getPlainRelocationSymbolNum(RE) - 1; + DataRefImpl DRI; + DRI.d.a = SecNum; + return SectionRef(DRI, this); +} + +MachOObjectFile::LoadCommandInfo +MachOObjectFile::getFirstLoadCommandInfo() const { + MachOObjectFile::LoadCommandInfo Load; + + unsigned HeaderSize = is64Bit() ? macho::Header64Size : macho::Header32Size; + Load.Ptr = getPtr(this, HeaderSize); + Load.C = getStruct<macho::LoadCommand>(this, Load.Ptr); + return Load; +} + +MachOObjectFile::LoadCommandInfo +MachOObjectFile::getNextLoadCommandInfo(const LoadCommandInfo &L) const { + MachOObjectFile::LoadCommandInfo Next; + Next.Ptr = L.Ptr + L.C.Size; + Next.C = getStruct<macho::LoadCommand>(this, Next.Ptr); + return Next; +} + +macho::Section MachOObjectFile::getSection(DataRefImpl DRI) const { + return getStruct<macho::Section>(this, Sections[DRI.d.a]); +} + +macho::Section64 MachOObjectFile::getSection64(DataRefImpl DRI) const { + return getStruct<macho::Section64>(this, Sections[DRI.d.a]); +} + +macho::Section MachOObjectFile::getSection(const LoadCommandInfo &L, + unsigned Index) const { + const char *Sec = getSectionPtr(this, L, Index); + return getStruct<macho::Section>(this, Sec); +} + +macho::Section64 MachOObjectFile::getSection64(const LoadCommandInfo &L, + unsigned Index) const { + const char *Sec = getSectionPtr(this, L, Index); + return getStruct<macho::Section64>(this, Sec); +} + +macho::SymbolTableEntry +MachOObjectFile::getSymbolTableEntry(DataRefImpl DRI) const { + const char *P = reinterpret_cast<const char *>(DRI.p); + return getStruct<macho::SymbolTableEntry>(this, P); +} + +macho::Symbol64TableEntry +MachOObjectFile::getSymbol64TableEntry(DataRefImpl DRI) const { + const char *P = reinterpret_cast<const char *>(DRI.p); + return getStruct<macho::Symbol64TableEntry>(this, P); +} + +macho::LinkeditDataLoadCommand +MachOObjectFile::getLinkeditDataLoadCommand(const MachOObjectFile::LoadCommandInfo &L) const { + return getStruct<macho::LinkeditDataLoadCommand>(this, L.Ptr); +} + +macho::SegmentLoadCommand +MachOObjectFile::getSegmentLoadCommand(const LoadCommandInfo &L) const { + return getStruct<macho::SegmentLoadCommand>(this, L.Ptr); +} + +macho::Segment64LoadCommand +MachOObjectFile::getSegment64LoadCommand(const LoadCommandInfo &L) const { + return getStruct<macho::Segment64LoadCommand>(this, L.Ptr); +} + +macho::LinkerOptionsLoadCommand +MachOObjectFile::getLinkerOptionsLoadCommand(const LoadCommandInfo &L) const { + return getStruct<macho::LinkerOptionsLoadCommand>(this, L.Ptr); +} + +macho::RelocationEntry +MachOObjectFile::getRelocation(DataRefImpl Rel) const { + const char *P = reinterpret_cast<const char *>(Rel.p); + return getStruct<macho::RelocationEntry>(this, P); +} + +macho::Header MachOObjectFile::getHeader() const { + return getStruct<macho::Header>(this, getPtr(this, 0)); +} + +macho::Header64Ext MachOObjectFile::getHeader64Ext() const { + return + getStruct<macho::Header64Ext>(this, getPtr(this, sizeof(macho::Header))); +} + +macho::IndirectSymbolTableEntry MachOObjectFile::getIndirectSymbolTableEntry( + const macho::DysymtabLoadCommand &DLC, + unsigned Index) const { + uint64_t Offset = DLC.IndirectSymbolTableOffset + + Index * sizeof(macho::IndirectSymbolTableEntry); + return getStruct<macho::IndirectSymbolTableEntry>(this, getPtr(this, Offset)); +} + +macho::DataInCodeTableEntry 
+MachOObjectFile::getDataInCodeTableEntry(uint32_t DataOffset, + unsigned Index) const { + uint64_t Offset = DataOffset + Index * sizeof(macho::DataInCodeTableEntry); + return getStruct<macho::DataInCodeTableEntry>(this, getPtr(this, Offset)); +} + +macho::SymtabLoadCommand MachOObjectFile::getSymtabLoadCommand() const { + return getStruct<macho::SymtabLoadCommand>(this, SymtabLoadCmd); +} + +macho::DysymtabLoadCommand MachOObjectFile::getDysymtabLoadCommand() const { + return getStruct<macho::DysymtabLoadCommand>(this, DysymtabLoadCmd); +} + +StringRef MachOObjectFile::getStringTableData() const { + macho::SymtabLoadCommand S = getSymtabLoadCommand(); + return getData().substr(S.StringTableOffset, S.StringTableSize); +} + +bool MachOObjectFile::is64Bit() const { + return getType() == getMachOType(false, true) || + getType() == getMachOType(true, true); +} + +void MachOObjectFile::ReadULEB128s(uint64_t Index, + SmallVectorImpl<uint64_t> &Out) const { + DataExtractor extractor(ObjectFile::getData(), true, 0); + + uint32_t offset = Index; + uint64_t data = 0; + while (uint64_t delta = extractor.getULEB128(&offset)) { + data += delta; + Out.push_back(data); + } +} + +ObjectFile *ObjectFile::createMachOObjectFile(MemoryBuffer *Buffer) { + StringRef Magic = Buffer->getBuffer().slice(0, 4); + error_code ec; + ObjectFile *Ret; + if (Magic == "\xFE\xED\xFA\xCE") + Ret = new MachOObjectFile(Buffer, false, false, ec); + else if (Magic == "\xCE\xFA\xED\xFE") + Ret = new MachOObjectFile(Buffer, true, false, ec); + else if (Magic == "\xFE\xED\xFA\xCF") + Ret = new MachOObjectFile(Buffer, false, true, ec); + else if (Magic == "\xCF\xFA\xED\xFE") + Ret = new MachOObjectFile(Buffer, true, true, ec); + else + return NULL; + + if (ec) + return NULL; + return Ret; +} + } // end namespace object } // end namespace llvm diff --git a/lib/Object/Object.cpp b/lib/Object/Object.cpp index f061ea7..3e2c78e 100644 --- a/lib/Object/Object.cpp +++ b/lib/Object/Object.cpp @@ -12,12 +12,51 @@ // //===----------------------------------------------------------------------===// +#include "llvm/ADT/SmallVector.h" #include "llvm/Object/ObjectFile.h" #include "llvm-c/Object.h" using namespace llvm; using namespace object; +inline ObjectFile *unwrap(LLVMObjectFileRef OF) { + return reinterpret_cast<ObjectFile*>(OF); +} + +inline LLVMObjectFileRef wrap(const ObjectFile *OF) { + return reinterpret_cast<LLVMObjectFileRef>(const_cast<ObjectFile*>(OF)); +} + +inline section_iterator *unwrap(LLVMSectionIteratorRef SI) { + return reinterpret_cast<section_iterator*>(SI); +} + +inline LLVMSectionIteratorRef +wrap(const section_iterator *SI) { + return reinterpret_cast<LLVMSectionIteratorRef> + (const_cast<section_iterator*>(SI)); +} + +inline symbol_iterator *unwrap(LLVMSymbolIteratorRef SI) { + return reinterpret_cast<symbol_iterator*>(SI); +} + +inline LLVMSymbolIteratorRef +wrap(const symbol_iterator *SI) { + return reinterpret_cast<LLVMSymbolIteratorRef> + (const_cast<symbol_iterator*>(SI)); +} + +inline relocation_iterator *unwrap(LLVMRelocationIteratorRef SI) { + return reinterpret_cast<relocation_iterator*>(SI); +} + +inline LLVMRelocationIteratorRef +wrap(const relocation_iterator *SI) { + return reinterpret_cast<LLVMRelocationIteratorRef> + (const_cast<relocation_iterator*>(SI)); +} + // ObjectFile creation LLVMObjectFileRef LLVMCreateObjectFile(LLVMMemoryBufferRef MemBuf) { return wrap(ObjectFile::createObjectFile(unwrap(MemBuf))); diff --git a/lib/Object/ObjectFile.cpp b/lib/Object/ObjectFile.cpp index 860c87b..77fd995 100644 
--- a/lib/Object/ObjectFile.cpp +++ b/lib/Object/ObjectFile.cpp @@ -23,10 +23,16 @@ using namespace object; void ObjectFile::anchor() { } -ObjectFile::ObjectFile(unsigned int Type, MemoryBuffer *source, error_code &ec) +ObjectFile::ObjectFile(unsigned int Type, MemoryBuffer *source) : Binary(Type, source) { } +error_code ObjectFile::getSymbolAlignment(DataRefImpl DRI, + uint32_t &Result) const { + Result = 0; + return object_error::success; +} + ObjectFile *ObjectFile::createObjectFile(MemoryBuffer *Object) { if (!Object || Object->getBufferSize() < 64) return 0; diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index 5b68fbb..6182e34 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -3311,10 +3311,8 @@ namespace { significand = significand.udiv(divisor); - // Truncate the significand down to its active bit count, but - // don't try to drop below 32. - unsigned newPrecision = std::max(32U, significand.getActiveBits()); - significand = significand.trunc(newPrecision); + // Truncate the significand down to its active bit count. + significand = significand.trunc(significand.getActiveBits()); } diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp index 07cb057..e853475 100644 --- a/lib/Support/APInt.cpp +++ b/lib/Support/APInt.cpp @@ -559,12 +559,12 @@ bool APInt::slt(const APInt& RHS) const { if (lhsNeg) { // Sign bit is set so perform two's complement to make it positive lhs.flipAllBits(); - lhs++; + ++lhs; } if (rhsNeg) { // Sign bit is set so perform two's complement to make it positive rhs.flipAllBits(); - rhs++; + ++rhs; } // Now we have unsigned values to compare so do the comparison if necessary @@ -2116,7 +2116,7 @@ void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) { } // If its negative, put it in two's complement form if (isNeg) { - (*this)--; + --(*this); this->flipAllBits(); } } @@ -2197,7 +2197,7 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix, // Flip the bits and add one to turn it into the equivalent positive // value and put a '-' in the result. Tmp.flipAllBits(); - Tmp++; + ++Tmp; Str.push_back('-'); } diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt index 5ba69fc..01565c5 100644 --- a/lib/Support/CMakeLists.txt +++ b/lib/Support/CMakeLists.txt @@ -7,6 +7,7 @@ add_llvm_library(LLVMSupport BranchProbability.cpp circular_raw_ostream.cpp CommandLine.cpp + Compression.cpp ConstantRange.cpp ConvertUTF.c ConvertUTFWrapper.cpp @@ -83,6 +84,7 @@ add_llvm_library(LLVMSupport Threading.cpp TimeValue.cpp Valgrind.cpp + Watchdog.cpp Unix/Host.inc Unix/Memory.inc Unix/Mutex.inc @@ -95,6 +97,7 @@ add_llvm_library(LLVMSupport Unix/system_error.inc Unix/ThreadLocal.inc Unix/TimeValue.inc + Unix/Watchdog.inc Windows/DynamicLibrary.inc Windows/Host.inc Windows/Memory.inc @@ -108,4 +111,5 @@ add_llvm_library(LLVMSupport Windows/system_error.inc Windows/ThreadLocal.inc Windows/TimeValue.inc + Windows/Watchdog.inc ) diff --git a/lib/Support/Compression.cpp b/lib/Support/Compression.cpp new file mode 100644 index 0000000..fd8a874 --- /dev/null +++ b/lib/Support/Compression.cpp @@ -0,0 +1,97 @@ +//===--- Compression.cpp - Compression implementation ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements compression functions. 
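A possible caller-side round trip through the new wrapper, using only the signatures defined below (the declaring header's defaults are not visible in this patch, so the compression level is passed explicitly):

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Compression.h"
    #include "llvm/Support/MemoryBuffer.h"

    using namespace llvm;

    static bool roundTrip(StringRef Input) {
      if (!zlib::isAvailable())
        return false;                      // built without zlib

      OwningPtr<MemoryBuffer> Compressed;
      if (zlib::compress(Input, Compressed, zlib::DefaultCompression) !=
          zlib::StatusOK)
        return false;

      // uncompress cannot discover the original size; the caller has to
      // carry it alongside the compressed bytes.
      OwningPtr<MemoryBuffer> Uncompressed;
      if (zlib::uncompress(Compressed->getBuffer(), Uncompressed,
                           Input.size()) != zlib::StatusOK)
        return false;

      return Uncompressed->getBuffer() == Input;
    }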
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/Compression.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Config/config.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MemoryBuffer.h" +#if LLVM_ENABLE_ZLIB == 1 && HAVE_ZLIB_H +#include <zlib.h> +#endif + +using namespace llvm; + +#if LLVM_ENABLE_ZLIB == 1 && HAVE_LIBZ +static int encodeZlibCompressionLevel(zlib::CompressionLevel Level) { + switch (Level) { + case zlib::NoCompression: return 0; + case zlib::BestSpeedCompression: return 1; + case zlib::DefaultCompression: return Z_DEFAULT_COMPRESSION; + case zlib::BestSizeCompression: return 9; + } + llvm_unreachable("Invalid zlib::CompressionLevel!"); +} + +static zlib::Status encodeZlibReturnValue(int ReturnValue) { + switch (ReturnValue) { + case Z_OK: return zlib::StatusOK; + case Z_MEM_ERROR: return zlib::StatusOutOfMemory; + case Z_BUF_ERROR: return zlib::StatusBufferTooShort; + case Z_STREAM_ERROR: return zlib::StatusInvalidArg; + case Z_DATA_ERROR: return zlib::StatusInvalidData; + default: llvm_unreachable("unknown zlib return status!"); + } +} + +bool zlib::isAvailable() { return true; } +zlib::Status zlib::compress(StringRef InputBuffer, + OwningPtr<MemoryBuffer> &CompressedBuffer, + CompressionLevel Level) { + unsigned long CompressedSize = ::compressBound(InputBuffer.size()); + OwningArrayPtr<char> TmpBuffer(new char[CompressedSize]); + int CLevel = encodeZlibCompressionLevel(Level); + Status Res = encodeZlibReturnValue(::compress2( + (Bytef *)TmpBuffer.get(), &CompressedSize, + (const Bytef *)InputBuffer.data(), InputBuffer.size(), CLevel)); + if (Res == StatusOK) { + CompressedBuffer.reset(MemoryBuffer::getMemBufferCopy( + StringRef(TmpBuffer.get(), CompressedSize))); + // Tell MSan that memory initialized by zlib is valid. + __msan_unpoison(CompressedBuffer->getBufferStart(), CompressedSize); + } + return Res; +} + +zlib::Status zlib::uncompress(StringRef InputBuffer, + OwningPtr<MemoryBuffer> &UncompressedBuffer, + size_t UncompressedSize) { + OwningArrayPtr<char> TmpBuffer(new char[UncompressedSize]); + Status Res = encodeZlibReturnValue( + ::uncompress((Bytef *)TmpBuffer.get(), (uLongf *)&UncompressedSize, + (const Bytef *)InputBuffer.data(), InputBuffer.size())); + if (Res == StatusOK) { + UncompressedBuffer.reset(MemoryBuffer::getMemBufferCopy( + StringRef(TmpBuffer.get(), UncompressedSize))); + // Tell MSan that memory initialized by zlib is valid. 
+ __msan_unpoison(UncompressedBuffer->getBufferStart(), UncompressedSize); + } + return Res; +} + +#else +bool zlib::isAvailable() { return false; } +zlib::Status zlib::compress(StringRef InputBuffer, + OwningPtr<MemoryBuffer> &CompressedBuffer, + CompressionLevel Level) { + return zlib::StatusUnsupported; +} +zlib::Status zlib::uncompress(StringRef InputBuffer, + OwningPtr<MemoryBuffer> &UncompressedBuffer, + size_t UncompressedSize) { + return zlib::StatusUnsupported; +} +#endif + diff --git a/lib/Support/DataExtractor.cpp b/lib/Support/DataExtractor.cpp index 3d5cce0..a564d21 100644 --- a/lib/Support/DataExtractor.cpp +++ b/lib/Support/DataExtractor.cpp @@ -20,7 +20,7 @@ static T getU(uint32_t *offset_ptr, const DataExtractor *de, uint32_t offset = *offset_ptr; if (de->isValidOffsetForDataOfSize(offset, sizeof(val))) { std::memcpy(&val, &Data[offset], sizeof(val)); - if (sys::isLittleEndianHost() != isLittleEndian) + if (sys::IsLittleEndianHost != isLittleEndian) val = sys::SwapByteOrder(val); // Advance the offset diff --git a/lib/Support/ErrorHandling.cpp b/lib/Support/ErrorHandling.cpp index d4382e5..f4b591e 100644 --- a/lib/Support/ErrorHandling.cpp +++ b/lib/Support/ErrorHandling.cpp @@ -49,21 +49,21 @@ void llvm::remove_fatal_error_handler() { ErrorHandler = 0; } -void llvm::report_fatal_error(const char *Reason) { - report_fatal_error(Twine(Reason)); +void llvm::report_fatal_error(const char *Reason, bool GenCrashDiag) { + report_fatal_error(Twine(Reason), GenCrashDiag); } -void llvm::report_fatal_error(const std::string &Reason) { - report_fatal_error(Twine(Reason)); +void llvm::report_fatal_error(const std::string &Reason, bool GenCrashDiag) { + report_fatal_error(Twine(Reason), GenCrashDiag); } -void llvm::report_fatal_error(StringRef Reason) { - report_fatal_error(Twine(Reason)); +void llvm::report_fatal_error(StringRef Reason, bool GenCrashDiag) { + report_fatal_error(Twine(Reason), GenCrashDiag); } -void llvm::report_fatal_error(const Twine &Reason) { +void llvm::report_fatal_error(const Twine &Reason, bool GenCrashDiag) { if (ErrorHandler) { - ErrorHandler(ErrorHandlerUserData, Reason.str()); + ErrorHandler(ErrorHandlerUserData, Reason.str(), GenCrashDiag); } else { // Blast the result out to stderr. We don't try hard to make sure this // succeeds (e.g. handling EINTR) and we can't use errs() here because diff --git a/lib/Support/FoldingSet.cpp b/lib/Support/FoldingSet.cpp index 36e33b5..145f12d 100644 --- a/lib/Support/FoldingSet.cpp +++ b/lib/Support/FoldingSet.cpp @@ -101,7 +101,7 @@ void FoldingSetNodeID::AddString(StringRef String) { // Otherwise do it the hard way. // To be compatible with above bulk transfer, we need to take endianness // into account. 
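These endianness hunks swap the isLittleEndianHost()/isBigEndianHost() helper calls for the IsLittleEndianHost/IsBigEndianHost constants; the operation they guard is the usual conditional byte swap, for example:

    #include <cstdint>
    #include <cstring>

    static uint32_t swap32(uint32_t V) {
      return (V << 24) | ((V & 0xFF00u) << 8) | ((V >> 8) & 0xFF00u) | (V >> 24);
    }

    // Read a 32-bit value from a buffer, swapping only when the data's byte
    // order differs from the host's.
    static uint32_t readU32(const uint8_t *P, bool DataIsLittleEndian,
                            bool HostIsLittleEndian) {
      uint32_t V;
      std::memcpy(&V, P, sizeof(V));
      return DataIsLittleEndian == HostIsLittleEndian ? V : swap32(V);
    }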
- if (sys::isBigEndianHost()) { + if (sys::IsBigEndianHost) { for (Pos += 4; Pos <= Size; Pos += 4) { unsigned V = ((unsigned char)String[Pos - 4] << 24) | ((unsigned char)String[Pos - 3] << 16) | @@ -110,7 +110,7 @@ void FoldingSetNodeID::AddString(StringRef String) { Bits.push_back(V); } } else { - assert(sys::isLittleEndianHost() && "Unexpected host endianness"); + assert(sys::IsLittleEndianHost && "Unexpected host endianness"); for (Pos += 4; Pos <= Size; Pos += 4) { unsigned V = ((unsigned char)String[Pos - 1] << 24) | ((unsigned char)String[Pos - 2] << 16) | diff --git a/lib/Support/Host.cpp b/lib/Support/Host.cpp index b9bbcb9..27c99c8 100644 --- a/lib/Support/Host.cpp +++ b/lib/Support/Host.cpp @@ -112,6 +112,21 @@ static bool GetX86CpuIDAndInfo(unsigned value, unsigned *rEAX, #endif } +static bool OSHasAVXSupport() { +#if defined(__GNUC__) + // Check xgetbv; this uses a .byte sequence instead of the instruction + // directly because older assemblers do not include support for xgetbv and + // there is no easy way to conditionally compile based on the assembler used. + int rEAX, rEDX; + __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a" (rEAX), "=d" (rEDX) : "c" (0)); +#elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK) + unsigned long long rEAX = _xgetbv(_XCR_XFEATURE_ENABLED_MASK); +#else + int rEAX = 0; // Ensures we return false +#endif + return (rEAX & 6) == 6; +} + static void DetectX86FamilyModel(unsigned EAX, unsigned &Family, unsigned &Model) { Family = (EAX >> 8) & 0xf; // Bits 8 - 11 @@ -134,6 +149,11 @@ std::string sys::getHostCPUName() { DetectX86FamilyModel(EAX, Family, Model); bool HasSSE3 = (ECX & 0x1); + // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV + // indicates that the AVX registers will be saved and restored on context + // switch, then we have full AVX support. + const unsigned AVXBits = (1 << 27) | (1 << 28); + bool HasAVX = ((ECX & AVXBits) == AVXBits) && OSHasAVXSupport(); GetX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX); bool Em64T = (EDX >> 29) & 0x1; @@ -243,11 +263,15 @@ std::string sys::getHostCPUName() { case 42: // Intel Core i7 processor. All processors are manufactured // using the 32 nm process. case 45: - return "corei7-avx"; + // Not all Sandy Bridge processors support AVX (such as the Pentium + // versions instead of the i7 versions). + return HasAVX ? "corei7-avx" : "corei7"; // Ivy Bridge: case 58: - return "core-avx-i"; + // Not all Ivy Bridge processors support AVX (such as the Pentium + // versions instead of the i7 versions). + return HasAVX ? "core-avx-i" : "corei7"; case 28: // Most 45 nm Intel Atom processors case 38: // 45 nm Atom Lincroft diff --git a/lib/Support/LockFileManager.cpp b/lib/Support/LockFileManager.cpp index 92d8b83..2917e27 100644 --- a/lib/Support/LockFileManager.cpp +++ b/lib/Support/LockFileManager.cpp @@ -174,8 +174,8 @@ void LockFileManager::waitForUnlock() { Interval.tv_sec = 0; Interval.tv_nsec = 1000000; #endif - // Don't wait more than an hour for the file to appear. - const unsigned MaxSeconds = 3600; + // Don't wait more than five minutes for the file to appear. + unsigned MaxSeconds = 300; bool LockFileGone = false; do { // Sleep for the designated interval, to allow the owning process time to @@ -187,21 +187,48 @@ void LockFileManager::waitForUnlock() { #else nanosleep(&Interval, NULL); #endif - // If the lock file no longer exists, wait for the actual file. 
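The OSHasAVXSupport hunk above only reports an AVX-capable CPU name when the OS also saves the YMM state; a standalone sketch of the combined CPUID + XGETBV check (GCC-style, mirroring the .byte-encoded xgetbv in the patch):

    #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    #include <cpuid.h>

    static bool hostHasUsableAVX() {
      unsigned EAX, EBX, ECX, EDX;
      if (!__get_cpuid(1, &EAX, &EBX, &ECX, &EDX))
        return false;
      // Bit 27: OSXSAVE (XGETBV is usable), bit 28: the CPU implements AVX.
      const unsigned AVXBits = (1u << 27) | (1u << 28);
      if ((ECX & AVXBits) != AVXBits)
        return false;
      // XGETBV with ECX = 0 reads XCR0; bits 1 and 2 mean the OS saves and
      // restores the XMM and YMM state on context switch.
      unsigned Lo, Hi;
      __asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(Lo), "=d"(Hi) : "c"(0));
      return (Lo & 6) == 6;
    }
    #endif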
bool Exists = false; + bool LockFileJustDisappeared = false; + + // If the lock file is still expected to be there, check whether it still + // is. if (!LockFileGone) { if (!sys::fs::exists(LockFileName.str(), Exists) && !Exists) { LockFileGone = true; + LockFileJustDisappeared = true; Exists = false; } } + + // If the lock file is no longer there, check if the original file is + // available now. if (LockFileGone) { - if (!sys::fs::exists(FileName.str(), Exists) && Exists) + if (!sys::fs::exists(FileName.str(), Exists) && Exists) { return; + } + + // The lock file is gone, so now we're waiting for the original file to + // show up. If this just happened, reset our waiting intervals and keep + // waiting. + if (LockFileJustDisappeared) { + MaxSeconds = 5; + +#if LLVM_ON_WIN32 + Interval = 1; +#else + Interval.tv_sec = 0; + Interval.tv_nsec = 1000000; +#endif + continue; + } } - if (!processStillExecuting((*Owner).first, (*Owner).second)) + // If we're looking for the lock file to disappear, but the process + // owning the lock died without cleaning up, just bail out. + if (!LockFileGone && + !processStillExecuting((*Owner).first, (*Owner).second)) { return; + } // Exponentially increase the time we wait for the lock to be removed. #if LLVM_ON_WIN32 diff --git a/lib/Support/MemoryBuffer.cpp b/lib/Support/MemoryBuffer.cpp index 8042237..7c5ab96 100644 --- a/lib/Support/MemoryBuffer.cpp +++ b/lib/Support/MemoryBuffer.cpp @@ -72,10 +72,12 @@ static void CopyStringRef(char *Memory, StringRef Data) { Memory[Data.size()] = 0; // Null terminate string. } +namespace { struct NamedBufferAlloc { StringRef Name; NamedBufferAlloc(StringRef Name) : Name(Name) {} }; +} void *operator new(size_t N, const NamedBufferAlloc &Alloc) { char *Mem = static_cast<char *>(operator new(N + Alloc.Name.size() + 1)); diff --git a/lib/Support/PathV2.cpp b/lib/Support/PathV2.cpp index 41add96..ac53a9e 100644 --- a/lib/Support/PathV2.cpp +++ b/lib/Support/PathV2.cpp @@ -18,6 +18,9 @@ #include <cctype> #include <cstdio> #include <cstring> +#ifdef __APPLE__ +#include <unistd.h> +#endif namespace { using llvm::StringRef; @@ -493,6 +496,27 @@ bool is_separator(char value) { void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result) { result.clear(); +#ifdef __APPLE__ + // On Darwin, use DARWIN_USER_TEMP_DIR or DARWIN_USER_CACHE_DIR. + int ConfName = erasedOnReboot? _CS_DARWIN_USER_TEMP_DIR + : _CS_DARWIN_USER_CACHE_DIR; + size_t ConfLen = confstr(ConfName, 0, 0); + if (ConfLen > 0) { + do { + result.resize(ConfLen); + ConfLen = confstr(ConfName, result.data(), result.size()); + } while (ConfLen > 0 && ConfLen != result.size()); + + if (ConfLen > 0) { + assert(result.back() == 0); + result.pop_back(); + return; + } + + result.clear(); + } +#endif + // Check whether the temporary directory is specified by an environment // variable. const char *EnvironmentVariable; @@ -765,8 +789,11 @@ file_magic identify_magic(StringRef magic) { case '\177': if (magic[1] == 'E' && magic[2] == 'L' && magic[3] == 'F') { - if (magic.size() >= 18 && magic[17] == 0) - switch (magic[16]) { + bool Data2MSB = magic[5] == 2; + unsigned high = Data2MSB ? 16 : 17; + unsigned low = Data2MSB ? 
17 : 16; + if (magic.size() >= 18 && magic[high] == 0) + switch (magic[low]) { default: break; case 1: return file_magic::elf_relocatable; case 2: return file_magic::elf_executable; diff --git a/lib/Support/PrettyStackTrace.cpp b/lib/Support/PrettyStackTrace.cpp index 21d56ad..23ee5ab 100644 --- a/lib/Support/PrettyStackTrace.cpp +++ b/lib/Support/PrettyStackTrace.cpp @@ -17,6 +17,7 @@ #include "llvm/Config/config.h" // Get autoconf configuration settings #include "llvm/Support/Signals.h" #include "llvm/Support/ThreadLocal.h" +#include "llvm/Support/Watchdog.h" #include "llvm/Support/raw_ostream.h" #ifdef HAVE_CRASHREPORTERCLIENT_H @@ -37,7 +38,10 @@ static unsigned PrintStack(const PrettyStackTraceEntry *Entry, raw_ostream &OS){ if (Entry->getNextEntry()) NextID = PrintStack(Entry->getNextEntry(), OS); OS << NextID << ".\t"; - Entry->print(OS); + { + sys::Watchdog W(5); + Entry->print(OS); + } return NextID+1; } diff --git a/lib/Support/Program.cpp b/lib/Support/Program.cpp index 75bc282..201d5c0 100644 --- a/lib/Support/Program.cpp +++ b/lib/Support/Program.cpp @@ -29,12 +29,15 @@ Program::ExecuteAndWait(const Path& path, const Path** redirects, unsigned secondsToWait, unsigned memoryLimit, - std::string* ErrMsg) { + std::string* ErrMsg, + bool *ExecutionFailed) { Program prg; - if (prg.Execute(path, args, envp, redirects, memoryLimit, ErrMsg)) + if (prg.Execute(path, args, envp, redirects, memoryLimit, ErrMsg)) { + if (ExecutionFailed) *ExecutionFailed = false; return prg.Wait(path, secondsToWait, ErrMsg); - else - return -1; + } + if (ExecutionFailed) *ExecutionFailed = true; + return -1; } void diff --git a/lib/Support/SmallPtrSet.cpp b/lib/Support/SmallPtrSet.cpp index 3b53e9f..f0fed77 100644 --- a/lib/Support/SmallPtrSet.cpp +++ b/lib/Support/SmallPtrSet.cpp @@ -29,13 +29,9 @@ void SmallPtrSetImpl::shrink_and_clear() { NumElements = NumTombstones = 0; // Install the new array. Clear all the buckets to empty. - CurArray = (const void**)malloc(sizeof(void*) * (CurArraySize+1)); + CurArray = (const void**)malloc(sizeof(void*) * CurArraySize); assert(CurArray && "Failed to allocate memory?"); memset(CurArray, -1, CurArraySize*sizeof(void*)); - - // The end pointer, always valid, is set to a valid element to help the - // iterator. - CurArray[CurArraySize] = 0; } bool SmallPtrSetImpl::insert_imp(const void * Ptr) { @@ -139,15 +135,11 @@ void SmallPtrSetImpl::Grow(unsigned NewSize) { bool WasSmall = isSmall(); // Install the new array. Clear all the buckets to empty. - CurArray = (const void**)malloc(sizeof(void*) * (NewSize+1)); + CurArray = (const void**)malloc(sizeof(void*) * NewSize); assert(CurArray && "Failed to allocate memory?"); CurArraySize = NewSize; memset(CurArray, -1, NewSize*sizeof(void*)); - // The end pointer, always valid, is set to a valid element to help the - // iterator. - CurArray[NewSize] = 0; - // Copy over all the elements. if (WasSmall) { // Small sets store their elements in order. 
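The identify_magic hunk above reads the 16-bit ELF e_type field at byte 16 or 17 depending on EI_DATA (byte 5), because the field is stored in the file's own byte order; the same check in isolation:

    #include <cstdint>
    #include <cstring>

    // 1 = relocatable, 2 = executable, 3 = shared object, 4 = core,
    // 0 = not ELF or unrecognized.
    static unsigned elfFileType(const uint8_t *Buf, std::size_t Size) {
      if (Size < 18 || std::memcmp(Buf, "\177ELF", 4) != 0)
        return 0;
      bool BigEndian = Buf[5] == 2;                  // EI_DATA == ELFDATA2MSB
      uint16_t Type = BigEndian ? (uint16_t(Buf[16]) << 8) | Buf[17]
                                : (uint16_t(Buf[17]) << 8) | Buf[16];
      return Type <= 4 ? Type : 0;
    }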
@@ -180,7 +172,7 @@ SmallPtrSetImpl::SmallPtrSetImpl(const void **SmallStorage, CurArray = SmallArray; // Otherwise, allocate new heap space (unless we were the same size) } else { - CurArray = (const void**)malloc(sizeof(void*) * (that.CurArraySize+1)); + CurArray = (const void**)malloc(sizeof(void*) * that.CurArraySize); assert(CurArray && "Failed to allocate memory?"); } @@ -188,7 +180,7 @@ SmallPtrSetImpl::SmallPtrSetImpl(const void **SmallStorage, CurArraySize = that.CurArraySize; // Copy over the contents from the other set - memcpy(CurArray, that.CurArray, sizeof(void*)*(CurArraySize+1)); + memcpy(CurArray, that.CurArray, sizeof(void*)*CurArraySize); NumElements = that.NumElements; NumTombstones = that.NumTombstones; @@ -200,7 +192,7 @@ void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) { if (isSmall() && RHS.isSmall()) assert(CurArraySize == RHS.CurArraySize && "Cannot assign sets with different small sizes"); - + // If we're becoming small, prepare to insert into our stack space if (RHS.isSmall()) { if (!isSmall()) @@ -209,9 +201,9 @@ void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) { // Otherwise, allocate new heap space (unless we were the same size) } else if (CurArraySize != RHS.CurArraySize) { if (isSmall()) - CurArray = (const void**)malloc(sizeof(void*) * (RHS.CurArraySize+1)); + CurArray = (const void**)malloc(sizeof(void*) * RHS.CurArraySize); else - CurArray = (const void**)realloc(CurArray, sizeof(void*)*(RHS.CurArraySize+1)); + CurArray = (const void**)realloc(CurArray, sizeof(void*)*RHS.CurArraySize); assert(CurArray && "Failed to allocate memory?"); } @@ -219,7 +211,7 @@ void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) { CurArraySize = RHS.CurArraySize; // Copy over the contents from the other set - memcpy(CurArray, RHS.CurArray, sizeof(void*)*(CurArraySize+1)); + memcpy(CurArray, RHS.CurArray, sizeof(void*)*CurArraySize); NumElements = RHS.NumElements; NumTombstones = RHS.NumTombstones; diff --git a/lib/Support/Unix/PathV2.inc b/lib/Support/Unix/PathV2.inc index 11c5b05..7ef7dfd 100644 --- a/lib/Support/Unix/PathV2.inc +++ b/lib/Support/Unix/PathV2.inc @@ -432,9 +432,7 @@ rety_open_create: if (SavedErrno == errc::file_exists) goto retry_random_path; // If path prefix doesn't exist, try to create it. - if (SavedErrno == errc::no_such_file_or_directory && - !exists(path::parent_path(RandomPath)) && - !TriedToCreateParent) { + if (SavedErrno == errc::no_such_file_or_directory && !TriedToCreateParent) { TriedToCreateParent = true; StringRef p(RandomPath); SmallString<64> dir_to_create; diff --git a/lib/Support/Unix/Program.inc b/lib/Support/Unix/Program.inc index 117151c..aa03d48 100644 --- a/lib/Support/Unix/Program.inc +++ b/lib/Support/Unix/Program.inc @@ -32,6 +32,9 @@ #if HAVE_FCNTL_H #include <fcntl.h> #endif +#if HAVE_UNISTD_H +#include <unistd.h> +#endif #ifdef HAVE_POSIX_SPAWN #include <spawn.h> #if !defined(__APPLE__) @@ -409,4 +412,25 @@ error_code Program::ChangeStderrToBinary(){ return make_error_code(errc::success); } +bool llvm::sys::argumentsFitWithinSystemLimits(ArrayRef<const char*> Args) { + static long ArgMax = sysconf(_SC_ARG_MAX); + + // System says no practical limit. + if (ArgMax == -1) + return true; + + // Conservatively account for space required by environment variables. 
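A possible caller-side use of the new sys::argumentsFitWithinSystemLimits (the declaring header is assumed to be llvm/Support/Program.h; the response-file fallback mentioned in the comment is only an illustration, not part of this change):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/Program.h"

    using namespace llvm;

    // True if Args can be handed straight to the OS; a caller that gets false
    // back would need some shorter encoding, for instance a response file.
    static bool canPassDirectly(ArrayRef<const char *> Args) {
      return sys::argumentsFitWithinSystemLimits(Args);
    }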
+ ArgMax /= 2; + + size_t ArgLength = 0; + for (ArrayRef<const char*>::iterator I = Args.begin(), E = Args.end(); + I != E; ++I) { + ArgLength += strlen(*I) + 1; + if (ArgLength > size_t(ArgMax)) { + return false; + } + } + return true; +} + } diff --git a/lib/Support/Unix/Signals.inc b/lib/Support/Unix/Signals.inc index b8be623..7580fb7 100644 --- a/lib/Support/Unix/Signals.inc +++ b/lib/Support/Unix/Signals.inc @@ -27,10 +27,12 @@ #if HAVE_SYS_STAT_H #include <sys/stat.h> #endif -#if HAVE_DLFCN_H && HAVE_CXXABI_H && __GNUG__ -#include <dlfcn.h> +#if HAVE_CXXABI_H #include <cxxabi.h> #endif +#if HAVE_DLFCN_H +#include <dlfcn.h> +#endif #if HAVE_MACH_MACH_H #include <mach/mach.h> #endif @@ -290,9 +292,13 @@ void llvm::sys::PrintStackTrace(FILE *FD) { (int)(sizeof(void*) * 2) + 2, (unsigned long)StackTrace[i]); if (dlinfo.dli_sname != NULL) { - int res; fputc(' ', FD); +# if HAVE_CXXABI_H + int res; char* d = abi::__cxa_demangle(dlinfo.dli_sname, NULL, NULL, &res); +# else + char* d = NULL; +# endif if (d == NULL) fputs(dlinfo.dli_sname, FD); else fputs(d, FD); free(d); diff --git a/lib/Support/Unix/Watchdog.inc b/lib/Support/Unix/Watchdog.inc new file mode 100644 index 0000000..5d89c0e --- /dev/null +++ b/lib/Support/Unix/Watchdog.inc @@ -0,0 +1,32 @@ +//===--- Unix/Watchdog.inc - Unix Watchdog Implementation -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides the generic Unix implementation of the Watchdog class. +// +//===----------------------------------------------------------------------===// + +#ifdef HAVE_UNISTD_H +#include <unistd.h> +#endif + +namespace llvm { + namespace sys { + Watchdog::Watchdog(unsigned int seconds) { +#ifdef HAVE_UNISTD_H + alarm(seconds); +#endif + } + + Watchdog::~Watchdog() { +#ifdef HAVE_UNISTD_H + alarm(0); +#endif + } + } +} diff --git a/lib/Support/Watchdog.cpp b/lib/Support/Watchdog.cpp new file mode 100644 index 0000000..724aa00 --- /dev/null +++ b/lib/Support/Watchdog.cpp @@ -0,0 +1,23 @@ +//===---- Watchdog.cpp - Implement Watchdog ---------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Watchdog class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/Watchdog.h" +#include "llvm/Config/config.h" + +// Include the platform-specific parts of this class. +#ifdef LLVM_ON_UNIX +#include "Unix/Watchdog.inc" +#endif +#ifdef LLVM_ON_WIN32 +#include "Windows/Watchdog.inc" +#endif diff --git a/lib/Support/Windows/Program.inc b/lib/Support/Windows/Program.inc index 691d6d4..619ae5d 100644 --- a/lib/Support/Windows/Program.inc +++ b/lib/Support/Windows/Program.inc @@ -126,20 +126,58 @@ static bool ArgNeedsQuotes(const char *Str) { return Str[0] == '\0' || strpbrk(Str, "\t \"&\'()*<>\\`^|") != 0; } +/// CountPrecedingBackslashes - Returns the number of backslashes preceding Cur +/// in the C string Start. 
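The helpers below implement the CreateProcess quoting rule: backslashes are literal unless they end up directly before a double quote, in which case each one must be doubled and the quote itself escaped. A self-contained sketch of the same rule applied to a whole argument (separate from the in-place length/emit code in this patch):

    #include <string>

    // Quote one argument for a Windows command line.
    static std::string quoteArg(const std::string &Arg) {
      std::string Out = "\"";
      std::size_t PendingBackslashes = 0;
      for (std::string::size_type I = 0; I != Arg.size(); ++I) {
        char C = Arg[I];
        if (C == '\\') {
          ++PendingBackslashes;
          Out += C;
        } else if (C == '"') {
          // Double the backslashes that precede the quote, then escape it.
          Out.append(PendingBackslashes, '\\');
          Out += "\\\"";
          PendingBackslashes = 0;
        } else {
          PendingBackslashes = 0;
          Out += C;
        }
      }
      // Backslashes before the closing quote must be doubled as well.
      Out.append(PendingBackslashes, '\\');
      Out += '"';
      return Out;
    }

For example, the argument a\"b is emitted as "a\\\"b", while a trailing backslash run is doubled so the closing quote is not eaten.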
+static unsigned int CountPrecedingBackslashes(const char *Start, + const char *Cur) { + unsigned int Count = 0; + --Cur; + while (Cur >= Start && *Cur == '\\') { + ++Count; + --Cur; + } + return Count; +} + +/// EscapePrecedingEscapes - Append a backslash to Dst for every backslash +/// preceding Cur in the Start string. Assumes Dst has enough space. +static char *EscapePrecedingEscapes(char *Dst, const char *Start, + const char *Cur) { + unsigned PrecedingEscapes = CountPrecedingBackslashes(Start, Cur); + while (PrecedingEscapes > 0) { + *Dst++ = '\\'; + --PrecedingEscapes; + } + return Dst; +} /// ArgLenWithQuotes - Check whether argument needs to be quoted when calling /// CreateProcess and returns length of quoted arg with escaped quotes static unsigned int ArgLenWithQuotes(const char *Str) { - unsigned int len = ArgNeedsQuotes(Str) ? 2 : 0; + const char *Start = Str; + bool Quoted = ArgNeedsQuotes(Str); + unsigned int len = Quoted ? 2 : 0; while (*Str != '\0') { - if (*Str == '\"') - ++len; + if (*Str == '\"') { + // We need to add a backslash, but ensure that it isn't escaped. + unsigned PrecedingEscapes = CountPrecedingBackslashes(Start, Str); + len += PrecedingEscapes + 1; + } + // Note that we *don't* need to escape runs of backslashes that don't + // precede a double quote! See MSDN: + // http://msdn.microsoft.com/en-us/library/17w5ykft%28v=vs.85%29.aspx ++len; ++Str; } + if (Quoted) { + // Make sure the closing quote doesn't get escaped by a trailing backslash. + unsigned PrecedingEscapes = CountPrecedingBackslashes(Start, Str); + len += PrecedingEscapes + 1; + } + return len; } @@ -180,20 +218,27 @@ Program::Execute(const Path& path, for (unsigned i = 0; args[i]; i++) { const char *arg = args[i]; + const char *start = arg; bool needsQuoting = ArgNeedsQuotes(arg); if (needsQuoting) *p++ = '"'; while (*arg != '\0') { - if (*arg == '\"') + if (*arg == '\"') { + // Escape all preceding escapes (if any), and then escape the quote. + p = EscapePrecedingEscapes(p, start, arg); *p++ = '\\'; + } *p++ = *arg++; } - if (needsQuoting) + if (needsQuoting) { + // Make sure our quote doesn't get escaped by a trailing backslash. + p = EscapePrecedingEscapes(p, start, arg); *p++ = '"'; + } *p++ = ' '; } @@ -396,4 +441,20 @@ error_code Program::ChangeStderrToBinary(){ return make_error_code(errc::success); } +bool llvm::sys::argumentsFitWithinSystemLimits(ArrayRef<const char*> Args) { + // The documented max length of the command line passed to CreateProcess. + static const size_t MaxCommandStringLength = 32768; + size_t ArgLength = 0; + for (ArrayRef<const char*>::iterator I = Args.begin(), E = Args.end(); + I != E; ++I) { + // Account for the trailing space for every arg but the last one and the + // trailing NULL of the last argument. + ArgLength += ArgLenWithQuotes(*I) + 1; + if (ArgLength > MaxCommandStringLength) { + return false; + } + } + return true; +} + } diff --git a/lib/Support/Windows/Signals.inc b/lib/Support/Windows/Signals.inc index 3dd6660..b18b4d1 100644 --- a/lib/Support/Windows/Signals.inc +++ b/lib/Support/Windows/Signals.inc @@ -178,6 +178,19 @@ namespace llvm { //===----------------------------------------------------------------------===// #ifdef _MSC_VER +/// AvoidMessageBoxHook - Emulates hitting "retry" from an "abort, retry, +/// ignore" CRT debug report dialog. "retry" raises an exception which +/// ultimately triggers our stack dumper. 
+static int AvoidMessageBoxHook(int ReportType, char *Message, int *Return) { + // Set *Return to the retry code for the return value of _CrtDbgReport: + // http://msdn.microsoft.com/en-us/library/8hyw4sy7(v=vs.71).aspx + // This may also trigger just-in-time debugging via DebugBreak(). + if (Return) + *Return = 1; + // Don't call _CrtDbgReport. + return TRUE; +} + /// CRTReportHook - Function called on a CRT debugging event. static int CRTReportHook(int ReportType, char *Message, int *Return) { // Don't cause a DebugBreak() on return. @@ -238,6 +251,15 @@ static void RegisterHandler() { OldFilter = SetUnhandledExceptionFilter(LLVMUnhandledExceptionFilter); SetConsoleCtrlHandler(LLVMConsoleCtrlHandler, TRUE); +#ifdef _MSC_VER + const char *EnableMsgbox = getenv("LLVM_ENABLE_CRT_REPORT"); + if (!EnableMsgbox || strcmp("0", EnableMsgbox) == 0) { + // Setting a report hook overrides the default behavior of popping an "abort, + // retry, or ignore" dialog. + _CrtSetReportHook(AvoidMessageBoxHook); + } +#endif + // Environment variable to disable any kind of crash dialog. if (getenv("LLVM_DISABLE_CRASH_REPORT")) { #ifdef _MSC_VER diff --git a/lib/Support/Windows/Watchdog.inc b/lib/Support/Windows/Watchdog.inc new file mode 100644 index 0000000..fab2bdf --- /dev/null +++ b/lib/Support/Windows/Watchdog.inc @@ -0,0 +1,24 @@ +//===--- Windows/Watchdog.inc - Windows Watchdog Implementation -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides the generic Windows implementation of the Watchdog class. +// +//===----------------------------------------------------------------------===// + +// TODO: implement. +// Currently this is only used by PrettyStackTrace which is also unimplemented +// on Windows. Roughly, a Windows implementation would use CreateWaitableTimer +// and a second thread to run the TimerAPCProc. + +namespace llvm { + namespace sys { + Watchdog::Watchdog(unsigned int seconds) {} + Watchdog::~Watchdog() {} + } +} diff --git a/lib/Support/raw_ostream.cpp b/lib/Support/raw_ostream.cpp index da26a37..a433088 100644 --- a/lib/Support/raw_ostream.cpp +++ b/lib/Support/raw_ostream.cpp @@ -517,7 +517,7 @@ raw_fd_ostream::~raw_fd_ostream() { // has_error() and clear the error flag with clear_error() before // destructing raw_ostream objects which may have errors. if (has_error()) - report_fatal_error("IO failure on output stream."); + report_fatal_error("IO failure on output stream.", /*GenCrashDiag=*/false); } diff --git a/lib/TableGen/Error.cpp b/lib/TableGen/Error.cpp index ec84a72..928b120 100644 --- a/lib/TableGen/Error.cpp +++ b/lib/TableGen/Error.cpp @@ -20,9 +20,15 @@ namespace llvm { SourceMgr SrcMgr; +unsigned ErrorsPrinted = 0; static void PrintMessage(ArrayRef<SMLoc> Loc, SourceMgr::DiagKind Kind, const Twine &Msg) { + // Count the total number of errors printed. + // This is used to exit with an error code if there were any errors. 
+ if (Kind == SourceMgr::DK_Error) + ++ErrorsPrinted; + SMLoc NullLoc; if (Loc.empty()) Loc = NullLoc; diff --git a/lib/TableGen/Main.cpp b/lib/TableGen/Main.cpp index e1cd623..dc4167b 100644 --- a/lib/TableGen/Main.cpp +++ b/lib/TableGen/Main.cpp @@ -117,11 +117,14 @@ int TableGenMain(char *argv0, TableGenMainFn *MainFn) { if (MainFn(Out.os(), Records)) return 1; + if (ErrorsPrinted > 0) { + errs() << argv0 << ": " << ErrorsPrinted << " errors.\n"; + return 1; + } + // Declare success. Out.keep(); return 0; - - return 1; } } diff --git a/lib/TableGen/TGParser.cpp b/lib/TableGen/TGParser.cpp index c4b48fe..86ad2a6 100644 --- a/lib/TableGen/TGParser.cpp +++ b/lib/TableGen/TGParser.cpp @@ -1547,29 +1547,39 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) { /// ParseDagArgList - Parse the argument list for a dag literal expression. /// -/// ParseDagArgList ::= Value (':' VARNAME)? -/// ParseDagArgList ::= ParseDagArgList ',' Value (':' VARNAME)? +/// DagArg ::= Value (':' VARNAME)? +/// DagArg ::= VARNAME +/// DagArgList ::= DagArg +/// DagArgList ::= DagArgList ',' DagArg std::vector<std::pair<llvm::Init*, std::string> > TGParser::ParseDagArgList(Record *CurRec) { std::vector<std::pair<llvm::Init*, std::string> > Result; while (1) { - Init *Val = ParseValue(CurRec); - if (Val == 0) return std::vector<std::pair<llvm::Init*, std::string> >(); - - // If the variable name is present, add it. - std::string VarName; - if (Lex.getCode() == tgtok::colon) { - if (Lex.Lex() != tgtok::VarName) { // eat the ':' - TokError("expected variable name in dag literal"); + // DagArg ::= VARNAME + if (Lex.getCode() == tgtok::VarName) { + // A missing value is treated like '?'. + Result.push_back(std::make_pair(UnsetInit::get(), Lex.getCurStrVal())); + Lex.Lex(); + } else { + // DagArg ::= Value (':' VARNAME)? + Init *Val = ParseValue(CurRec); + if (Val == 0) return std::vector<std::pair<llvm::Init*, std::string> >(); - } - VarName = Lex.getCurStrVal(); - Lex.Lex(); // eat the VarName. - } - Result.push_back(std::make_pair(Val, VarName)); + // If the variable name is present, add it. + std::string VarName; + if (Lex.getCode() == tgtok::colon) { + if (Lex.Lex() != tgtok::VarName) { // eat the ':' + TokError("expected variable name in dag literal"); + return std::vector<std::pair<llvm::Init*, std::string> >(); + } + VarName = Lex.getCurStrVal(); + Lex.Lex(); // eat the VarName. + } + Result.push_back(std::make_pair(Val, VarName)); + } if (Lex.getCode() != tgtok::comma) break; Lex.Lex(); // eat the ',' } diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp index 572617c..daa7f1d 100644 --- a/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -367,9 +367,8 @@ AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // shoving a base register and an offset into the instruction then we may well // need to scavenge registers. We should either specifically add an // callee-save register for this purpose or allocate an extra spill slot. 
- bool BigStack = - (RS && MFI->estimateStackSize(MF) >= TII.estimateRSStackLimit(MF)) + MFI->estimateStackSize(MF) >= TII.estimateRSStackLimit(MF) || MFI->hasVarSizedObjects() // Access will be from X29: messes things up || (MFI->adjustsStack() && !hasReservedCallFrame(MF)); @@ -392,11 +391,13 @@ AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, if (ExtraReg != 0) { MF.getRegInfo().setPhysRegUsed(ExtraReg); } else { + assert(RS && "Expect register scavenger to be available"); + // Create a stack slot for scavenging purposes. PrologEpilogInserter // helpfully places it near either SP or FP for us to avoid // infinitely-regression during scavenging. const TargetRegisterClass *RC = &AArch64::GPR64RegClass; - RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), + RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false)); } diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 46b8221..468c561 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -88,6 +88,8 @@ public: bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth); + SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32, unsigned Op64); + SDNode *TrySelectToMoveImm(SDNode *N); SDNode *LowerToFPLitPool(SDNode *Node); SDNode *SelectToLitPool(SDNode *N); @@ -318,6 +320,38 @@ AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos, return true; } +SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8, + unsigned Op16,unsigned Op32, + unsigned Op64) { + // Mostly direct translation to the given operations, except that we preserve + // the AtomicOrdering for use later on. 
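The AtomicOrdering that SelectAtomic preserves (its body follows) is what later chooses between plain and acquire/release exclusives. A standalone sketch of that mapping, mirroring the getExclusiveOperation hunk further down, with mnemonic strings standing in for the opcode enums:

#include <cstdio>

enum Ordering { Monotonic, Acquire, Release, AcquireRelease, SequentiallyConsistent };

static void pickExclusives(Ordering Ord, const char *&Ldr, const char *&Str) {
  bool Acq = Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent;
  bool Rel = Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;
  Ldr = Acq ? "ldaxr" : "ldxr";   // load-acquire exclusive vs plain exclusive
  Str = Rel ? "stlxr" : "stxr";   // store-release exclusive vs plain exclusive
}

int main() {
  const char *L, *S;
  pickExclusives(SequentiallyConsistent, L, S);
  std::printf("%s / %s\n", L, S);   // prints: ldaxr / stlxr
  return 0;
}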
+ AtomicSDNode *AN = cast<AtomicSDNode>(Node); + EVT VT = AN->getMemoryVT(); + + unsigned Op; + if (VT == MVT::i8) + Op = Op8; + else if (VT == MVT::i16) + Op = Op16; + else if (VT == MVT::i32) + Op = Op32; + else if (VT == MVT::i64) + Op = Op64; + else + llvm_unreachable("Unexpected atomic operation"); + + SmallVector<SDValue, 4> Ops; + for (unsigned i = 1; i < AN->getNumOperands(); ++i) + Ops.push_back(AN->getOperand(i)); + + Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32)); + Ops.push_back(AN->getOperand(0)); // Chain moves to the end + + return CurDAG->SelectNodeTo(Node, Op, + AN->getValueType(0), MVT::Other, + &Ops[0], Ops.size()); +} + SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { // Dump information about the Node being selected DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n"); @@ -328,6 +362,78 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { } switch (Node->getOpcode()) { + case ISD::ATOMIC_LOAD_ADD: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_ADD_I8, + AArch64::ATOMIC_LOAD_ADD_I16, + AArch64::ATOMIC_LOAD_ADD_I32, + AArch64::ATOMIC_LOAD_ADD_I64); + case ISD::ATOMIC_LOAD_SUB: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_SUB_I8, + AArch64::ATOMIC_LOAD_SUB_I16, + AArch64::ATOMIC_LOAD_SUB_I32, + AArch64::ATOMIC_LOAD_SUB_I64); + case ISD::ATOMIC_LOAD_AND: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_AND_I8, + AArch64::ATOMIC_LOAD_AND_I16, + AArch64::ATOMIC_LOAD_AND_I32, + AArch64::ATOMIC_LOAD_AND_I64); + case ISD::ATOMIC_LOAD_OR: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_OR_I8, + AArch64::ATOMIC_LOAD_OR_I16, + AArch64::ATOMIC_LOAD_OR_I32, + AArch64::ATOMIC_LOAD_OR_I64); + case ISD::ATOMIC_LOAD_XOR: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_XOR_I8, + AArch64::ATOMIC_LOAD_XOR_I16, + AArch64::ATOMIC_LOAD_XOR_I32, + AArch64::ATOMIC_LOAD_XOR_I64); + case ISD::ATOMIC_LOAD_NAND: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_NAND_I8, + AArch64::ATOMIC_LOAD_NAND_I16, + AArch64::ATOMIC_LOAD_NAND_I32, + AArch64::ATOMIC_LOAD_NAND_I64); + case ISD::ATOMIC_LOAD_MIN: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_MIN_I8, + AArch64::ATOMIC_LOAD_MIN_I16, + AArch64::ATOMIC_LOAD_MIN_I32, + AArch64::ATOMIC_LOAD_MIN_I64); + case ISD::ATOMIC_LOAD_MAX: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_MAX_I8, + AArch64::ATOMIC_LOAD_MAX_I16, + AArch64::ATOMIC_LOAD_MAX_I32, + AArch64::ATOMIC_LOAD_MAX_I64); + case ISD::ATOMIC_LOAD_UMIN: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_UMIN_I8, + AArch64::ATOMIC_LOAD_UMIN_I16, + AArch64::ATOMIC_LOAD_UMIN_I32, + AArch64::ATOMIC_LOAD_UMIN_I64); + case ISD::ATOMIC_LOAD_UMAX: + return SelectAtomic(Node, + AArch64::ATOMIC_LOAD_UMAX_I8, + AArch64::ATOMIC_LOAD_UMAX_I16, + AArch64::ATOMIC_LOAD_UMAX_I32, + AArch64::ATOMIC_LOAD_UMAX_I64); + case ISD::ATOMIC_SWAP: + return SelectAtomic(Node, + AArch64::ATOMIC_SWAP_I8, + AArch64::ATOMIC_SWAP_I16, + AArch64::ATOMIC_SWAP_I32, + AArch64::ATOMIC_SWAP_I64); + case ISD::ATOMIC_CMP_SWAP: + return SelectAtomic(Node, + AArch64::ATOMIC_CMP_SWAP_I8, + AArch64::ATOMIC_CMP_SWAP_I16, + AArch64::ATOMIC_CMP_SWAP_I32, + AArch64::ATOMIC_CMP_SWAP_I64); case ISD::FrameIndex: { int FI = cast<FrameIndexSDNode>(Node)->getIndex(); EVT PtrTy = TLI.getPointerTy(); diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index e9f4497..786b1ba 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -59,13 +59,6 @@ 
AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) computeRegisterProperties(); - // Some atomic operations can be folded into load-acquire or store-release - // instructions on AArch64. It's marginally simpler to let LLVM expand - // everything out to a barrier and then recombine the (few) barriers we can. - setInsertFencesForAtomic(true); - setTargetDAGCombine(ISD::ATOMIC_FENCE); - setTargetDAGCombine(ISD::ATOMIC_STORE); - // We combine OR nodes for bitfield and NEON BSL operations. setTargetDAGCombine(ISD::OR); @@ -275,27 +268,34 @@ EVT AArch64TargetLowering::getSetCCResultType(EVT VT) const { return VT.changeVectorElementTypeToInteger(); } -static void getExclusiveOperation(unsigned Size, unsigned &ldrOpc, - unsigned &strOpc) { - switch (Size) { - default: llvm_unreachable("unsupported size for atomic binary op!"); - case 1: - ldrOpc = AArch64::LDXR_byte; - strOpc = AArch64::STXR_byte; - break; - case 2: - ldrOpc = AArch64::LDXR_hword; - strOpc = AArch64::STXR_hword; - break; - case 4: - ldrOpc = AArch64::LDXR_word; - strOpc = AArch64::STXR_word; - break; - case 8: - ldrOpc = AArch64::LDXR_dword; - strOpc = AArch64::STXR_dword; - break; - } +static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord, + unsigned &LdrOpc, + unsigned &StrOpc) { + static unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword, + AArch64::LDXR_word, AArch64::LDXR_dword}; + static unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword, + AArch64::LDAXR_word, AArch64::LDAXR_dword}; + static unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword, + AArch64::STXR_word, AArch64::STXR_dword}; + static unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword, + AArch64::STLXR_word, AArch64::STLXR_dword}; + + unsigned *LoadOps, *StoreOps; + if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent) + LoadOps = LoadAcqs; + else + LoadOps = LoadBares; + + if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent) + StoreOps = StoreRels; + else + StoreOps = StoreBares; + + assert(isPowerOf2_32(Size) && Size <= 8 && + "unsupported size for atomic binary op!"); + + LdrOpc = LoadOps[Log2_32(Size)]; + StrOpc = StoreOps[Log2_32(Size)]; } MachineBasicBlock * @@ -313,12 +313,13 @@ AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, unsigned dest = MI->getOperand(0).getReg(); unsigned ptr = MI->getOperand(1).getReg(); unsigned incr = MI->getOperand(2).getReg(); + AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); DebugLoc dl = MI->getDebugLoc(); MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); unsigned ldrOpc, strOpc; - getExclusiveOperation(Size, ldrOpc, strOpc); + getExclusiveOperation(Size, Ord, ldrOpc, strOpc); MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); @@ -397,6 +398,8 @@ AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI, unsigned dest = MI->getOperand(0).getReg(); unsigned ptr = MI->getOperand(1).getReg(); unsigned incr = MI->getOperand(2).getReg(); + AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); + unsigned oldval = dest; DebugLoc dl = MI->getDebugLoc(); @@ -411,7 +414,7 @@ AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI, } unsigned ldrOpc, strOpc; - getExclusiveOperation(Size, ldrOpc, strOpc); + getExclusiveOperation(Size, Ord, ldrOpc, strOpc); MachineBasicBlock *loopMBB = 
MF->CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); @@ -479,6 +482,7 @@ AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI, unsigned ptr = MI->getOperand(1).getReg(); unsigned oldval = MI->getOperand(2).getReg(); unsigned newval = MI->getOperand(3).getReg(); + AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm()); const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); DebugLoc dl = MI->getDebugLoc(); @@ -487,7 +491,7 @@ AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI, TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass; unsigned ldrOpc, strOpc; - getExclusiveOperation(Size, ldrOpc, strOpc); + getExclusiveOperation(Size, Ord, ldrOpc, strOpc); MachineFunction *MF = BB->getParent(); const BasicBlock *LLVM_BB = BB->getBasicBlock(); @@ -2377,78 +2381,6 @@ static SDValue PerformANDCombine(SDNode *N, DAG.getConstant(LSB + Width - 1, MVT::i64)); } -static SDValue PerformATOMIC_FENCECombine(SDNode *FenceNode, - TargetLowering::DAGCombinerInfo &DCI) { - // An atomic operation followed by an acquiring atomic fence can be reduced to - // an acquiring load. The atomic operation provides a convenient pointer to - // load from. If the original operation was a load anyway we can actually - // combine the two operations into an acquiring load. - SelectionDAG &DAG = DCI.DAG; - SDValue AtomicOp = FenceNode->getOperand(0); - AtomicSDNode *AtomicNode = dyn_cast<AtomicSDNode>(AtomicOp); - - // A fence on its own can't be optimised - if (!AtomicNode) - return SDValue(); - - AtomicOrdering FenceOrder - = static_cast<AtomicOrdering>(FenceNode->getConstantOperandVal(1)); - SynchronizationScope FenceScope - = static_cast<SynchronizationScope>(FenceNode->getConstantOperandVal(2)); - - if (FenceOrder != Acquire || FenceScope != AtomicNode->getSynchScope()) - return SDValue(); - - // If the original operation was an ATOMIC_LOAD then we'll be replacing it, so - // the chain we use should be its input, otherwise we'll put our store after - // it so we use its output chain. - SDValue Chain = AtomicNode->getOpcode() == ISD::ATOMIC_LOAD ? - AtomicNode->getChain() : AtomicOp; - - // We have an acquire fence with a handy atomic operation nearby, we can - // convert the fence into a load-acquire, discarding the result. - DebugLoc DL = FenceNode->getDebugLoc(); - SDValue Op = DAG.getAtomic(ISD::ATOMIC_LOAD, DL, AtomicNode->getMemoryVT(), - AtomicNode->getValueType(0), - Chain, // Chain - AtomicOp.getOperand(1), // Pointer - AtomicNode->getMemOperand(), Acquire, - FenceScope); - - if (AtomicNode->getOpcode() == ISD::ATOMIC_LOAD) - DAG.ReplaceAllUsesWith(AtomicNode, Op.getNode()); - - return Op.getValue(1); -} - -static SDValue PerformATOMIC_STORECombine(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI) { - // A releasing atomic fence followed by an atomic store can be combined into a - // single store operation. 
- SelectionDAG &DAG = DCI.DAG; - AtomicSDNode *AtomicNode = cast<AtomicSDNode>(N); - SDValue FenceOp = AtomicNode->getOperand(0); - - if (FenceOp.getOpcode() != ISD::ATOMIC_FENCE) - return SDValue(); - - AtomicOrdering FenceOrder - = static_cast<AtomicOrdering>(FenceOp->getConstantOperandVal(1)); - SynchronizationScope FenceScope - = static_cast<SynchronizationScope>(FenceOp->getConstantOperandVal(2)); - - if (FenceOrder != Release || FenceScope != AtomicNode->getSynchScope()) - return SDValue(); - - DebugLoc DL = AtomicNode->getDebugLoc(); - return DAG.getAtomic(ISD::ATOMIC_STORE, DL, AtomicNode->getMemoryVT(), - FenceOp.getOperand(0), // Chain - AtomicNode->getOperand(1), // Pointer - AtomicNode->getOperand(2), // Value - AtomicNode->getMemOperand(), Release, - FenceScope); -} - /// For a true bitfield insert, the bits getting into that contiguous mask /// should come from the low part of an existing value: they must be formed from /// a compatible SHL operation (unless they're already low). This function @@ -2804,8 +2736,6 @@ AArch64TargetLowering::PerformDAGCombine(SDNode *N, switch (N->getOpcode()) { default: break; case ISD::AND: return PerformANDCombine(N, DCI); - case ISD::ATOMIC_FENCE: return PerformATOMIC_FENCECombine(N, DCI); - case ISD::ATOMIC_STORE: return PerformATOMIC_STORECombine(N, DCI); case ISD::OR: return PerformORCombine(N, DCI, Subtarget); case ISD::SRA: return PerformSRACombine(N, DCI); } diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td index cb93471..9dd122f 100644 --- a/lib/Target/AArch64/AArch64InstrFormats.td +++ b/lib/Target/AArch64/AArch64InstrFormats.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // This file describes AArch64 instruction formats, down to the level of the // instruction's overall class. -// ===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp index 7b93463..cf3a2c3 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -618,11 +618,11 @@ void llvm::emitRegUpdate(MachineBasicBlock &MBB, int64_t NumBytes, MachineInstr::MIFlag MIFlags) { if (NumBytes == 0 && DstReg == SrcReg) return; - else if (abs(NumBytes) & ~0xffffff) { + else if (abs64(NumBytes) & ~0xffffff) { // Generically, we have to materialize the offset into a temporary register // and subtract it. There are a couple of ways this could be done, for now // we'll use a movz/movk or movn/movk sequence. 
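The abs() to abs64() swap in this emitRegUpdate hunk guards against C's int-returning abs() silently truncating a 64-bit frame offset. A standalone demonstration of the failure mode (standard C++, not LLVM code; abs64 here is a hand-rolled stand-in):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static uint64_t abs64(int64_t X) {              // stand-in for LLVM's abs64()
  return X < 0 ? -static_cast<uint64_t>(X) : static_cast<uint64_t>(X);
}

int main() {
  int64_t NumBytes = -(int64_t(1) << 33);       // an offset that does not fit in int
  // The truncation is made explicit with a cast; typically prints 0 here.
  std::printf("abs(int) = %d\n", std::abs(static_cast<int>(NumBytes)));
  std::printf("abs64    = %llu\n",
              static_cast<unsigned long long>(abs64(NumBytes)));
  return 0;
}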
- uint64_t Bits = static_cast<uint64_t>(abs(NumBytes)); + uint64_t Bits = static_cast<uint64_t>(abs64(NumBytes)); BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVZxii), ScratchReg) .addImm(0xffff & Bits).addImm(0) .setMIFlags(MIFlags); @@ -673,7 +673,7 @@ void llvm::emitRegUpdate(MachineBasicBlock &MBB, } else { LowOp = AArch64::SUBxxi_lsl0_s; HighOp = AArch64::SUBxxi_lsl12_s; - NumBytes = abs(NumBytes); + NumBytes = abs64(NumBytes); } // If we're here, at the very least a move needs to be produced, which just diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td index 319ec97..e3b39ce 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.td +++ b/lib/Target/AArch64/AArch64InstrInfo.td @@ -159,53 +159,55 @@ let Defs = [XSP], Uses = [XSP] in { // Atomic operation pseudo-instructions //===----------------------------------------------------------------------===// -let usesCustomInserter = 1 in { -multiclass AtomicSizes<string opname> { - def _I8 : PseudoInst<(outs GPR32:$dst), (ins GPR64:$ptr, GPR32:$incr), - [(set GPR32:$dst, (!cast<SDNode>(opname # "_8") GPR64:$ptr, GPR32:$incr))]>; - def _I16 : PseudoInst<(outs GPR32:$dst), (ins GPR64:$ptr, GPR32:$incr), - [(set GPR32:$dst, (!cast<SDNode>(opname # "_16") GPR64:$ptr, GPR32:$incr))]>; - def _I32 : PseudoInst<(outs GPR32:$dst), (ins GPR64:$ptr, GPR32:$incr), - [(set GPR32:$dst, (!cast<SDNode>(opname # "_32") GPR64:$ptr, GPR32:$incr))]>; - def _I64 : PseudoInst<(outs GPR64:$dst), (ins GPR64:$ptr, GPR64:$incr), - [(set GPR64:$dst, (!cast<SDNode>(opname # "_64") GPR64:$ptr, GPR64:$incr))]>; -} -} - -defm ATOMIC_LOAD_ADD : AtomicSizes<"atomic_load_add">; -defm ATOMIC_LOAD_SUB : AtomicSizes<"atomic_load_sub">; -defm ATOMIC_LOAD_AND : AtomicSizes<"atomic_load_and">; -defm ATOMIC_LOAD_OR : AtomicSizes<"atomic_load_or">; -defm ATOMIC_LOAD_XOR : AtomicSizes<"atomic_load_xor">; -defm ATOMIC_LOAD_NAND : AtomicSizes<"atomic_load_nand">; -defm ATOMIC_SWAP : AtomicSizes<"atomic_swap">; +// These get selected from C++ code as a pretty much direct translation from the +// generic DAG nodes. The one exception is the AtomicOrdering is added as an +// operand so that the eventual lowering can make use of it and choose +// acquire/release operations when required. 
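These pseudo-instructions are expanded by the custom inserter into a load-exclusive / compute / store-exclusive retry loop. As a rough analogy only, the retry structure resembles a compare-and-swap loop; the sketch below is standard C++ and not the emitted machine code, and fetchAddAcqRel is an illustrative name.

#include <atomic>

int fetchAddAcqRel(std::atomic<int> &A, int Incr) {
  int Old = A.load(std::memory_order_relaxed);
  // Retry until the update lands, like the store-exclusive failure branch.
  while (!A.compare_exchange_weak(Old, Old + Incr,
                                  std::memory_order_acq_rel,
                                  std::memory_order_relaxed)) {
    // Old is refreshed on failure; loop and try again.
  }
  return Old;
}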
+ +let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1 in { +multiclass AtomicSizes { + def _I8 : PseudoInst<(outs GPR32:$dst), + (ins GPR64xsp:$ptr, GPR32:$incr, i32imm:$ordering), []>; + def _I16 : PseudoInst<(outs GPR32:$dst), + (ins GPR64xsp:$ptr, GPR32:$incr, i32imm:$ordering), []>; + def _I32 : PseudoInst<(outs GPR32:$dst), + (ins GPR64xsp:$ptr, GPR32:$incr, i32imm:$ordering), []>; + def _I64 : PseudoInst<(outs GPR64:$dst), + (ins GPR64xsp:$ptr, GPR64:$incr, i32imm:$ordering), []>; +} +} + +defm ATOMIC_LOAD_ADD : AtomicSizes; +defm ATOMIC_LOAD_SUB : AtomicSizes; +defm ATOMIC_LOAD_AND : AtomicSizes; +defm ATOMIC_LOAD_OR : AtomicSizes; +defm ATOMIC_LOAD_XOR : AtomicSizes; +defm ATOMIC_LOAD_NAND : AtomicSizes; +defm ATOMIC_SWAP : AtomicSizes; let Defs = [NZCV] in { // These operations need a CMP to calculate the correct value - defm ATOMIC_LOAD_MIN : AtomicSizes<"atomic_load_min">; - defm ATOMIC_LOAD_MAX : AtomicSizes<"atomic_load_max">; - defm ATOMIC_LOAD_UMIN : AtomicSizes<"atomic_load_umin">; - defm ATOMIC_LOAD_UMAX : AtomicSizes<"atomic_load_umax">; -} - -let usesCustomInserter = 1, Defs = [NZCV] in { -def ATOMIC_CMP_SWAP_I8 - : PseudoInst<(outs GPR32:$dst), (ins GPR64:$ptr, GPR32:$old, GPR32:$new), - [(set GPR32:$dst, - (atomic_cmp_swap_8 GPR64:$ptr, GPR32:$old, GPR32:$new))]>; -def ATOMIC_CMP_SWAP_I16 - : PseudoInst<(outs GPR32:$dst), (ins GPR64:$ptr, GPR32:$old, GPR32:$new), - [(set GPR32:$dst, - (atomic_cmp_swap_16 GPR64:$ptr, GPR32:$old, GPR32:$new))]>; -def ATOMIC_CMP_SWAP_I32 - : PseudoInst<(outs GPR32:$dst), (ins GPR64:$ptr, GPR32:$old, GPR32:$new), - [(set GPR32:$dst, - (atomic_cmp_swap_32 GPR64:$ptr, GPR32:$old, GPR32:$new))]>; -def ATOMIC_CMP_SWAP_I64 - : PseudoInst<(outs GPR64:$dst), (ins GPR64:$ptr, GPR64:$old, GPR64:$new), - [(set GPR64:$dst, - (atomic_cmp_swap_64 GPR64:$ptr, GPR64:$old, GPR64:$new))]>; + defm ATOMIC_LOAD_MIN : AtomicSizes; + defm ATOMIC_LOAD_MAX : AtomicSizes; + defm ATOMIC_LOAD_UMIN : AtomicSizes; + defm ATOMIC_LOAD_UMAX : AtomicSizes; } +class AtomicCmpSwap<RegisterClass GPRData> + : PseudoInst<(outs GPRData:$dst), + (ins GPR64xsp:$ptr, GPRData:$old, GPRData:$new, + i32imm:$ordering), []> { + let usesCustomInserter = 1; + let hasCtrlDep = 1; + let mayLoad = 1; + let mayStore = 1; + let Defs = [NZCV]; +} + +def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<GPR32>; +def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<GPR32>; +def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<GPR32>; +def ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap<GPR64>; + //===----------------------------------------------------------------------===// // Add-subtract (extended register) instructions //===----------------------------------------------------------------------===// @@ -264,31 +266,39 @@ def LSL_extoperand : Operand<i64> { class extend_types { dag uxtb; dag uxth; dag uxtw; dag uxtx; dag sxtb; dag sxth; dag sxtw; dag sxtx; + ValueType ty; + RegisterClass GPR; } def extends_to_i64 : extend_types { - let uxtb = (and (anyext GPR32:$Rm), 255); - let uxth = (and (anyext GPR32:$Rm), 65535); - let uxtw = (zext GPR32:$Rm); - let uxtx = (i64 GPR64:$Rm); + let uxtb = (and (anyext i32:$Rm), 255); + let uxth = (and (anyext i32:$Rm), 65535); + let uxtw = (zext i32:$Rm); + let uxtx = (i64 $Rm); + + let sxtb = (sext_inreg (anyext i32:$Rm), i8); + let sxth = (sext_inreg (anyext i32:$Rm), i16); + let sxtw = (sext i32:$Rm); + let sxtx = (i64 $Rm); - let sxtb = (sext_inreg (anyext GPR32:$Rm), i8); - let sxth = (sext_inreg (anyext GPR32:$Rm), i16); - let sxtw = (sext GPR32:$Rm); - let sxtx = (i64 GPR64:$Rm); + 
let ty = i64; + let GPR = GPR64xsp; } def extends_to_i32 : extend_types { - let uxtb = (and GPR32:$Rm, 255); - let uxth = (and GPR32:$Rm, 65535); - let uxtw = (i32 GPR32:$Rm); - let uxtx = (i32 GPR32:$Rm); + let uxtb = (and i32:$Rm, 255); + let uxth = (and i32:$Rm, 65535); + let uxtw = (i32 i32:$Rm); + let uxtx = (i32 i32:$Rm); + + let sxtb = (sext_inreg i32:$Rm, i8); + let sxth = (sext_inreg i32:$Rm, i16); + let sxtw = (i32 i32:$Rm); + let sxtx = (i32 i32:$Rm); - let sxtb = (sext_inreg GPR32:$Rm, i8); - let sxth = (sext_inreg GPR32:$Rm, i16); - let sxtw = (i32 GPR32:$Rm); - let sxtx = (i32 GPR32:$Rm); + let ty = i32; + let GPR = GPR32wsp; } // Now, six of the extensions supported are easy and uniform: if the source size @@ -303,44 +313,38 @@ def extends_to_i32 : extend_types { // would probably be the best option). multiclass addsub_exts<bit sf, bit op, bit S, string asmop, SDPatternOperator opfrag, - dag outs, extend_types exts, RegisterClass GPRsp> { + dag outs, extend_types exts> { def w_uxtb : A64I_addsubext<sf, op, S, 0b00, 0b000, - outs, - (ins GPRsp:$Rn, GPR32:$Rm, UXTB_operand:$Imm3), - !strconcat(asmop, "$Rn, $Rm, $Imm3"), - [(opfrag GPRsp:$Rn, (shl exts.uxtb, UXTB_operand:$Imm3))], - NoItinerary>; + outs, (ins exts.GPR:$Rn, GPR32:$Rm, UXTB_operand:$Imm3), + !strconcat(asmop, "$Rn, $Rm, $Imm3"), + [(opfrag exts.ty:$Rn, (shl exts.uxtb, UXTB_operand:$Imm3))], + NoItinerary>; def w_uxth : A64I_addsubext<sf, op, S, 0b00, 0b001, - outs, - (ins GPRsp:$Rn, GPR32:$Rm, UXTH_operand:$Imm3), - !strconcat(asmop, "$Rn, $Rm, $Imm3"), - [(opfrag GPRsp:$Rn, (shl exts.uxth, UXTH_operand:$Imm3))], - NoItinerary>; + outs, (ins exts.GPR:$Rn, GPR32:$Rm, UXTH_operand:$Imm3), + !strconcat(asmop, "$Rn, $Rm, $Imm3"), + [(opfrag exts.ty:$Rn, (shl exts.uxth, UXTH_operand:$Imm3))], + NoItinerary>; def w_uxtw : A64I_addsubext<sf, op, S, 0b00, 0b010, - outs, - (ins GPRsp:$Rn, GPR32:$Rm, UXTW_operand:$Imm3), - !strconcat(asmop, "$Rn, $Rm, $Imm3"), - [(opfrag GPRsp:$Rn, (shl exts.uxtw, UXTW_operand:$Imm3))], - NoItinerary>; + outs, (ins exts.GPR:$Rn, GPR32:$Rm, UXTW_operand:$Imm3), + !strconcat(asmop, "$Rn, $Rm, $Imm3"), + [(opfrag exts.ty:$Rn, (shl exts.uxtw, UXTW_operand:$Imm3))], + NoItinerary>; def w_sxtb : A64I_addsubext<sf, op, S, 0b00, 0b100, - outs, - (ins GPRsp:$Rn, GPR32:$Rm, SXTB_operand:$Imm3), - !strconcat(asmop, "$Rn, $Rm, $Imm3"), - [(opfrag GPRsp:$Rn, (shl exts.sxtb, SXTB_operand:$Imm3))], - NoItinerary>; + outs, (ins exts.GPR:$Rn, GPR32:$Rm, SXTB_operand:$Imm3), + !strconcat(asmop, "$Rn, $Rm, $Imm3"), + [(opfrag exts.ty:$Rn, (shl exts.sxtb, SXTB_operand:$Imm3))], + NoItinerary>; def w_sxth : A64I_addsubext<sf, op, S, 0b00, 0b101, - outs, - (ins GPRsp:$Rn, GPR32:$Rm, SXTH_operand:$Imm3), - !strconcat(asmop, "$Rn, $Rm, $Imm3"), - [(opfrag GPRsp:$Rn, (shl exts.sxth, SXTH_operand:$Imm3))], - NoItinerary>; + outs, (ins exts.GPR:$Rn, GPR32:$Rm, SXTH_operand:$Imm3), + !strconcat(asmop, "$Rn, $Rm, $Imm3"), + [(opfrag exts.ty:$Rn, (shl exts.sxth, SXTH_operand:$Imm3))], + NoItinerary>; def w_sxtw : A64I_addsubext<sf, op, S, 0b00, 0b110, - outs, - (ins GPRsp:$Rn, GPR32:$Rm, SXTW_operand:$Imm3), - !strconcat(asmop, "$Rn, $Rm, $Imm3"), - [(opfrag GPRsp:$Rn, (shl exts.sxtw, SXTW_operand:$Imm3))], - NoItinerary>; + outs, (ins exts.GPR:$Rn, GPR32:$Rm, SXTW_operand:$Imm3), + !strconcat(asmop, "$Rn, $Rm, $Imm3"), + [(opfrag exts.ty:$Rn, (shl exts.sxtw, SXTW_operand:$Imm3))], + NoItinerary>; } // These two could be merge in with the above, but their patterns aren't really @@ -351,7 +355,7 @@ multiclass 
addsub_xxtx<bit op, bit S, string asmop, SDPatternOperator opfrag, outs, (ins GPR64xsp:$Rn, GPR64:$Rm, UXTX_operand:$Imm3), !strconcat(asmop, "$Rn, $Rm, $Imm3"), - [(opfrag GPR64xsp:$Rn, (shl GPR64:$Rm, UXTX_operand:$Imm3))], + [(opfrag i64:$Rn, (shl i64:$Rm, UXTX_operand:$Imm3))], NoItinerary>; def x_sxtx : A64I_addsubext<0b1, op, S, 0b00, 0b111, @@ -384,53 +388,53 @@ class SetNZCV<SDPatternOperator op> : PatFrag<(ops node:$lhs, node:$rhs), (set NZCV, (op node:$lhs, node:$rhs))>; defm ADDxx :addsub_exts<0b1, 0b0, 0b0, "add\t$Rd, ", SetRD<GPR64xsp, add>, - (outs GPR64xsp:$Rd), extends_to_i64, GPR64xsp>, + (outs GPR64xsp:$Rd), extends_to_i64>, addsub_xxtx< 0b0, 0b0, "add\t$Rd, ", SetRD<GPR64xsp, add>, (outs GPR64xsp:$Rd)>; defm ADDww :addsub_exts<0b0, 0b0, 0b0, "add\t$Rd, ", SetRD<GPR32wsp, add>, - (outs GPR32wsp:$Rd), extends_to_i32, GPR32wsp>, + (outs GPR32wsp:$Rd), extends_to_i32>, addsub_wxtx< 0b0, 0b0, "add\t$Rd, ", (outs GPR32wsp:$Rd)>; defm SUBxx :addsub_exts<0b1, 0b1, 0b0, "sub\t$Rd, ", SetRD<GPR64xsp, sub>, - (outs GPR64xsp:$Rd), extends_to_i64, GPR64xsp>, + (outs GPR64xsp:$Rd), extends_to_i64>, addsub_xxtx< 0b1, 0b0, "sub\t$Rd, ", SetRD<GPR64xsp, sub>, (outs GPR64xsp:$Rd)>; defm SUBww :addsub_exts<0b0, 0b1, 0b0, "sub\t$Rd, ", SetRD<GPR32wsp, sub>, - (outs GPR32wsp:$Rd), extends_to_i32, GPR32wsp>, + (outs GPR32wsp:$Rd), extends_to_i32>, addsub_wxtx< 0b1, 0b0, "sub\t$Rd, ", (outs GPR32wsp:$Rd)>; let Defs = [NZCV] in { defm ADDSxx :addsub_exts<0b1, 0b0, 0b1, "adds\t$Rd, ", SetRD<GPR64, addc>, - (outs GPR64:$Rd), extends_to_i64, GPR64xsp>, + (outs GPR64:$Rd), extends_to_i64>, addsub_xxtx< 0b0, 0b1, "adds\t$Rd, ", SetRD<GPR64, addc>, (outs GPR64:$Rd)>; defm ADDSww :addsub_exts<0b0, 0b0, 0b1, "adds\t$Rd, ", SetRD<GPR32, addc>, - (outs GPR32:$Rd), extends_to_i32, GPR32wsp>, + (outs GPR32:$Rd), extends_to_i32>, addsub_wxtx< 0b0, 0b1, "adds\t$Rd, ", (outs GPR32:$Rd)>; defm SUBSxx :addsub_exts<0b1, 0b1, 0b1, "subs\t$Rd, ", SetRD<GPR64, subc>, - (outs GPR64:$Rd), extends_to_i64, GPR64xsp>, + (outs GPR64:$Rd), extends_to_i64>, addsub_xxtx< 0b1, 0b1, "subs\t$Rd, ", SetRD<GPR64, subc>, (outs GPR64:$Rd)>; defm SUBSww :addsub_exts<0b0, 0b1, 0b1, "subs\t$Rd, ", SetRD<GPR32, subc>, - (outs GPR32:$Rd), extends_to_i32, GPR32wsp>, + (outs GPR32:$Rd), extends_to_i32>, addsub_wxtx< 0b1, 0b1, "subs\t$Rd, ", (outs GPR32:$Rd)>; let Rd = 0b11111, isCompare = 1 in { defm CMNx : addsub_exts<0b1, 0b0, 0b1, "cmn\t", SetNZCV<A64cmn>, - (outs), extends_to_i64, GPR64xsp>, + (outs), extends_to_i64>, addsub_xxtx< 0b0, 0b1, "cmn\t", SetNZCV<A64cmn>, (outs)>; defm CMNw : addsub_exts<0b0, 0b0, 0b1, "cmn\t", SetNZCV<A64cmn>, - (outs), extends_to_i32, GPR32wsp>, + (outs), extends_to_i32>, addsub_wxtx< 0b0, 0b1, "cmn\t", (outs)>; defm CMPx : addsub_exts<0b1, 0b1, 0b1, "cmp\t", SetNZCV<A64cmp>, - (outs), extends_to_i64, GPR64xsp>, + (outs), extends_to_i64>, addsub_xxtx< 0b1, 0b1, "cmp\t", SetNZCV<A64cmp>, (outs)>; defm CMPw : addsub_exts<0b0, 0b1, 0b1, "cmp\t", SetNZCV<A64cmp>, - (outs), extends_to_i32, GPR32wsp>, + (outs), extends_to_i32>, addsub_wxtx< 0b1, 0b1, "cmp\t", (outs)>; } } @@ -439,31 +443,31 @@ defm CMPw : addsub_exts<0b0, 0b1, 0b1, "cmp\t", SetNZCV<A64cmp>, // created for uxtx/sxtx since they're non-uniform and it's expected that // add/sub (shifted register) will handle those cases anyway. 
multiclass addsubext_noshift_patterns<string prefix, SDPatternOperator nodeop, - RegisterClass GPRsp, extend_types exts> { - def : Pat<(nodeop GPRsp:$Rn, exts.uxtb), - (!cast<Instruction>(prefix # "w_uxtb") GPRsp:$Rn, GPR32:$Rm, 0)>; - def : Pat<(nodeop GPRsp:$Rn, exts.uxth), - (!cast<Instruction>(prefix # "w_uxth") GPRsp:$Rn, GPR32:$Rm, 0)>; - def : Pat<(nodeop GPRsp:$Rn, exts.uxtw), - (!cast<Instruction>(prefix # "w_uxtw") GPRsp:$Rn, GPR32:$Rm, 0)>; - - def : Pat<(nodeop GPRsp:$Rn, exts.sxtb), - (!cast<Instruction>(prefix # "w_sxtb") GPRsp:$Rn, GPR32:$Rm, 0)>; - def : Pat<(nodeop GPRsp:$Rn, exts.sxth), - (!cast<Instruction>(prefix # "w_sxth") GPRsp:$Rn, GPR32:$Rm, 0)>; - def : Pat<(nodeop GPRsp:$Rn, exts.sxtw), - (!cast<Instruction>(prefix # "w_sxtw") GPRsp:$Rn, GPR32:$Rm, 0)>; -} - -defm : addsubext_noshift_patterns<"ADDxx", add, GPR64xsp, extends_to_i64>; -defm : addsubext_noshift_patterns<"ADDww", add, GPR32wsp, extends_to_i32>; -defm : addsubext_noshift_patterns<"SUBxx", sub, GPR64xsp, extends_to_i64>; -defm : addsubext_noshift_patterns<"SUBww", sub, GPR32wsp, extends_to_i32>; - -defm : addsubext_noshift_patterns<"CMNx", A64cmn, GPR64xsp, extends_to_i64>; -defm : addsubext_noshift_patterns<"CMNw", A64cmn, GPR32wsp, extends_to_i32>; -defm : addsubext_noshift_patterns<"CMPx", A64cmp, GPR64xsp, extends_to_i64>; -defm : addsubext_noshift_patterns<"CMPw", A64cmp, GPR32wsp, extends_to_i32>; + extend_types exts> { + def : Pat<(nodeop exts.ty:$Rn, exts.uxtb), + (!cast<Instruction>(prefix # "w_uxtb") $Rn, $Rm, 0)>; + def : Pat<(nodeop exts.ty:$Rn, exts.uxth), + (!cast<Instruction>(prefix # "w_uxth") $Rn, $Rm, 0)>; + def : Pat<(nodeop exts.ty:$Rn, exts.uxtw), + (!cast<Instruction>(prefix # "w_uxtw") $Rn, $Rm, 0)>; + + def : Pat<(nodeop exts.ty:$Rn, exts.sxtb), + (!cast<Instruction>(prefix # "w_sxtb") $Rn, $Rm, 0)>; + def : Pat<(nodeop exts.ty:$Rn, exts.sxth), + (!cast<Instruction>(prefix # "w_sxth") $Rn, $Rm, 0)>; + def : Pat<(nodeop exts.ty:$Rn, exts.sxtw), + (!cast<Instruction>(prefix # "w_sxtw") $Rn, $Rm, 0)>; +} + +defm : addsubext_noshift_patterns<"ADDxx", add, extends_to_i64>; +defm : addsubext_noshift_patterns<"ADDww", add, extends_to_i32>; +defm : addsubext_noshift_patterns<"SUBxx", sub, extends_to_i64>; +defm : addsubext_noshift_patterns<"SUBww", sub, extends_to_i32>; + +defm : addsubext_noshift_patterns<"CMNx", A64cmn, extends_to_i64>; +defm : addsubext_noshift_patterns<"CMNw", A64cmn, extends_to_i32>; +defm : addsubext_noshift_patterns<"CMPx", A64cmp, extends_to_i64>; +defm : addsubext_noshift_patterns<"CMPw", A64cmp, extends_to_i32>; // An extend of "lsl #imm" is valid if and only if one of Rn and Rd is // sp/wsp. 
It is synonymous with uxtx/uxtw depending on the size of the @@ -614,14 +618,13 @@ multiclass addsubimm_varieties<string prefix, bit sf, bit op, bits<2> shift, string asmop, string cmpasmop, Operand imm_operand, Operand cmp_imm_operand, RegisterClass GPR, RegisterClass GPRsp, - AArch64Reg ZR> { + AArch64Reg ZR, ValueType Ty> { // All registers for non-S variants allow SP def _s : A64I_addsubimm<sf, op, 0b0, shift, (outs GPRsp:$Rd), (ins GPRsp:$Rn, imm_operand:$Imm12), !strconcat(asmop, "\t$Rd, $Rn, $Imm12"), - [(set GPRsp:$Rd, - (add GPRsp:$Rn, imm_operand:$Imm12))], + [(set Ty:$Rd, (add Ty:$Rn, imm_operand:$Imm12))], NoItinerary>; @@ -630,7 +633,7 @@ multiclass addsubimm_varieties<string prefix, bit sf, bit op, bits<2> shift, (outs GPR:$Rd), (ins GPRsp:$Rn, imm_operand:$Imm12), !strconcat(asmop, "s\t$Rd, $Rn, $Imm12"), - [(set GPR:$Rd, (addc GPRsp:$Rn, imm_operand:$Imm12))], + [(set Ty:$Rd, (addc Ty:$Rn, imm_operand:$Imm12))], NoItinerary> { let Defs = [NZCV]; } @@ -642,7 +645,7 @@ multiclass addsubimm_varieties<string prefix, bit sf, bit op, bits<2> shift, (outs), (ins GPRsp:$Rn, imm_operand:$Imm12), !strconcat(cmpasmop, " $Rn, $Imm12"), [(set NZCV, - (A64cmp GPRsp:$Rn, cmp_imm_operand:$Imm12))], + (A64cmp Ty:$Rn, cmp_imm_operand:$Imm12))], NoItinerary> { let Rd = 0b11111; let Defs = [NZCV]; @@ -653,36 +656,37 @@ multiclass addsubimm_varieties<string prefix, bit sf, bit op, bits<2> shift, multiclass addsubimm_shifts<string prefix, bit sf, bit op, string asmop, string cmpasmop, string operand, string cmpoperand, - RegisterClass GPR, RegisterClass GPRsp, AArch64Reg ZR> { + RegisterClass GPR, RegisterClass GPRsp, AArch64Reg ZR, + ValueType Ty> { defm _lsl0 : addsubimm_varieties<prefix # "_lsl0", sf, op, 0b00, asmop, cmpasmop, !cast<Operand>(operand # "_lsl0"), !cast<Operand>(cmpoperand # "_lsl0"), - GPR, GPRsp, ZR>; + GPR, GPRsp, ZR, Ty>; defm _lsl12 : addsubimm_varieties<prefix # "_lsl12", sf, op, 0b01, asmop, cmpasmop, !cast<Operand>(operand # "_lsl12"), !cast<Operand>(cmpoperand # "_lsl12"), - GPR, GPRsp, ZR>; + GPR, GPRsp, ZR, Ty>; } defm ADDwwi : addsubimm_shifts<"ADDwi", 0b0, 0b0, "add", "cmn", "addsubimm_operand_i32_posimm", "addsubimm_operand_i32_negimm", - GPR32, GPR32wsp, WZR>; + GPR32, GPR32wsp, WZR, i32>; defm ADDxxi : addsubimm_shifts<"ADDxi", 0b1, 0b0, "add", "cmn", "addsubimm_operand_i64_posimm", "addsubimm_operand_i64_negimm", - GPR64, GPR64xsp, XZR>; + GPR64, GPR64xsp, XZR, i64>; defm SUBwwi : addsubimm_shifts<"SUBwi", 0b0, 0b1, "sub", "cmp", "addsubimm_operand_i32_negimm", "addsubimm_operand_i32_posimm", - GPR32, GPR32wsp, WZR>; + GPR32, GPR32wsp, WZR, i32>; defm SUBxxi : addsubimm_shifts<"SUBxi", 0b1, 0b1, "sub", "cmp", "addsubimm_operand_i64_negimm", "addsubimm_operand_i64_posimm", - GPR64, GPR64xsp, XZR>; + GPR64, GPR64xsp, XZR, i64>; multiclass MOVsp<RegisterClass GPRsp, RegisterClass SP, Instruction addop> { def _fromsp : InstAlias<"mov $Rd, $Rn", @@ -753,36 +757,36 @@ defm ror_operand : shift_operands<"ror_operand", "ROR">; // N.b. the commutable parameter is just !N. It will be first against the wall // when the revolution comes. 
multiclass addsub_shifts<string prefix, bit sf, bit op, bit s, bit commutable, - string asmop, SDPatternOperator opfrag, string sty, + string asmop, SDPatternOperator opfrag, ValueType ty, RegisterClass GPR, list<Register> defs> { let isCommutable = commutable, Defs = defs in { def _lsl : A64I_addsubshift<sf, op, s, 0b00, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6), + !cast<Operand>("lsl_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"), - [(set GPR:$Rd, (opfrag GPR:$Rn, (shl GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6)) + [(set GPR:$Rd, (opfrag ty:$Rn, (shl ty:$Rm, + !cast<Operand>("lsl_operand_" # ty):$Imm6)) )], NoItinerary>; def _lsr : A64I_addsubshift<sf, op, s, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6), + !cast<Operand>("lsr_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"), - [(set GPR:$Rd, (opfrag GPR:$Rn, (srl GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6)) + [(set ty:$Rd, (opfrag ty:$Rn, (srl ty:$Rm, + !cast<Operand>("lsr_operand_" # ty):$Imm6)) )], NoItinerary>; def _asr : A64I_addsubshift<sf, op, s, 0b10, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6), + !cast<Operand>("asr_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"), - [(set GPR:$Rd, (opfrag GPR:$Rn, (sra GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6)) + [(set ty:$Rd, (opfrag ty:$Rn, (sra ty:$Rm, + !cast<Operand>("asr_operand_" # ty):$Imm6)) )], NoItinerary>; } @@ -792,17 +796,17 @@ multiclass addsub_shifts<string prefix, bit sf, bit op, bit s, bit commutable, (!cast<Instruction>(prefix # "_lsl") GPR:$Rd, GPR:$Rn, GPR:$Rm, 0)>; - def : Pat<(opfrag GPR:$Rn, GPR:$Rm), - (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>; + def : Pat<(opfrag ty:$Rn, ty:$Rm), + (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>; } multiclass addsub_sizes<string prefix, bit op, bit s, bit commutable, string asmop, SDPatternOperator opfrag, list<Register> defs> { defm xxx : addsub_shifts<prefix # "xxx", 0b1, op, s, - commutable, asmop, opfrag, "i64", GPR64, defs>; + commutable, asmop, opfrag, i64, GPR64, defs>; defm www : addsub_shifts<prefix # "www", 0b0, op, s, - commutable, asmop, opfrag, "i32", GPR32, defs>; + commutable, asmop, opfrag, i32, GPR32, defs>; } @@ -816,26 +820,26 @@ defm SUBS : addsub_sizes<"SUBS", 0b1, 0b1, 0b0, "subs", subc, [NZCV]>; // 1. 
The NEG/NEGS aliases //===------------------------------- -multiclass neg_alias<Instruction INST, RegisterClass GPR, - Register ZR, Operand shift_operand, SDNode shiftop> { +multiclass neg_alias<Instruction INST, RegisterClass GPR, Register ZR, + ValueType ty, Operand shift_operand, SDNode shiftop> { def : InstAlias<"neg $Rd, $Rm, $Imm6", (INST GPR:$Rd, ZR, GPR:$Rm, shift_operand:$Imm6)>; - def : Pat<(sub 0, (shiftop GPR:$Rm, shift_operand:$Imm6)), - (INST ZR, GPR:$Rm, shift_operand:$Imm6)>; + def : Pat<(sub 0, (shiftop ty:$Rm, shift_operand:$Imm6)), + (INST ZR, $Rm, shift_operand:$Imm6)>; } -defm : neg_alias<SUBwww_lsl, GPR32, WZR, lsl_operand_i32, shl>; -defm : neg_alias<SUBwww_lsr, GPR32, WZR, lsr_operand_i32, srl>; -defm : neg_alias<SUBwww_asr, GPR32, WZR, asr_operand_i32, sra>; +defm : neg_alias<SUBwww_lsl, GPR32, WZR, i32, lsl_operand_i32, shl>; +defm : neg_alias<SUBwww_lsr, GPR32, WZR, i32, lsr_operand_i32, srl>; +defm : neg_alias<SUBwww_asr, GPR32, WZR, i32, asr_operand_i32, sra>; def : InstAlias<"neg $Rd, $Rm", (SUBwww_lsl GPR32:$Rd, WZR, GPR32:$Rm, 0)>; -def : Pat<(sub 0, GPR32:$Rm), (SUBwww_lsl WZR, GPR32:$Rm, 0)>; +def : Pat<(sub 0, i32:$Rm), (SUBwww_lsl WZR, $Rm, 0)>; -defm : neg_alias<SUBxxx_lsl, GPR64, XZR, lsl_operand_i64, shl>; -defm : neg_alias<SUBxxx_lsr, GPR64, XZR, lsr_operand_i64, srl>; -defm : neg_alias<SUBxxx_asr, GPR64, XZR, asr_operand_i64, sra>; +defm : neg_alias<SUBxxx_lsl, GPR64, XZR, i64, lsl_operand_i64, shl>; +defm : neg_alias<SUBxxx_lsr, GPR64, XZR, i64, lsr_operand_i64, srl>; +defm : neg_alias<SUBxxx_asr, GPR64, XZR, i64, asr_operand_i64, sra>; def : InstAlias<"neg $Rd, $Rm", (SUBxxx_lsl GPR64:$Rd, XZR, GPR64:$Rm, 0)>; -def : Pat<(sub 0, GPR64:$Rm), (SUBxxx_lsl XZR, GPR64:$Rm, 0)>; +def : Pat<(sub 0, i64:$Rm), (SUBxxx_lsl XZR, $Rm, 0)>; // NEGS doesn't get any patterns yet: defining multiple outputs means C++ has to // be involved. 
@@ -859,36 +863,36 @@ def : InstAlias<"negs $Rd, $Rm", (SUBSxxx_lsl GPR64:$Rd, XZR, GPR64:$Rm, 0)>; //===------------------------------- multiclass cmp_shifts<string prefix, bit sf, bit op, bit commutable, - string asmop, SDPatternOperator opfrag, string sty, + string asmop, SDPatternOperator opfrag, ValueType ty, RegisterClass GPR> { let isCommutable = commutable, Rd = 0b11111, Defs = [NZCV] in { def _lsl : A64I_addsubshift<sf, op, 0b1, 0b00, (outs), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6), + !cast<Operand>("lsl_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rn, $Rm, $Imm6"), - [(set NZCV, (opfrag GPR:$Rn, (shl GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6)) + [(set NZCV, (opfrag ty:$Rn, (shl ty:$Rm, + !cast<Operand>("lsl_operand_" # ty):$Imm6)) )], NoItinerary>; def _lsr : A64I_addsubshift<sf, op, 0b1, 0b01, (outs), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6), + !cast<Operand>("lsr_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rn, $Rm, $Imm6"), - [(set NZCV, (opfrag GPR:$Rn, (srl GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6)) + [(set NZCV, (opfrag ty:$Rn, (srl ty:$Rm, + !cast<Operand>("lsr_operand_" # ty):$Imm6)) )], NoItinerary>; def _asr : A64I_addsubshift<sf, op, 0b1, 0b10, (outs), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6), + !cast<Operand>("asr_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rn, $Rm, $Imm6"), - [(set NZCV, (opfrag GPR:$Rn, (sra GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6)) + [(set NZCV, (opfrag ty:$Rn, (sra ty:$Rm, + !cast<Operand>("asr_operand_" # ty):$Imm6)) )], NoItinerary>; } @@ -897,15 +901,15 @@ multiclass cmp_shifts<string prefix, bit sf, bit op, bit commutable, : InstAlias<!strconcat(asmop, " $Rn, $Rm"), (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>; - def : Pat<(opfrag GPR:$Rn, GPR:$Rm), - (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>; + def : Pat<(opfrag ty:$Rn, ty:$Rm), + (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>; } -defm CMPww : cmp_shifts<"CMPww", 0b0, 0b1, 0b0, "cmp", A64cmp, "i32", GPR32>; -defm CMPxx : cmp_shifts<"CMPxx", 0b1, 0b1, 0b0, "cmp", A64cmp, "i64", GPR64>; +defm CMPww : cmp_shifts<"CMPww", 0b0, 0b1, 0b0, "cmp", A64cmp, i32, GPR32>; +defm CMPxx : cmp_shifts<"CMPxx", 0b1, 0b1, 0b0, "cmp", A64cmp, i64, GPR64>; -defm CMNww : cmp_shifts<"CMNww", 0b0, 0b0, 0b1, "cmn", A64cmn, "i32", GPR32>; -defm CMNxx : cmp_shifts<"CMNxx", 0b1, 0b0, 0b1, "cmn", A64cmn, "i64", GPR64>; +defm CMNww : cmp_shifts<"CMNww", 0b0, 0b0, 0b1, "cmn", A64cmn, i32, GPR32>; +defm CMNxx : cmp_shifts<"CMNxx", 0b1, 0b0, 0b1, "cmn", A64cmn, i64, GPR64>; //===----------------------------------------------------------------------===// // Add-subtract (with carry) instructions @@ -947,10 +951,10 @@ def : InstAlias<"ngcs $Rd, $Rm", (SBCSxxx GPR64:$Rd, XZR, GPR64:$Rm)>; // Note that adde and sube can form a chain longer than two (e.g. for 256-bit // addition). So the flag-setting instructions are appropriate. 
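The adde/sube patterns just below exist because multi-word arithmetic chains the carry out of one limb into the next, which is what the flag-setting ADCS/SBCS provide. A standalone 128-bit addition sketched with 64-bit limbs (U128 and add128 are illustrative names):

#include <cstdint>
#include <cstdio>

struct U128 { uint64_t Lo, Hi; };

static U128 add128(U128 A, U128 B) {
  U128 R;
  R.Lo = A.Lo + B.Lo;
  uint64_t Carry = R.Lo < A.Lo;   // carry out of the low limb (what ADDS records)
  R.Hi = A.Hi + B.Hi + Carry;     // consumed by the high limb (what ADCS uses)
  return R;
}

int main() {
  U128 A = {~0ULL, 0}, B = {1, 0};
  U128 R = add128(A, B);
  std::printf("%llu %llu\n",
              (unsigned long long)R.Hi, (unsigned long long)R.Lo);   // 1 0
  return 0;
}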
-def : Pat<(adde GPR32:$Rn, GPR32:$Rm), (ADCSwww GPR32:$Rn, GPR32:$Rm)>; -def : Pat<(adde GPR64:$Rn, GPR64:$Rm), (ADCSxxx GPR64:$Rn, GPR64:$Rm)>; -def : Pat<(sube GPR32:$Rn, GPR32:$Rm), (SBCSwww GPR32:$Rn, GPR32:$Rm)>; -def : Pat<(sube GPR64:$Rn, GPR64:$Rm), (SBCSxxx GPR64:$Rn, GPR64:$Rm)>; +def : Pat<(adde i32:$Rn, i32:$Rm), (ADCSwww $Rn, $Rm)>; +def : Pat<(adde i64:$Rn, i64:$Rm), (ADCSxxx $Rn, $Rm)>; +def : Pat<(sube i32:$Rn, i32:$Rm), (SBCSwww $Rn, $Rm)>; +def : Pat<(sube i64:$Rn, i64:$Rm), (SBCSxxx $Rn, $Rm)>; //===----------------------------------------------------------------------===// // Bitfield @@ -1053,52 +1057,52 @@ def BFMxxii : // Note that these instructions are strictly more specific than the // BFM ones (in ImmR) so they can handle their own decoding. -class A64I_bf_ext<bit sf, bits<2> opc, RegisterClass GPRDest, string asmop, - bits<6> imms, dag pattern> +class A64I_bf_ext<bit sf, bits<2> opc, RegisterClass GPRDest, ValueType dty, + string asmop, bits<6> imms, dag pattern> : A64I_bitfield<sf, opc, sf, (outs GPRDest:$Rd), (ins GPR32:$Rn), !strconcat(asmop, "\t$Rd, $Rn"), - [(set GPRDest:$Rd, pattern)], NoItinerary> { + [(set dty:$Rd, pattern)], NoItinerary> { let ImmR = 0b000000; let ImmS = imms; } // Signed extensions -def SXTBxw : A64I_bf_ext<0b1, 0b00, GPR64, "sxtb", 7, - (sext_inreg (anyext GPR32:$Rn), i8)>; -def SXTBww : A64I_bf_ext<0b0, 0b00, GPR32, "sxtb", 7, - (sext_inreg GPR32:$Rn, i8)>; -def SXTHxw : A64I_bf_ext<0b1, 0b00, GPR64, "sxth", 15, - (sext_inreg (anyext GPR32:$Rn), i16)>; -def SXTHww : A64I_bf_ext<0b0, 0b00, GPR32, "sxth", 15, - (sext_inreg GPR32:$Rn, i16)>; -def SXTWxw : A64I_bf_ext<0b1, 0b00, GPR64, "sxtw", 31, (sext GPR32:$Rn)>; +def SXTBxw : A64I_bf_ext<0b1, 0b00, GPR64, i64, "sxtb", 7, + (sext_inreg (anyext i32:$Rn), i8)>; +def SXTBww : A64I_bf_ext<0b0, 0b00, GPR32, i32, "sxtb", 7, + (sext_inreg i32:$Rn, i8)>; +def SXTHxw : A64I_bf_ext<0b1, 0b00, GPR64, i64, "sxth", 15, + (sext_inreg (anyext i32:$Rn), i16)>; +def SXTHww : A64I_bf_ext<0b0, 0b00, GPR32, i32, "sxth", 15, + (sext_inreg i32:$Rn, i16)>; +def SXTWxw : A64I_bf_ext<0b1, 0b00, GPR64, i64, "sxtw", 31, (sext i32:$Rn)>; // Unsigned extensions -def UXTBww : A64I_bf_ext<0b0, 0b10, GPR32, "uxtb", 7, - (and GPR32:$Rn, 255)>; -def UXTHww : A64I_bf_ext<0b0, 0b10, GPR32, "uxth", 15, - (and GPR32:$Rn, 65535)>; +def UXTBww : A64I_bf_ext<0b0, 0b10, GPR32, i32, "uxtb", 7, + (and i32:$Rn, 255)>; +def UXTHww : A64I_bf_ext<0b0, 0b10, GPR32, i32, "uxth", 15, + (and i32:$Rn, 65535)>; // The 64-bit unsigned variants are not strictly architectural but recommended // for consistency. let isAsmParserOnly = 1 in { - def UXTBxw : A64I_bf_ext<0b0, 0b10, GPR64, "uxtb", 7, - (and (anyext GPR32:$Rn), 255)>; - def UXTHxw : A64I_bf_ext<0b0, 0b10, GPR64, "uxth", 15, - (and (anyext GPR32:$Rn), 65535)>; + def UXTBxw : A64I_bf_ext<0b0, 0b10, GPR64, i64, "uxtb", 7, + (and (anyext i32:$Rn), 255)>; + def UXTHxw : A64I_bf_ext<0b0, 0b10, GPR64, i64, "uxth", 15, + (and (anyext i32:$Rn), 65535)>; } // Extra patterns for when the source register is actually 64-bits // too. There's no architectural difference here, it's just LLVM // shinanigans. There's no need for equivalent zero-extension patterns // because they'll already be caught by logical (immediate) matching. 
-def : Pat<(sext_inreg GPR64:$Rn, i8), - (SXTBxw (EXTRACT_SUBREG GPR64:$Rn, sub_32))>; -def : Pat<(sext_inreg GPR64:$Rn, i16), - (SXTHxw (EXTRACT_SUBREG GPR64:$Rn, sub_32))>; -def : Pat<(sext_inreg GPR64:$Rn, i32), - (SXTWxw (EXTRACT_SUBREG GPR64:$Rn, sub_32))>; +def : Pat<(sext_inreg i64:$Rn, i8), + (SXTBxw (EXTRACT_SUBREG $Rn, sub_32))>; +def : Pat<(sext_inreg i64:$Rn, i16), + (SXTHxw (EXTRACT_SUBREG $Rn, sub_32))>; +def : Pat<(sext_inreg i64:$Rn, i32), + (SXTWxw (EXTRACT_SUBREG $Rn, sub_32))>; //===------------------------------- @@ -1111,7 +1115,7 @@ multiclass A64I_shift<bits<2> opc, string asmop, SDNode opnode> { def wwi : A64I_bitfield<0b0, opc, 0b0, (outs GPR32:$Rd), (ins GPR32:$Rn, bitfield32_imm:$ImmR), !strconcat(asmop, "\t$Rd, $Rn, $ImmR"), - [(set GPR32:$Rd, (opnode GPR32:$Rn, bitfield32_imm:$ImmR))], + [(set i32:$Rd, (opnode i32:$Rn, bitfield32_imm:$ImmR))], NoItinerary> { let ImmS = 31; } @@ -1119,7 +1123,7 @@ multiclass A64I_shift<bits<2> opc, string asmop, SDNode opnode> { def xxi : A64I_bitfield<0b1, opc, 0b1, (outs GPR64:$Rd), (ins GPR64:$Rn, bitfield64_imm:$ImmR), !strconcat(asmop, "\t$Rd, $Rn, $ImmR"), - [(set GPR64:$Rd, (opnode GPR64:$Rn, bitfield64_imm:$ImmR))], + [(set i64:$Rd, (opnode i64:$Rn, bitfield64_imm:$ImmR))], NoItinerary> { let ImmS = 63; } @@ -1156,10 +1160,11 @@ def bitfield64_lsl_imm : Operand<i64>, let EncoderMethod = "getBitfield64LSLOpValue"; } -class A64I_bitfield_lsl<bit sf, RegisterClass GPR, Operand operand> +class A64I_bitfield_lsl<bit sf, RegisterClass GPR, ValueType ty, + Operand operand> : A64I_bitfield<sf, 0b10, sf, (outs GPR:$Rd), (ins GPR:$Rn, operand:$FullImm), "lsl\t$Rd, $Rn, $FullImm", - [(set GPR:$Rd, (shl GPR:$Rn, operand:$FullImm))], + [(set ty:$Rd, (shl ty:$Rn, operand:$FullImm))], NoItinerary> { bits<12> FullImm; let ImmR = FullImm{5-0}; @@ -1170,8 +1175,8 @@ class A64I_bitfield_lsl<bit sf, RegisterClass GPR, Operand operand> let isAsmParserOnly = 1; } -def LSLwwi : A64I_bitfield_lsl<0b0, GPR32, bitfield32_lsl_imm>; -def LSLxxi : A64I_bitfield_lsl<0b1, GPR64, bitfield64_lsl_imm>; +def LSLwwi : A64I_bitfield_lsl<0b0, GPR32, i32, bitfield32_lsl_imm>; +def LSLxxi : A64I_bitfield_lsl<0b1, GPR64, i64, bitfield64_lsl_imm>; //===------------------------------- // 5. Aliases for bitfield extract instructions @@ -1206,7 +1211,7 @@ multiclass A64I_bitfield_extract<bits<2> opc, string asmop, SDNode op> { def wwii : A64I_bitfield<0b0, opc, 0b0, (outs GPR32:$Rd), (ins GPR32:$Rn, bitfield32_imm:$ImmR, bfx32_width:$ImmS), !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"), - [(set GPR32:$Rd, (op GPR32:$Rn, imm:$ImmR, imm:$ImmS))], + [(set i32:$Rd, (op i32:$Rn, imm:$ImmR, imm:$ImmS))], NoItinerary> { // As above, no disassembler allowed. let isAsmParserOnly = 1; @@ -1215,7 +1220,7 @@ multiclass A64I_bitfield_extract<bits<2> opc, string asmop, SDNode op> { def xxii : A64I_bitfield<0b1, opc, 0b1, (outs GPR64:$Rd), (ins GPR64:$Rn, bitfield64_imm:$ImmR, bfx64_width:$ImmS), !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"), - [(set GPR64:$Rd, (op GPR64:$Rn, imm:$ImmR, imm:$ImmS))], + [(set i64:$Rd, (op i64:$Rn, imm:$ImmR, imm:$ImmS))], NoItinerary> { // As above, no disassembler allowed. let isAsmParserOnly = 1; @@ -1243,15 +1248,15 @@ def BFXILxxii : A64I_bitfield<0b1, 0b01, 0b1, (outs GPR64:$Rd), } // SBFX instructions can do a 1-instruction sign-extension of boolean values. 
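What the i1 sext_inreg patterns below compute: bit 0 is replicated across the whole register, so a boolean becomes all-ones or zero, and a single signed bitfield extract does it in one instruction. A portable sketch (signExtendBit0 is an illustrative name):

#include <cstdint>
#include <cstdio>

static int32_t signExtendBit0(uint32_t X) {
  return (X & 1) ? -1 : 0;   // bit 0 set -> all ones, otherwise zero
}

int main() {
  std::printf("%d %d\n", signExtendBit0(5), signExtendBit0(4));   // -1 0
  return 0;
}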
-def : Pat<(sext_inreg GPR64:$Rn, i1), (SBFXxxii GPR64:$Rn, 0, 0)>; -def : Pat<(sext_inreg GPR32:$Rn, i1), (SBFXwwii GPR32:$Rn, 0, 0)>; -def : Pat<(i64 (sext_inreg (anyext GPR32:$Rn), i1)), - (SBFXxxii (SUBREG_TO_REG (i64 0), GPR32:$Rn, sub_32), 0, 0)>; +def : Pat<(sext_inreg i64:$Rn, i1), (SBFXxxii $Rn, 0, 0)>; +def : Pat<(sext_inreg i32:$Rn, i1), (SBFXwwii $Rn, 0, 0)>; +def : Pat<(i64 (sext_inreg (anyext i32:$Rn), i1)), + (SBFXxxii (SUBREG_TO_REG (i64 0), $Rn, sub_32), 0, 0)>; // UBFX makes sense as an implementation of a 64-bit zero-extension too. Could // use either 64-bit or 32-bit variant, but 32-bit might be more efficient. -def : Pat<(zext GPR32:$Rn), (SUBREG_TO_REG (i64 0), (UBFXwwii GPR32:$Rn, 0, 31), - sub_32)>; +def : Pat<(zext i32:$Rn), (SUBREG_TO_REG (i64 0), (UBFXwwii $Rn, 0, 31), + sub_32)>; //===------------------------------- // 6. Aliases for bitfield insert instructions @@ -1380,14 +1385,14 @@ multiclass cmpbr_sizes<bit op, string asmop, ImmLeaf SETOP> { (outs), (ins GPR64:$Rt, bcc_target:$Label), !strconcat(asmop,"\t$Rt, $Label"), - [(A64br_cc (A64cmp GPR64:$Rt, 0), SETOP, bb:$Label)], + [(A64br_cc (A64cmp i64:$Rt, 0), SETOP, bb:$Label)], NoItinerary>; def w : A64I_cmpbr<0b0, op, (outs), (ins GPR32:$Rt, bcc_target:$Label), !strconcat(asmop,"\t$Rt, $Label"), - [(A64br_cc (A64cmp GPR32:$Rt, 0), SETOP, bb:$Label)], + [(A64br_cc (A64cmp i32:$Rt, 0), SETOP, bb:$Label)], NoItinerary>; } } @@ -1530,7 +1535,7 @@ multiclass A64I_condselSizes<bit op, bits<2> op2, string asmop, (outs GPR32:$Rd), (ins GPR32:$Rn, GPR32:$Rm, cond_code_op:$Cond), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Cond"), - [(set GPR32:$Rd, (select GPR32:$Rn, GPR32:$Rm))], + [(set i32:$Rd, (select i32:$Rn, i32:$Rm))], NoItinerary>; @@ -1538,7 +1543,7 @@ multiclass A64I_condselSizes<bit op, bits<2> op2, string asmop, (outs GPR64:$Rd), (ins GPR64:$Rn, GPR64:$Rm, cond_code_op:$Cond), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Cond"), - [(set GPR64:$Rd, (select GPR64:$Rn, GPR64:$Rm))], + [(set i64:$Rd, (select i64:$Rn, i64:$Rm))], NoItinerary>; } } @@ -1613,24 +1618,22 @@ def : Pat<(A64select_cc NZCV, -1, 0, inv_cond_code:$Cond), // No commutable pattern for CSEL since the commuted version is isomorphic. 
// CSINC -def :Pat<(A64select_cc NZCV, (add GPR32:$Rm, 1), GPR32:$Rn, - inv_cond_code:$Cond), - (CSINCwwwc GPR32:$Rn, GPR32:$Rm, inv_cond_code:$Cond)>; -def :Pat<(A64select_cc NZCV, (add GPR64:$Rm, 1), GPR64:$Rn, - inv_cond_code:$Cond), - (CSINCxxxc GPR64:$Rn, GPR64:$Rm, inv_cond_code:$Cond)>; +def :Pat<(A64select_cc NZCV, (add i32:$Rm, 1), i32:$Rn, inv_cond_code:$Cond), + (CSINCwwwc $Rn, $Rm, inv_cond_code:$Cond)>; +def :Pat<(A64select_cc NZCV, (add i64:$Rm, 1), i64:$Rn, inv_cond_code:$Cond), + (CSINCxxxc $Rn, $Rm, inv_cond_code:$Cond)>; // CSINV -def :Pat<(A64select_cc NZCV, (not GPR32:$Rm), GPR32:$Rn, inv_cond_code:$Cond), - (CSINVwwwc GPR32:$Rn, GPR32:$Rm, inv_cond_code:$Cond)>; -def :Pat<(A64select_cc NZCV, (not GPR64:$Rm), GPR64:$Rn, inv_cond_code:$Cond), - (CSINVxxxc GPR64:$Rn, GPR64:$Rm, inv_cond_code:$Cond)>; +def :Pat<(A64select_cc NZCV, (not i32:$Rm), i32:$Rn, inv_cond_code:$Cond), + (CSINVwwwc $Rn, $Rm, inv_cond_code:$Cond)>; +def :Pat<(A64select_cc NZCV, (not i64:$Rm), i64:$Rn, inv_cond_code:$Cond), + (CSINVxxxc $Rn, $Rm, inv_cond_code:$Cond)>; // CSNEG -def :Pat<(A64select_cc NZCV, (ineg GPR32:$Rm), GPR32:$Rn, inv_cond_code:$Cond), - (CSNEGwwwc GPR32:$Rn, GPR32:$Rm, inv_cond_code:$Cond)>; -def :Pat<(A64select_cc NZCV, (ineg GPR64:$Rm), GPR64:$Rn, inv_cond_code:$Cond), - (CSNEGxxxc GPR64:$Rn, GPR64:$Rm, inv_cond_code:$Cond)>; +def :Pat<(A64select_cc NZCV, (ineg i32:$Rm), i32:$Rn, inv_cond_code:$Cond), + (CSNEGwwwc $Rn, $Rm, inv_cond_code:$Cond)>; +def :Pat<(A64select_cc NZCV, (ineg i64:$Rm), i64:$Rn, inv_cond_code:$Cond), + (CSNEGxxxc $Rn, $Rm, inv_cond_code:$Cond)>; //===----------------------------------------------------------------------===// // Data Processing (1 source) instructions @@ -1664,28 +1667,28 @@ defm RBIT : A64I_dp_1src<0b000000, "rbit">; defm CLS : A64I_dp_1src<0b000101, "cls">; defm CLZ : A64I_dp_1src<0b000100, "clz">; -def : Pat<(ctlz GPR32:$Rn), (CLZww GPR32:$Rn)>; -def : Pat<(ctlz GPR64:$Rn), (CLZxx GPR64:$Rn)>; -def : Pat<(ctlz_zero_undef GPR32:$Rn), (CLZww GPR32:$Rn)>; -def : Pat<(ctlz_zero_undef GPR64:$Rn), (CLZxx GPR64:$Rn)>; +def : Pat<(ctlz i32:$Rn), (CLZww $Rn)>; +def : Pat<(ctlz i64:$Rn), (CLZxx $Rn)>; +def : Pat<(ctlz_zero_undef i32:$Rn), (CLZww $Rn)>; +def : Pat<(ctlz_zero_undef i64:$Rn), (CLZxx $Rn)>; -def : Pat<(cttz GPR32:$Rn), (CLZww (RBITww GPR32:$Rn))>; -def : Pat<(cttz GPR64:$Rn), (CLZxx (RBITxx GPR64:$Rn))>; -def : Pat<(cttz_zero_undef GPR32:$Rn), (CLZww (RBITww GPR32:$Rn))>; -def : Pat<(cttz_zero_undef GPR64:$Rn), (CLZxx (RBITxx GPR64:$Rn))>; +def : Pat<(cttz i32:$Rn), (CLZww (RBITww $Rn))>; +def : Pat<(cttz i64:$Rn), (CLZxx (RBITxx $Rn))>; +def : Pat<(cttz_zero_undef i32:$Rn), (CLZww (RBITww $Rn))>; +def : Pat<(cttz_zero_undef i64:$Rn), (CLZxx (RBITxx $Rn))>; def REVww : A64I_dp_1src_impl<0b0, 0b000010, "rev", - [(set GPR32:$Rd, (bswap GPR32:$Rn))], + [(set i32:$Rd, (bswap i32:$Rn))], GPR32, NoItinerary>; def REVxx : A64I_dp_1src_impl<0b1, 0b000011, "rev", - [(set GPR64:$Rd, (bswap GPR64:$Rn))], + [(set i64:$Rd, (bswap i64:$Rn))], GPR64, NoItinerary>; def REV32xx : A64I_dp_1src_impl<0b1, 0b000010, "rev32", - [(set GPR64:$Rd, (bswap (rotr GPR64:$Rn, (i64 32))))], + [(set i64:$Rd, (bswap (rotr i64:$Rn, (i64 32))))], GPR64, NoItinerary>; def REV16ww : A64I_dp_1src_impl<0b0, 0b000001, "rev16", - [(set GPR32:$Rd, (bswap (rotr GPR32:$Rn, (i64 16))))], + [(set i32:$Rd, (bswap (rotr i32:$Rn, (i64 16))))], GPR32, NoItinerary>; def REV16xx : A64I_dp_1src_impl<0b1, 0b000001, "rev16", [], GPR64, NoItinerary>; @@ -1726,14 +1729,14 @@ multiclass 
dp_2src_zext <bits<6> opcode, string asmop, SDPatternOperator op> { def www : dp_2src_impl<0b0, opcode, asmop, - [(set GPR32:$Rd, - (op GPR32:$Rn, (i64 (zext GPR32:$Rm))))], + [(set i32:$Rd, + (op i32:$Rn, (i64 (zext i32:$Rm))))], GPR32, NoItinerary>; def xxx : dp_2src_impl<0b1, opcode, asmop, - [(set GPR64:$Rd, (op GPR64:$Rn, GPR64:$Rm))], + [(set i64:$Rd, (op i64:$Rn, i64:$Rm))], GPR64, NoItinerary>; } @@ -1743,13 +1746,13 @@ multiclass dp_2src <bits<6> opcode, string asmop, SDPatternOperator op> { def www : dp_2src_impl<0b0, opcode, asmop, - [(set GPR32:$Rd, (op GPR32:$Rn, GPR32:$Rm))], + [(set i32:$Rd, (op i32:$Rn, i32:$Rm))], GPR32, NoItinerary>; def xxx : dp_2src_impl<0b1, opcode, asmop, - [(set GPR64:$Rd, (op GPR64:$Rn, GPR64:$Rm))], + [(set i64:$Rd, (op i64:$Rn, i64:$Rm))], GPR64, NoItinerary>; } @@ -1770,14 +1773,14 @@ defm RORV : dp_2src_zext<0b001011, "ror", rotr>; // operation. Since the LLVM operations are undefined (as in C) if the // RHS is out of range, it's perfectly permissible to discard the high // bits of the GPR64. -def : Pat<(shl GPR32:$Rn, GPR64:$Rm), - (LSLVwww GPR32:$Rn, (EXTRACT_SUBREG GPR64:$Rm, sub_32))>; -def : Pat<(srl GPR32:$Rn, GPR64:$Rm), - (LSRVwww GPR32:$Rn, (EXTRACT_SUBREG GPR64:$Rm, sub_32))>; -def : Pat<(sra GPR32:$Rn, GPR64:$Rm), - (ASRVwww GPR32:$Rn, (EXTRACT_SUBREG GPR64:$Rm, sub_32))>; -def : Pat<(rotr GPR32:$Rn, GPR64:$Rm), - (RORVwww GPR32:$Rn, (EXTRACT_SUBREG GPR64:$Rm, sub_32))>; +def : Pat<(shl i32:$Rn, i64:$Rm), + (LSLVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>; +def : Pat<(srl i32:$Rn, i64:$Rm), + (LSRVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>; +def : Pat<(sra i32:$Rn, i64:$Rm), + (ASRVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>; +def : Pat<(rotr i32:$Rn, i64:$Rm), + (RORVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>; // Here we define the aliases for the data processing 2 source instructions. 
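The shl/srl/sra/rotr patterns just above discard the high half of a 64-bit shift amount via EXTRACT_SUBREG. A standalone illustration of why that is safe: a 32-bit shift is only defined for amounts 0..31, and those survive truncation to 32 bits unchanged (this is plain C++, not LLVM code; the bounds check only keeps the demo well-defined).

#include <cstdint>
#include <cstdio>

static uint32_t shl32(uint32_t X, uint64_t Amt64) {
  uint32_t Amt = static_cast<uint32_t>(Amt64);   // the EXTRACT_SUBREG, in effect
  return Amt < 32 ? (X << Amt) : 0;              // out-of-range amounts were undefined anyway
}

int main() {
  std::printf("%u\n", shl32(1, 5));   // 32; any in-range amount is unaffected by truncation
  return 0;
}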
def LSL_mnemonic : MnemonicAlias<"lslv", "lsl">; @@ -1792,46 +1795,47 @@ def ROR_menmonic : MnemonicAlias<"rorv", "ror">; // + aliases MUL, MNEG, SMULL, SMNEGL, UMULL, UMNEGL class A64I_dp3_4operand<bit sf, bits<6> opcode, RegisterClass AccReg, - RegisterClass SrcReg, string asmop, dag pattern> + ValueType AccTy, RegisterClass SrcReg, + string asmop, dag pattern> : A64I_dp3<sf, opcode, (outs AccReg:$Rd), (ins SrcReg:$Rn, SrcReg:$Rm, AccReg:$Ra), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Ra"), - [(set AccReg:$Rd, pattern)], NoItinerary> { + [(set AccTy:$Rd, pattern)], NoItinerary> { RegisterClass AccGPR = AccReg; RegisterClass SrcGPR = SrcReg; } -def MADDwwww : A64I_dp3_4operand<0b0, 0b000000, GPR32, GPR32, "madd", - (add GPR32:$Ra, (mul GPR32:$Rn, GPR32:$Rm))>; -def MADDxxxx : A64I_dp3_4operand<0b1, 0b000000, GPR64, GPR64, "madd", - (add GPR64:$Ra, (mul GPR64:$Rn, GPR64:$Rm))>; +def MADDwwww : A64I_dp3_4operand<0b0, 0b000000, GPR32, i32, GPR32, "madd", + (add i32:$Ra, (mul i32:$Rn, i32:$Rm))>; +def MADDxxxx : A64I_dp3_4operand<0b1, 0b000000, GPR64, i64, GPR64, "madd", + (add i64:$Ra, (mul i64:$Rn, i64:$Rm))>; -def MSUBwwww : A64I_dp3_4operand<0b0, 0b000001, GPR32, GPR32, "msub", - (sub GPR32:$Ra, (mul GPR32:$Rn, GPR32:$Rm))>; -def MSUBxxxx : A64I_dp3_4operand<0b1, 0b000001, GPR64, GPR64, "msub", - (sub GPR64:$Ra, (mul GPR64:$Rn, GPR64:$Rm))>; +def MSUBwwww : A64I_dp3_4operand<0b0, 0b000001, GPR32, i32, GPR32, "msub", + (sub i32:$Ra, (mul i32:$Rn, i32:$Rm))>; +def MSUBxxxx : A64I_dp3_4operand<0b1, 0b000001, GPR64, i64, GPR64, "msub", + (sub i64:$Ra, (mul i64:$Rn, i64:$Rm))>; -def SMADDLxwwx : A64I_dp3_4operand<0b1, 0b000010, GPR64, GPR32, "smaddl", - (add GPR64:$Ra, (mul (i64 (sext GPR32:$Rn)), (sext GPR32:$Rm)))>; -def SMSUBLxwwx : A64I_dp3_4operand<0b1, 0b000011, GPR64, GPR32, "smsubl", - (sub GPR64:$Ra, (mul (i64 (sext GPR32:$Rn)), (sext GPR32:$Rm)))>; +def SMADDLxwwx : A64I_dp3_4operand<0b1, 0b000010, GPR64, i64, GPR32, "smaddl", + (add i64:$Ra, (mul (i64 (sext i32:$Rn)), (sext i32:$Rm)))>; +def SMSUBLxwwx : A64I_dp3_4operand<0b1, 0b000011, GPR64, i64, GPR32, "smsubl", + (sub i64:$Ra, (mul (i64 (sext i32:$Rn)), (sext i32:$Rm)))>; -def UMADDLxwwx : A64I_dp3_4operand<0b1, 0b001010, GPR64, GPR32, "umaddl", - (add GPR64:$Ra, (mul (i64 (zext GPR32:$Rn)), (zext GPR32:$Rm)))>; -def UMSUBLxwwx : A64I_dp3_4operand<0b1, 0b001011, GPR64, GPR32, "umsubl", - (sub GPR64:$Ra, (mul (i64 (zext GPR32:$Rn)), (zext GPR32:$Rm)))>; +def UMADDLxwwx : A64I_dp3_4operand<0b1, 0b001010, GPR64, i64, GPR32, "umaddl", + (add i64:$Ra, (mul (i64 (zext i32:$Rn)), (zext i32:$Rm)))>; +def UMSUBLxwwx : A64I_dp3_4operand<0b1, 0b001011, GPR64, i64, GPR32, "umsubl", + (sub i64:$Ra, (mul (i64 (zext i32:$Rn)), (zext i32:$Rm)))>; let isCommutable = 1, PostEncoderMethod = "fixMulHigh" in { def UMULHxxx : A64I_dp3<0b1, 0b001100, (outs GPR64:$Rd), (ins GPR64:$Rn, GPR64:$Rm), "umulh\t$Rd, $Rn, $Rm", - [(set GPR64:$Rd, (mulhu GPR64:$Rn, GPR64:$Rm))], + [(set i64:$Rd, (mulhu i64:$Rn, i64:$Rm))], NoItinerary>; def SMULHxxx : A64I_dp3<0b1, 0b000100, (outs GPR64:$Rd), (ins GPR64:$Rn, GPR64:$Rm), "smulh\t$Rd, $Rn, $Rm", - [(set GPR64:$Rd, (mulhs GPR64:$Rn, GPR64:$Rm))], + [(set i64:$Rd, (mulhs i64:$Rn, i64:$Rm))], NoItinerary>; } @@ -1840,26 +1844,26 @@ multiclass A64I_dp3_3operand<string asmop, A64I_dp3_4operand INST, def : InstAlias<asmop # " $Rd, $Rn, $Rm", (INST INST.AccGPR:$Rd, INST.SrcGPR:$Rn, INST.SrcGPR:$Rm, ZR)>; - def : Pat<pattern, (INST INST.SrcGPR:$Rn, INST.SrcGPR:$Rm, ZR)>; + def : Pat<pattern, (INST $Rn, $Rm, ZR)>; } -defm : 
A64I_dp3_3operand<"mul", MADDwwww, WZR, (mul GPR32:$Rn, GPR32:$Rm)>; -defm : A64I_dp3_3operand<"mul", MADDxxxx, XZR, (mul GPR64:$Rn, GPR64:$Rm)>; +defm : A64I_dp3_3operand<"mul", MADDwwww, WZR, (mul i32:$Rn, i32:$Rm)>; +defm : A64I_dp3_3operand<"mul", MADDxxxx, XZR, (mul i64:$Rn, i64:$Rm)>; defm : A64I_dp3_3operand<"mneg", MSUBwwww, WZR, - (sub 0, (mul GPR32:$Rn, GPR32:$Rm))>; + (sub 0, (mul i32:$Rn, i32:$Rm))>; defm : A64I_dp3_3operand<"mneg", MSUBxxxx, XZR, - (sub 0, (mul GPR64:$Rn, GPR64:$Rm))>; + (sub 0, (mul i64:$Rn, i64:$Rm))>; defm : A64I_dp3_3operand<"smull", SMADDLxwwx, XZR, - (mul (i64 (sext GPR32:$Rn)), (sext GPR32:$Rm))>; + (mul (i64 (sext i32:$Rn)), (sext i32:$Rm))>; defm : A64I_dp3_3operand<"smnegl", SMSUBLxwwx, XZR, - (sub 0, (mul (i64 (sext GPR32:$Rn)), (sext GPR32:$Rm)))>; + (sub 0, (mul (i64 (sext i32:$Rn)), (sext i32:$Rm)))>; defm : A64I_dp3_3operand<"umull", UMADDLxwwx, XZR, - (mul (i64 (zext GPR32:$Rn)), (zext GPR32:$Rm))>; + (mul (i64 (zext i32:$Rn)), (zext i32:$Rm))>; defm : A64I_dp3_3operand<"umnegl", UMSUBLxwwx, XZR, - (sub 0, (mul (i64 (zext GPR32:$Rn)), (zext GPR32:$Rm)))>; + (sub 0, (mul (i64 (zext i32:$Rn)), (zext i32:$Rm)))>; //===----------------------------------------------------------------------===// @@ -1909,15 +1913,15 @@ def EXTRwwwi : A64I_extract<0b0, 0b000, 0b0, (outs GPR32:$Rd), (ins GPR32:$Rn, GPR32:$Rm, bitfield32_imm:$LSB), "extr\t$Rd, $Rn, $Rm, $LSB", - [(set GPR32:$Rd, - (A64Extr GPR32:$Rn, GPR32:$Rm, imm:$LSB))], + [(set i32:$Rd, + (A64Extr i32:$Rn, i32:$Rm, imm:$LSB))], NoItinerary>; def EXTRxxxi : A64I_extract<0b1, 0b000, 0b1, (outs GPR64:$Rd), (ins GPR64:$Rn, GPR64:$Rm, bitfield64_imm:$LSB), "extr\t$Rd, $Rn, $Rm, $LSB", - [(set GPR64:$Rd, - (A64Extr GPR64:$Rn, GPR64:$Rm, imm:$LSB))], + [(set i64:$Rd, + (A64Extr i64:$Rn, i64:$Rm, imm:$LSB))], NoItinerary>; def : InstAlias<"ror $Rd, $Rs, $LSB", @@ -1925,10 +1929,10 @@ def : InstAlias<"ror $Rd, $Rs, $LSB", def : InstAlias<"ror $Rd, $Rs, $LSB", (EXTRxxxi GPR64:$Rd, GPR64:$Rs, GPR64:$Rs, bitfield64_imm:$LSB)>; -def : Pat<(rotr GPR32:$Rn, bitfield32_imm:$LSB), - (EXTRwwwi GPR32:$Rn, GPR32:$Rn, bitfield32_imm:$LSB)>; -def : Pat<(rotr GPR64:$Rn, bitfield64_imm:$LSB), - (EXTRxxxi GPR64:$Rn, GPR64:$Rn, bitfield64_imm:$LSB)>; +def : Pat<(rotr i32:$Rn, bitfield32_imm:$LSB), + (EXTRwwwi $Rn, $Rn, bitfield32_imm:$LSB)>; +def : Pat<(rotr i64:$Rn, bitfield64_imm:$LSB), + (EXTRxxxi $Rn, $Rn, bitfield64_imm:$LSB)>; //===----------------------------------------------------------------------===// // Floating-point compare instructions @@ -1969,17 +1973,17 @@ multiclass A64I_fpcmpSignal<bits<2> type, bit imm, dag ins, dag pattern> { } defm FCMPss : A64I_fpcmpSignal<0b00, 0b0, (ins FPR32:$Rn, FPR32:$Rm), - (set NZCV, (A64cmp (f32 FPR32:$Rn), FPR32:$Rm))>; + (set NZCV, (A64cmp f32:$Rn, f32:$Rm))>; defm FCMPdd : A64I_fpcmpSignal<0b01, 0b0, (ins FPR64:$Rn, FPR64:$Rm), - (set NZCV, (A64cmp (f64 FPR64:$Rn), FPR64:$Rm))>; + (set NZCV, (A64cmp f64:$Rn, f64:$Rm))>; // What would be Rm should be written as 0; note that even though it's called // "$Rm" here to fit in with the InstrFormats, it's actually an immediate. 
defm FCMPsi : A64I_fpcmpSignal<0b00, 0b1, (ins FPR32:$Rn, fpz32:$Rm), - (set NZCV, (A64cmp (f32 FPR32:$Rn), fpz32:$Rm))>; + (set NZCV, (A64cmp f32:$Rn, fpz32:$Rm))>; defm FCMPdi : A64I_fpcmpSignal<0b01, 0b1, (ins FPR64:$Rn, fpz64:$Rm), - (set NZCV, (A64cmp (f64 FPR64:$Rn), fpz64:$Rm))>; + (set NZCV, (A64cmp f64:$Rn, fpz64:$Rm))>; //===----------------------------------------------------------------------===// @@ -2010,18 +2014,16 @@ let Uses = [NZCV] in { def FCSELsssc : A64I_fpcondsel<0b0, 0b0, 0b00, (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm, cond_code_op:$Cond), "fcsel\t$Rd, $Rn, $Rm, $Cond", - [(set FPR32:$Rd, - (simple_select (f32 FPR32:$Rn), - FPR32:$Rm))], + [(set f32:$Rd, + (simple_select f32:$Rn, f32:$Rm))], NoItinerary>; def FCSELdddc : A64I_fpcondsel<0b0, 0b0, 0b01, (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm, cond_code_op:$Cond), "fcsel\t$Rd, $Rn, $Rm, $Cond", - [(set FPR64:$Rd, - (simple_select (f64 FPR64:$Rn), - FPR64:$Rm))], + [(set f64:$Rd, + (simple_select f64:$Rn, f64:$Rm))], NoItinerary>; } @@ -2039,12 +2041,12 @@ multiclass A64I_fpdp1sizes<bits<6> opcode, string asmstr, SDPatternOperator opnode = FPNoUnop> { def ss : A64I_fpdp1<0b0, 0b0, 0b00, opcode, (outs FPR32:$Rd), (ins FPR32:$Rn), !strconcat(asmstr, "\t$Rd, $Rn"), - [(set (f32 FPR32:$Rd), (opnode FPR32:$Rn))], + [(set f32:$Rd, (opnode f32:$Rn))], NoItinerary>; def dd : A64I_fpdp1<0b0, 0b0, 0b01, opcode, (outs FPR64:$Rd), (ins FPR64:$Rn), !strconcat(asmstr, "\t$Rd, $Rn"), - [(set (f64 FPR64:$Rd), (opnode FPR64:$Rn))], + [(set f64:$Rd, (opnode f64:$Rn))], NoItinerary>; } @@ -2080,8 +2082,7 @@ class A64I_fpdp1_fcvt<FCVTRegType DestReg, FCVTRegType SrcReg, SDNode opnode> {0,0,0,1, DestReg.t1, DestReg.t0}, (outs DestReg.Class:$Rd), (ins SrcReg.Class:$Rn), "fcvt\t$Rd, $Rn", - [(set (DestReg.VT DestReg.Class:$Rd), - (opnode (SrcReg.VT SrcReg.Class:$Rn)))], NoItinerary>; + [(set DestReg.VT:$Rd, (opnode SrcReg.VT:$Rn))], NoItinerary>; def FCVTds : A64I_fpdp1_fcvt<FCVT64, FCVT32, fextend>; def FCVThs : A64I_fpdp1_fcvt<FCVT16, FCVT32, fround>; @@ -2105,14 +2106,14 @@ multiclass A64I_fpdp2sizes<bits<4> opcode, string asmstr, (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm), !strconcat(asmstr, "\t$Rd, $Rn, $Rm"), - [(set (f32 FPR32:$Rd), (opnode FPR32:$Rn, FPR32:$Rm))], + [(set f32:$Rd, (opnode f32:$Rn, f32:$Rm))], NoItinerary>; def ddd : A64I_fpdp2<0b0, 0b0, 0b01, opcode, (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm), !strconcat(asmstr, "\t$Rd, $Rn, $Rm"), - [(set (f64 FPR64:$Rd), (opnode FPR64:$Rn, FPR64:$Rm))], + [(set f64:$Rd, (opnode f64:$Rn, f64:$Rm))], NoItinerary>; } @@ -2151,7 +2152,7 @@ class A64I_fpdp3Impl<string asmop, RegisterClass FPR, ValueType VT, : A64I_fpdp3<0b0, 0b0, type, o1, o0, (outs FPR:$Rd), (ins FPR:$Rn, FPR:$Rm, FPR:$Ra), !strconcat(asmop,"\t$Rd, $Rn, $Rm, $Ra"), - [(set FPR:$Rd, (fmakind (VT FPR:$Rn), FPR:$Rm, FPR:$Ra))], + [(set VT:$Rd, (fmakind VT:$Rn, VT:$Rm, VT:$Ra))], NoItinerary>; def FMADDssss : A64I_fpdp3Impl<"fmadd", FPR32, f32, 0b00, 0b0, 0b0, fma>; @@ -2208,57 +2209,59 @@ class cvtfix_i64_op<ValueType FloatVT> // worth going for a multiclass here. Oh well. 
class A64I_fptofix<bit sf, bits<2> type, bits<3> opcode, - RegisterClass GPR, RegisterClass FPR, Operand scale_op, - string asmop, SDNode cvtop> + RegisterClass GPR, RegisterClass FPR, + ValueType DstTy, ValueType SrcTy, + Operand scale_op, string asmop, SDNode cvtop> : A64I_fpfixed<sf, 0b0, type, 0b11, opcode, (outs GPR:$Rd), (ins FPR:$Rn, scale_op:$Scale), !strconcat(asmop, "\t$Rd, $Rn, $Scale"), - [(set GPR:$Rd, (cvtop (fmul FPR:$Rn, scale_op:$Scale)))], + [(set DstTy:$Rd, (cvtop (fmul SrcTy:$Rn, scale_op:$Scale)))], NoItinerary>; -def FCVTZSwsi : A64I_fptofix<0b0, 0b00, 0b000, GPR32, FPR32, +def FCVTZSwsi : A64I_fptofix<0b0, 0b00, 0b000, GPR32, FPR32, i32, f32, cvtfix_i32_op<f32>, "fcvtzs", fp_to_sint>; -def FCVTZSxsi : A64I_fptofix<0b1, 0b00, 0b000, GPR64, FPR32, +def FCVTZSxsi : A64I_fptofix<0b1, 0b00, 0b000, GPR64, FPR32, i64, f32, cvtfix_i64_op<f32>, "fcvtzs", fp_to_sint>; -def FCVTZUwsi : A64I_fptofix<0b0, 0b00, 0b001, GPR32, FPR32, +def FCVTZUwsi : A64I_fptofix<0b0, 0b00, 0b001, GPR32, FPR32, i32, f32, cvtfix_i32_op<f32>, "fcvtzu", fp_to_uint>; -def FCVTZUxsi : A64I_fptofix<0b1, 0b00, 0b001, GPR64, FPR32, +def FCVTZUxsi : A64I_fptofix<0b1, 0b00, 0b001, GPR64, FPR32, i64, f32, cvtfix_i64_op<f32>, "fcvtzu", fp_to_uint>; -def FCVTZSwdi : A64I_fptofix<0b0, 0b01, 0b000, GPR32, FPR64, +def FCVTZSwdi : A64I_fptofix<0b0, 0b01, 0b000, GPR32, FPR64, i32, f64, cvtfix_i32_op<f64>, "fcvtzs", fp_to_sint>; -def FCVTZSxdi : A64I_fptofix<0b1, 0b01, 0b000, GPR64, FPR64, +def FCVTZSxdi : A64I_fptofix<0b1, 0b01, 0b000, GPR64, FPR64, i64, f64, cvtfix_i64_op<f64>, "fcvtzs", fp_to_sint>; -def FCVTZUwdi : A64I_fptofix<0b0, 0b01, 0b001, GPR32, FPR64, +def FCVTZUwdi : A64I_fptofix<0b0, 0b01, 0b001, GPR32, FPR64, i32, f64, cvtfix_i32_op<f64>, "fcvtzu", fp_to_uint>; -def FCVTZUxdi : A64I_fptofix<0b1, 0b01, 0b001, GPR64, FPR64, +def FCVTZUxdi : A64I_fptofix<0b1, 0b01, 0b001, GPR64, FPR64, i64, f64, cvtfix_i64_op<f64>, "fcvtzu", fp_to_uint>; class A64I_fixtofp<bit sf, bits<2> type, bits<3> opcode, - RegisterClass FPR, RegisterClass GPR, Operand scale_op, - string asmop, SDNode cvtop> + RegisterClass FPR, RegisterClass GPR, + ValueType DstTy, ValueType SrcTy, + Operand scale_op, string asmop, SDNode cvtop> : A64I_fpfixed<sf, 0b0, type, 0b00, opcode, (outs FPR:$Rd), (ins GPR:$Rn, scale_op:$Scale), !strconcat(asmop, "\t$Rd, $Rn, $Scale"), - [(set FPR:$Rd, (fdiv (cvtop GPR:$Rn), scale_op:$Scale))], + [(set DstTy:$Rd, (fdiv (cvtop SrcTy:$Rn), scale_op:$Scale))], NoItinerary>; -def SCVTFswi : A64I_fixtofp<0b0, 0b00, 0b010, FPR32, GPR32, +def SCVTFswi : A64I_fixtofp<0b0, 0b00, 0b010, FPR32, GPR32, f32, i32, cvtfix_i32_op<f32>, "scvtf", sint_to_fp>; -def SCVTFsxi : A64I_fixtofp<0b1, 0b00, 0b010, FPR32, GPR64, +def SCVTFsxi : A64I_fixtofp<0b1, 0b00, 0b010, FPR32, GPR64, f32, i64, cvtfix_i64_op<f32>, "scvtf", sint_to_fp>; -def UCVTFswi : A64I_fixtofp<0b0, 0b00, 0b011, FPR32, GPR32, +def UCVTFswi : A64I_fixtofp<0b0, 0b00, 0b011, FPR32, GPR32, f32, i32, cvtfix_i32_op<f32>, "ucvtf", uint_to_fp>; -def UCVTFsxi : A64I_fixtofp<0b1, 0b00, 0b011, FPR32, GPR64, +def UCVTFsxi : A64I_fixtofp<0b1, 0b00, 0b011, FPR32, GPR64, f32, i64, cvtfix_i64_op<f32>, "ucvtf", uint_to_fp>; -def SCVTFdwi : A64I_fixtofp<0b0, 0b01, 0b010, FPR64, GPR32, +def SCVTFdwi : A64I_fixtofp<0b0, 0b01, 0b010, FPR64, GPR32, f64, i32, cvtfix_i32_op<f64>, "scvtf", sint_to_fp>; -def SCVTFdxi : A64I_fixtofp<0b1, 0b01, 0b010, FPR64, GPR64, +def SCVTFdxi : A64I_fixtofp<0b1, 0b01, 0b010, FPR64, GPR64, f64, i64, cvtfix_i64_op<f64>, "scvtf", sint_to_fp>; -def UCVTFdwi 
: A64I_fixtofp<0b0, 0b01, 0b011, FPR64, GPR32, +def UCVTFdwi : A64I_fixtofp<0b0, 0b01, 0b011, FPR64, GPR32, f64, i32, cvtfix_i32_op<f64>, "ucvtf", uint_to_fp>; -def UCVTFdxi : A64I_fixtofp<0b1, 0b01, 0b011, FPR64, GPR64, +def UCVTFdxi : A64I_fixtofp<0b1, 0b01, 0b011, FPR64, GPR64, f64, i64, cvtfix_i64_op<f64>, "ucvtf", uint_to_fp>; //===----------------------------------------------------------------------===// @@ -2297,14 +2300,14 @@ defm FCVTM : A64I_fptointRM<0b10, 0b0, "fcvtm">; defm FCVTZ : A64I_fptointRM<0b11, 0b0, "fcvtz">; defm FCVTA : A64I_fptointRM<0b00, 0b1, "fcvta">; -def : Pat<(i32 (fp_to_sint FPR32:$Rn)), (FCVTZSws FPR32:$Rn)>; -def : Pat<(i64 (fp_to_sint FPR32:$Rn)), (FCVTZSxs FPR32:$Rn)>; -def : Pat<(i32 (fp_to_uint FPR32:$Rn)), (FCVTZUws FPR32:$Rn)>; -def : Pat<(i64 (fp_to_uint FPR32:$Rn)), (FCVTZUxs FPR32:$Rn)>; -def : Pat<(i32 (fp_to_sint (f64 FPR64:$Rn))), (FCVTZSwd FPR64:$Rn)>; -def : Pat<(i64 (fp_to_sint (f64 FPR64:$Rn))), (FCVTZSxd FPR64:$Rn)>; -def : Pat<(i32 (fp_to_uint (f64 FPR64:$Rn))), (FCVTZUwd FPR64:$Rn)>; -def : Pat<(i64 (fp_to_uint (f64 FPR64:$Rn))), (FCVTZUxd FPR64:$Rn)>; +def : Pat<(i32 (fp_to_sint f32:$Rn)), (FCVTZSws $Rn)>; +def : Pat<(i64 (fp_to_sint f32:$Rn)), (FCVTZSxs $Rn)>; +def : Pat<(i32 (fp_to_uint f32:$Rn)), (FCVTZUws $Rn)>; +def : Pat<(i64 (fp_to_uint f32:$Rn)), (FCVTZUxs $Rn)>; +def : Pat<(i32 (fp_to_sint f64:$Rn)), (FCVTZSwd $Rn)>; +def : Pat<(i64 (fp_to_sint f64:$Rn)), (FCVTZSxd $Rn)>; +def : Pat<(i32 (fp_to_uint f64:$Rn)), (FCVTZUwd $Rn)>; +def : Pat<(i64 (fp_to_uint f64:$Rn)), (FCVTZUxd $Rn)>; multiclass A64I_inttofp<bit o0, string asmop> { def CVTFsw : A64I_fpintI<0b0, 0b00, 0b00, {0, 1, o0}, FPR32, GPR32, asmop>; @@ -2316,24 +2319,24 @@ multiclass A64I_inttofp<bit o0, string asmop> { defm S : A64I_inttofp<0b0, "scvtf">; defm U : A64I_inttofp<0b1, "ucvtf">; -def : Pat<(f32 (sint_to_fp GPR32:$Rn)), (SCVTFsw GPR32:$Rn)>; -def : Pat<(f32 (sint_to_fp GPR64:$Rn)), (SCVTFsx GPR64:$Rn)>; -def : Pat<(f64 (sint_to_fp GPR32:$Rn)), (SCVTFdw GPR32:$Rn)>; -def : Pat<(f64 (sint_to_fp GPR64:$Rn)), (SCVTFdx GPR64:$Rn)>; -def : Pat<(f32 (uint_to_fp GPR32:$Rn)), (UCVTFsw GPR32:$Rn)>; -def : Pat<(f32 (uint_to_fp GPR64:$Rn)), (UCVTFsx GPR64:$Rn)>; -def : Pat<(f64 (uint_to_fp GPR32:$Rn)), (UCVTFdw GPR32:$Rn)>; -def : Pat<(f64 (uint_to_fp GPR64:$Rn)), (UCVTFdx GPR64:$Rn)>; +def : Pat<(f32 (sint_to_fp i32:$Rn)), (SCVTFsw $Rn)>; +def : Pat<(f32 (sint_to_fp i64:$Rn)), (SCVTFsx $Rn)>; +def : Pat<(f64 (sint_to_fp i32:$Rn)), (SCVTFdw $Rn)>; +def : Pat<(f64 (sint_to_fp i64:$Rn)), (SCVTFdx $Rn)>; +def : Pat<(f32 (uint_to_fp i32:$Rn)), (UCVTFsw $Rn)>; +def : Pat<(f32 (uint_to_fp i64:$Rn)), (UCVTFsx $Rn)>; +def : Pat<(f64 (uint_to_fp i32:$Rn)), (UCVTFdw $Rn)>; +def : Pat<(f64 (uint_to_fp i64:$Rn)), (UCVTFdx $Rn)>; def FMOVws : A64I_fpintI<0b0, 0b00, 0b00, 0b110, GPR32, FPR32, "fmov">; def FMOVsw : A64I_fpintI<0b0, 0b00, 0b00, 0b111, FPR32, GPR32, "fmov">; def FMOVxd : A64I_fpintI<0b1, 0b01, 0b00, 0b110, GPR64, FPR64, "fmov">; def FMOVdx : A64I_fpintI<0b1, 0b01, 0b00, 0b111, FPR64, GPR64, "fmov">; -def : Pat<(i32 (bitconvert (f32 FPR32:$Rn))), (FMOVws FPR32:$Rn)>; -def : Pat<(f32 (bitconvert (i32 GPR32:$Rn))), (FMOVsw GPR32:$Rn)>; -def : Pat<(i64 (bitconvert (f64 FPR64:$Rn))), (FMOVxd FPR64:$Rn)>; -def : Pat<(f64 (bitconvert (i64 GPR64:$Rn))), (FMOVdx GPR64:$Rn)>; +def : Pat<(i32 (bitconvert f32:$Rn)), (FMOVws $Rn)>; +def : Pat<(f32 (bitconvert i32:$Rn)), (FMOVsw $Rn)>; +def : Pat<(i64 (bitconvert f64:$Rn)), (FMOVxd $Rn)>; +def : Pat<(f64 (bitconvert i64:$Rn)), (FMOVdx 
$Rn)>; def lane1_asmoperand : AsmOperandClass { let Name = "Lane1"; @@ -2397,7 +2400,7 @@ class A64I_fpimm_impl<bits<2> type, RegisterClass Reg, ValueType VT, (outs Reg:$Rd), (ins fmov_operand:$Imm8), "fmov\t$Rd, $Imm8", - [(set (VT Reg:$Rd), fmov_operand:$Imm8)], + [(set VT:$Rd, fmov_operand:$Imm8)], NoItinerary>; def FMOVsi : A64I_fpimm_impl<0b00, FPR32, f32, fmov32_operand>; @@ -2582,7 +2585,8 @@ defm LDAR : A64I_LRex<"ldar", 0b101>; class acquiring_load<PatFrag base> : PatFrag<(ops node:$ptr), (base node:$ptr), [{ - return cast<AtomicSDNode>(N)->getOrdering() == Acquire; + AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering(); + return Ordering == Acquire || Ordering == SequentiallyConsistent; }]>; def atomic_load_acquire_8 : acquiring_load<atomic_load_8>; @@ -2590,10 +2594,10 @@ def atomic_load_acquire_16 : acquiring_load<atomic_load_16>; def atomic_load_acquire_32 : acquiring_load<atomic_load_32>; def atomic_load_acquire_64 : acquiring_load<atomic_load_64>; -def : Pat<(atomic_load_acquire_8 GPR64xsp:$Rn), (LDAR_byte GPR64xsp0:$Rn)>; -def : Pat<(atomic_load_acquire_16 GPR64xsp:$Rn), (LDAR_hword GPR64xsp0:$Rn)>; -def : Pat<(atomic_load_acquire_32 GPR64xsp:$Rn), (LDAR_word GPR64xsp0:$Rn)>; -def : Pat<(atomic_load_acquire_64 GPR64xsp:$Rn), (LDAR_dword GPR64xsp0:$Rn)>; +def : Pat<(atomic_load_acquire_8 i64:$Rn), (LDAR_byte $Rn)>; +def : Pat<(atomic_load_acquire_16 i64:$Rn), (LDAR_hword $Rn)>; +def : Pat<(atomic_load_acquire_32 i64:$Rn), (LDAR_word $Rn)>; +def : Pat<(atomic_load_acquire_64 i64:$Rn), (LDAR_dword $Rn)>; //===---------------------------------- // Store-release (no exclusivity) @@ -2613,7 +2617,8 @@ class A64I_SLexs_impl<bits<2> size, bits<3> opcode, string asm, dag outs, class releasing_store<PatFrag base> : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{ - return cast<AtomicSDNode>(N)->getOrdering() == Release; + AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering(); + return Ordering == Release || Ordering == SequentiallyConsistent; }]>; def atomic_store_release_8 : releasing_store<atomic_store_8>; @@ -2624,22 +2629,22 @@ def atomic_store_release_64 : releasing_store<atomic_store_64>; multiclass A64I_SLex<string asmstr, bits<3> opcode, string prefix> { def _byte: A64I_SLexs_impl<0b00, opcode, !strconcat(asmstr, "b"), (outs), (ins GPR32:$Rt, GPR64xsp0:$Rn), - [(atomic_store_release_8 GPR64xsp0:$Rn, GPR32:$Rt)], + [(atomic_store_release_8 i64:$Rn, i32:$Rt)], NoItinerary>; def _hword: A64I_SLexs_impl<0b01, opcode, !strconcat(asmstr, "h"), (outs), (ins GPR32:$Rt, GPR64xsp0:$Rn), - [(atomic_store_release_16 GPR64xsp0:$Rn, GPR32:$Rt)], + [(atomic_store_release_16 i64:$Rn, i32:$Rt)], NoItinerary>; def _word: A64I_SLexs_impl<0b10, opcode, asmstr, (outs), (ins GPR32:$Rt, GPR64xsp0:$Rn), - [(atomic_store_release_32 GPR64xsp0:$Rn, GPR32:$Rt)], + [(atomic_store_release_32 i64:$Rn, i32:$Rt)], NoItinerary>; def _dword: A64I_SLexs_impl<0b11, opcode, asmstr, (outs), (ins GPR64:$Rt, GPR64xsp0:$Rn), - [(atomic_store_release_64 GPR64xsp0:$Rn, GPR64:$Rt)], + [(atomic_store_release_64 i64:$Rn, i64:$Rt)], NoItinerary>; } @@ -3596,15 +3601,15 @@ multiclass A64I_logimmSizes<bits<2> opc, string asmop, SDNode opnode> { def wwi : A64I_logicalimm<0b0, opc, (outs GPR32wsp:$Rd), (ins GPR32:$Rn, logical_imm32_operand:$Imm), !strconcat(asmop, "\t$Rd, $Rn, $Imm"), - [(set GPR32wsp:$Rd, - (opnode GPR32:$Rn, logical_imm32_operand:$Imm))], + [(set i32:$Rd, + (opnode i32:$Rn, logical_imm32_operand:$Imm))], NoItinerary>; def xxi : A64I_logicalimm<0b1, opc, (outs 
GPR64xsp:$Rd), (ins GPR64:$Rn, logical_imm64_operand:$Imm), !strconcat(asmop, "\t$Rd, $Rn, $Imm"), - [(set GPR64xsp:$Rd, - (opnode GPR64:$Rn, logical_imm64_operand:$Imm))], + [(set i64:$Rd, + (opnode i64:$Rn, logical_imm64_operand:$Imm))], NoItinerary>; } @@ -3655,46 +3660,46 @@ def signed_cond : PatLeaf<(cond), [{ // when the revolution comes. multiclass logical_shifts<string prefix, bit sf, bits<2> opc, bit N, bit commutable, - string asmop, SDPatternOperator opfrag, string sty, + string asmop, SDPatternOperator opfrag, ValueType ty, RegisterClass GPR, list<Register> defs> { let isCommutable = commutable, Defs = defs in { def _lsl : A64I_logicalshift<sf, opc, 0b00, N, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6), + !cast<Operand>("lsl_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"), - [(set GPR:$Rd, (opfrag GPR:$Rn, (shl GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6)) + [(set ty:$Rd, (opfrag ty:$Rn, (shl ty:$Rm, + !cast<Operand>("lsl_operand_" # ty):$Imm6)) )], NoItinerary>; def _lsr : A64I_logicalshift<sf, opc, 0b01, N, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6), + !cast<Operand>("lsr_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"), - [(set GPR:$Rd, (opfrag GPR:$Rn, (srl GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6)) + [(set ty:$Rd, (opfrag ty:$Rn, (srl ty:$Rm, + !cast<Operand>("lsr_operand_" # ty):$Imm6)) )], NoItinerary>; def _asr : A64I_logicalshift<sf, opc, 0b10, N, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6), + !cast<Operand>("asr_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"), - [(set GPR:$Rd, (opfrag GPR:$Rn, (sra GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6)) + [(set ty:$Rd, (opfrag ty:$Rn, (sra ty:$Rm, + !cast<Operand>("asr_operand_" # ty):$Imm6)) )], NoItinerary>; def _ror : A64I_logicalshift<sf, opc, 0b11, N, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("ror_operand_" # sty):$Imm6), + !cast<Operand>("ror_operand_" # ty):$Imm6), !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"), - [(set GPR:$Rd, (opfrag GPR:$Rn, (rotr GPR:$Rm, - !cast<Operand>("ror_operand_" # sty):$Imm6)) + [(set ty:$Rd, (opfrag ty:$Rn, (rotr ty:$Rm, + !cast<Operand>("ror_operand_" # ty):$Imm6)) )], NoItinerary>; } @@ -3704,17 +3709,17 @@ multiclass logical_shifts<string prefix, bit sf, bits<2> opc, (!cast<Instruction>(prefix # "_lsl") GPR:$Rd, GPR:$Rn, GPR:$Rm, 0)>; - def : Pat<(opfrag GPR:$Rn, GPR:$Rm), - (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>; + def : Pat<(opfrag ty:$Rn, ty:$Rm), + (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>; } multiclass logical_sizes<string prefix, bits<2> opc, bit N, bit commutable, string asmop, SDPatternOperator opfrag, list<Register> defs> { defm xxx : logical_shifts<prefix # "xxx", 0b1, opc, N, - commutable, asmop, opfrag, "i64", GPR64, defs>; + commutable, asmop, opfrag, i64, GPR64, defs>; defm www : logical_shifts<prefix # "www", 0b0, opc, N, - commutable, asmop, opfrag, "i32", GPR32, defs>; + commutable, asmop, opfrag, i32, GPR32, defs>; } @@ -3741,15 +3746,15 @@ defm BICS : logical_sizes<"BICS", 0b11, 0b1, 0b0, "bics", [{ (void)N; return false; }]>, [NZCV]>; -multiclass tst_shifts<string prefix, bit sf, string sty, RegisterClass GPR> { +multiclass tst_shifts<string prefix, bit sf, ValueType ty, RegisterClass GPR> { let isCommutable = 1, Rd = 0b11111, Defs = [NZCV] in { def _lsl : A64I_logicalshift<sf, 0b11, 0b00, 0b0, (outs), (ins GPR:$Rn, 
GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6), + !cast<Operand>("lsl_operand_" # ty):$Imm6), "tst\t$Rn, $Rm, $Imm6", - [(set NZCV, (A64setcc (and GPR:$Rn, (shl GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6)), + [(set NZCV, (A64setcc (and ty:$Rn, (shl ty:$Rm, + !cast<Operand>("lsl_operand_" # ty):$Imm6)), 0, signed_cond))], NoItinerary>; @@ -3757,30 +3762,30 @@ multiclass tst_shifts<string prefix, bit sf, string sty, RegisterClass GPR> { def _lsr : A64I_logicalshift<sf, 0b11, 0b01, 0b0, (outs), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6), + !cast<Operand>("lsr_operand_" # ty):$Imm6), "tst\t$Rn, $Rm, $Imm6", - [(set NZCV, (A64setcc (and GPR:$Rn, (srl GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6)), + [(set NZCV, (A64setcc (and ty:$Rn, (srl ty:$Rm, + !cast<Operand>("lsr_operand_" # ty):$Imm6)), 0, signed_cond))], NoItinerary>; def _asr : A64I_logicalshift<sf, 0b11, 0b10, 0b0, (outs), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6), + !cast<Operand>("asr_operand_" # ty):$Imm6), "tst\t$Rn, $Rm, $Imm6", - [(set NZCV, (A64setcc (and GPR:$Rn, (sra GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6)), + [(set NZCV, (A64setcc (and ty:$Rn, (sra ty:$Rm, + !cast<Operand>("asr_operand_" # ty):$Imm6)), 0, signed_cond))], NoItinerary>; def _ror : A64I_logicalshift<sf, 0b11, 0b11, 0b0, (outs), (ins GPR:$Rn, GPR:$Rm, - !cast<Operand>("ror_operand_" # sty):$Imm6), + !cast<Operand>("ror_operand_" # ty):$Imm6), "tst\t$Rn, $Rm, $Imm6", - [(set NZCV, (A64setcc (and GPR:$Rn, (rotr GPR:$Rm, - !cast<Operand>("ror_operand_" # sty):$Imm6)), + [(set NZCV, (A64setcc (and ty:$Rn, (rotr ty:$Rm, + !cast<Operand>("ror_operand_" # ty):$Imm6)), 0, signed_cond))], NoItinerary>; } @@ -3788,63 +3793,63 @@ multiclass tst_shifts<string prefix, bit sf, string sty, RegisterClass GPR> { def _noshift : InstAlias<"tst $Rn, $Rm", (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>; - def : Pat<(A64setcc (and GPR:$Rn, GPR:$Rm), 0, signed_cond), - (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>; + def : Pat<(A64setcc (and ty:$Rn, ty:$Rm), 0, signed_cond), + (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>; } -defm TSTxx : tst_shifts<"TSTxx", 0b1, "i64", GPR64>; -defm TSTww : tst_shifts<"TSTww", 0b0, "i32", GPR32>; +defm TSTxx : tst_shifts<"TSTxx", 0b1, i64, GPR64>; +defm TSTww : tst_shifts<"TSTww", 0b0, i32, GPR32>; -multiclass mvn_shifts<string prefix, bit sf, string sty, RegisterClass GPR> { +multiclass mvn_shifts<string prefix, bit sf, ValueType ty, RegisterClass GPR> { let isCommutable = 0, Rn = 0b11111 in { def _lsl : A64I_logicalshift<sf, 0b01, 0b00, 0b1, (outs GPR:$Rd), (ins GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6), + !cast<Operand>("lsl_operand_" # ty):$Imm6), "mvn\t$Rd, $Rm, $Imm6", - [(set GPR:$Rd, (not (shl GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6)))], + [(set ty:$Rd, (not (shl ty:$Rm, + !cast<Operand>("lsl_operand_" # ty):$Imm6)))], NoItinerary>; def _lsr : A64I_logicalshift<sf, 0b01, 0b01, 0b1, (outs GPR:$Rd), (ins GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6), + !cast<Operand>("lsr_operand_" # ty):$Imm6), "mvn\t$Rd, $Rm, $Imm6", - [(set GPR:$Rd, (not (srl GPR:$Rm, - !cast<Operand>("lsr_operand_" # sty):$Imm6)))], + [(set ty:$Rd, (not (srl ty:$Rm, + !cast<Operand>("lsr_operand_" # ty):$Imm6)))], NoItinerary>; def _asr : A64I_logicalshift<sf, 0b01, 0b10, 0b1, (outs GPR:$Rd), (ins GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6), + !cast<Operand>("asr_operand_" # ty):$Imm6), "mvn\t$Rd, $Rm, 
$Imm6", - [(set GPR:$Rd, (not (sra GPR:$Rm, - !cast<Operand>("asr_operand_" # sty):$Imm6)))], + [(set ty:$Rd, (not (sra ty:$Rm, + !cast<Operand>("asr_operand_" # ty):$Imm6)))], NoItinerary>; def _ror : A64I_logicalshift<sf, 0b01, 0b11, 0b1, (outs GPR:$Rd), (ins GPR:$Rm, - !cast<Operand>("ror_operand_" # sty):$Imm6), + !cast<Operand>("ror_operand_" # ty):$Imm6), "mvn\t$Rd, $Rm, $Imm6", - [(set GPR:$Rd, (not (rotr GPR:$Rm, - !cast<Operand>("lsl_operand_" # sty):$Imm6)))], + [(set ty:$Rd, (not (rotr ty:$Rm, + !cast<Operand>("lsl_operand_" # ty):$Imm6)))], NoItinerary>; } def _noshift : InstAlias<"mvn $Rn, $Rm", (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>; - def : Pat<(not GPR:$Rm), - (!cast<Instruction>(prefix # "_lsl") GPR:$Rm, 0)>; + def : Pat<(not ty:$Rm), + (!cast<Instruction>(prefix # "_lsl") $Rm, 0)>; } -defm MVNxx : mvn_shifts<"MVNxx", 0b1, "i64", GPR64>; -defm MVNww : mvn_shifts<"MVNww", 0b0, "i32", GPR32>; +defm MVNxx : mvn_shifts<"MVNxx", 0b1, i64, GPR64>; +defm MVNww : mvn_shifts<"MVNww", 0b0, i32, GPR32>; def MOVxx :InstAlias<"mov $Rd, $Rm", (ORRxxx_lsl GPR64:$Rd, XZR, GPR64:$Rm, 0)>; def MOVww :InstAlias<"mov $Rd, $Rm", (ORRwww_lsl GPR32:$Rd, WZR, GPR32:$Rm, 0)>; @@ -4279,14 +4284,14 @@ let isBranch = 1, isTerminator = 1 in { def TBZxii : A64I_TBimm<0b0, (outs), (ins GPR64:$Rt, uimm6:$Imm, tbimm_target:$Label), "tbz\t$Rt, $Imm, $Label", - [(A64br_cc (A64cmp (and GPR64:$Rt, tstb64_pat:$Imm), 0), + [(A64br_cc (A64cmp (and i64:$Rt, tstb64_pat:$Imm), 0), A64eq, bb:$Label)], NoItinerary>; def TBNZxii : A64I_TBimm<0b1, (outs), (ins GPR64:$Rt, uimm6:$Imm, tbimm_target:$Label), "tbnz\t$Rt, $Imm, $Label", - [(A64br_cc (A64cmp (and GPR64:$Rt, tstb64_pat:$Imm), 0), + [(A64br_cc (A64cmp (and i64:$Rt, tstb64_pat:$Imm), 0), A64ne, bb:$Label)], NoItinerary>; @@ -4298,7 +4303,7 @@ let isBranch = 1, isTerminator = 1 in { def TBZwii : A64I_TBimm<0b0, (outs), (ins GPR32:$Rt, uimm5:$Imm, tbimm_target:$Label), "tbz\t$Rt, $Imm, $Label", - [(A64br_cc (A64cmp (and GPR32:$Rt, tstb32_pat:$Imm), 0), + [(A64br_cc (A64cmp (and i32:$Rt, tstb32_pat:$Imm), 0), A64eq, bb:$Label)], NoItinerary> { let Imm{5} = 0b0; @@ -4307,7 +4312,7 @@ let isBranch = 1, isTerminator = 1 in { def TBNZwii : A64I_TBimm<0b1, (outs), (ins GPR32:$Rt, uimm5:$Imm, tbimm_target:$Label), "tbnz\t$Rt, $Imm, $Label", - [(A64br_cc (A64cmp (and GPR32:$Rt, tstb32_pat:$Imm), 0), + [(A64br_cc (A64cmp (and i32:$Rt, tstb32_pat:$Imm), 0), A64ne, bb:$Label)], NoItinerary> { let Imm{5} = 0b0; @@ -4383,13 +4388,13 @@ class A64I_BregImpl<bits<4> opc, let isBranch = 1 in { def BRx : A64I_BregImpl<0b0000,(outs), (ins GPR64:$Rn), - "br\t$Rn", [(brind GPR64:$Rn)]> { + "br\t$Rn", [(brind i64:$Rn)]> { let isBarrier = 1; let isTerminator = 1; } def BLRx : A64I_BregImpl<0b0001, (outs), (ins GPR64:$Rn), - "blr\t$Rn", [(AArch64Call GPR64:$Rn)]> { + "blr\t$Rn", [(AArch64Call i64:$Rn)]> { let isBarrier = 0; let isCall = 1; let Defs = [X30]; @@ -4457,8 +4462,6 @@ def : ADRP_ADD<A64WrapperSmall, tjumptable>; // GOT access patterns //===----------------------------------------------------------------------===// -// FIXME: Wibble - class GOTLoadSmall<SDNode addrfrag> : Pat<(A64GOTLoad (A64WrapperSmall addrfrag:$Hi, addrfrag:$Lo12, 8)), (LS64_LDR (ADRPxi addrfrag:$Hi), addrfrag:$Lo12)>; @@ -4478,7 +4481,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [XSP] in { def TC_RETURNxi : PseudoInst<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), - [(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff))]>; + [(AArch64tcret i64:$dst, (i32 
timm:$FPDiff))]>; } let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, @@ -4510,13 +4513,13 @@ def TLSDESCCALL : PseudoInst<(outs), (ins i64imm:$Lbl), []> { } def TLSDESC_BLRx : PseudoInst<(outs), (ins GPR64:$Rn, i64imm:$Var), - [(A64tlsdesc_blr GPR64:$Rn, tglobaltlsaddr:$Var)]> { + [(A64tlsdesc_blr i64:$Rn, tglobaltlsaddr:$Var)]> { let isCall = 1; let Defs = [X30]; } -def : Pat<(A64tlsdesc_blr GPR64:$Rn, texternalsym:$Var), - (TLSDESC_BLRx GPR64:$Rn, texternalsym:$Var)>; +def : Pat<(A64tlsdesc_blr i64:$Rn, texternalsym:$Var), + (TLSDESC_BLRx $Rn, texternalsym:$Var)>; //===----------------------------------------------------------------------===// // Bitfield patterns @@ -4539,22 +4542,22 @@ def bfi_width_to_imms : SDNodeXForm<imm, [{ // (either all bits are used or the low 32 bits are used). let AddedComplexity = 10 in { -def : Pat<(A64Bfi GPR64:$src, GPR64:$Rn, imm:$ImmR, imm:$ImmS), - (BFIxxii GPR64:$src, GPR64:$Rn, +def : Pat<(A64Bfi i64:$src, i64:$Rn, imm:$ImmR, imm:$ImmS), + (BFIxxii $src, $Rn, (bfi64_lsb_to_immr (i64 imm:$ImmR)), (bfi_width_to_imms (i64 imm:$ImmS)))>; -def : Pat<(A64Bfi GPR32:$src, GPR32:$Rn, imm:$ImmR, imm:$ImmS), - (BFIwwii GPR32:$src, GPR32:$Rn, +def : Pat<(A64Bfi i32:$src, i32:$Rn, imm:$ImmR, imm:$ImmS), + (BFIwwii $src, $Rn, (bfi32_lsb_to_immr (i64 imm:$ImmR)), (bfi_width_to_imms (i64 imm:$ImmS)))>; -def : Pat<(and (A64Bfi GPR64:$src, GPR64:$Rn, imm:$ImmR, imm:$ImmS), +def : Pat<(and (A64Bfi i64:$src, i64:$Rn, imm:$ImmR, imm:$ImmS), (i64 4294967295)), (SUBREG_TO_REG (i64 0), - (BFIwwii (EXTRACT_SUBREG GPR64:$src, sub_32), - (EXTRACT_SUBREG GPR64:$Rn, sub_32), + (BFIwwii (EXTRACT_SUBREG $src, sub_32), + (EXTRACT_SUBREG $Rn, sub_32), (bfi32_lsb_to_immr (i64 imm:$ImmR)), (bfi_width_to_imms (i64 imm:$ImmS))), sub_32)>; @@ -4566,20 +4569,19 @@ def : Pat<(and (A64Bfi GPR64:$src, GPR64:$Rn, imm:$ImmR, imm:$ImmS), //===----------------------------------------------------------------------===// // Truncation from 64 to 32-bits just involves renaming your register. -def : Pat<(i32 (trunc (i64 GPR64:$val))), (EXTRACT_SUBREG GPR64:$val, sub_32)>; +def : Pat<(i32 (trunc i64:$val)), (EXTRACT_SUBREG $val, sub_32)>; // Similarly, extension where we don't care about the high bits is // just a rename. -def : Pat<(i64 (anyext (i32 GPR32:$val))), - (INSERT_SUBREG (IMPLICIT_DEF), GPR32:$val, sub_32)>; +def : Pat<(i64 (anyext i32:$val)), + (INSERT_SUBREG (IMPLICIT_DEF), $val, sub_32)>; // SELECT instructions providing f128 types need to be handled by a // pseudo-instruction since the eventual code will need to introduce basic // blocks and control flow. def F128CSEL : PseudoInst<(outs FPR128:$Rd), - (ins FPR128:$Rn, FPR128:$Rm, cond_code_op:$Cond), - [(set FPR128:$Rd, (simple_select (f128 FPR128:$Rn), - FPR128:$Rm))]> { + (ins FPR128:$Rn, FPR128:$Rm, cond_code_op:$Cond), + [(set f128:$Rd, (simple_select f128:$Rn, f128:$Rm))]> { let Uses = [NZCV]; let usesCustomInserter = 1; } @@ -4691,13 +4693,13 @@ def atomic_store_simple_i64 : simple_store<atomic_store_64>; // Atomic patterns can be shared between integer operations of all sizes, a // quick multiclass here allows reuse. 
multiclass ls_atomic_pats<Instruction LOAD, Instruction STORE, dag Base, - dag Offset, dag address, RegisterClass TPR, + dag Offset, dag address, ValueType transty, ValueType sty> { def : Pat<(!cast<PatFrag>("atomic_load_simple_" # sty) address), (LOAD Base, Offset)>; - def : Pat<(!cast<PatFrag>("atomic_store_simple_" # sty) address, TPR:$Rt), - (STORE TPR:$Rt, Base, Offset)>; + def : Pat<(!cast<PatFrag>("atomic_store_simple_" # sty) address, transty:$Rt), + (STORE $Rt, Base, Offset)>; } // Instructions accessing a memory chunk smaller than a register (or, in a @@ -4709,7 +4711,7 @@ multiclass ls_atomic_pats<Instruction LOAD, Instruction STORE, dag Base, multiclass ls_small_pats<Instruction LOAD, Instruction STORE, dag Base, dag Offset, dag address, ValueType sty> - : ls_atomic_pats<LOAD, STORE, Base, Offset, address, GPR32, sty> { + : ls_atomic_pats<LOAD, STORE, Base, Offset, address, i32, sty> { def : Pat<(!cast<SDNode>(zextload # sty) address), (LOAD Base, Offset)>; def : Pat<(!cast<SDNode>(extload # sty) address), (LOAD Base, Offset)>; @@ -4722,13 +4724,13 @@ multiclass ls_small_pats<Instruction LOAD, Instruction STORE, def : Pat<(i64 (!cast<SDNode>(extload # sty) address)), (SUBREG_TO_REG (i64 0), (LOAD Base, Offset), sub_32)>; - def : Pat<(!cast<SDNode>(truncstore # sty) GPR32:$Rt, address), - (STORE GPR32:$Rt, Base, Offset)>; + def : Pat<(!cast<SDNode>(truncstore # sty) i32:$Rt, address), + (STORE $Rt, Base, Offset)>; // For truncating store from 64-bits, we have to manually tell LLVM to // ignore the high bits of the x register. - def : Pat<(!cast<SDNode>(truncstore # sty) GPR64:$Rt, address), - (STORE (EXTRACT_SUBREG GPR64:$Rt, sub_32), Base, Offset)>; + def : Pat<(!cast<SDNode>(truncstore # sty) i64:$Rt, address), + (STORE (EXTRACT_SUBREG $Rt, sub_32), Base, Offset)>; } // Next come patterns for sign-extending loads. @@ -4744,18 +4746,16 @@ multiclass load_signed_pats<string T, string U, dag Base, dag Offset, // and finally "natural-width" loads and stores come next. multiclass ls_neutral_pats<Instruction LOAD, Instruction STORE, dag Base, - dag Offset, dag address, RegisterClass TPR, - ValueType sty> { + dag Offset, dag address, ValueType sty> { def : Pat<(sty (load address)), (LOAD Base, Offset)>; - def : Pat<(store (sty TPR:$Rt), address), (STORE TPR:$Rt, Base, Offset)>; + def : Pat<(store sty:$Rt, address), (STORE $Rt, Base, Offset)>; } // Integer operations also get atomic instructions to select for. multiclass ls_int_neutral_pats<Instruction LOAD, Instruction STORE, dag Base, - dag Offset, dag address, RegisterClass TPR, - ValueType sty> - : ls_neutral_pats<LOAD, STORE, Base, Offset, address, TPR, sty>, - ls_atomic_pats<LOAD, STORE, Base, Offset, address, TPR, sty>; + dag Offset, dag address, ValueType sty> + : ls_neutral_pats<LOAD, STORE, Base, Offset, address, sty>, + ls_atomic_pats<LOAD, STORE, Base, Offset, address, sty, sty>; //===------------------------------ // 2.2. 
Addressing-mode instantiations @@ -4790,7 +4790,7 @@ multiclass uimm12_pats<dag address, dag Base, dag Offset> { !foreach(decls.pattern, address, !subst(OFFSET, word_uimm12, !subst(ALIGN, min_align4, decls.pattern))), - GPR32, i32>; + i32>; defm : ls_int_neutral_pats<LS64_LDR, LS64_STR, Base, !foreach(decls.pattern, Offset, @@ -4798,7 +4798,7 @@ multiclass uimm12_pats<dag address, dag Base, dag Offset> { !foreach(decls.pattern, address, !subst(OFFSET, dword_uimm12, !subst(ALIGN, min_align8, decls.pattern))), - GPR64, i64>; + i64>; defm : ls_neutral_pats<LSFP16_LDR, LSFP16_STR, Base, !foreach(decls.pattern, Offset, @@ -4806,7 +4806,7 @@ multiclass uimm12_pats<dag address, dag Base, dag Offset> { !foreach(decls.pattern, address, !subst(OFFSET, hword_uimm12, !subst(ALIGN, min_align2, decls.pattern))), - FPR16, f16>; + f16>; defm : ls_neutral_pats<LSFP32_LDR, LSFP32_STR, Base, !foreach(decls.pattern, Offset, @@ -4814,7 +4814,7 @@ multiclass uimm12_pats<dag address, dag Base, dag Offset> { !foreach(decls.pattern, address, !subst(OFFSET, word_uimm12, !subst(ALIGN, min_align4, decls.pattern))), - FPR32, f32>; + f32>; defm : ls_neutral_pats<LSFP64_LDR, LSFP64_STR, Base, !foreach(decls.pattern, Offset, @@ -4822,7 +4822,7 @@ multiclass uimm12_pats<dag address, dag Base, dag Offset> { !foreach(decls.pattern, address, !subst(OFFSET, dword_uimm12, !subst(ALIGN, min_align8, decls.pattern))), - FPR64, f64>; + f64>; defm : ls_neutral_pats<LSFP128_LDR, LSFP128_STR, Base, !foreach(decls.pattern, Offset, @@ -4830,7 +4830,7 @@ multiclass uimm12_pats<dag address, dag Base, dag Offset> { !foreach(decls.pattern, address, !subst(OFFSET, qword_uimm12, !subst(ALIGN, min_align16, decls.pattern))), - FPR128, f128>; + f128>; defm : load_signed_pats<"B", "", Base, !foreach(decls.pattern, Offset, @@ -4857,13 +4857,13 @@ multiclass uimm12_pats<dag address, dag Base, dag Offset> { // Straightforward patterns of last resort: a pointer with or without an // appropriate offset. -defm : uimm12_pats<(i64 GPR64xsp:$Rn), (i64 GPR64xsp:$Rn), (i64 0)>; -defm : uimm12_pats<(add GPR64xsp:$Rn, OFFSET:$UImm12), - (i64 GPR64xsp:$Rn), (i64 OFFSET:$UImm12)>; +defm : uimm12_pats<(i64 i64:$Rn), (i64 i64:$Rn), (i64 0)>; +defm : uimm12_pats<(add i64:$Rn, OFFSET:$UImm12), + (i64 i64:$Rn), (i64 OFFSET:$UImm12)>; // The offset could be hidden behind an "or", of course: -defm : uimm12_pats<(add_like_or GPR64xsp:$Rn, OFFSET:$UImm12), - (i64 GPR64xsp:$Rn), (i64 OFFSET:$UImm12)>; +defm : uimm12_pats<(add_like_or i64:$Rn, OFFSET:$UImm12), + (i64 i64:$Rn), (i64 OFFSET:$UImm12)>; // Global addresses under the small-absolute model should use these // instructions. There are ELF relocations specifically for it. 
@@ -4897,36 +4897,31 @@ multiclass simm9_pats<dag address, dag Base, dag Offset> { defm : ls_small_pats<LS8_LDUR, LS8_STUR, Base, Offset, address, i8>; defm : ls_small_pats<LS16_LDUR, LS16_STUR, Base, Offset, address, i16>; - defm : ls_int_neutral_pats<LS32_LDUR, LS32_STUR, Base, Offset, address, - GPR32, i32>; - defm : ls_int_neutral_pats<LS64_LDUR, LS64_STUR, Base, Offset, address, - GPR64, i64>; - - defm : ls_neutral_pats<LSFP16_LDUR, LSFP16_STUR, Base, Offset, address, - FPR16, f16>; - defm : ls_neutral_pats<LSFP32_LDUR, LSFP32_STUR, Base, Offset, address, - FPR32, f32>; - defm : ls_neutral_pats<LSFP64_LDUR, LSFP64_STUR, Base, Offset, address, - FPR64, f64>; + defm : ls_int_neutral_pats<LS32_LDUR, LS32_STUR, Base, Offset, address, i32>; + defm : ls_int_neutral_pats<LS64_LDUR, LS64_STUR, Base, Offset, address, i64>; + + defm : ls_neutral_pats<LSFP16_LDUR, LSFP16_STUR, Base, Offset, address, f16>; + defm : ls_neutral_pats<LSFP32_LDUR, LSFP32_STUR, Base, Offset, address, f32>; + defm : ls_neutral_pats<LSFP64_LDUR, LSFP64_STUR, Base, Offset, address, f64>; defm : ls_neutral_pats<LSFP128_LDUR, LSFP128_STUR, Base, Offset, address, - FPR128, f128>; + f128>; def : Pat<(i64 (zextloadi32 address)), (SUBREG_TO_REG (i64 0), (LS32_LDUR Base, Offset), sub_32)>; - def : Pat<(truncstorei32 GPR64:$Rt, address), - (LS32_STUR (EXTRACT_SUBREG GPR64:$Rt, sub_32), Base, Offset)>; + def : Pat<(truncstorei32 i64:$Rt, address), + (LS32_STUR (EXTRACT_SUBREG $Rt, sub_32), Base, Offset)>; defm : load_signed_pats<"B", "_U", Base, Offset, address, i8>; defm : load_signed_pats<"H", "_U", Base, Offset, address, i16>; def : Pat<(sextloadi32 address), (LDURSWx Base, Offset)>; } -defm : simm9_pats<(add GPR64xsp:$Rn, simm9:$SImm9), - (i64 GPR64xsp:$Rn), (SDXF_simm9 simm9:$SImm9)>; +defm : simm9_pats<(add i64:$Rn, simm9:$SImm9), + (i64 $Rn), (SDXF_simm9 simm9:$SImm9)>; -defm : simm9_pats<(add_like_or GPR64xsp:$Rn, simm9:$SImm9), - (i64 GPR64xsp:$Rn), (SDXF_simm9 simm9:$SImm9)>; +defm : simm9_pats<(add_like_or i64:$Rn, simm9:$SImm9), + (i64 $Rn), (SDXF_simm9 simm9:$SImm9)>; //===------------------------------ @@ -4937,12 +4932,12 @@ defm : simm9_pats<(add_like_or GPR64xsp:$Rn, simm9:$SImm9), // quick multiclass here allows reuse. 
multiclass ro_atomic_pats<Instruction LOAD, Instruction STORE, dag Base, dag Offset, dag Extend, dag address, - RegisterClass TPR, ValueType sty> { + ValueType transty, ValueType sty> { def : Pat<(!cast<PatFrag>("atomic_load_simple_" # sty) address), (LOAD Base, Offset, Extend)>; - def : Pat<(!cast<PatFrag>("atomic_store_simple_" # sty) address, TPR:$Rt), - (STORE TPR:$Rt, Base, Offset, Extend)>; + def : Pat<(!cast<PatFrag>("atomic_store_simple_" # sty) address, transty:$Rt), + (STORE $Rt, Base, Offset, Extend)>; } // The register offset instructions take three operands giving the instruction, @@ -4953,7 +4948,7 @@ multiclass ro_atomic_pats<Instruction LOAD, Instruction STORE, dag Base, multiclass ro_small_pats<Instruction LOAD, Instruction STORE, dag Base, dag Offset, dag Extend, dag address, ValueType sty> - : ro_atomic_pats<LOAD, STORE, Base, Offset, Extend, address, GPR32, sty> { + : ro_atomic_pats<LOAD, STORE, Base, Offset, Extend, address, i32, sty> { def : Pat<(!cast<SDNode>(zextload # sty) address), (LOAD Base, Offset, Extend)>; @@ -4968,13 +4963,13 @@ multiclass ro_small_pats<Instruction LOAD, Instruction STORE, def : Pat<(i64 (!cast<SDNode>(extload # sty) address)), (SUBREG_TO_REG (i64 0), (LOAD Base, Offset, Extend), sub_32)>; - def : Pat<(!cast<SDNode>(truncstore # sty) GPR32:$Rt, address), - (STORE GPR32:$Rt, Base, Offset, Extend)>; + def : Pat<(!cast<SDNode>(truncstore # sty) i32:$Rt, address), + (STORE $Rt, Base, Offset, Extend)>; // For truncating store from 64-bits, we have to manually tell LLVM to // ignore the high bits of the x register. - def : Pat<(!cast<SDNode>(truncstore # sty) GPR64:$Rt, address), - (STORE (EXTRACT_SUBREG GPR64:$Rt, sub_32), Base, Offset, Extend)>; + def : Pat<(!cast<SDNode>(truncstore # sty) i64:$Rt, address), + (STORE (EXTRACT_SUBREG $Rt, sub_32), Base, Offset, Extend)>; } @@ -4993,17 +4988,17 @@ multiclass ro_signed_pats<string T, string Rm, dag Base, dag Offset, dag Extend, // and finally "natural-width" loads and stores come next. 
multiclass ro_neutral_pats<Instruction LOAD, Instruction STORE, dag Base, dag Offset, dag Extend, dag address, - RegisterClass TPR, ValueType sty> { + ValueType sty> { def : Pat<(sty (load address)), (LOAD Base, Offset, Extend)>; - def : Pat<(store (sty TPR:$Rt), address), - (STORE TPR:$Rt, Base, Offset, Extend)>; + def : Pat<(store sty:$Rt, address), + (STORE $Rt, Base, Offset, Extend)>; } multiclass ro_int_neutral_pats<Instruction LOAD, Instruction STORE, dag Base, dag Offset, dag Extend, dag address, - RegisterClass TPR, ValueType sty> - : ro_neutral_pats<LOAD, STORE, Base, Offset, Extend, address, TPR, sty>, - ro_atomic_pats<LOAD, STORE, Base, Offset, Extend, address, TPR, sty>; + ValueType sty> + : ro_neutral_pats<LOAD, STORE, Base, Offset, Extend, address, sty>, + ro_atomic_pats<LOAD, STORE, Base, Offset, Extend, address, sty, sty>; multiclass regoff_pats<string Rm, dag address, dag Base, dag Offset, dag Extend> { @@ -5032,7 +5027,7 @@ multiclass regoff_pats<string Rm, dag address, dag Base, dag Offset, Base, Offset, Extend, !foreach(decls.pattern, address, !subst(SHIFT, imm_eq2, decls.pattern)), - GPR32, i32>; + i32>; defm : ro_int_neutral_pats< !cast<Instruction>("LS64_" # Rm # "_RegOffset_LDR"), @@ -5040,45 +5035,45 @@ multiclass regoff_pats<string Rm, dag address, dag Base, dag Offset, Base, Offset, Extend, !foreach(decls.pattern, address, !subst(SHIFT, imm_eq3, decls.pattern)), - GPR64, i64>; + i64>; defm : ro_neutral_pats<!cast<Instruction>("LSFP16_" # Rm # "_RegOffset_LDR"), !cast<Instruction>("LSFP16_" # Rm # "_RegOffset_STR"), Base, Offset, Extend, !foreach(decls.pattern, address, !subst(SHIFT, imm_eq1, decls.pattern)), - FPR16, f16>; + f16>; defm : ro_neutral_pats<!cast<Instruction>("LSFP32_" # Rm # "_RegOffset_LDR"), !cast<Instruction>("LSFP32_" # Rm # "_RegOffset_STR"), Base, Offset, Extend, !foreach(decls.pattern, address, !subst(SHIFT, imm_eq2, decls.pattern)), - FPR32, f32>; + f32>; defm : ro_neutral_pats<!cast<Instruction>("LSFP64_" # Rm # "_RegOffset_LDR"), !cast<Instruction>("LSFP64_" # Rm # "_RegOffset_STR"), Base, Offset, Extend, !foreach(decls.pattern, address, !subst(SHIFT, imm_eq3, decls.pattern)), - FPR64, f64>; + f64>; defm : ro_neutral_pats<!cast<Instruction>("LSFP128_" # Rm # "_RegOffset_LDR"), !cast<Instruction>("LSFP128_" # Rm # "_RegOffset_STR"), Base, Offset, Extend, !foreach(decls.pattern, address, !subst(SHIFT, imm_eq4, decls.pattern)), - FPR128, f128>; + f128>; defm : ro_signed_pats<"B", Rm, Base, Offset, Extend, - !foreach(decls.pattern, address, - !subst(SHIFT, imm_eq0, decls.pattern)), - i8>; + !foreach(decls.pattern, address, + !subst(SHIFT, imm_eq0, decls.pattern)), + i8>; defm : ro_signed_pats<"H", Rm, Base, Offset, Extend, - !foreach(decls.pattern, address, - !subst(SHIFT, imm_eq1, decls.pattern)), - i16>; + !foreach(decls.pattern, address, + !subst(SHIFT, imm_eq1, decls.pattern)), + i16>; def : Pat<(sextloadi32 !foreach(decls.pattern, address, !subst(SHIFT, imm_eq2, decls.pattern))), @@ -5091,20 +5086,20 @@ multiclass regoff_pats<string Rm, dag address, dag Base, dag Offset, // using register-offset instructions. Essentially a base plus a possibly // extended, possibly shifted (by access size) offset. 
-defm : regoff_pats<"Wm", (add GPR64xsp:$Rn, (sext GPR32:$Rm)), - (i64 GPR64xsp:$Rn), (i32 GPR32:$Rm), (i64 6)>; +defm : regoff_pats<"Wm", (add i64:$Rn, (sext i32:$Rm)), + (i64 i64:$Rn), (i32 i32:$Rm), (i64 6)>; -defm : regoff_pats<"Wm", (add GPR64xsp:$Rn, (shl (sext GPR32:$Rm), SHIFT)), - (i64 GPR64xsp:$Rn), (i32 GPR32:$Rm), (i64 7)>; +defm : regoff_pats<"Wm", (add i64:$Rn, (shl (sext i32:$Rm), SHIFT)), + (i64 i64:$Rn), (i32 i32:$Rm), (i64 7)>; -defm : regoff_pats<"Wm", (add GPR64xsp:$Rn, (zext GPR32:$Rm)), - (i64 GPR64xsp:$Rn), (i32 GPR32:$Rm), (i64 2)>; +defm : regoff_pats<"Wm", (add i64:$Rn, (zext i32:$Rm)), + (i64 i64:$Rn), (i32 i32:$Rm), (i64 2)>; -defm : regoff_pats<"Wm", (add GPR64xsp:$Rn, (shl (zext GPR32:$Rm), SHIFT)), - (i64 GPR64xsp:$Rn), (i32 GPR32:$Rm), (i64 3)>; +defm : regoff_pats<"Wm", (add i64:$Rn, (shl (zext i32:$Rm), SHIFT)), + (i64 i64:$Rn), (i32 i32:$Rm), (i64 3)>; -defm : regoff_pats<"Xm", (add GPR64xsp:$Rn, GPR64:$Rm), - (i64 GPR64xsp:$Rn), (i64 GPR64:$Rm), (i64 2)>; +defm : regoff_pats<"Xm", (add i64:$Rn, i64:$Rm), + (i64 i64:$Rn), (i64 i64:$Rm), (i64 2)>; -defm : regoff_pats<"Xm", (add GPR64xsp:$Rn, (shl GPR64:$Rm, SHIFT)), - (i64 GPR64xsp:$Rn), (i64 GPR64:$Rm), (i64 3)>; +defm : regoff_pats<"Xm", (add i64:$Rn, (shl i64:$Rm, SHIFT)), + (i64 i64:$Rn), (i64 i64:$Rm), (i64 3)>; diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp index b83577a..3b811df 100644 --- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp +++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp @@ -63,14 +63,15 @@ public: ~AArch64ELFStreamer() {} - virtual void ChangeSection(const MCSection *Section) { + virtual void ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { // We have to keep track of the mapping symbol state of any sections we // use. Each one should start off as EMS_None, which is provided as the // default constructor by DenseMap::lookup. 
- LastMappingSymbols[getPreviousSection()] = LastEMS; + LastMappingSymbols[getPreviousSection().first] = LastEMS; LastEMS = LastMappingSymbols.lookup(Section); - MCELFStreamer::ChangeSection(Section); + MCELFStreamer::ChangeSection(Section, Subsection); } /// This function is the one used to emit instruction data into the ELF @@ -129,7 +130,7 @@ private: MCELF::SetType(SD, ELF::STT_NOTYPE); MCELF::SetBinding(SD, ELF::STB_LOCAL); SD.setExternal(false); - Symbol->setSection(*getCurrentSection()); + Symbol->setSection(*getCurrentSection().first); const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext()); Symbol->setVariableValue(Value); diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp index ab9bba1..bedccb5 100644 --- a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp +++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp @@ -194,7 +194,55 @@ const NamedImmMapper::Mapping A64SysReg::MRSMapper::MRSPairs[] = { {"rvbar_el3", RVBAR_EL3}, {"isr_el1", ISR_EL1}, {"cntpct_el0", CNTPCT_EL0}, - {"cntvct_el0", CNTVCT_EL0} + {"cntvct_el0", CNTVCT_EL0}, + + // Trace registers + {"trcstatr", TRCSTATR}, + {"trcidr8", TRCIDR8}, + {"trcidr9", TRCIDR9}, + {"trcidr10", TRCIDR10}, + {"trcidr11", TRCIDR11}, + {"trcidr12", TRCIDR12}, + {"trcidr13", TRCIDR13}, + {"trcidr0", TRCIDR0}, + {"trcidr1", TRCIDR1}, + {"trcidr2", TRCIDR2}, + {"trcidr3", TRCIDR3}, + {"trcidr4", TRCIDR4}, + {"trcidr5", TRCIDR5}, + {"trcidr6", TRCIDR6}, + {"trcidr7", TRCIDR7}, + {"trcoslsr", TRCOSLSR}, + {"trcpdsr", TRCPDSR}, + {"trcdevaff0", TRCDEVAFF0}, + {"trcdevaff1", TRCDEVAFF1}, + {"trclsr", TRCLSR}, + {"trcauthstatus", TRCAUTHSTATUS}, + {"trcdevarch", TRCDEVARCH}, + {"trcdevid", TRCDEVID}, + {"trcdevtype", TRCDEVTYPE}, + {"trcpidr4", TRCPIDR4}, + {"trcpidr5", TRCPIDR5}, + {"trcpidr6", TRCPIDR6}, + {"trcpidr7", TRCPIDR7}, + {"trcpidr0", TRCPIDR0}, + {"trcpidr1", TRCPIDR1}, + {"trcpidr2", TRCPIDR2}, + {"trcpidr3", TRCPIDR3}, + {"trccidr0", TRCCIDR0}, + {"trccidr1", TRCCIDR1}, + {"trccidr2", TRCCIDR2}, + {"trccidr3", TRCCIDR3}, + + // GICv3 registers + {"icc_iar1_el1", ICC_IAR1_EL1}, + {"icc_iar0_el1", ICC_IAR0_EL1}, + {"icc_hppir1_el1", ICC_HPPIR1_EL1}, + {"icc_hppir0_el1", ICC_HPPIR0_EL1}, + {"icc_rpr_el1", ICC_RPR_EL1}, + {"ich_vtr_el2", ICH_VTR_EL2}, + {"ich_eisr_el2", ICH_EISR_EL2}, + {"ich_elsr_el2", ICH_ELSR_EL2} }; A64SysReg::MRSMapper::MRSMapper() { @@ -205,7 +253,19 @@ A64SysReg::MRSMapper::MRSMapper() { const NamedImmMapper::Mapping A64SysReg::MSRMapper::MSRPairs[] = { {"dbgdtrtx_el0", DBGDTRTX_EL0}, {"oslar_el1", OSLAR_EL1}, - {"pmswinc_el0", PMSWINC_EL0} + {"pmswinc_el0", PMSWINC_EL0}, + + // Trace registers + {"trcoslar", TRCOSLAR}, + {"trclar", TRCLAR}, + + // GICv3 registers + {"icc_eoir1_el1", ICC_EOIR1_EL1}, + {"icc_eoir0_el1", ICC_EOIR0_EL1}, + {"icc_dir_el1", ICC_DIR_EL1}, + {"icc_sgi1r_el1", ICC_SGI1R_EL1}, + {"icc_asgi1r_el1", ICC_ASGI1R_EL1}, + {"icc_sgi0r_el1", ICC_SGI0R_EL1} }; A64SysReg::MSRMapper::MSRMapper() { @@ -467,6 +527,230 @@ const NamedImmMapper::Mapping A64SysReg::SysRegMapper::SysRegPairs[] = { {"pmevtyper28_el0", PMEVTYPER28_EL0}, {"pmevtyper29_el0", PMEVTYPER29_EL0}, {"pmevtyper30_el0", PMEVTYPER30_EL0}, + + // Trace registers + {"trcprgctlr", TRCPRGCTLR}, + {"trcprocselr", TRCPROCSELR}, + {"trcconfigr", TRCCONFIGR}, + {"trcauxctlr", TRCAUXCTLR}, + {"trceventctl0r", TRCEVENTCTL0R}, + {"trceventctl1r", TRCEVENTCTL1R}, + {"trcstallctlr", TRCSTALLCTLR}, + {"trctsctlr", TRCTSCTLR}, + {"trcsyncpr", TRCSYNCPR}, + {"trcccctlr", TRCCCCTLR}, + 
{"trcbbctlr", TRCBBCTLR}, + {"trctraceidr", TRCTRACEIDR}, + {"trcqctlr", TRCQCTLR}, + {"trcvictlr", TRCVICTLR}, + {"trcviiectlr", TRCVIIECTLR}, + {"trcvissctlr", TRCVISSCTLR}, + {"trcvipcssctlr", TRCVIPCSSCTLR}, + {"trcvdctlr", TRCVDCTLR}, + {"trcvdsacctlr", TRCVDSACCTLR}, + {"trcvdarcctlr", TRCVDARCCTLR}, + {"trcseqevr0", TRCSEQEVR0}, + {"trcseqevr1", TRCSEQEVR1}, + {"trcseqevr2", TRCSEQEVR2}, + {"trcseqrstevr", TRCSEQRSTEVR}, + {"trcseqstr", TRCSEQSTR}, + {"trcextinselr", TRCEXTINSELR}, + {"trccntrldvr0", TRCCNTRLDVR0}, + {"trccntrldvr1", TRCCNTRLDVR1}, + {"trccntrldvr2", TRCCNTRLDVR2}, + {"trccntrldvr3", TRCCNTRLDVR3}, + {"trccntctlr0", TRCCNTCTLR0}, + {"trccntctlr1", TRCCNTCTLR1}, + {"trccntctlr2", TRCCNTCTLR2}, + {"trccntctlr3", TRCCNTCTLR3}, + {"trccntvr0", TRCCNTVR0}, + {"trccntvr1", TRCCNTVR1}, + {"trccntvr2", TRCCNTVR2}, + {"trccntvr3", TRCCNTVR3}, + {"trcimspec0", TRCIMSPEC0}, + {"trcimspec1", TRCIMSPEC1}, + {"trcimspec2", TRCIMSPEC2}, + {"trcimspec3", TRCIMSPEC3}, + {"trcimspec4", TRCIMSPEC4}, + {"trcimspec5", TRCIMSPEC5}, + {"trcimspec6", TRCIMSPEC6}, + {"trcimspec7", TRCIMSPEC7}, + {"trcrsctlr2", TRCRSCTLR2}, + {"trcrsctlr3", TRCRSCTLR3}, + {"trcrsctlr4", TRCRSCTLR4}, + {"trcrsctlr5", TRCRSCTLR5}, + {"trcrsctlr6", TRCRSCTLR6}, + {"trcrsctlr7", TRCRSCTLR7}, + {"trcrsctlr8", TRCRSCTLR8}, + {"trcrsctlr9", TRCRSCTLR9}, + {"trcrsctlr10", TRCRSCTLR10}, + {"trcrsctlr11", TRCRSCTLR11}, + {"trcrsctlr12", TRCRSCTLR12}, + {"trcrsctlr13", TRCRSCTLR13}, + {"trcrsctlr14", TRCRSCTLR14}, + {"trcrsctlr15", TRCRSCTLR15}, + {"trcrsctlr16", TRCRSCTLR16}, + {"trcrsctlr17", TRCRSCTLR17}, + {"trcrsctlr18", TRCRSCTLR18}, + {"trcrsctlr19", TRCRSCTLR19}, + {"trcrsctlr20", TRCRSCTLR20}, + {"trcrsctlr21", TRCRSCTLR21}, + {"trcrsctlr22", TRCRSCTLR22}, + {"trcrsctlr23", TRCRSCTLR23}, + {"trcrsctlr24", TRCRSCTLR24}, + {"trcrsctlr25", TRCRSCTLR25}, + {"trcrsctlr26", TRCRSCTLR26}, + {"trcrsctlr27", TRCRSCTLR27}, + {"trcrsctlr28", TRCRSCTLR28}, + {"trcrsctlr29", TRCRSCTLR29}, + {"trcrsctlr30", TRCRSCTLR30}, + {"trcrsctlr31", TRCRSCTLR31}, + {"trcssccr0", TRCSSCCR0}, + {"trcssccr1", TRCSSCCR1}, + {"trcssccr2", TRCSSCCR2}, + {"trcssccr3", TRCSSCCR3}, + {"trcssccr4", TRCSSCCR4}, + {"trcssccr5", TRCSSCCR5}, + {"trcssccr6", TRCSSCCR6}, + {"trcssccr7", TRCSSCCR7}, + {"trcsscsr0", TRCSSCSR0}, + {"trcsscsr1", TRCSSCSR1}, + {"trcsscsr2", TRCSSCSR2}, + {"trcsscsr3", TRCSSCSR3}, + {"trcsscsr4", TRCSSCSR4}, + {"trcsscsr5", TRCSSCSR5}, + {"trcsscsr6", TRCSSCSR6}, + {"trcsscsr7", TRCSSCSR7}, + {"trcsspcicr0", TRCSSPCICR0}, + {"trcsspcicr1", TRCSSPCICR1}, + {"trcsspcicr2", TRCSSPCICR2}, + {"trcsspcicr3", TRCSSPCICR3}, + {"trcsspcicr4", TRCSSPCICR4}, + {"trcsspcicr5", TRCSSPCICR5}, + {"trcsspcicr6", TRCSSPCICR6}, + {"trcsspcicr7", TRCSSPCICR7}, + {"trcpdcr", TRCPDCR}, + {"trcacvr0", TRCACVR0}, + {"trcacvr1", TRCACVR1}, + {"trcacvr2", TRCACVR2}, + {"trcacvr3", TRCACVR3}, + {"trcacvr4", TRCACVR4}, + {"trcacvr5", TRCACVR5}, + {"trcacvr6", TRCACVR6}, + {"trcacvr7", TRCACVR7}, + {"trcacvr8", TRCACVR8}, + {"trcacvr9", TRCACVR9}, + {"trcacvr10", TRCACVR10}, + {"trcacvr11", TRCACVR11}, + {"trcacvr12", TRCACVR12}, + {"trcacvr13", TRCACVR13}, + {"trcacvr14", TRCACVR14}, + {"trcacvr15", TRCACVR15}, + {"trcacatr0", TRCACATR0}, + {"trcacatr1", TRCACATR1}, + {"trcacatr2", TRCACATR2}, + {"trcacatr3", TRCACATR3}, + {"trcacatr4", TRCACATR4}, + {"trcacatr5", TRCACATR5}, + {"trcacatr6", TRCACATR6}, + {"trcacatr7", TRCACATR7}, + {"trcacatr8", TRCACATR8}, + {"trcacatr9", TRCACATR9}, + {"trcacatr10", TRCACATR10}, + {"trcacatr11", TRCACATR11}, + 
{"trcacatr12", TRCACATR12}, + {"trcacatr13", TRCACATR13}, + {"trcacatr14", TRCACATR14}, + {"trcacatr15", TRCACATR15}, + {"trcdvcvr0", TRCDVCVR0}, + {"trcdvcvr1", TRCDVCVR1}, + {"trcdvcvr2", TRCDVCVR2}, + {"trcdvcvr3", TRCDVCVR3}, + {"trcdvcvr4", TRCDVCVR4}, + {"trcdvcvr5", TRCDVCVR5}, + {"trcdvcvr6", TRCDVCVR6}, + {"trcdvcvr7", TRCDVCVR7}, + {"trcdvcmr0", TRCDVCMR0}, + {"trcdvcmr1", TRCDVCMR1}, + {"trcdvcmr2", TRCDVCMR2}, + {"trcdvcmr3", TRCDVCMR3}, + {"trcdvcmr4", TRCDVCMR4}, + {"trcdvcmr5", TRCDVCMR5}, + {"trcdvcmr6", TRCDVCMR6}, + {"trcdvcmr7", TRCDVCMR7}, + {"trccidcvr0", TRCCIDCVR0}, + {"trccidcvr1", TRCCIDCVR1}, + {"trccidcvr2", TRCCIDCVR2}, + {"trccidcvr3", TRCCIDCVR3}, + {"trccidcvr4", TRCCIDCVR4}, + {"trccidcvr5", TRCCIDCVR5}, + {"trccidcvr6", TRCCIDCVR6}, + {"trccidcvr7", TRCCIDCVR7}, + {"trcvmidcvr0", TRCVMIDCVR0}, + {"trcvmidcvr1", TRCVMIDCVR1}, + {"trcvmidcvr2", TRCVMIDCVR2}, + {"trcvmidcvr3", TRCVMIDCVR3}, + {"trcvmidcvr4", TRCVMIDCVR4}, + {"trcvmidcvr5", TRCVMIDCVR5}, + {"trcvmidcvr6", TRCVMIDCVR6}, + {"trcvmidcvr7", TRCVMIDCVR7}, + {"trccidcctlr0", TRCCIDCCTLR0}, + {"trccidcctlr1", TRCCIDCCTLR1}, + {"trcvmidcctlr0", TRCVMIDCCTLR0}, + {"trcvmidcctlr1", TRCVMIDCCTLR1}, + {"trcitctrl", TRCITCTRL}, + {"trcclaimset", TRCCLAIMSET}, + {"trcclaimclr", TRCCLAIMCLR}, + + // GICv3 registers + {"icc_bpr1_el1", ICC_BPR1_EL1}, + {"icc_bpr0_el1", ICC_BPR0_EL1}, + {"icc_pmr_el1", ICC_PMR_EL1}, + {"icc_ctlr_el1", ICC_CTLR_EL1}, + {"icc_ctlr_el3", ICC_CTLR_EL3}, + {"icc_sre_el1", ICC_SRE_EL1}, + {"icc_sre_el2", ICC_SRE_EL2}, + {"icc_sre_el3", ICC_SRE_EL3}, + {"icc_igrpen0_el1", ICC_IGRPEN0_EL1}, + {"icc_igrpen1_el1", ICC_IGRPEN1_EL1}, + {"icc_igrpen1_el3", ICC_IGRPEN1_EL3}, + {"icc_seien_el1", ICC_SEIEN_EL1}, + {"icc_ap0r0_el1", ICC_AP0R0_EL1}, + {"icc_ap0r1_el1", ICC_AP0R1_EL1}, + {"icc_ap0r2_el1", ICC_AP0R2_EL1}, + {"icc_ap0r3_el1", ICC_AP0R3_EL1}, + {"icc_ap1r0_el1", ICC_AP1R0_EL1}, + {"icc_ap1r1_el1", ICC_AP1R1_EL1}, + {"icc_ap1r2_el1", ICC_AP1R2_EL1}, + {"icc_ap1r3_el1", ICC_AP1R3_EL1}, + {"ich_ap0r0_el2", ICH_AP0R0_EL2}, + {"ich_ap0r1_el2", ICH_AP0R1_EL2}, + {"ich_ap0r2_el2", ICH_AP0R2_EL2}, + {"ich_ap0r3_el2", ICH_AP0R3_EL2}, + {"ich_ap1r0_el2", ICH_AP1R0_EL2}, + {"ich_ap1r1_el2", ICH_AP1R1_EL2}, + {"ich_ap1r2_el2", ICH_AP1R2_EL2}, + {"ich_ap1r3_el2", ICH_AP1R3_EL2}, + {"ich_hcr_el2", ICH_HCR_EL2}, + {"ich_misr_el2", ICH_MISR_EL2}, + {"ich_vmcr_el2", ICH_VMCR_EL2}, + {"ich_vseir_el2", ICH_VSEIR_EL2}, + {"ich_lr0_el2", ICH_LR0_EL2}, + {"ich_lr1_el2", ICH_LR1_EL2}, + {"ich_lr2_el2", ICH_LR2_EL2}, + {"ich_lr3_el2", ICH_LR3_EL2}, + {"ich_lr4_el2", ICH_LR4_EL2}, + {"ich_lr5_el2", ICH_LR5_EL2}, + {"ich_lr6_el2", ICH_LR6_EL2}, + {"ich_lr7_el2", ICH_LR7_EL2}, + {"ich_lr8_el2", ICH_LR8_EL2}, + {"ich_lr9_el2", ICH_LR9_EL2}, + {"ich_lr10_el2", ICH_LR10_EL2}, + {"ich_lr11_el2", ICH_LR11_EL2}, + {"ich_lr12_el2", ICH_LR12_EL2}, + {"ich_lr13_el2", ICH_LR13_EL2}, + {"ich_lr14_el2", ICH_LR14_EL2}, + {"ich_lr15_el2", ICH_LR15_EL2} }; uint32_t @@ -697,8 +981,11 @@ bool A64Imms::isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits) { Rotation = RepeatWidth - Rotation; } - uint64_t ReplicatedOnes = (ReplicatedMask >> Rotation) - | ((ReplicatedMask << (RepeatWidth - Rotation)) & RepeatMask); + uint64_t ReplicatedOnes = ReplicatedMask; + if (Rotation != 0 && Rotation != 64) + ReplicatedOnes = (ReplicatedMask >> Rotation) + | ((ReplicatedMask << (RepeatWidth - Rotation)) & RepeatMask); + // Of course, they may not actually be ones, so we have to check that: if (!isMask_64(ReplicatedOnes)) continue; 
@@ -767,13 +1054,14 @@ bool A64Imms::isLogicalImmBits(unsigned RegWidth, uint32_t Bits, int Rotation = (ImmR & (Width - 1)); uint64_t Mask = (1ULL << Num1s) - 1; uint64_t WidthMask = Width == 64 ? -1 : (1ULL << Width) - 1; - Mask = (Mask >> Rotation) - | ((Mask << (Width - Rotation)) & WidthMask); + if (Rotation != 0 && Rotation != 64) + Mask = (Mask >> Rotation) + | ((Mask << (Width - Rotation)) & WidthMask); - Imm = 0; - for (unsigned i = 0; i < RegWidth / Width; ++i) { - Imm |= Mask; + Imm = Mask; + for (unsigned i = 1; i < RegWidth / Width; ++i) { Mask <<= Width; + Imm |= Mask; } return true; diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/lib/Target/AArch64/Utils/AArch64BaseInfo.h index 5eebf44..1b773d6 100644 --- a/lib/Target/AArch64/Utils/AArch64BaseInfo.h +++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.h @@ -354,13 +354,73 @@ namespace A64SysReg { RVBAR_EL3 = 0xf601, // 11 110 1100 0000 001 ISR_EL1 = 0xc608, // 11 000 1100 0001 000 CNTPCT_EL0 = 0xdf01, // 11 011 1110 0000 001 - CNTVCT_EL0 = 0xdf02 // 11 011 1110 0000 010 + CNTVCT_EL0 = 0xdf02, // 11 011 1110 0000 010 + + // Trace registers + TRCSTATR = 0x8818, // 10 001 0000 0011 000 + TRCIDR8 = 0x8806, // 10 001 0000 0000 110 + TRCIDR9 = 0x880e, // 10 001 0000 0001 110 + TRCIDR10 = 0x8816, // 10 001 0000 0010 110 + TRCIDR11 = 0x881e, // 10 001 0000 0011 110 + TRCIDR12 = 0x8826, // 10 001 0000 0100 110 + TRCIDR13 = 0x882e, // 10 001 0000 0101 110 + TRCIDR0 = 0x8847, // 10 001 0000 1000 111 + TRCIDR1 = 0x884f, // 10 001 0000 1001 111 + TRCIDR2 = 0x8857, // 10 001 0000 1010 111 + TRCIDR3 = 0x885f, // 10 001 0000 1011 111 + TRCIDR4 = 0x8867, // 10 001 0000 1100 111 + TRCIDR5 = 0x886f, // 10 001 0000 1101 111 + TRCIDR6 = 0x8877, // 10 001 0000 1110 111 + TRCIDR7 = 0x887f, // 10 001 0000 1111 111 + TRCOSLSR = 0x888c, // 10 001 0001 0001 100 + TRCPDSR = 0x88ac, // 10 001 0001 0101 100 + TRCDEVAFF0 = 0x8bd6, // 10 001 0111 1010 110 + TRCDEVAFF1 = 0x8bde, // 10 001 0111 1011 110 + TRCLSR = 0x8bee, // 10 001 0111 1101 110 + TRCAUTHSTATUS = 0x8bf6, // 10 001 0111 1110 110 + TRCDEVARCH = 0x8bfe, // 10 001 0111 1111 110 + TRCDEVID = 0x8b97, // 10 001 0111 0010 111 + TRCDEVTYPE = 0x8b9f, // 10 001 0111 0011 111 + TRCPIDR4 = 0x8ba7, // 10 001 0111 0100 111 + TRCPIDR5 = 0x8baf, // 10 001 0111 0101 111 + TRCPIDR6 = 0x8bb7, // 10 001 0111 0110 111 + TRCPIDR7 = 0x8bbf, // 10 001 0111 0111 111 + TRCPIDR0 = 0x8bc7, // 10 001 0111 1000 111 + TRCPIDR1 = 0x8bcf, // 10 001 0111 1001 111 + TRCPIDR2 = 0x8bd7, // 10 001 0111 1010 111 + TRCPIDR3 = 0x8bdf, // 10 001 0111 1011 111 + TRCCIDR0 = 0x8be7, // 10 001 0111 1100 111 + TRCCIDR1 = 0x8bef, // 10 001 0111 1101 111 + TRCCIDR2 = 0x8bf7, // 10 001 0111 1110 111 + TRCCIDR3 = 0x8bff, // 10 001 0111 1111 111 + + // GICv3 registers + ICC_IAR1_EL1 = 0xc660, // 11 000 1100 1100 000 + ICC_IAR0_EL1 = 0xc640, // 11 000 1100 1000 000 + ICC_HPPIR1_EL1 = 0xc662, // 11 000 1100 1100 010 + ICC_HPPIR0_EL1 = 0xc642, // 11 000 1100 1000 010 + ICC_RPR_EL1 = 0xc65b, // 11 000 1100 1011 011 + ICH_VTR_EL2 = 0xe659, // 11 100 1100 1011 001 + ICH_EISR_EL2 = 0xe65b, // 11 100 1100 1011 011 + ICH_ELSR_EL2 = 0xe65d // 11 100 1100 1011 101 }; enum SysRegWOValues { DBGDTRTX_EL0 = 0x9828, // 10 011 0000 0101 000 OSLAR_EL1 = 0x8084, // 10 000 0001 0000 100 - PMSWINC_EL0 = 0xdce4 // 11 011 1001 1100 100 + PMSWINC_EL0 = 0xdce4, // 11 011 1001 1100 100 + + // Trace Registers + TRCOSLAR = 0x8884, // 10 001 0001 0000 100 + TRCLAR = 0x8be6, // 10 001 0111 1100 110 + + // GICv3 registers + ICC_EOIR1_EL1 = 0xc661, // 11 000 1100 1100 001 
+ ICC_EOIR0_EL1 = 0xc641, // 11 000 1100 1000 001 + ICC_DIR_EL1 = 0xc659, // 11 000 1100 1011 001 + ICC_SGI1R_EL1 = 0xc65d, // 11 000 1100 1011 101 + ICC_ASGI1R_EL1 = 0xc65e, // 11 000 1100 1011 110 + ICC_SGI0R_EL1 = 0xc65f // 11 000 1100 1011 111 }; enum SysRegValues { @@ -616,7 +676,231 @@ namespace A64SysReg { PMEVTYPER27_EL0 = 0xdf7b, // 11 011 1110 1111 011 PMEVTYPER28_EL0 = 0xdf7c, // 11 011 1110 1111 100 PMEVTYPER29_EL0 = 0xdf7d, // 11 011 1110 1111 101 - PMEVTYPER30_EL0 = 0xdf7e // 11 011 1110 1111 110 + PMEVTYPER30_EL0 = 0xdf7e, // 11 011 1110 1111 110 + + // Trace registers + TRCPRGCTLR = 0x8808, // 10 001 0000 0001 000 + TRCPROCSELR = 0x8810, // 10 001 0000 0010 000 + TRCCONFIGR = 0x8820, // 10 001 0000 0100 000 + TRCAUXCTLR = 0x8830, // 10 001 0000 0110 000 + TRCEVENTCTL0R = 0x8840, // 10 001 0000 1000 000 + TRCEVENTCTL1R = 0x8848, // 10 001 0000 1001 000 + TRCSTALLCTLR = 0x8858, // 10 001 0000 1011 000 + TRCTSCTLR = 0x8860, // 10 001 0000 1100 000 + TRCSYNCPR = 0x8868, // 10 001 0000 1101 000 + TRCCCCTLR = 0x8870, // 10 001 0000 1110 000 + TRCBBCTLR = 0x8878, // 10 001 0000 1111 000 + TRCTRACEIDR = 0x8801, // 10 001 0000 0000 001 + TRCQCTLR = 0x8809, // 10 001 0000 0001 001 + TRCVICTLR = 0x8802, // 10 001 0000 0000 010 + TRCVIIECTLR = 0x880a, // 10 001 0000 0001 010 + TRCVISSCTLR = 0x8812, // 10 001 0000 0010 010 + TRCVIPCSSCTLR = 0x881a, // 10 001 0000 0011 010 + TRCVDCTLR = 0x8842, // 10 001 0000 1000 010 + TRCVDSACCTLR = 0x884a, // 10 001 0000 1001 010 + TRCVDARCCTLR = 0x8852, // 10 001 0000 1010 010 + TRCSEQEVR0 = 0x8804, // 10 001 0000 0000 100 + TRCSEQEVR1 = 0x880c, // 10 001 0000 0001 100 + TRCSEQEVR2 = 0x8814, // 10 001 0000 0010 100 + TRCSEQRSTEVR = 0x8834, // 10 001 0000 0110 100 + TRCSEQSTR = 0x883c, // 10 001 0000 0111 100 + TRCEXTINSELR = 0x8844, // 10 001 0000 1000 100 + TRCCNTRLDVR0 = 0x8805, // 10 001 0000 0000 101 + TRCCNTRLDVR1 = 0x880d, // 10 001 0000 0001 101 + TRCCNTRLDVR2 = 0x8815, // 10 001 0000 0010 101 + TRCCNTRLDVR3 = 0x881d, // 10 001 0000 0011 101 + TRCCNTCTLR0 = 0x8825, // 10 001 0000 0100 101 + TRCCNTCTLR1 = 0x882d, // 10 001 0000 0101 101 + TRCCNTCTLR2 = 0x8835, // 10 001 0000 0110 101 + TRCCNTCTLR3 = 0x883d, // 10 001 0000 0111 101 + TRCCNTVR0 = 0x8845, // 10 001 0000 1000 101 + TRCCNTVR1 = 0x884d, // 10 001 0000 1001 101 + TRCCNTVR2 = 0x8855, // 10 001 0000 1010 101 + TRCCNTVR3 = 0x885d, // 10 001 0000 1011 101 + TRCIMSPEC0 = 0x8807, // 10 001 0000 0000 111 + TRCIMSPEC1 = 0x880f, // 10 001 0000 0001 111 + TRCIMSPEC2 = 0x8817, // 10 001 0000 0010 111 + TRCIMSPEC3 = 0x881f, // 10 001 0000 0011 111 + TRCIMSPEC4 = 0x8827, // 10 001 0000 0100 111 + TRCIMSPEC5 = 0x882f, // 10 001 0000 0101 111 + TRCIMSPEC6 = 0x8837, // 10 001 0000 0110 111 + TRCIMSPEC7 = 0x883f, // 10 001 0000 0111 111 + TRCRSCTLR2 = 0x8890, // 10 001 0001 0010 000 + TRCRSCTLR3 = 0x8898, // 10 001 0001 0011 000 + TRCRSCTLR4 = 0x88a0, // 10 001 0001 0100 000 + TRCRSCTLR5 = 0x88a8, // 10 001 0001 0101 000 + TRCRSCTLR6 = 0x88b0, // 10 001 0001 0110 000 + TRCRSCTLR7 = 0x88b8, // 10 001 0001 0111 000 + TRCRSCTLR8 = 0x88c0, // 10 001 0001 1000 000 + TRCRSCTLR9 = 0x88c8, // 10 001 0001 1001 000 + TRCRSCTLR10 = 0x88d0, // 10 001 0001 1010 000 + TRCRSCTLR11 = 0x88d8, // 10 001 0001 1011 000 + TRCRSCTLR12 = 0x88e0, // 10 001 0001 1100 000 + TRCRSCTLR13 = 0x88e8, // 10 001 0001 1101 000 + TRCRSCTLR14 = 0x88f0, // 10 001 0001 1110 000 + TRCRSCTLR15 = 0x88f8, // 10 001 0001 1111 000 + TRCRSCTLR16 = 0x8881, // 10 001 0001 0000 001 + TRCRSCTLR17 = 0x8889, // 10 001 0001 0001 001 + TRCRSCTLR18 = 
0x8891, // 10 001 0001 0010 001 + TRCRSCTLR19 = 0x8899, // 10 001 0001 0011 001 + TRCRSCTLR20 = 0x88a1, // 10 001 0001 0100 001 + TRCRSCTLR21 = 0x88a9, // 10 001 0001 0101 001 + TRCRSCTLR22 = 0x88b1, // 10 001 0001 0110 001 + TRCRSCTLR23 = 0x88b9, // 10 001 0001 0111 001 + TRCRSCTLR24 = 0x88c1, // 10 001 0001 1000 001 + TRCRSCTLR25 = 0x88c9, // 10 001 0001 1001 001 + TRCRSCTLR26 = 0x88d1, // 10 001 0001 1010 001 + TRCRSCTLR27 = 0x88d9, // 10 001 0001 1011 001 + TRCRSCTLR28 = 0x88e1, // 10 001 0001 1100 001 + TRCRSCTLR29 = 0x88e9, // 10 001 0001 1101 001 + TRCRSCTLR30 = 0x88f1, // 10 001 0001 1110 001 + TRCRSCTLR31 = 0x88f9, // 10 001 0001 1111 001 + TRCSSCCR0 = 0x8882, // 10 001 0001 0000 010 + TRCSSCCR1 = 0x888a, // 10 001 0001 0001 010 + TRCSSCCR2 = 0x8892, // 10 001 0001 0010 010 + TRCSSCCR3 = 0x889a, // 10 001 0001 0011 010 + TRCSSCCR4 = 0x88a2, // 10 001 0001 0100 010 + TRCSSCCR5 = 0x88aa, // 10 001 0001 0101 010 + TRCSSCCR6 = 0x88b2, // 10 001 0001 0110 010 + TRCSSCCR7 = 0x88ba, // 10 001 0001 0111 010 + TRCSSCSR0 = 0x88c2, // 10 001 0001 1000 010 + TRCSSCSR1 = 0x88ca, // 10 001 0001 1001 010 + TRCSSCSR2 = 0x88d2, // 10 001 0001 1010 010 + TRCSSCSR3 = 0x88da, // 10 001 0001 1011 010 + TRCSSCSR4 = 0x88e2, // 10 001 0001 1100 010 + TRCSSCSR5 = 0x88ea, // 10 001 0001 1101 010 + TRCSSCSR6 = 0x88f2, // 10 001 0001 1110 010 + TRCSSCSR7 = 0x88fa, // 10 001 0001 1111 010 + TRCSSPCICR0 = 0x8883, // 10 001 0001 0000 011 + TRCSSPCICR1 = 0x888b, // 10 001 0001 0001 011 + TRCSSPCICR2 = 0x8893, // 10 001 0001 0010 011 + TRCSSPCICR3 = 0x889b, // 10 001 0001 0011 011 + TRCSSPCICR4 = 0x88a3, // 10 001 0001 0100 011 + TRCSSPCICR5 = 0x88ab, // 10 001 0001 0101 011 + TRCSSPCICR6 = 0x88b3, // 10 001 0001 0110 011 + TRCSSPCICR7 = 0x88bb, // 10 001 0001 0111 011 + TRCPDCR = 0x88a4, // 10 001 0001 0100 100 + TRCACVR0 = 0x8900, // 10 001 0010 0000 000 + TRCACVR1 = 0x8910, // 10 001 0010 0010 000 + TRCACVR2 = 0x8920, // 10 001 0010 0100 000 + TRCACVR3 = 0x8930, // 10 001 0010 0110 000 + TRCACVR4 = 0x8940, // 10 001 0010 1000 000 + TRCACVR5 = 0x8950, // 10 001 0010 1010 000 + TRCACVR6 = 0x8960, // 10 001 0010 1100 000 + TRCACVR7 = 0x8970, // 10 001 0010 1110 000 + TRCACVR8 = 0x8901, // 10 001 0010 0000 001 + TRCACVR9 = 0x8911, // 10 001 0010 0010 001 + TRCACVR10 = 0x8921, // 10 001 0010 0100 001 + TRCACVR11 = 0x8931, // 10 001 0010 0110 001 + TRCACVR12 = 0x8941, // 10 001 0010 1000 001 + TRCACVR13 = 0x8951, // 10 001 0010 1010 001 + TRCACVR14 = 0x8961, // 10 001 0010 1100 001 + TRCACVR15 = 0x8971, // 10 001 0010 1110 001 + TRCACATR0 = 0x8902, // 10 001 0010 0000 010 + TRCACATR1 = 0x8912, // 10 001 0010 0010 010 + TRCACATR2 = 0x8922, // 10 001 0010 0100 010 + TRCACATR3 = 0x8932, // 10 001 0010 0110 010 + TRCACATR4 = 0x8942, // 10 001 0010 1000 010 + TRCACATR5 = 0x8952, // 10 001 0010 1010 010 + TRCACATR6 = 0x8962, // 10 001 0010 1100 010 + TRCACATR7 = 0x8972, // 10 001 0010 1110 010 + TRCACATR8 = 0x8903, // 10 001 0010 0000 011 + TRCACATR9 = 0x8913, // 10 001 0010 0010 011 + TRCACATR10 = 0x8923, // 10 001 0010 0100 011 + TRCACATR11 = 0x8933, // 10 001 0010 0110 011 + TRCACATR12 = 0x8943, // 10 001 0010 1000 011 + TRCACATR13 = 0x8953, // 10 001 0010 1010 011 + TRCACATR14 = 0x8963, // 10 001 0010 1100 011 + TRCACATR15 = 0x8973, // 10 001 0010 1110 011 + TRCDVCVR0 = 0x8904, // 10 001 0010 0000 100 + TRCDVCVR1 = 0x8924, // 10 001 0010 0100 100 + TRCDVCVR2 = 0x8944, // 10 001 0010 1000 100 + TRCDVCVR3 = 0x8964, // 10 001 0010 1100 100 + TRCDVCVR4 = 0x8905, // 10 001 0010 0000 101 + TRCDVCVR5 = 0x8925, // 10 001 0010 
0100 101 + TRCDVCVR6 = 0x8945, // 10 001 0010 1000 101 + TRCDVCVR7 = 0x8965, // 10 001 0010 1100 101 + TRCDVCMR0 = 0x8906, // 10 001 0010 0000 110 + TRCDVCMR1 = 0x8926, // 10 001 0010 0100 110 + TRCDVCMR2 = 0x8946, // 10 001 0010 1000 110 + TRCDVCMR3 = 0x8966, // 10 001 0010 1100 110 + TRCDVCMR4 = 0x8907, // 10 001 0010 0000 111 + TRCDVCMR5 = 0x8927, // 10 001 0010 0100 111 + TRCDVCMR6 = 0x8947, // 10 001 0010 1000 111 + TRCDVCMR7 = 0x8967, // 10 001 0010 1100 111 + TRCCIDCVR0 = 0x8980, // 10 001 0011 0000 000 + TRCCIDCVR1 = 0x8990, // 10 001 0011 0010 000 + TRCCIDCVR2 = 0x89a0, // 10 001 0011 0100 000 + TRCCIDCVR3 = 0x89b0, // 10 001 0011 0110 000 + TRCCIDCVR4 = 0x89c0, // 10 001 0011 1000 000 + TRCCIDCVR5 = 0x89d0, // 10 001 0011 1010 000 + TRCCIDCVR6 = 0x89e0, // 10 001 0011 1100 000 + TRCCIDCVR7 = 0x89f0, // 10 001 0011 1110 000 + TRCVMIDCVR0 = 0x8981, // 10 001 0011 0000 001 + TRCVMIDCVR1 = 0x8991, // 10 001 0011 0010 001 + TRCVMIDCVR2 = 0x89a1, // 10 001 0011 0100 001 + TRCVMIDCVR3 = 0x89b1, // 10 001 0011 0110 001 + TRCVMIDCVR4 = 0x89c1, // 10 001 0011 1000 001 + TRCVMIDCVR5 = 0x89d1, // 10 001 0011 1010 001 + TRCVMIDCVR6 = 0x89e1, // 10 001 0011 1100 001 + TRCVMIDCVR7 = 0x89f1, // 10 001 0011 1110 001 + TRCCIDCCTLR0 = 0x8982, // 10 001 0011 0000 010 + TRCCIDCCTLR1 = 0x898a, // 10 001 0011 0001 010 + TRCVMIDCCTLR0 = 0x8992, // 10 001 0011 0010 010 + TRCVMIDCCTLR1 = 0x899a, // 10 001 0011 0011 010 + TRCITCTRL = 0x8b84, // 10 001 0111 0000 100 + TRCCLAIMSET = 0x8bc6, // 10 001 0111 1000 110 + TRCCLAIMCLR = 0x8bce, // 10 001 0111 1001 110 + + // GICv3 registers + ICC_BPR1_EL1 = 0xc663, // 11 000 1100 1100 011 + ICC_BPR0_EL1 = 0xc643, // 11 000 1100 1000 011 + ICC_PMR_EL1 = 0xc230, // 11 000 0100 0110 000 + ICC_CTLR_EL1 = 0xc664, // 11 000 1100 1100 100 + ICC_CTLR_EL3 = 0xf664, // 11 110 1100 1100 100 + ICC_SRE_EL1 = 0xc665, // 11 000 1100 1100 101 + ICC_SRE_EL2 = 0xe64d, // 11 100 1100 1001 101 + ICC_SRE_EL3 = 0xf665, // 11 110 1100 1100 101 + ICC_IGRPEN0_EL1 = 0xc666, // 11 000 1100 1100 110 + ICC_IGRPEN1_EL1 = 0xc667, // 11 000 1100 1100 111 + ICC_IGRPEN1_EL3 = 0xf667, // 11 110 1100 1100 111 + ICC_SEIEN_EL1 = 0xc668, // 11 000 1100 1101 000 + ICC_AP0R0_EL1 = 0xc644, // 11 000 1100 1000 100 + ICC_AP0R1_EL1 = 0xc645, // 11 000 1100 1000 101 + ICC_AP0R2_EL1 = 0xc646, // 11 000 1100 1000 110 + ICC_AP0R3_EL1 = 0xc647, // 11 000 1100 1000 111 + ICC_AP1R0_EL1 = 0xc648, // 11 000 1100 1001 000 + ICC_AP1R1_EL1 = 0xc649, // 11 000 1100 1001 001 + ICC_AP1R2_EL1 = 0xc64a, // 11 000 1100 1001 010 + ICC_AP1R3_EL1 = 0xc64b, // 11 000 1100 1001 011 + ICH_AP0R0_EL2 = 0xe640, // 11 100 1100 1000 000 + ICH_AP0R1_EL2 = 0xe641, // 11 100 1100 1000 001 + ICH_AP0R2_EL2 = 0xe642, // 11 100 1100 1000 010 + ICH_AP0R3_EL2 = 0xe643, // 11 100 1100 1000 011 + ICH_AP1R0_EL2 = 0xe648, // 11 100 1100 1001 000 + ICH_AP1R1_EL2 = 0xe649, // 11 100 1100 1001 001 + ICH_AP1R2_EL2 = 0xe64a, // 11 100 1100 1001 010 + ICH_AP1R3_EL2 = 0xe64b, // 11 100 1100 1001 011 + ICH_HCR_EL2 = 0xe658, // 11 100 1100 1011 000 + ICH_MISR_EL2 = 0xe65a, // 11 100 1100 1011 010 + ICH_VMCR_EL2 = 0xe65f, // 11 100 1100 1011 111 + ICH_VSEIR_EL2 = 0xe64c, // 11 100 1100 1001 100 + ICH_LR0_EL2 = 0xe660, // 11 100 1100 1100 000 + ICH_LR1_EL2 = 0xe661, // 11 100 1100 1100 001 + ICH_LR2_EL2 = 0xe662, // 11 100 1100 1100 010 + ICH_LR3_EL2 = 0xe663, // 11 100 1100 1100 011 + ICH_LR4_EL2 = 0xe664, // 11 100 1100 1100 100 + ICH_LR5_EL2 = 0xe665, // 11 100 1100 1100 101 + ICH_LR6_EL2 = 0xe666, // 11 100 1100 1100 110 + ICH_LR7_EL2 = 0xe667, // 11 100 
1100 1100 111 + ICH_LR8_EL2 = 0xe668, // 11 100 1100 1101 000 + ICH_LR9_EL2 = 0xe669, // 11 100 1100 1101 001 + ICH_LR10_EL2 = 0xe66a, // 11 100 1100 1101 010 + ICH_LR11_EL2 = 0xe66b, // 11 100 1100 1101 011 + ICH_LR12_EL2 = 0xe66c, // 11 100 1100 1101 100 + ICH_LR13_EL2 = 0xe66d, // 11 100 1100 1101 101 + ICH_LR14_EL2 = 0xe66e, // 11 100 1100 1101 110 + ICH_LR15_EL2 = 0xe66f // 11 100 1100 1101 111 }; // Note that these do not inherit from NamedImmMapper. This class is diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td index 46915ee..2d747091 100644 --- a/lib/Target/ARM/ARM.td +++ b/lib/Target/ARM/ARM.td @@ -59,6 +59,8 @@ def FeatureSlowFPBrcc : SubtargetFeature<"slow-fp-brcc", "SlowFPBrcc", "true", "FP compare + branch is slow">; def FeatureVFPOnlySP : SubtargetFeature<"fp-only-sp", "FPOnlySP", "true", "Floating point unit supports single precision only">; +def FeatureTrustZone : SubtargetFeature<"trustzone", "HasTrustZone", "true", + "Enable support for TrustZone security extensions">; // Some processors have FP multiply-accumulate instructions that don't // play nicely with other VFP / NEON instructions, and it's generally better @@ -143,32 +145,34 @@ include "ARMSchedule.td" // ARM processor families. def ProcA5 : SubtargetFeature<"a5", "ARMProcFamily", "CortexA5", "Cortex-A5 ARM processors", - [FeatureSlowFPBrcc, FeatureNEONForFP, - FeatureHasSlowFPVMLx, FeatureVMLxForwarding, - FeatureT2XtPk]>; + [FeatureSlowFPBrcc, FeatureHasSlowFPVMLx, + FeatureVMLxForwarding, FeatureT2XtPk, + FeatureTrustZone]>; def ProcA8 : SubtargetFeature<"a8", "ARMProcFamily", "CortexA8", "Cortex-A8 ARM processors", - [FeatureSlowFPBrcc, FeatureNEONForFP, - FeatureHasSlowFPVMLx, FeatureVMLxForwarding, - FeatureT2XtPk]>; + [FeatureSlowFPBrcc, FeatureHasSlowFPVMLx, + FeatureVMLxForwarding, FeatureT2XtPk, + FeatureTrustZone]>; def ProcA9 : SubtargetFeature<"a9", "ARMProcFamily", "CortexA9", "Cortex-A9 ARM processors", [FeatureVMLxForwarding, FeatureT2XtPk, FeatureFP16, - FeatureAvoidPartialCPSR]>; + FeatureAvoidPartialCPSR, + FeatureTrustZone]>; def ProcSwift : SubtargetFeature<"swift", "ARMProcFamily", "Swift", "Swift ARM processors", [FeatureNEONForFP, FeatureT2XtPk, FeatureVFP4, FeatureMP, FeatureHWDiv, FeatureHWDivARM, FeatureAvoidPartialCPSR, FeatureAvoidMOVsShOp, - FeatureHasSlowFPVMLx]>; + FeatureHasSlowFPVMLx, FeatureTrustZone]>; // FIXME: It has not been determined if A15 has these features. 
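Returning to the A64SysReg tables above for a moment: each hex value in those enums is just the 16-bit packing of the op0/op1/CRn/CRm/op2 fields spelled out in the trailing "xx xxx xxxx xxxx xxx" comments (2+3+4+4+3 bits). A standalone sketch of that packing, not part of the patch, with the names chosen here for illustration only:

#include <cstdint>

// Pack op0(2) op1(3) CRn(4) CRm(4) op2(3) into the 16-bit encoding used by
// the system-register enums; the field widths follow the comment columns.
constexpr uint16_t sysRegEncoding(unsigned Op0, unsigned Op1, unsigned CRn,
                                  unsigned CRm, unsigned Op2) {
  return (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
}

static_assert(sysRegEncoding(3, 3, 14, 0, 2) == 0xdf02, "CNTVCT_EL0");
static_assert(sysRegEncoding(2, 1, 0, 3, 0) == 0x8818, "TRCSTATR");
static_assert(sysRegEncoding(3, 0, 12, 12, 0) == 0xc660, "ICC_IAR1_EL1");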
def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15", "Cortex-A15 ARM processors", [FeatureT2XtPk, FeatureFP16, - FeatureAvoidPartialCPSR]>; + FeatureAvoidPartialCPSR, + FeatureTrustZone]>; def ProcR5 : SubtargetFeature<"r5", "ARMProcFamily", "CortexR5", "Cortex-R5 ARM processors", [FeatureSlowFPBrcc, FeatureHWDivARM, diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index ed8b9cd..0d1417d 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -747,10 +747,10 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, Mov->addRegisterKilled(SrcReg, TRI); } -static const -MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, - unsigned Reg, unsigned SubIdx, unsigned State, - const TargetRegisterInfo *TRI) { +const MachineInstrBuilder & +ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg, + unsigned SubIdx, unsigned State, + const TargetRegisterInfo *TRI) const { if (!SubIdx) return MIB.addReg(Reg, State); @@ -795,12 +795,22 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STMIA)) - .addFrameIndex(FI)) - .addMemOperand(MMO); - MIB = AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI); - AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI); + if (Subtarget.hasV5TEOps()) { + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::STRD)); + AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI); + AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI); + MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO); + + AddDefaultPred(MIB); + } else { + // Fallback to STM instruction, which has existed since the dawn of + // time. + MachineInstrBuilder MIB = + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STMIA)) + .addFrameIndex(FI).addMemOperand(MMO)); + AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI); + AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI); + } } else llvm_unreachable("Unknown reg class!"); break; @@ -948,7 +958,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); MachineFunction &MF = *MBB.getParent(); - ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); MachineFrameInfo &MFI = *MF.getFrameInfo(); unsigned Align = MFI.getObjectAlignment(FI); MachineMemOperand *MMO = @@ -975,12 +984,24 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) { - unsigned LdmOpc = AFI->isThumbFunction() ? 
ARM::t2LDMIA : ARM::LDMIA; - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(LdmOpc)) - .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); - MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI); - MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI); + MachineInstrBuilder MIB; + + if (Subtarget.hasV5TEOps()) { + MIB = BuildMI(MBB, I, DL, get(ARM::LDRD)); + AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI); + AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI); + MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO); + + AddDefaultPred(MIB); + } else { + // Fallback to LDM instruction, which has existed since the dawn of + // time. + MIB = AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDMIA)) + .addFrameIndex(FI).addMemOperand(MMO)); + MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI); + MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI); + } + if (TargetRegisterInfo::isPhysicalRegister(DestReg)) MIB.addReg(DestReg, RegState::ImplicitDefine); } else @@ -3734,9 +3755,9 @@ ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const { if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI)) return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON)); - // A9-like cores are particularly picky about mixing the two and want these + // CortexA9 is particularly picky about mixing the two and wants these // converted. - if (Subtarget.isLikeA9() && !isPredicated(MI) && + if (Subtarget.isCortexA9() && !isPredicated(MI) && (MI->getOpcode() == ARM::VMOVRS || MI->getOpcode() == ARM::VMOVSR || MI->getOpcode() == ARM::VMOVS)) @@ -4023,14 +4044,12 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const { // VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops. // // FCONSTD can be used as a dependency-breaking instruction. - - unsigned ARMBaseInstrInfo:: getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum, const TargetRegisterInfo *TRI) const { - // Only Swift has partial register update problems. - if (!SwiftPartialUpdateClearance || !Subtarget.isSwift()) + if (!SwiftPartialUpdateClearance || + !(Subtarget.isSwift() || Subtarget.isCortexA15())) return 0; assert(TRI && "Need TRI instance"); @@ -4056,7 +4075,7 @@ getPartialRegUpdateClearance(const MachineInstr *MI, // Explicitly reads the dependency. case ARM::VLD1LNd32: - UseOp = 1; + UseOp = 3; break; default: return 0; @@ -4125,3 +4144,15 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, bool ARMBaseInstrInfo::hasNOP() const { return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0; } + +bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const { + unsigned ShOpVal = MI->getOperand(3).getImm(); + unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal); + // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1. 
+ if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) || + ((ShImm == 1 || ShImm == 2) && + ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl)) + return true; + + return false; +} diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 2698132..2ef659c 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -141,6 +141,10 @@ public: MachineInstr *commuteInstruction(MachineInstr*, bool=false) const; + const MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, unsigned Reg, + unsigned SubIdx, unsigned State, + const TargetRegisterInfo *TRI) const; + virtual bool produceSameValue(const MachineInstr *MI0, const MachineInstr *MI1, const MachineRegisterInfo *MRI) const; @@ -314,6 +318,10 @@ public: bool canCauseFpMLxStall(unsigned Opcode) const { return MLxHazardOpcodes.count(Opcode); } + + /// Returns true if the instruction has a shift by immediate that can be + /// executed in one cycle less. + bool isSwiftFastImmShift(const MachineInstr *MI) const; }; static inline diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index abdd251..b0d34a7 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -75,6 +75,12 @@ ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const { } const uint32_t* +ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID) const { + return (STI.isTargetIOS() && !STI.isAAPCS_ABI()) + ? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask; +} + +const uint32_t* ARMBaseRegisterInfo::getNoPreservedMask() const { return CSR_NoRegs_RegMask; } @@ -680,7 +686,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // means the stack pointer cannot be used to access the emergency spill slot // when !hasReservedCallFrame(). #ifndef NDEBUG - if (RS && FrameReg == ARM::SP && FrameIndex == RS->getScavengingFrameIndex()){ + if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){ assert(TFI->hasReservedCallFrame(MF) && "Cannot use SP to access the emergency spill slot in " "functions without a reserved call frame"); diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.h b/lib/Target/ARM/ARMBaseRegisterInfo.h index 725033b..0679919 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.h +++ b/lib/Target/ARM/ARMBaseRegisterInfo.h @@ -96,6 +96,7 @@ public: /// Code Generation virtual methods... const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const; const uint32_t *getCallPreservedMask(CallingConv::ID) const; + const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const; const uint32_t *getNoPreservedMask() const; BitVector getReservedRegs(const MachineFunction &MF) const; diff --git a/lib/Target/ARM/ARMCallingConv.h b/lib/Target/ARM/ARMCallingConv.h index e6e8c3d..4f94ad2 100644 --- a/lib/Target/ARM/ARMCallingConv.h +++ b/lib/Target/ARM/ARMCallingConv.h @@ -74,9 +74,15 @@ static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT, static const uint16_t HiRegList[] = { ARM::R0, ARM::R2 }; static const uint16_t LoRegList[] = { ARM::R1, ARM::R3 }; static const uint16_t ShadowRegList[] = { ARM::R0, ARM::R1 }; + static const uint16_t GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 }; unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList, 2); if (Reg == 0) { + + // If only R3 was left unallocated, we still have to waste it now.
+ Reg = State.AllocateReg(GPRArgRegs, 4); + assert((!Reg || Reg == ARM::R3) && "Wrong GPRs usage for f64"); + // For the 2nd half of a v2f64, do not just fail. if (CanFail) return false; diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td index b378b96..8ff666e 100644 --- a/lib/Target/ARM/ARMCallingConv.td +++ b/lib/Target/ARM/ARMCallingConv.td @@ -111,8 +111,7 @@ def CC_ARM_AAPCS_Common : CallingConv<[ // i64 is 8-aligned i32 here, so we may need to eat R1 as a pad register // (and the same is true for f64 if VFP is not enabled) CCIfType<[i32], CCIfAlign<"8", CCAssignToRegWithShadow<[R0, R2], [R0, R1]>>>, - CCIfType<[i32], CCIf<"State.getNextStackOffset() == 0 &&" - "ArgFlags.getOrigAlign() != 8", + CCIfType<[i32], CCIf<"ArgFlags.getOrigAlign() != 8", CCAssignToReg<[R0, R1, R2, R3]>>>, CCIfType<[i32], CCIfAlign<"8", CCAssignToStackWithShadow<4, 8, R3>>>, @@ -195,10 +194,21 @@ def CSR_NoRegs : CalleeSavedRegs<(add)>; def CSR_AAPCS : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6, R5, R4, (sequence "D%u", 15, 8))>; +// Constructors and destructors return 'this' in the ARM C++ ABI; since 'this' +// and the pointer return value are both passed in R0 in these cases, this can +// be partially modelled by treating R0 as a callee-saved register +// Only the resulting RegMask is used; the SaveList is ignored +def CSR_AAPCS_ThisReturn : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6, + R5, R4, (sequence "D%u", 15, 8), + R0)>; + // iOS ABI deviates from ARM standard ABI. R9 is not a callee-saved register. // Also save R7-R4 first to match the stack frame fixed spill areas. def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>; +def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4, + (sub CSR_AAPCS_ThisReturn, R9))>; + // GHC set of callee saved regs is empty as all those regs are // used for passing STG regs around // add is a workaround for not being able to compile empty list: diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 29fcd40..5d45f64 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -144,8 +144,8 @@ class ARMFastISel : public FastISel { virtual bool TargetSelectInstruction(const Instruction *I); virtual unsigned TargetMaterializeConstant(const Constant *C); virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI); - virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo, - const LoadInst *LI); + virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI); virtual bool FastLowerArguments(); private: #include "ARMGenFastISel.inc" @@ -2605,7 +2605,7 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned Opc; bool isBoolZext = false; - const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32); + const TargetRegisterClass *RC; switch (SrcVT.SimpleTy) { default: return 0; case MVT::i16: @@ -2797,12 +2797,12 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { return false; } -/// TryToFoldLoad - The specified machine instr operand is a vreg, and that +/// \brief The specified machine instr operand is a vreg, and that /// vreg is being provided by the specified load instruction. If possible, /// try to fold the load as an operand to the instruction, returning true if /// successful. 
-bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo, - const LoadInst *LI) { +bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI) { // Verify we have a legal type before going any further. MVT VT; if (!isLoadTypeLegal(LI->getType(), VT)) diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index 3b12408..483802b 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -141,7 +141,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const { assert(!AFI->isThumb1OnlyFunction() && "This emitPrologue does not support Thumb1!"); bool isARM = !AFI->isThumbFunction(); - unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize(); + unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(); unsigned NumBytes = MFI->getStackSize(); const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); @@ -159,8 +159,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const { return; // Allocate the vararg register save area. This is not counted in NumBytes. - if (VARegSaveSize) - emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize, + if (ArgRegsSaveSize) + emitSPUpdate(isARM, MBB, MBBI, dl, TII, -ArgRegsSaveSize, MachineInstr::FrameSetup); if (!AFI->hasStackFrame()) { @@ -357,7 +357,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, "This emitEpilogue does not support Thumb1!"); bool isARM = !AFI->isThumbFunction(); - unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize(); + unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(); int NumBytes = (int)MFI->getStackSize(); unsigned FramePtr = RegInfo->getFrameRegister(MF); @@ -471,8 +471,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, MBBI = NewMI; } - if (VARegSaveSize) - emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize); + if (ArgRegsSaveSize) + emitSPUpdate(isARM, MBB, MBBI, dl, TII, ArgRegsSaveSize); } /// getFrameIndexReference - Provide a base+offset reference to an FI slot for @@ -1003,7 +1003,7 @@ bool ARMFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineFunction &MF = *MBB.getParent(); ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); - bool isVarArg = AFI->getVarArgsRegSaveSize() > 0; + bool isVarArg = AFI->getArgRegsSaveSize() > 0; unsigned NumAlignedDPRCS2Regs = AFI->getNumAlignedDPRCS2Regs(); // The emitPopInst calls below do not insert reloads for the aligned DPRCS2 @@ -1174,7 +1174,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, if (AFI->isThumb1OnlyFunction()) { // Spill LR if Thumb1 function uses variable length argument lists. - if (AFI->getVarArgsRegSaveSize() > 0) + if (AFI->getArgRegsSaveSize() > 0) MRI.setPhysRegUsed(ARM::LR); // Spill R4 if Thumb1 epilogue has to restore SP from FP. We don't know @@ -1368,7 +1368,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // note: Thumb1 functions spill to R12, not the stack. Reserve a slot // closest to SP or frame pointer. 
const TargetRegisterClass *RC = &ARM::GPRRegClass; - RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), + RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false)); } diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 2c51de2..9e1782e 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -1469,14 +1469,14 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) { SDValue Ops[]= { Base, AMOpc, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, - MVT::i32, MVT::Other, Ops, 5); + MVT::i32, MVT::Other, Ops); } else { SDValue Chain = LD->getChain(); SDValue Base = LD->getBasePtr(); SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, - MVT::i32, MVT::Other, Ops, 6); + MVT::i32, MVT::Other, Ops); } } @@ -1525,7 +1525,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) { SDValue Ops[]= { Base, Offset, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32, - MVT::Other, Ops, 5); + MVT::Other, Ops); } return NULL; @@ -1539,7 +1539,7 @@ SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) { SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32); const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5); + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } /// \brief Form a D register from a pair of S registers. @@ -1550,7 +1550,7 @@ SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) { SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32); const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5); + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } /// \brief Form a quad register from a pair of D registers. @@ -1560,7 +1560,7 @@ SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) { SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32); const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5); + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } /// \brief Form 4 consecutive D registers from a pair of Q registers. @@ -1570,7 +1570,7 @@ SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) { SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32); const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5); + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } /// \brief Form 4 consecutive S registers. 
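A note on the repeated "Ops, 5"/"Ops, 9" → "Ops" edits in the ARMISelDAGToDAG.cpp hunks here: getMachineNode now takes the operand list as a single ArrayRef-style argument, so the element count is deduced from the array rather than passed (and kept in sync) by hand. A minimal stand-alone sketch of the pattern, using a hypothetical span type rather than the real LLVM class:

#include <cstddef>

// Deduces the length from the array type, so call sites cannot pass a stale
// or mistyped element count alongside the pointer.
template <typename T> class SimpleArrayRef {
  const T *Data;
  size_t Length;
public:
  template <size_t N>
  SimpleArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
  const T *data() const { return Data; }
  size_t size() const { return Length; }
};

// Before: emit(Ops, 5);   After: emit(Ops); -- the count travels with the array.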
@@ -1585,7 +1585,7 @@ SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32); const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9); + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } /// \brief Form 4 consecutive D registers. @@ -1599,7 +1599,7 @@ SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32); const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9); + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } /// \brief Form 4 consecutive Q registers. @@ -1613,7 +1613,7 @@ SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32); const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9); + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } /// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand @@ -1761,7 +1761,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, Ops.push_back(Pred); Ops.push_back(Reg0); Ops.push_back(Chain); - VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size()); + VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); } else { // Otherwise, quad registers are loaded with two separate instructions, @@ -1774,7 +1774,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0); const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain }; SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, - ResTy, AddrTy, MVT::Other, OpsA, 7); + ResTy, AddrTy, MVT::Other, OpsA); Chain = SDValue(VLdA, 2); // Load the odd subregs. @@ -1791,8 +1791,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, Ops.push_back(Pred); Ops.push_back(Reg0); Ops.push_back(Chain); - VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, - Ops.data(), Ops.size()); + VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops); } // Transfer memoperands. @@ -1913,8 +1912,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, Ops.push_back(Pred); Ops.push_back(Reg0); Ops.push_back(Chain); - SDNode *VSt = - CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size()); + SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); // Transfer memoperands. 
cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1); @@ -1939,7 +1937,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain }; SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, MemAddr.getValueType(), - MVT::Other, OpsA, 7); + MVT::Other, OpsA); cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1); Chain = SDValue(VStA, 1); @@ -1958,7 +1956,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, Ops.push_back(Reg0); Ops.push_back(Chain); SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, - Ops.data(), Ops.size()); + Ops); cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1); return VStB; } @@ -2063,8 +2061,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] : QOpcodes[OpcodeIndex]); - SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, - Ops.data(), Ops.size()); + SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1); if (!IsLoad) return VLdLn; @@ -2150,8 +2147,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, if (isUpdating) ResTys.push_back(MVT::i32); ResTys.push_back(MVT::Other); - SDNode *VLdDup = - CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size()); + SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1); SuperReg = SDValue(VLdDup, 0); @@ -2197,7 +2193,7 @@ SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, Ops.push_back(N->getOperand(FirstTblReg + NumVecs)); Ops.push_back(getAL(CurDAG)); // predicate Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register - return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size()); + return CurDAG->getMachineNode(Opc, dl, VT, Ops); } SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, @@ -2542,7 +2538,7 @@ SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), MVT::i32, MVT::i32, MVT::Other, - Ops.data() ,Ops.size()); + Ops); cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); return ResNode; } @@ -2599,7 +2595,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { SDValue PredReg = CurDAG->getRegister(0, MVT::i32); SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() }; ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other, - Ops, 4); + Ops); } else { SDValue Ops[] = { CPIdx, @@ -2609,7 +2605,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { CurDAG->getEntryNode() }; ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other, - Ops, 5); + Ops); } ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); return NULL; @@ -2719,7 +2715,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { MVT::i32); SDValue Ops[] = { N0.getOperand(0), Imm16, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; - return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4); + return CurDAG->getMachineNode(Opc, dl, VT, Ops); } } break; @@ -2733,16 +2729,15 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { break; if (Subtarget->isThumb()) { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), - getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), - CurDAG->getRegister(0, MVT::i32) }; - return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, 
MVT::i32,Ops,4); + getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; + return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops); } else { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? ARM::UMULL : ARM::UMULLv5, - dl, MVT::i32, MVT::i32, Ops, 5); + dl, MVT::i32, MVT::i32, Ops); } } case ISD::SMUL_LOHI: { @@ -2751,14 +2746,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { if (Subtarget->isThumb()) { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; - return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,Ops,4); + return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops); } else { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? ARM::SMULL : ARM::SMULLv5, - dl, MVT::i32, MVT::i32, Ops, 5); + dl, MVT::i32, MVT::i32, Ops); } } case ARMISD::UMLAL:{ @@ -2766,7 +2761,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32)}; - return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops, 6); + return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops); }else{ SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), getAL(CurDAG), @@ -2774,7 +2769,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { CurDAG->getRegister(0, MVT::i32) }; return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? ARM::UMLAL : ARM::UMLALv5, - dl, MVT::i32, MVT::i32, Ops, 7); + dl, MVT::i32, MVT::i32, Ops); } } case ARMISD::SMLAL:{ @@ -2782,7 +2777,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32)}; - return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops, 6); + return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops); }else{ SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3), getAL(CurDAG), @@ -2790,7 +2785,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { CurDAG->getRegister(0, MVT::i32) }; return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 
ARM::SMLAL : ARM::SMLALv5, - dl, MVT::i32, MVT::i32, Ops, 7); + dl, MVT::i32, MVT::i32, Ops); } } case ISD::LOAD: { @@ -2833,7 +2828,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { MVT::i32); SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag }; SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, - MVT::Glue, Ops, 5); + MVT::Glue, Ops); Chain = SDValue(ResNode, 0); if (N->getNumValues() == 2) { InFlag = SDValue(ResNode, 1); @@ -2863,7 +2858,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { SDValue Pred = getAL(CurDAG); SDValue PredReg = CurDAG->getRegister(0, MVT::i32); SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; - return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); + return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops); } case ARMISD::VUZP: { unsigned Opc = 0; @@ -2883,7 +2878,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { SDValue Pred = getAL(CurDAG); SDValue PredReg = CurDAG->getRegister(0, MVT::i32); SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; - return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); + return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops); } case ARMISD::VTRN: { unsigned Opc = 0; @@ -2902,7 +2897,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { SDValue Pred = getAL(CurDAG); SDValue PredReg = CurDAG->getRegister(0, MVT::i32); SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; - return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); + return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops); } case ARMISD::BUILD_VECTOR: { EVT VecVT = N->getValueType(0); @@ -3147,8 +3142,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { Ops.push_back(getAL(CurDAG)); Ops.push_back(CurDAG->getRegister(0, MVT::i32)); Ops.push_back(Chain); - SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(), - Ops.size()); + SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops); // Transfer memoperands. MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); @@ -3211,8 +3205,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { unsigned NewOpc = isThumb ? ARM::t2STREXD : ARM::STREXD; - SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(), - Ops.size()); + SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops); // Transfer memoperands. 
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); @@ -3398,7 +3391,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { Ops.push_back(N->getOperand(1)); Ops.push_back(getAL(CurDAG)); // Predicate Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register - return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops.data(), Ops.size()); + return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops); } case ARMISD::VTBL2: { DebugLoc dl = N->getDebugLoc(); @@ -3414,8 +3407,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { Ops.push_back(N->getOperand(2)); Ops.push_back(getAL(CurDAG)); // Predicate Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register - return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, - Ops.data(), Ops.size()); + return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops); } case ISD::CONCAT_VECTORS: diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 514971f..9475f1b 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -564,6 +564,16 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); + // Custom expand long extensions to vectors. + setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom); + setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom); + setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); + setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); + setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); + // NEON does not have single instruction CTPOP for vectors with element // types wider than 8-bits. However, custom lowering can leverage the // v8i8/v16i8 vcnt instruction. @@ -719,7 +729,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) (Subtarget->hasV6Ops() && !Subtarget->isThumb())) { // membarrier needs custom lowering; the rest are legal and handled // normally. - setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); // Custom lowering for 64-bit ops setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); @@ -737,7 +746,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setInsertFencesForAtomic(true); } else { // Set them all for expansion, which will force libcalls. - setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); @@ -755,8 +763,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) // Unordered/Monotonic case. setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); - // Since the libcalls include locking, fold in the fences - setShouldFoldAtomicFences(true); } setOperationAction(ISD::PREFETCH, MVT::Other, Custom); @@ -870,8 +876,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) // are at least 4 bytes aligned. setMinStackArgumentAlignment(4); - BenefitFromCodePlacementOpt = true; - // Prefer likely predicted branches to selects on out-of-order cores. 
PredictableSelectIsExpensive = Subtarget->isLikeA9(); @@ -1230,7 +1234,8 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const { + SmallVectorImpl<SDValue> &InVals, + bool isThisReturn, SDValue ThisVal) const { // Assign locations to each value returned by this call. SmallVector<CCValAssign, 16> RVLocs; @@ -1244,6 +1249,15 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign VA = RVLocs[i]; + // Pass 'this' value directly from the argument to return value, to avoid + // reg unit interference + if (i == 0 && isThisReturn) { + assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && + "unexpected return calling convention register assignment"); + InVals.push_back(ThisVal); + continue; + } + SDValue Val; if (VA.needsCustom()) { // Handle f64 or half of a v2f64. @@ -1355,21 +1369,22 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, bool isVarArg = CLI.IsVarArg; MachineFunction &MF = DAG.getMachineFunction(); - bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); - bool IsSibCall = false; + bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); + bool isThisReturn = false; + bool isSibCall = false; // Disable tail calls if they're not supported. if (!EnableARMTailCalls && !Subtarget->supportsTailCall()) isTailCall = false; if (isTailCall) { // Check if it's really possible to do a tail call. isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, - isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), + isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(), Outs, OutVals, Ins, DAG); // We don't support GuaranteedTailCallOpt for ARM, only automatically // detected sibcalls. if (isTailCall) { ++NumTailCalls; - IsSibCall = true; + isSibCall = true; } } @@ -1385,12 +1400,12 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, unsigned NumBytes = CCInfo.getNextStackOffset(); // For tail calls, memory operands are available in our caller's stack. - if (IsSibCall) + if (isSibCall) NumBytes = 0; // Adjust the stack pointer for the new arguments... 
// These operations are automatically eliminated by the prolog/epilog pass - if (!IsSibCall) + if (!isSibCall) Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); @@ -1452,6 +1467,13 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, StackPtr, MemOpChains, Flags); } } else if (VA.isRegLoc()) { + if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) { + assert(VA.getLocVT() == MVT::i32 && + "unexpected calling convention register assignment"); + assert(!Ins.empty() && Ins[0].VT == MVT::i32 && + "unexpected use of 'returned'"); + isThisReturn = true; + } RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); } else if (isByVal) { assert(VA.isMemLoc()); @@ -1491,7 +1513,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, Ops, array_lengthof(Ops))); } - } else if (!IsSibCall) { + } else if (!isSibCall) { assert(VA.isMemLoc()); MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, @@ -1531,7 +1553,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, RegsToPass[i].second, InFlag); InFlag = Chain.getValue(1); } - InFlag =SDValue(); + InFlag = SDValue(); } // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every @@ -1672,8 +1694,15 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, RegsToPass[i].second.getValueType())); // Add a register mask operand representing the call-preserved registers. + const uint32_t *Mask; const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); - const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); + const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI); + if (isThisReturn) + // For 'this' returns, use the R0-preserving mask + Mask = ARI->getThisReturnPreservedMask(CallConv); + else + Mask = ARI->getCallPreservedMask(CallConv); + assert(Mask && "Missing call preserved mask for calling convention"); Ops.push_back(DAG.getRegisterMask(Mask)); @@ -1695,8 +1724,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Handle result values, copying them out of physregs into vregs that we // return. - return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, - dl, DAG, InVals); + return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, + InVals, isThisReturn, + isThisReturn ? OutVals[0] : SDValue()); } /// HandleByVal - Every parameter *after* a byval parameter is passed @@ -1711,6 +1741,7 @@ ARMTargetLowering::HandleByVal( State->getCallOrPrologue() == Call) && "unhandled ParmContext"); if ((!State->isFirstByValRegValid()) && + (!Subtarget->isAAPCS_ABI() || State->getNextStackOffset() == 0) && (ARM::R0 <= reg) && (reg <= ARM::R3)) { if (Subtarget->isAAPCS_ABI() && Align > 4) { unsigned AlignInRegs = Align / 4; @@ -1866,7 +1897,7 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // local frame. const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction(). 
getInfo<ARMFunctionInfo>(); - if (AFI_Caller->getVarArgsRegSaveSize()) + if (AFI_Caller->getArgRegsSaveSize()) return false; // If the callee takes no arguments then go on to check the results of the @@ -2453,35 +2484,6 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, } } -static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, - const ARMSubtarget *Subtarget) { - DebugLoc dl = Op.getDebugLoc(); - if (!Subtarget->hasDataBarrier()) { - // Some ARMv6 cpus can support data barriers with an mcr instruction. - // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get - // here. - assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && - "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); - return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), - DAG.getConstant(0, MVT::i32)); - } - - SDValue Op5 = Op.getOperand(5); - bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; - unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); - unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); - bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); - - ARM_MB::MemBOpt DMBOpt; - if (isDeviceBarrier) - DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; - else - DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; - return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), - DAG.getConstant(DMBOpt, MVT::i32)); -} - - static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) { // FIXME: handle "fence singlethread" more efficiently. @@ -2578,7 +2580,8 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, void ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, - unsigned &VARegSize, unsigned &VARegSaveSize) + unsigned &ArgRegsSize, + unsigned &ArgRegsSaveSize) const { unsigned NumGPRs; if (CCInfo.isFirstByValRegValid()) @@ -2592,8 +2595,8 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, } unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment(); - VARegSize = NumGPRs * 4; - VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); + ArgRegsSize = NumGPRs * 4; + ArgRegsSaveSize = (ArgRegsSize + Align - 1) & ~(Align - 1); } // The remaining GPRs hold either the beginning of variable-argument @@ -2603,13 +2606,26 @@ // If this is a variadic function, the va_list pointer will begin with // these values; otherwise, this reassembles a (byval) structure that // was split between registers and memory. -void -ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc dl, SDValue &Chain, - const Value *OrigArg, - unsigned OffsetFromOrigArg, - unsigned ArgOffset, - bool ForceMutable) const { +// Return: the frame index that the registers were stored into. +int +ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, + DebugLoc dl, SDValue &Chain, + const Value *OrigArg, + unsigned OffsetFromOrigArg, + unsigned ArgOffset, + bool ForceMutable) const { + + // Currently, two use-cases are possible: + // Case #1. Non var-args function, and we meet the first byval parameter. + // Set up the first unallocated register as the first byval register; + // eat all remaining registers + // (these two actions are performed by the HandleByVal method). + // Then, here, we initialize the stack frame with + // "store-reg" instructions. + // Case #2.
Var-args function that doesn't contain byval parameters. + // The same: eat all remaining unallocated registers, + // initialize the stack frame. + MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); @@ -2621,19 +2637,22 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); } - unsigned VARegSize, VARegSaveSize; - computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); - if (VARegSaveSize) { - // If this function is vararg, store any remaining integer argument regs - // to their spots on the stack so that they may be loaded by deferencing - // the result of va_next. - AFI->setVarArgsRegSaveSize(VARegSaveSize); - AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize, - ArgOffset + VARegSaveSize - - VARegSize, - false)); - SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), - getPointerTy()); + unsigned ArgRegsSize, ArgRegsSaveSize; + computeRegArea(CCInfo, MF, ArgRegsSize, ArgRegsSaveSize); + + // Store any by-val regs to their spots on the stack so that they may be + // loaded by dereferencing the result of the formal parameter pointer or va_next. + // Note: once the stack area for byval/varargs registers + // has been initialized, it can't be initialized again. + if (!AFI->getArgRegsSaveSize() && ArgRegsSaveSize) { + + AFI->setArgRegsSaveSize(ArgRegsSaveSize); + + int FrameIndex = MFI->CreateFixedObject( + ArgRegsSaveSize, + ArgOffset + ArgRegsSaveSize - ArgRegsSize, + false); + SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy()); SmallVector<SDValue, 4> MemOps; for (unsigned i = 0; firstRegToSaveIndex < 4; ++firstRegToSaveIndex, ++i) { @@ -2656,10 +2675,30 @@ if (!MemOps.empty()) Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0], MemOps.size()); + return FrameIndex; } else // This will point to the next argument passed via stack. - AFI->setVarArgsFrameIndex( - MFI->CreateFixedObject(4, ArgOffset, !ForceMutable)); + return MFI->CreateFixedObject(4, ArgOffset, !ForceMutable); +} + +// Set up the stack frame that the va_list pointer will start from. +void +ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, + DebugLoc dl, SDValue &Chain, + unsigned ArgOffset, + bool ForceMutable) const { + MachineFunction &MF = DAG.getMachineFunction(); + ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); + + // Try to store any remaining integer argument regs + // to their spots on the stack so that they may be loaded by dereferencing + // the result of va_next. + // If there are no regs to be stored, just point to the address after the last + // argument passed via stack. + int FrameIndex = + StoreByValRegs(CCInfo, DAG, dl, Chain, 0, 0, ArgOffset, ForceMutable); + + AFI->setVarArgsFrameIndex(FrameIndex); } SDValue @@ -2785,20 +2824,12 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain, // Since they could be overwritten by lowering of arguments in case of // a tail call.
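For context, the register spilling that StoreByValRegs/VarArgStyleRegisters set up above is what makes the usual variadic pattern work: any of r0-r3 not consumed by named parameters are stored into the ArgRegsSaveSize area immediately below the stack-passed arguments, so a va_list can walk registers and stack as one contiguous block. An illustrative caller-visible example (hypothetical, not code from the patch):

#include <cstdarg>

// Under AAPCS the first four integer arguments arrive in r0-r3; for a
// variadic callee the unconsumed ones are spilled to the save area so the
// va_list can advance from them straight onto the caller's stack words.
int sum(int n, ...) {
  va_list ap;
  va_start(ap, n);            // starts in the spilled r1-r3 save area
  int total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(ap, int); // falls through to stack-passed arguments
  va_end(ap);
  return total;
}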
if (Flags.isByVal()) { - ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); - if (!AFI->getVarArgsFrameIndex()) { - VarArgStyleRegisters(CCInfo, DAG, - dl, Chain, CurOrigArg, - Ins[VA.getValNo()].PartOffset, - VA.getLocMemOffset(), - true /*force mutable frames*/); - int VAFrameIndex = AFI->getVarArgsFrameIndex(); - InVals.push_back(DAG.getFrameIndex(VAFrameIndex, getPointerTy())); - } else { - int FI = MFI->CreateFixedObject(Flags.getByValSize(), - VA.getLocMemOffset(), false); - InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); - } + int FrameIndex = StoreByValRegs( + CCInfo, DAG, dl, Chain, CurOrigArg, + Ins[VA.getValNo()].PartOffset, + VA.getLocMemOffset(), + true /*force mutable frames*/); + InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy())); } else { int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, VA.getLocMemOffset(), true); @@ -2816,7 +2847,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain, // varargs if (isVarArg) - VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0, 0, + VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); return Chain; @@ -3433,6 +3464,47 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { return FrameAddr; } +/// Custom Expand long vector extensions, where size(DestVec) > 2*size(SrcVec), +/// and size(DestVec) > 128-bits. +/// This is achieved by doing the one extension from the SrcVec, splitting the +/// result, extending these parts, and then concatenating these into the +/// destination. +static SDValue ExpandVectorExtension(SDNode *N, SelectionDAG &DAG) { + SDValue Op = N->getOperand(0); + EVT SrcVT = Op.getValueType(); + EVT DestVT = N->getValueType(0); + + assert(DestVT.getSizeInBits() > 128 && + "Custom sext/zext expansion needs >128-bit vector."); + // If this is a normal length extension, use the default expansion. + if (SrcVT.getSizeInBits()*4 != DestVT.getSizeInBits() && + SrcVT.getSizeInBits()*8 != DestVT.getSizeInBits()) + return SDValue(); + + DebugLoc dl = N->getDebugLoc(); + unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits(); + unsigned DestEltSize = DestVT.getVectorElementType().getSizeInBits(); + unsigned NumElts = SrcVT.getVectorNumElements(); + LLVMContext &Ctx = *DAG.getContext(); + SDValue Mid, SplitLo, SplitHi, ExtLo, ExtHi; + + EVT MidVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2), + NumElts); + EVT SplitVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2), + NumElts/2); + EVT ExtVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, DestEltSize), + NumElts/2); + + Mid = DAG.getNode(N->getOpcode(), dl, MidVT, Op); + SplitLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid, + DAG.getIntPtrConstant(0)); + SplitHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid, + DAG.getIntPtrConstant(NumElts/2)); + ExtLo = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitLo); + ExtHi = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitHi); + return DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, ExtLo, ExtHi); +} + /// ExpandBITCAST - If the target supports VFP, this function is called to /// expand a bit convert where either the source or destination type is i64 to /// use a VMOVDRR or VMOVRRD node. 
This should not be done when the non-i64 @@ -5565,7 +5637,6 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::BR_CC: return LowerBR_CC(Op, DAG); case ISD::BR_JT: return LowerBR_JT(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG); - case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); case ISD::SINT_TO_FP: @@ -5621,6 +5692,10 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N, case ISD::BITCAST: Res = ExpandBITCAST(N, DAG); break; + case ISD::SIGN_EXTEND: + case ISD::ZERO_EXTEND: + Res = ExpandVectorExtension(N, DAG); + break; case ISD::SRL: case ISD::SRA: Res = Expand64BitShift(N, DAG, Subtarget); diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 9ee17f0..46b8438 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -464,7 +464,8 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const; + SmallVectorImpl<SDValue> &InVals, + bool isThisReturn, SDValue ThisVal) const; virtual SDValue LowerFormalArguments(SDValue Chain, @@ -473,16 +474,21 @@ namespace llvm { DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; + int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, + DebugLoc dl, SDValue &Chain, + const Value *OrigArg, + unsigned OffsetFromOrigArg, + unsigned ArgOffset, + bool ForceMutable) const; + void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, DebugLoc dl, SDValue &Chain, - const Value *OrigArg, - unsigned OffsetFromOrigArg, unsigned ArgOffset, - bool ForceMutable = false) - const; + bool ForceMutable = false) const; void computeRegArea(CCState &CCInfo, MachineFunction &MF, - unsigned &VARegSize, unsigned &VARegSaveSize) const; + unsigned &ArgRegsSize, + unsigned &ArgRegsSaveSize) const; virtual SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index 9409f35..1bd174e 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -221,6 +221,9 @@ def HasDB : Predicate<"Subtarget->hasDataBarrier()">, def HasMP : Predicate<"Subtarget->hasMPExtension()">, AssemblerPredicate<"FeatureMP", "mp-extensions">; +def HasTrustZone : Predicate<"Subtarget->hasTrustZone()">, + AssemblerPredicate<"FeatureTrustZone", + "TrustZone">; def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">; def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">; def IsThumb : Predicate<"Subtarget->isThumb()">, @@ -578,6 +581,17 @@ def imm0_1 : Operand<i32> { let ParserMatchClass = Imm0_1AsmOperand; } def Imm0_3AsmOperand: ImmAsmOperand { let Name = "Imm0_3"; } def imm0_3 : Operand<i32> { let ParserMatchClass = Imm0_3AsmOperand; } +/// imm0_4 predicate - Immediate in the range [0,4]. +def Imm0_4AsmOperand : ImmAsmOperand +{ + let Name = "Imm0_4"; + let DiagnosticType = "ImmRange0_4"; +} +def imm0_4 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 5; }]> { + let ParserMatchClass = Imm0_4AsmOperand; + let DecoderMethod = "DecodeImm0_4"; +} + /// imm0_7 predicate - Immediate in the range [0,7]. 
def Imm0_7AsmOperand: ImmAsmOperand { let Name = "Imm0_7"; } def imm0_7 : Operand<i32>, ImmLeaf<i32, [{ @@ -741,18 +755,26 @@ def imm1_16 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 16; }], // addrmode_imm12 := reg +/- imm12 // def MemImm12OffsetAsmOperand : AsmOperandClass { let Name = "MemImm12Offset"; } -def addrmode_imm12 : Operand<i32>, +class AddrMode_Imm12 : Operand<i32>, ComplexPattern<i32, 2, "SelectAddrModeImm12", []> { // 12-bit immediate operand. Note that instructions using this encode // #0 and #-0 differently. We flag #-0 as the magic value INT32_MIN. All other // immediate values are as normal. let EncoderMethod = "getAddrModeImm12OpValue"; - let PrintMethod = "printAddrModeImm12Operand"; let DecoderMethod = "DecodeAddrModeImm12Operand"; let ParserMatchClass = MemImm12OffsetAsmOperand; let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm); } + +def addrmode_imm12 : AddrMode_Imm12 { + let PrintMethod = "printAddrModeImm12Operand<false>"; +} + +def addrmode_imm12_pre : AddrMode_Imm12 { + let PrintMethod = "printAddrModeImm12Operand<true>"; +} + // ldst_so_reg := reg +/- reg shop imm // def MemRegOffsetAsmOperand : AsmOperandClass { let Name = "MemRegOffset"; } @@ -852,14 +874,23 @@ def am2offset_imm : Operand<i32>, // // FIXME: split into imm vs. reg versions. def AddrMode3AsmOperand : AsmOperandClass { let Name = "AddrMode3"; } -def addrmode3 : Operand<i32>, - ComplexPattern<i32, 3, "SelectAddrMode3", []> { +class AddrMode3 : Operand<i32>, + ComplexPattern<i32, 3, "SelectAddrMode3", []> { let EncoderMethod = "getAddrMode3OpValue"; - let PrintMethod = "printAddrMode3Operand"; let ParserMatchClass = AddrMode3AsmOperand; let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm); } +def addrmode3 : AddrMode3 +{ + let PrintMethod = "printAddrMode3Operand<false>"; +} + +def addrmode3_pre : AddrMode3 +{ + let PrintMethod = "printAddrMode3Operand<true>"; +} + // FIXME: split into imm vs. reg versions. // FIXME: parser method to handle +/- register. 
def AM3OffsetAsmOperand : AsmOperandClass { @@ -885,15 +916,22 @@ def ldstm_mode : OptionalDefOperand<OtherVT, (ops i32), (ops (i32 1))> { // addrmode5 := reg +/- imm8*4 // def AddrMode5AsmOperand : AsmOperandClass { let Name = "AddrMode5"; } -def addrmode5 : Operand<i32>, - ComplexPattern<i32, 2, "SelectAddrMode5", []> { - let PrintMethod = "printAddrMode5Operand"; +class AddrMode5 : Operand<i32>, + ComplexPattern<i32, 2, "SelectAddrMode5", []> { let EncoderMethod = "getAddrMode5OpValue"; let DecoderMethod = "DecodeAddrMode5Operand"; let ParserMatchClass = AddrMode5AsmOperand; let MIOperandInfo = (ops GPR:$base, i32imm); } +def addrmode5 : AddrMode5 { + let PrintMethod = "printAddrMode5Operand<false>"; +} + +def addrmode5_pre : AddrMode5 { + let PrintMethod = "printAddrMode5Operand<true>"; +} + // addrmode6 := reg with optional alignment // def AddrMode6AsmOperand : AsmOperandClass { let Name = "AlignedMemory"; } @@ -1010,7 +1048,8 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc, let isReMaterializable = 1 in { def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm, iii, opc, "\t$Rd, $Rn, $imm", - [(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]> { + [(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]>, + Sched<[WriteALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> imm; @@ -1022,7 +1061,8 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc, } def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir, opc, "\t$Rd, $Rn, $Rm", - [(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]> { + [(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]>, + Sched<[WriteALU, ReadALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<4> Rm; @@ -1037,7 +1077,8 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc, def rsi : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, iis, opc, "\t$Rd, $Rn, $shift", - [(set GPR:$Rd, (opnode GPR:$Rn, so_reg_imm:$shift))]> { + [(set GPR:$Rd, (opnode GPR:$Rn, so_reg_imm:$shift))]>, + Sched<[WriteALUsi, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1052,7 +1093,8 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc, def rsr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis, opc, "\t$Rd, $Rn, $shift", - [(set GPR:$Rd, (opnode GPR:$Rn, so_reg_reg:$shift))]> { + [(set GPR:$Rd, (opnode GPR:$Rn, so_reg_reg:$shift))]>, + Sched<[WriteALUsr, ReadALUsr]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1079,7 +1121,8 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string opc, let isReMaterializable = 1 in { def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm, iii, opc, "\t$Rd, $Rn, $imm", - [(set GPR:$Rd, (opnode so_imm:$imm, GPR:$Rn))]> { + [(set GPR:$Rd, (opnode so_imm:$imm, GPR:$Rn))]>, + Sched<[WriteALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> imm; @@ -1091,7 +1134,8 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string opc, } def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir, opc, "\t$Rd, $Rn, $Rm", - [/* pattern left blank */]> { + [/* pattern left blank */]>, + Sched<[WriteALU, ReadALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<4> Rm; @@ -1105,7 +1149,8 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string opc, def rsi : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, iis, opc, "\t$Rd, $Rn, $shift", - [(set GPR:$Rd, (opnode so_reg_imm:$shift, GPR:$Rn))]> { + [(set GPR:$Rd, (opnode so_reg_imm:$shift, GPR:$Rn))]>, + Sched<[WriteALUsi, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1120,7 +1165,8 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string 
opc, def rsr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis, opc, "\t$Rd, $Rn, $shift", - [(set GPR:$Rd, (opnode so_reg_reg:$shift, GPR:$Rn))]> { + [(set GPR:$Rd, (opnode so_reg_reg:$shift, GPR:$Rn))]>, + Sched<[WriteALUsr, ReadALUsr]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1145,24 +1191,28 @@ multiclass AsI1_bin_s_irs<InstrItinClass iii, InstrItinClass iir, bit Commutable = 0> { def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p), 4, iii, - [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm))]>; + [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm))]>, + Sched<[WriteALU, ReadALU]>; def rr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, pred:$p), 4, iir, - [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, GPR:$Rm))]> { + [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, GPR:$Rm))]>, + Sched<[WriteALU, ReadALU, ReadALU]> { let isCommutable = Commutable; } def rsi : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift, pred:$p), 4, iis, [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, - so_reg_imm:$shift))]>; + so_reg_imm:$shift))]>, + Sched<[WriteALUsi, ReadALU]>; def rsr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift, pred:$p), 4, iis, [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, - so_reg_reg:$shift))]>; + so_reg_reg:$shift))]>, + Sched<[WriteALUSsr, ReadALUsr]>; } } @@ -1174,19 +1224,22 @@ multiclass AsI1_rbin_s_is<InstrItinClass iii, InstrItinClass iir, bit Commutable = 0> { def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p), 4, iii, - [(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn))]>; + [(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn))]>, + Sched<[WriteALU, ReadALU]>; def rsi : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift, pred:$p), 4, iis, [(set GPR:$Rd, CPSR, (opnode so_reg_imm:$shift, - GPR:$Rn))]>; + GPR:$Rn))]>, + Sched<[WriteALUsi, ReadALU]>; def rsr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift, pred:$p), 4, iis, [(set GPR:$Rd, CPSR, (opnode so_reg_reg:$shift, - GPR:$Rn))]>; + GPR:$Rn))]>, + Sched<[WriteALUSsr, ReadALUsr]>; } } @@ -1199,7 +1252,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, PatFrag opnode, bit Commutable = 0> { def ri : AI1<opcod, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, iii, opc, "\t$Rn, $imm", - [(opnode GPR:$Rn, so_imm:$imm)]> { + [(opnode GPR:$Rn, so_imm:$imm)]>, + Sched<[WriteCMP, ReadALU]> { bits<4> Rn; bits<12> imm; let Inst{25} = 1; @@ -1212,7 +1266,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, } def rr : AI1<opcod, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir, opc, "\t$Rn, $Rm", - [(opnode GPR:$Rn, GPR:$Rm)]> { + [(opnode GPR:$Rn, GPR:$Rm)]>, + Sched<[WriteCMP, ReadALU, ReadALU]> { bits<4> Rn; bits<4> Rm; let isCommutable = Commutable; @@ -1228,7 +1283,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, def rsi : AI1<opcod, (outs), (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, iis, opc, "\t$Rn, $shift", - [(opnode GPR:$Rn, so_reg_imm:$shift)]> { + [(opnode GPR:$Rn, so_reg_imm:$shift)]>, + Sched<[WriteCMPsi, ReadALU]> { bits<4> Rn; bits<12> shift; let Inst{25} = 0; @@ -1244,7 +1300,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, def rsr : AI1<opcod, (outs), (ins GPRnopc:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis, opc, "\t$Rn, $shift", - [(opnode GPRnopc:$Rn, so_reg_reg:$shift)]> { + [(opnode GPRnopc:$Rn, so_reg_reg:$shift)]>, + Sched<[WriteCMPsr, ReadALU]> { bits<4> Rn; bits<12> shift; let Inst{25} = 0; @@ -1326,7 +1383,8 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode, def ri : 
AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm", [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm, CPSR))]>, - Requires<[IsARM]> { + Requires<[IsARM]>, + Sched<[WriteALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> imm; @@ -1338,7 +1396,8 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode, def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, IIC_iALUr, opc, "\t$Rd, $Rn, $Rm", [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, GPR:$Rm, CPSR))]>, - Requires<[IsARM]> { + Requires<[IsARM]>, + Sched<[WriteALU, ReadALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<4> Rm; @@ -1353,7 +1412,8 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode, (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift", [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_reg_imm:$shift, CPSR))]>, - Requires<[IsARM]> { + Requires<[IsARM]>, + Sched<[WriteALUsi, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1369,7 +1429,8 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode, DPSoRegRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift", [(set GPRnopc:$Rd, CPSR, (opnode GPRnopc:$Rn, so_reg_reg:$shift, CPSR))]>, - Requires<[IsARM]> { + Requires<[IsARM]>, + Sched<[WriteALUsr, ReadALUsr]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1392,7 +1453,8 @@ multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> { def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm", [(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn, CPSR))]>, - Requires<[IsARM]> { + Requires<[IsARM]>, + Sched<[WriteALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> imm; @@ -1403,7 +1465,8 @@ multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> { } def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, IIC_iALUr, opc, "\t$Rd, $Rn, $Rm", - [/* pattern left blank */]> { + [/* pattern left blank */]>, + Sched<[WriteALU, ReadALU, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<4> Rm; @@ -1416,7 +1479,8 @@ multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> { def rsi : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift", [(set GPR:$Rd, CPSR, (opnode so_reg_imm:$shift, GPR:$Rn, CPSR))]>, - Requires<[IsARM]> { + Requires<[IsARM]>, + Sched<[WriteALUsi, ReadALU]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1430,7 +1494,8 @@ multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> { def rsr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift", [(set GPR:$Rd, CPSR, (opnode so_reg_reg:$shift, GPR:$Rn, CPSR))]>, - Requires<[IsARM]> { + Requires<[IsARM]>, + Sched<[WriteALUsr, ReadALUsr]> { bits<4> Rd; bits<4> Rn; bits<12> shift; @@ -1641,11 +1706,11 @@ def ATOMUMAX6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2), NoItinerary, []>; } -def HINT : AI<(outs), (ins imm0_255:$imm), MiscFrm, NoItinerary, +def HINT : AI<(outs), (ins imm0_4:$imm), MiscFrm, NoItinerary, "hint", "\t$imm", []>, Requires<[IsARM, HasV6]> { - bits<8> imm; - let Inst{27-8} = 0b00110010000011110000; - let Inst{7-0} = imm; + bits<3> imm; + let Inst{27-3} = 0b0011001000001111000000000; + let Inst{2-0} = imm; } def : InstAlias<"nop$p", (HINT 0, pred:$p)>, Requires<[IsARM, HasV6T2]>; @@ -1842,7 +1907,8 @@ let neverHasSideEffects = 1, isReMaterializable = 1 in // the instruction. 
The {24-21} opcode bits are set by the fixup, as we don't // know until then which form of the instruction will be used. def ADR : AI1<{0,?,?,0}, (outs GPR:$Rd), (ins adrlabel:$label), - MiscFrm, IIC_iALUi, "adr", "\t$Rd, $label", []> { + MiscFrm, IIC_iALUi, "adr", "\t$Rd, $label", []>, + Sched<[WriteALU, ReadALU]> { bits<4> Rd; bits<14> label; let Inst{27-25} = 0b001; @@ -2049,7 +2115,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in { // Secure Monitor Call is a system instruction. def SMC : ABI<0b0001, (outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt", - []> { + []>, Requires<[IsARM, HasTrustZone]> { bits<4> opt; let Inst{23-4} = 0b01100000000000000111; let Inst{3-0} = opt; @@ -2210,7 +2276,7 @@ def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rd, GPR:$dst2), multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass iii, InstrItinClass iir> { def _PRE_IMM : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb), - (ins addrmode_imm12:$addr), IndexModePre, LdFrm, iii, + (ins addrmode_imm12_pre:$addr), IndexModePre, LdFrm, iii, opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> { bits<17> addr; let Inst{25} = 0; @@ -2247,6 +2313,7 @@ multiclass AI2_ldridx<bit isByte, string opc, let Inst{23} = offset{12}; let Inst{19-16} = addr; let Inst{11-0} = offset{11-0}; + let Inst{4} = 0; let DecoderMethod = "DecodeAddrMode2IdxInstruction"; } @@ -2279,7 +2346,7 @@ defm LDRB : AI2_ldridx<1, "ldrb", IIC_iLoad_bh_iu, IIC_iLoad_bh_ru>; multiclass AI3_ldridx<bits<4> op, string opc, InstrItinClass itin> { def _PRE : AI3ldstidx<op, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb), - (ins addrmode3:$addr), IndexModePre, + (ins addrmode3_pre:$addr), IndexModePre, LdMiscFrm, itin, opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> { bits<14> addr; @@ -2313,7 +2380,7 @@ defm LDRSH : AI3_ldridx<0b1111, "ldrsh", IIC_iLoad_bh_ru>; defm LDRSB : AI3_ldridx<0b1101, "ldrsb", IIC_iLoad_bh_ru>; let hasExtraDefRegAllocReq = 1 in { def LDRD_PRE : AI3ldstidx<0b1101, 0, 1, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb), - (ins addrmode3:$addr), IndexModePre, + (ins addrmode3_pre:$addr), IndexModePre, LdMiscFrm, IIC_iLoad_d_ru, "ldrd", "\t$Rt, $Rt2, $addr!", "$addr.base = $Rn_wb", []> { @@ -2469,7 +2536,7 @@ def STRD : AI3str<0b1111, (outs), (ins GPR:$Rt, GPR:$src2, addrmode3:$addr), multiclass AI2_stridx<bit isByte, string opc, InstrItinClass iii, InstrItinClass iir> { def _PRE_IMM : AI2ldstidx<0, isByte, 1, (outs GPR:$Rn_wb), - (ins GPR:$Rt, addrmode_imm12:$addr), IndexModePre, + (ins GPR:$Rt, addrmode_imm12_pre:$addr), IndexModePre, StFrm, iii, opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> { bits<17> addr; @@ -2591,7 +2658,7 @@ def STRH_preidx: ARMPseudoInst<(outs GPR:$Rn_wb), def STRH_PRE : AI3ldstidx<0b1011, 0, 1, (outs GPR:$Rn_wb), - (ins GPR:$Rt, addrmode3:$addr), IndexModePre, + (ins GPR:$Rt, addrmode3_pre:$addr), IndexModePre, StMiscFrm, IIC_iStore_bh_ru, "strh", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> { bits<14> addr; @@ -2623,7 +2690,7 @@ def STRH_POST : AI3ldstidx<0b1011, 0, 0, (outs GPR:$Rn_wb), let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in { def STRD_PRE : AI3ldstidx<0b1111, 0, 1, (outs GPR:$Rn_wb), - (ins GPR:$Rt, GPR:$Rt2, addrmode3:$addr), + (ins GPR:$Rt, GPR:$Rt2, addrmode3_pre:$addr), IndexModePre, StMiscFrm, IIC_iStore_d_ru, "strd", "\t$Rt, $Rt2, $addr!", "$addr.base = $Rn_wb", []> { @@ -3866,28 +3933,33 @@ def UDIV : ADivA1I<0b011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), IIC_iDIV, def CLZ : AMiscA1I<0b000010110, 0b0001, (outs GPR:$Rd), (ins GPR:$Rm), IIC_iUNAr, "clz", 
"\t$Rd, $Rm", - [(set GPR:$Rd, (ctlz GPR:$Rm))]>, Requires<[IsARM, HasV5T]>; + [(set GPR:$Rd, (ctlz GPR:$Rm))]>, Requires<[IsARM, HasV5T]>, + Sched<[WriteALU]>; def RBIT : AMiscA1I<0b01101111, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm), IIC_iUNAr, "rbit", "\t$Rd, $Rm", [(set GPR:$Rd, (ARMrbit GPR:$Rm))]>, - Requires<[IsARM, HasV6T2]>; + Requires<[IsARM, HasV6T2]>, + Sched<[WriteALU]>; def REV : AMiscA1I<0b01101011, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm), IIC_iUNAr, "rev", "\t$Rd, $Rm", - [(set GPR:$Rd, (bswap GPR:$Rm))]>, Requires<[IsARM, HasV6]>; + [(set GPR:$Rd, (bswap GPR:$Rm))]>, Requires<[IsARM, HasV6]>, + Sched<[WriteALU]>; let AddedComplexity = 5 in def REV16 : AMiscA1I<0b01101011, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm), IIC_iUNAr, "rev16", "\t$Rd, $Rm", [(set GPR:$Rd, (rotr (bswap GPR:$Rm), (i32 16)))]>, - Requires<[IsARM, HasV6]>; + Requires<[IsARM, HasV6]>, + Sched<[WriteALU]>; let AddedComplexity = 5 in def REVSH : AMiscA1I<0b01101111, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm), IIC_iUNAr, "revsh", "\t$Rd, $Rm", [(set GPR:$Rd, (sra (bswap GPR:$Rm), (i32 16)))]>, - Requires<[IsARM, HasV6]>; + Requires<[IsARM, HasV6]>, + Sched<[WriteALU]>; def : ARMV6Pat<(or (sra (shl GPR:$Rm, (i32 24)), (i32 16)), (and (srl GPR:$Rm, (i32 8)), 0xFF)), @@ -3899,7 +3971,8 @@ def PKHBT : APKHI<0b01101000, 0, (outs GPRnopc:$Rd), [(set GPRnopc:$Rd, (or (and GPRnopc:$Rn, 0xFFFF), (and (shl GPRnopc:$Rm, pkh_lsl_amt:$sh), 0xFFFF0000)))]>, - Requires<[IsARM, HasV6]>; + Requires<[IsARM, HasV6]>, + Sched<[WriteALUsi, ReadALU]>; // Alternate cases for PKHBT where identities eliminate some nodes. def : ARMV6Pat<(or (and GPRnopc:$Rn, 0xFFFF), (and GPRnopc:$Rm, 0xFFFF0000)), @@ -3915,7 +3988,8 @@ def PKHTB : APKHI<0b01101000, 1, (outs GPRnopc:$Rd), [(set GPRnopc:$Rd, (or (and GPRnopc:$Rn, 0xFFFF0000), (and (sra GPRnopc:$Rm, pkh_asr_amt:$sh), 0xFFFF)))]>, - Requires<[IsARM, HasV6]>; + Requires<[IsARM, HasV6]>, + Sched<[WriteALUsi, ReadALU]>; // Alternate cases for PKHTB where identities eliminate some nodes. Note that // a shift amount of 0 is *not legal* here, it is PKHBT instead. 
@@ -4391,7 +4465,7 @@ multiclass LdStCop<bit load, bit Dbit, string asm> { let Inst{7-0} = addr{7-0}; let DecoderMethod = "DecodeCopMemInstruction"; } - def _PRE : ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr), + def _PRE : ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr), asm, "\t$cop, $CRd, $addr!", IndexModePre> { bits<13> addr; bits<4> cop; @@ -4462,7 +4536,7 @@ multiclass LdSt2Cop<bit load, bit Dbit, string asm> { let Inst{7-0} = addr{7-0}; let DecoderMethod = "DecodeCopMemInstruction"; } - def _PRE : ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr), + def _PRE : ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr), asm, "\t$cop, $CRd, $addr!", IndexModePre> { bits<13> addr; bits<4> cop; diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td index 0411ac4..896fd0f 100644 --- a/lib/Target/ARM/ARMInstrNEON.td +++ b/lib/Target/ARM/ARMInstrNEON.td @@ -4316,6 +4316,24 @@ def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacgt", defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q, "vtst", "", NEONvtst, 1>; +def: NEONInstAlias<"vaclt${p}.f32 $Vd, $Vn, $Vm", + (VACGTd DPR:$Vd, DPR:$Vm, DPR:$Vn, pred:$p)>; +def: NEONInstAlias<"vaclt${p}.f32 $Vd, $Vn, $Vm", + (VACGTq QPR:$Vd, QPR:$Vm, QPR:$Vn, pred:$p)>; +def: NEONInstAlias<"vacle${p}.f32 $Vd, $Vn, $Vm", + (VACGEd DPR:$Vd, DPR:$Vm, DPR:$Vn, pred:$p)>; +def: NEONInstAlias<"vacle${p}.f32 $Vd, $Vn, $Vm", + (VACGEq QPR:$Vd, QPR:$Vm, QPR:$Vn, pred:$p)>; + +def: NEONInstAlias<"vaclt${p}.f32 $Vd, $Vm", + (VACGTd DPR:$Vd, DPR:$Vm, DPR:$Vd, pred:$p)>; +def: NEONInstAlias<"vaclt${p}.f32 $Vd, $Vm", + (VACGTq QPR:$Vd, QPR:$Vm, QPR:$Vd, pred:$p)>; +def: NEONInstAlias<"vacle${p}.f32 $Vd, $Vm", + (VACGEd DPR:$Vd, DPR:$Vm, DPR:$Vd, pred:$p)>; +def: NEONInstAlias<"vacle${p}.f32 $Vd, $Vm", + (VACGEq QPR:$Vd, QPR:$Vm, QPR:$Vd, pred:$p)>; + // Vector Bitwise Operations. 
def vnotd : PatFrag<(ops node:$in), @@ -4889,6 +4907,29 @@ def VABSfq : N2VQ<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs", "f32", v4f32, v4f32, fabs>; +def : Pat<(xor (v2i32 (bitconvert (v8i8 (NEONvshrs DPR:$src, (i32 7))))), + (v2i32 (bitconvert (v8i8 (add DPR:$src, + (NEONvshrs DPR:$src, (i32 7))))))), + (VABSv8i8 DPR:$src)>; +def : Pat<(xor (v2i32 (bitconvert (v4i16 (NEONvshrs DPR:$src, (i32 15))))), + (v2i32 (bitconvert (v4i16 (add DPR:$src, + (NEONvshrs DPR:$src, (i32 15))))))), + (VABSv4i16 DPR:$src)>; +def : Pat<(xor (v2i32 (NEONvshrs DPR:$src, (i32 31))), + (v2i32 (add DPR:$src, (NEONvshrs DPR:$src, (i32 31))))), + (VABSv2i32 DPR:$src)>; +def : Pat<(xor (v4i32 (bitconvert (v16i8 (NEONvshrs QPR:$src, (i32 7))))), + (v4i32 (bitconvert (v16i8 (add QPR:$src, + (NEONvshrs QPR:$src, (i32 7))))))), + (VABSv16i8 QPR:$src)>; +def : Pat<(xor (v4i32 (bitconvert (v8i16 (NEONvshrs QPR:$src, (i32 15))))), + (v4i32 (bitconvert (v8i16 (add QPR:$src, + (NEONvshrs QPR:$src, (i32 15))))))), + (VABSv8i16 QPR:$src)>; +def : Pat<(xor (v4i32 (NEONvshrs QPR:$src, (i32 31))), + (v4i32 (add QPR:$src, (NEONvshrs QPR:$src, (i32 31))))), + (VABSv4i32 QPR:$src)>; + def : Pat<(v2f32 (int_arm_neon_vabs (v2f32 DPR:$src))), (VABSfd DPR:$src)>; def : Pat<(v4f32 (int_arm_neon_vabs (v4f32 QPR:$src))), (VABSfq QPR:$src)>; diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index c9d709e..4dacb86 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -150,7 +150,7 @@ def lo5AllOne : PatLeaf<(i32 imm), [{ def t2addrmode_imm12_asmoperand : AsmOperandClass {let Name="MemUImm12Offset";} def t2addrmode_imm12 : Operand<i32>, ComplexPattern<i32, 2, "SelectT2AddrModeImm12", []> { - let PrintMethod = "printAddrModeImm12Operand"; + let PrintMethod = "printAddrModeImm12Operand<false>"; let EncoderMethod = "getAddrModeImm12OpValue"; let DecoderMethod = "DecodeT2AddrModeImm12"; let ParserMatchClass = t2addrmode_imm12_asmoperand; @@ -3401,12 +3401,7 @@ class t2CPS<dag iops, string asm_op> : T2XI<(outs), iops, NoItinerary, bits<5> mode; bit M; - let Inst{31-27} = 0b11110; - let Inst{26} = 0; - let Inst{25-20} = 0b111010; - let Inst{19-16} = 0b1111; - let Inst{15-14} = 0b10; - let Inst{12} = 0; + let Inst{31-11} = 0b111100111010111110000; let Inst{10-9} = imod; let Inst{8} = M; let Inst{7-5} = iflags; @@ -3425,13 +3420,13 @@ let imod = 0, iflags = 0, M = 1 in // A6.3.4 Branches and miscellaneous control // Table A6-14 Change Processor State, and hint instructions -def t2HINT : T2I<(outs), (ins imm0_255:$imm), NoItinerary, "hint", "\t$imm",[]>{ - bits<8> imm; - let Inst{31-8} = 0b111100111010111110000000; - let Inst{7-0} = imm; +def t2HINT : T2I<(outs), (ins imm0_4:$imm), NoItinerary, "hint", "\t$imm",[]> { + bits<3> imm; + let Inst{31-3} = 0b11110011101011111000000000000; + let Inst{2-0} = imm; } -def : t2InstAlias<"hint$p.w $imm", (t2HINT imm0_255:$imm, pred:$p)>; +def : t2InstAlias<"hint$p.w $imm", (t2HINT imm0_4:$imm, pred:$p)>; def : t2InstAlias<"nop$p.w", (t2HINT 0, pred:$p)>; def : t2InstAlias<"yield$p.w", (t2HINT 1, pred:$p)>; def : t2InstAlias<"wfe$p.w", (t2HINT 2, pred:$p)>; @@ -3449,7 +3444,8 @@ def t2DBG : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "dbg", "\t$opt", []> { // Secure Monitor Call is a system instruction. 
// Option = Inst{19-16} -def t2SMC : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt", []> { +def t2SMC : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt", + []>, Requires<[IsThumb2, HasTrustZone]> { let Inst{31-27} = 0b11110; let Inst{26-20} = 0b1111111; let Inst{15-12} = 0b1000; diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 98bd6c1..c8ed576 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -865,7 +865,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB, bool isLd = isi32Load(Opcode) || Opcode == ARM::VLDRS || Opcode == ARM::VLDRD; // Can't do the merge if the destination register is the same as the would-be // writeback register. - if (isLd && MI->getOperand(0).getReg() == Base) + if (MI->getOperand(0).getReg() == Base) return false; unsigned PredReg = 0; @@ -1258,6 +1258,22 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { // merge the ldr's so far, including this one. But don't try to // combine the following ldr(s). Clobber = (isi32Load(Opcode) && Base == MBBI->getOperand(0).getReg()); + + // Watch out for: + // r4 := ldr [r0, #8] + // r4 := ldr [r0, #4] + // + // The optimization may reorder the second ldr in front of the first + // ldr, which violates write after write(WAW) dependence. The same as + // str. Try to merge inst(s) already in MemOps. + bool Overlap = false; + for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end(); I != E; ++I) { + if (TRI->regsOverlap(Reg, I->MBBI->getOperand(0).getReg())) { + Overlap = true; + break; + } + } + if (CurrBase == 0 && !Clobber) { // Start of a new chain. CurrBase = Base; @@ -1268,7 +1284,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill, Position, MBBI)); ++NumMemOps; Advance = true; - } else { + } else if (!Overlap) { if (Clobber) { TryMerge = true; Advance = true; diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.h b/lib/Target/ARM/ARMMachineFunctionInfo.h index 88d96c0..f4248fc 100644 --- a/lib/Target/ARM/ARMMachineFunctionInfo.h +++ b/lib/Target/ARM/ARMMachineFunctionInfo.h @@ -38,7 +38,7 @@ class ARMFunctionInfo : public MachineFunctionInfo { /// VarArgsRegSaveSize - Size of the register save area for vararg functions. /// - unsigned VarArgsRegSaveSize; + unsigned ArgRegsSaveSize; /// HasStackFrame - True if this function has a stack frame. Set by /// processFunctionBeforeCalleeSavedScan(). 
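The write-after-write guard added to LoadStoreMultipleOpti above can be pictured with a small stand-alone C++ sketch; MemOp and regsOverlap below are simplified placeholders for the LLVM types, used only to show the scan over the pending merge queue:

  #include <vector>

  struct MemOp { unsigned Reg; int Offset; };      // placeholder, not the LLVM type

  // The real code asks TargetRegisterInfo::regsOverlap; equality is enough here.
  static bool regsOverlap(unsigned A, unsigned B) { return A == B; }

  // Before a new load/store joins the queue, refuse to merge if its transfer
  // register overlaps one already queued, e.g.
  //   r4 := ldr [r0, #8]
  //   r4 := ldr [r0, #4]
  // where forming an LDM could reorder the two writes to r4.
  static bool overlapsQueued(const std::vector<MemOp> &MemOps, unsigned Reg) {
    for (const MemOp &M : MemOps)
      if (regsOverlap(Reg, M.Reg))
        return true;
    return false;
  }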
@@ -117,7 +117,7 @@ public: ARMFunctionInfo() : isThumb(false), hasThumb2(false), - VarArgsRegSaveSize(0), HasStackFrame(false), RestoreSPFromFP(false), + ArgRegsSaveSize(0), HasStackFrame(false), RestoreSPFromFP(false), LRSpilledForFarJump(false), FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0), GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0), @@ -129,7 +129,7 @@ public: explicit ARMFunctionInfo(MachineFunction &MF) : isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()), hasThumb2(MF.getTarget().getSubtarget<ARMSubtarget>().hasThumb2()), - VarArgsRegSaveSize(0), HasStackFrame(false), RestoreSPFromFP(false), + ArgRegsSaveSize(0), HasStackFrame(false), RestoreSPFromFP(false), LRSpilledForFarJump(false), FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0), GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0), @@ -141,8 +141,8 @@ public: bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; } bool isThumb2Function() const { return isThumb && hasThumb2; } - unsigned getVarArgsRegSaveSize() const { return VarArgsRegSaveSize; } - void setVarArgsRegSaveSize(unsigned s) { VarArgsRegSaveSize = s; } + unsigned getArgRegsSaveSize() const { return ArgRegsSaveSize; } + void setArgRegsSaveSize(unsigned s) { ArgRegsSaveSize = s; } bool hasStackFrame() const { return HasStackFrame; } void setHasStackFrame(bool s) { HasStackFrame = s; } diff --git a/lib/Target/ARM/ARMSchedule.td b/lib/Target/ARM/ARMSchedule.td index 02196d0..2d088de 100644 --- a/lib/Target/ARM/ARMSchedule.td +++ b/lib/Target/ARM/ARMSchedule.td @@ -6,6 +6,77 @@ // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// Instruction scheduling annotations for out-of-order CPUs. +// These annotations are independent of the itinerary class defined below. +// Here we define the subtarget independent read/write per-operand resources. +// The subtarget schedule definitions will then map these to the subtarget's +// resource usages. +// For example: +// The instruction cycle timings table might contain an entry for an operation +// like the following: +// Rd <- ADD Rn, Rm, <shift> Rs +// Uops | Latency from register | Uops - resource requirements - latency +// 2 | Rn: 1 Rm: 4 Rs: 4 | uop T0, Rm, Rs - P01 - 3 +// | | uopc Rd, Rn, T0 - P01 - 1 +// This is telling us that the result will be available in destination register +// Rd after a minimum of three cycles after the result in Rm and Rs is available +// and one cycle after the result in Rn is available. The micro-ops can execute +// on resource P01. +// To model this, we need to express that we need to dispatch two micro-ops, +// that the resource P01 is needed and that the latency to Rn is different than +// the latency to Rm and Rs. The scheduler can decrease Rn's producer latency by +// two. +// We will do this by assigning (abstract) resources to register defs/uses. +// ARMSchedule.td: +// def WriteALUsr : SchedWrite; +// def ReadAdvanceALUsr : ScheRead; +// +// ARMInstrInfo.td: +// def ADDrs : I<>, Sched<[WriteALUsr, ReadAdvanceALUsr, ReadDefault, +// ReadDefault]> { ...} +// ReadAdvance read resources allow us to define "pipeline by-passes" or +// shorter latencies to certain registers as needed in the example above. +// The "ReadDefault" can be omitted. +// Next, the subtarget td file assigns resources to the abstract resources +// defined here. 
+// ARMScheduleSubtarget.td: +// // Resources. +// def P01 : ProcResource<3>; // ALU unit (3 of it). +// ... +// // Resource usages. +// def : WriteRes<WriteALUsr, [P01, P01]> { +// Latency = 4; // Latency of 4. +// NumMicroOps = 2; // Dispatch 2 micro-ops. +// // The two instances of resource P01 are occupied for one cycle. It is one +// // cycle because these resources happen to be pipelined. +// ResourceCycles = [1, 1]; +// } +// def : ReadAdvance<ReadAdvanceALUsr, 3>; + +// Basic ALU operation. +def WriteALU : SchedWrite; +def ReadALU : SchedRead; + +// Basic ALU with shifts. +def WriteALUsi : SchedWrite; // Shift by immediate. +def WriteALUsr : SchedWrite; // Shift by register. +def WriteALUSsr : SchedWrite; // Shift by register (flag setting). +def ReadALUsr : SchedRead; // Some operands are read later. + +// Compares. +def WriteCMP : SchedWrite; +def WriteCMPsi : SchedWrite; +def WriteCMPsr : SchedWrite; + +// Define TII for use in SchedVariant Predicates. +def : PredicateProlog<[{ + const ARMBaseInstrInfo *TII = + static_cast<const ARMBaseInstrInfo*>(SchedModel->getInstrInfo()); + (void)TII; +}]>; + +def IsPredicatedPred : SchedPredicate<[{TII->isPredicated(MI)}]>; //===----------------------------------------------------------------------===// // Instruction Itinerary classes used for ARM diff --git a/lib/Target/ARM/ARMScheduleA9.td b/lib/Target/ARM/ARMScheduleA9.td index 4191931..9739ed2 100644 --- a/lib/Target/ARM/ARMScheduleA9.td +++ b/lib/Target/ARM/ARMScheduleA9.td @@ -1898,6 +1898,8 @@ def CortexA9Model : SchedMachineModel { //===----------------------------------------------------------------------===// // Define each kind of processor resource and number available. +let SchedModel = CortexA9Model in { + def A9UnitALU : ProcResource<2>; def A9UnitMul : ProcResource<1> { let Super = A9UnitALU; } def A9UnitAGU : ProcResource<1>; @@ -1918,11 +1920,11 @@ def A9WriteI : SchedWriteRes<[A9UnitALU]>; def A9WriteIsr : SchedWriteRes<[A9UnitALU]> { let Latency = 2; } // Basic ALU. -def A9WriteA : SchedWriteRes<[A9UnitALU]>; +def A9WriteALU : SchedWriteRes<[A9UnitALU]>; // ALU with operand shifted by immediate. -def A9WriteAsi : SchedWriteRes<[A9UnitALU]> { let Latency = 2; } +def : WriteRes<WriteALUsi, [A9UnitALU]> { let Latency = 2; } // ALU with operand shifted by register. -def A9WriteAsr : SchedWriteRes<[A9UnitALU]> { let Latency = 3; } +def A9WriteALUsr : SchedWriteRes<[A9UnitALU]> { let Latency = 3; } // Multiplication def A9WriteM : SchedWriteRes<[A9UnitMul, A9UnitMul]> { let Latency = 4; } @@ -2003,13 +2005,6 @@ foreach NumCycles = 2-8 in { def A9WriteCycle#NumCycles : WriteSequence<[A9WriteCycle1], NumCycles>; } // foreach NumCycles -// Define TII for use in SchedVariant Predicates. -def : PredicateProlog<[{ - const ARMBaseInstrInfo *TII = - static_cast<const ARMBaseInstrInfo*>(SchedModel->getInstrInfo()); - (void)TII; -}]>; - // Define address generation sequences and predicates for 8 flavors of LDMs. foreach NumAddr = 1-8 in { @@ -2254,11 +2249,11 @@ def A9WriteLMfp : SchedWriteVariant<[ // These mov immediate writers are unconditionally expanded with // additive latency. def A9WriteI2 : WriteSequence<[A9WriteI, A9WriteI]>; -def A9WriteI2pc : WriteSequence<[A9WriteI, A9WriteI, A9WriteA]>; +def A9WriteI2pc : WriteSequence<[A9WriteI, A9WriteI, WriteALU]>; def A9WriteI2ld : WriteSequence<[A9WriteI, A9WriteI, A9WriteL]>; // Some ALU operations can read loaded integer values one cycle early. 
-def A9ReadA : SchedReadAdvance<1, +def A9ReadALU : SchedReadAdvance<1, [A9WriteL, A9WriteLHi, A9WriteLsi, A9WriteLb, A9WriteLbsi, A9WriteL1, A9WriteL2, A9WriteL3, A9WriteL4, A9WriteL5, A9WriteL6, A9WriteL7, A9WriteL8, @@ -2279,26 +2274,25 @@ def A9Read4 : SchedReadAdvance<3>; // This table follows the ARM Cortex-A9 Technical Reference Manuals, // mostly in order. -let SchedModel = CortexA9Model in { def :ItinRW<[A9WriteI], [IIC_iMOVi,IIC_iMOVr,IIC_iMOVsi, IIC_iMVNi,IIC_iMVNsi, IIC_iCMOVi,IIC_iCMOVr,IIC_iCMOVsi]>; -def :ItinRW<[A9WriteI,A9ReadA],[IIC_iMVNr]>; +def :ItinRW<[A9WriteI,A9ReadALU],[IIC_iMVNr]>; def :ItinRW<[A9WriteIsr], [IIC_iMOVsr,IIC_iMVNsr,IIC_iCMOVsr]>; def :ItinRW<[A9WriteI2], [IIC_iMOVix2,IIC_iCMOVix2]>; def :ItinRW<[A9WriteI2pc], [IIC_iMOVix2addpc]>; def :ItinRW<[A9WriteI2ld], [IIC_iMOVix2ld]>; -def :ItinRW<[A9WriteA], [IIC_iBITi,IIC_iBITr,IIC_iUNAr,IIC_iTSTi,IIC_iTSTr]>; -def :ItinRW<[A9WriteA, A9ReadA], [IIC_iALUi, IIC_iCMPi, IIC_iCMPsi]>; -def :ItinRW<[A9WriteA, A9ReadA, A9ReadA],[IIC_iALUr,IIC_iCMPr]>; -def :ItinRW<[A9WriteAsi], [IIC_iBITsi,IIC_iUNAsi,IIC_iEXTr,IIC_iTSTsi]>; -def :ItinRW<[A9WriteAsi, A9ReadA], [IIC_iALUsi]>; -def :ItinRW<[A9WriteAsi, ReadDefault, A9ReadA], [IIC_iALUsir]>; // RSB -def :ItinRW<[A9WriteAsr], [IIC_iBITsr,IIC_iTSTsr,IIC_iEXTAr,IIC_iEXTAsr]>; -def :ItinRW<[A9WriteAsr, A9ReadA], [IIC_iALUsr,IIC_iCMPsr]>; +def :ItinRW<[WriteALU], [IIC_iBITi,IIC_iBITr,IIC_iUNAr,IIC_iTSTi,IIC_iTSTr]>; +def :ItinRW<[WriteALU, A9ReadALU], [IIC_iALUi, IIC_iCMPi, IIC_iCMPsi]>; +def :ItinRW<[WriteALU, A9ReadALU, A9ReadALU],[IIC_iALUr,IIC_iCMPr]>; +def :ItinRW<[WriteALUsi], [IIC_iBITsi,IIC_iUNAsi,IIC_iEXTr,IIC_iTSTsi]>; +def :ItinRW<[WriteALUsi, A9ReadALU], [IIC_iALUsi]>; +def :ItinRW<[WriteALUsi, ReadDefault, A9ReadALU], [IIC_iALUsir]>; // RSB +def :ItinRW<[A9WriteALUsr], [IIC_iBITsr,IIC_iTSTsr,IIC_iEXTAr,IIC_iEXTAsr]>; +def :ItinRW<[A9WriteALUsr, A9ReadALU], [IIC_iALUsr,IIC_iCMPsr]>; // A9WriteHi ignored for MUL32. def :ItinRW<[A9WriteM, A9WriteMHi], [IIC_iMUL32,IIC_iMAC32, @@ -2371,7 +2365,7 @@ def :ItinRW<[A9WriteLMAdr, A9WriteLM, A9WriteIssue], [IIC_iLoad_mu, IIC_iStore_m, IIC_iStore_mu]>; def :ItinRW<[A9WriteLM, A9WriteLMAdr, A9WriteB], [IIC_iLoad_mBr, IIC_iPop_Br]>; -def :ItinRW<[A9WriteL, A9WriteAdr, A9WriteA], [IIC_iLoadiALU]>; +def :ItinRW<[A9WriteL, A9WriteAdr, WriteALU], [IIC_iLoadiALU]>; def :ItinRW<[A9WriteLSfp, A9WriteAdr], [IIC_fpLoad32, IIC_fpLoad64]>; @@ -2486,4 +2480,17 @@ def :ItinRW<[A9WriteV9, A9Read3, A9Read2], [IIC_VMACD, IIC_VFMACD]>; def :ItinRW<[A9WriteV10, A9Read3, A9Read2], [IIC_VMACQ, IIC_VFMACQ]>; def :ItinRW<[A9WriteV9, A9Read2, A9Read2], [IIC_VRECSD]>; def :ItinRW<[A9WriteV10, A9Read2, A9Read2], [IIC_VRECSQ]>; + +// Map SchedRWs that are identical for cortexa9 to existing resources. +def : SchedAlias<WriteALU, A9WriteALU>; +def : SchedAlias<WriteALUsr, A9WriteALUsr>; +def : SchedAlias<WriteALUSsr, A9WriteALUsr>; +def : SchedAlias<ReadALU, A9ReadALU>; +def : SchedAlias<ReadALUsr, A9ReadALU>; +// FIXME: need to special case AND, ORR, EOR, BIC because they don't read +// advance. But our instrinfo claims it does. 
+ +def : SchedAlias<WriteCMP, A9WriteALU>; +def : SchedAlias<WriteCMPsi, A9WriteALU>; +def : SchedAlias<WriteCMPsr, A9WriteALU>; } // SchedModel = CortexA9Model diff --git a/lib/Target/ARM/ARMScheduleSwift.td b/lib/Target/ARM/ARMScheduleSwift.td index e9bc3e0..7c6df41 100644 --- a/lib/Target/ARM/ARMScheduleSwift.td +++ b/lib/Target/ARM/ARMScheduleSwift.td @@ -1078,8 +1078,67 @@ def SwiftModel : SchedMachineModel { let IssueWidth = 3; // 3 micro-ops are dispatched per cycle. let MinLatency = 0; // Data dependencies are allowed within dispatch groups. let LoadLatency = 3; + let MispredictPenalty = 14; // A branch direction mispredict. let Itineraries = SwiftItineraries; } -// TODO: Add Swift processor and scheduler resources. +// Swift predicates. +def IsFastImmShiftSwiftPred : SchedPredicate<[{TII->isSwiftFastImmShift(MI)}]>; + +// Swift resource mapping. +let SchedModel = SwiftModel in { + // Processor resources. + def SwiftUnitP01 : ProcResource<2>; // ALU unit. + def SwiftUnitP0 : ProcResource<1> { let Super = SwiftUnitP01; } // Mul unit. + def SwiftUnitP1 : ProcResource<1> { let Super = SwiftUnitP01; } // Br unit. + def SwiftUnitP2 : ProcResource<1>; // LS unit. + def SwiftUnitDiv : ProcResource<1>; + + // Generic resource requirements. + def SwiftWriteP01TwoCycle : SchedWriteRes<[SwiftUnitP01]> { let Latency = 2; } + def SwiftWriteP01ThreeCycleTwoUops : + SchedWriteRes<[SwiftUnitP01, SwiftUnitP01]> { + let Latency = 3; + let NumMicroOps = 2; + } + def SwiftWriteP0ThreeCycleThreeUops : SchedWriteRes<[SwiftUnitP0]> { + let Latency = 3; + let NumMicroOps = 3; + let ResourceCycles = [3]; + } + + // 4.2.4 Arithmetic and Logical. + // ALU operation register shifted by immediate variant. + def SwiftWriteALUsi : SchedWriteVariant<[ + // lsl #2, lsl #1, or lsr #1. 
+ SchedVar<IsFastImmShiftSwiftPred, [SwiftWriteP01TwoCycle]>, + SchedVar<NoSchedPred, [WriteALU]> + ]>; + def SwiftWriteALUsr : SchedWriteVariant<[ + SchedVar<IsPredicatedPred, [SwiftWriteP01ThreeCycleTwoUops]>, + SchedVar<NoSchedPred, [SwiftWriteP01TwoCycle]> + ]>; + def SwiftWriteALUSsr : SchedWriteVariant<[ + SchedVar<IsPredicatedPred, [SwiftWriteP0ThreeCycleThreeUops]>, + SchedVar<NoSchedPred, [SwiftWriteP01TwoCycle]> + ]>; + def SwiftReadAdvanceALUsr : SchedReadVariant<[ + SchedVar<IsPredicatedPred, [SchedReadAdvance<2>]>, + SchedVar<NoSchedPred, [NoReadAdvance]> + ]>; + // ADC,ADD,NEG,RSB,RSC,SBC,SUB,ADR + // AND,BIC,EOR,ORN,ORR + // CLZ,RBIT,REV,REV16,REVSH,PKH + def : WriteRes<WriteALU, [SwiftUnitP01]>; + def : SchedAlias<WriteALUsi, SwiftWriteALUsi>; + def : SchedAlias<WriteALUsr, SwiftWriteALUsr>; + def : SchedAlias<WriteALUSsr, SwiftWriteALUSsr>; + def : ReadAdvance<ReadALU, 0>; + def : SchedAlias<ReadALUsr, SwiftReadAdvanceALUsr>; + + // 4.2.5 Integer comparison + def : WriteRes<WriteCMP, [SwiftUnitP01]>; + def : WriteRes<WriteCMPsi, [SwiftUnitP01]>; + def : WriteRes<WriteCMPsr, [SwiftUnitP01]>; +} diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp index f4d568c..3b8e56f 100644 --- a/lib/Target/ARM/ARMSubtarget.cpp +++ b/lib/Target/ARM/ARMSubtarget.cpp @@ -19,6 +19,7 @@ #include "llvm/IR/Function.h" #include "llvm/Support/CommandLine.h" #include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetOptions.h" #define GET_SUBTARGETINFO_TARGET_DESC #define GET_SUBTARGETINFO_CTOR @@ -42,12 +43,13 @@ StrictAlign("arm-strict-align", cl::Hidden, cl::desc("Disallow all unaligned memory accesses")); ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU, - const std::string &FS) + const std::string &FS, const TargetOptions &Options) : ARMGenSubtargetInfo(TT, CPU, FS) , ARMProcFamily(Others) , stackAlignment(4) , CPUString(CPU) , TargetTriple(TT) + , Options(Options) , TargetABI(ARM_ABI_APCS) { initializeEnvironment(); resetSubtargetFeatures(CPU, FS); @@ -89,9 +91,11 @@ void ARMSubtarget::initializeEnvironment() { HasRAS = false; HasMPExtension = false; FPOnlySP = false; + HasTrustZone = false; AllowsUnalignedMem = false; Thumb2DSP = false; UseNaClTrap = false; + UnsafeFPMath = false; } void ARMSubtarget::resetSubtargetFeatures(const MachineFunction *MF) { @@ -162,6 +166,12 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) { // configuration. if (!StrictAlign && hasV6Ops() && isTargetDarwin()) AllowsUnalignedMem = true; + + // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default. + uint64_t Bits = getFeatureBits(); + if ((Bits & ARM::ProcA5 || Bits & ARM::ProcA8) && // Where this matters + (Options.UnsafeFPMath || isTargetDarwin())) + UseNEONForSinglePrecisionFP = true; } /// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol. diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index 8ce22e1..038eb76 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -26,6 +26,7 @@ namespace llvm { class GlobalValue; class StringRef; +class TargetOptions; class ARMSubtarget : public ARMGenSubtargetInfo { protected: @@ -147,6 +148,9 @@ protected: /// precision. bool FPOnlySP; + /// HasTrustZone - if true, processor supports TrustZone security extensions + bool HasTrustZone; + /// AllowsUnalignedMem - If true, the subtarget allows unaligned memory /// accesses for some types. 
For details, see /// ARMTargetLowering::allowsUnalignedMemoryAccesses(). @@ -159,6 +163,9 @@ protected: /// NaCl TRAP instruction is generated instead of the regular TRAP. bool UseNaClTrap; + /// Target machine allowed unsafe FP math (such as use of NEON fp) + bool UnsafeFPMath; + /// stackAlignment - The minimum alignment known to hold of the stack frame on /// entry to the function and which must be maintained by every function. unsigned stackAlignment; @@ -175,6 +182,9 @@ protected: /// Selected instruction itineraries (one entry per itinerary class.) InstrItineraryData InstrItins; + /// Options passed via command line that could influence the target + const TargetOptions &Options; + public: enum { isELF, isDarwin @@ -189,7 +199,7 @@ protected: /// of the specified triple. /// ARMSubtarget(const std::string &TT, const std::string &CPU, - const std::string &FS); + const std::string &FS, const TargetOptions &Options); /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size /// that still makes it profitable to inline the call. @@ -244,6 +254,7 @@ public: bool hasVMLxForwarding() const { return HasVMLxForwarding; } bool isFPBrccSlow() const { return SlowFPBrcc; } bool isFPOnlySP() const { return FPOnlySP; } + bool hasTrustZone() const { return HasTrustZone; } bool prefers32BitThumb() const { return Pref32BitThumb; } bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; } bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; } diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp index 3003760..42c7d2c 100644 --- a/lib/Target/ARM/ARMTargetMachine.cpp +++ b/lib/Target/ARM/ARMTargetMachine.cpp @@ -48,7 +48,7 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), - Subtarget(TT, CPU, FS), + Subtarget(TT, CPU, FS, Options), JITInfo(), InstrItins(Subtarget.getInstrItineraryData()) { // Default to soft float ABI @@ -185,8 +185,7 @@ bool ARMPassConfig::addPreSched2() { addPass(createARMLoadStoreOptimizationPass()); printAndVerify("After ARM load / store optimizer"); } - if ((DisableA15SDOptimization || !getARMSubtarget().isCortexA15()) && - getARMSubtarget().hasNEON()) + if (getARMSubtarget().hasNEON()) addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass)); } diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp index 140a8db..53ece66 100644 --- a/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -125,6 +125,10 @@ public: unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const; unsigned getAddressComputationCost(Type *Val) const; + + unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, + OperandValueKind Op1Info = OK_AnyValue, + OperandValueKind Op2Info = OK_AnyValue) const; /// @} }; @@ -211,13 +215,21 @@ unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst, { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 }, { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, - // Operations that we legalize using load/stores to the stack. 
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 16*2 + 4*4 }, - { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 16*2 + 4*3 }, - { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 8*2 + 2*4 }, - { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 8*2 + 2*3 }, - { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4*1 + 16*2 + 2*1 }, - { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2*1 + 8*2 + 1 }, + // The number of vmovl instructions for the extension. + { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, + { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, + { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, + { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, + { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, + { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, + { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, + { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, + { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, + { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, + + // Operations that we legalize using splitting. + { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, + { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, // Vector float <-> i32 conversions. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, @@ -448,3 +460,67 @@ unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index, return LT.first * NEONShuffleTbl[Idx].Cost; } + +unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Op1Info, + OperandValueKind Op2Info) const { + + int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode); + std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty); + + const unsigned FunctionCallDivCost = 20; + const unsigned ReciprocalDivCost = 10; + static const CostTblEntry<MVT> CostTbl[] = { + // Division. + // These costs are somewhat random. Choose a cost of 20 to indicate that + // vectorizing devision (added function call) is going to be very expensive. + // Double registers types. + { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost}, + { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost}, + { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost}, + { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost}, + { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost}, + { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost}, + { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost}, + { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost}, + { ISD::SDIV, MVT::v4i16, ReciprocalDivCost}, + { ISD::UDIV, MVT::v4i16, ReciprocalDivCost}, + { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost}, + { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost}, + { ISD::SDIV, MVT::v8i8, ReciprocalDivCost}, + { ISD::UDIV, MVT::v8i8, ReciprocalDivCost}, + { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost}, + { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost}, + // Quad register types. 
+ { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost}, + { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost}, + { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost}, + { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost}, + { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost}, + { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost}, + { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost}, + { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost}, + { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost}, + { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost}, + { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost}, + { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost}, + { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost}, + { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost}, + { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost}, + { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost}, + // Multiplication. + }; + + int Idx = -1; + + if (ST->hasNEON()) + Idx = CostTableLookup<MVT>(CostTbl, array_lengthof(CostTbl), ISDOpcode, + LT.second); + + if (Idx != -1) + return LT.first * CostTbl[Idx].Cost; + + + return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info, + Op2Info); +} + diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index c897efd..114cc9e 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -610,6 +610,13 @@ public: int64_t Value = CE->getValue(); return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; } + bool isImm0_4() const { + if (!isImm()) return false; + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + if (!CE) return false; + int64_t Value = CE->getValue(); + return Value >= 0 && Value < 5; + } bool isImm0_1020s4() const { if (!isImm()) return false; const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); @@ -4593,20 +4600,26 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, Error(Parser.getTok().getLoc(), "unexpected token in operand"); return true; case AsmToken::Identifier: { - if (!tryParseRegisterWithWriteBack(Operands)) - return false; - int Res = tryParseShiftRegister(Operands); - if (Res == 0) // success - return false; - else if (Res == -1) // irrecoverable error - return true; - // If this is VMRS, check for the apsr_nzcv operand. - if (Mnemonic == "vmrs" && - Parser.getTok().getString().equals_lower("apsr_nzcv")) { - S = Parser.getTok().getLoc(); - Parser.Lex(); - Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S)); - return false; + // If we've seen a branch mnemonic, the next operand must be a label. This + // is true even if the label is a register name. So "br r1" means branch to + // label "r1". + bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl"; + if (!ExpectLabel) { + if (!tryParseRegisterWithWriteBack(Operands)) + return false; + int Res = tryParseShiftRegister(Operands); + if (Res == 0) // success + return false; + else if (Res == -1) // irrecoverable error + return true; + // If this is VMRS, check for the apsr_nzcv operand. 
+ if (Mnemonic == "vmrs" && + Parser.getTok().getString().equals_lower("apsr_nzcv")) { + S = Parser.getTok().getLoc(); + Parser.Lex(); + Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S)); + return false; + } } // Fall though for the Identifier case that is not a register or a @@ -4739,6 +4752,7 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || + Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || @@ -5008,8 +5022,8 @@ static bool isDataTypeToken(StringRef Tok) { static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) { return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm"); } - -static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features); +static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features, + unsigned VariantID); /// Parse an arm instruction mnemonic followed by its operands. bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, @@ -5020,7 +5034,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // MatchInstructionImpl(), but that's too late for aliases that include // any sort of suffix. unsigned AvailableFeatures = getAvailableFeatures(); - applyMnemonicAliases(Name, AvailableFeatures); + unsigned AssemblerDialect = getParser().getAssemblerDialect(); + applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect); // First check for the ARM-specific .req directive. 
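For the new [0,4] immediate operand introduced in this file (isImm0_4 above, with DecodeImm0_4 and the Match_ImmRange0_4 diagnostic further down): the assembler predicate and the disassembler check are two sides of the same range constraint used by the t2HINT forms. A simplified standalone sketch of both checks, with placeholder types instead of the real MC classes:

#include <cstdint>

struct ParsedOperand { bool IsConstantImm; int64_t Imm; };

// Assembler-side check, mirroring isImm0_4(): only a constant immediate in
// the inclusive range [0,4] is accepted.
bool isImm0to4(const ParsedOperand &Op) {
  return Op.IsConstantImm && Op.Imm >= 0 && Op.Imm <= 4;
}

// Disassembler-side check, mirroring DecodeImm0_4(): the encoded 3-bit field
// can hold 0-7, so values above 4 must be rejected as invalid encodings.
bool decodeImm0to4(uint32_t EncodedField, int64_t &DecodedImm) {
  uint32_t Imm = EncodedField & 0x7;
  if (Imm > 4)
    return false;
  DecodedImm = Imm;
  return true;
}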
if (Parser.getTok().is(AsmToken::Identifier) && @@ -7607,6 +7622,11 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return Error(IDLoc, "instruction variant requires ARMv6 or later"); case Match_RequiresThumb2: return Error(IDLoc, "instruction variant requires Thumb2"); + case Match_ImmRange0_4: { + SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); + if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; + return Error(ErrorLoc, "immediate operand must be in the range [0,4]"); + } case Match_ImmRange0_15: { SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp index 31a3b0b..ac937f3 100644 --- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -308,6 +308,8 @@ static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder); static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeImm0_4(MCInst &Inst, unsigned Insn, uint64_t Address, + const void *Decoder); static DecodeStatus DecodeThumbAddSpecialReg(MCInst &Inst, uint16_t Insn, @@ -1951,10 +1953,12 @@ static DecodeStatus DecodeT2CPSInstruction(MCInst &Inst, unsigned Insn, Inst.addOperand(MCOperand::CreateImm(mode)); if (iflags) S = MCDisassembler::SoftFail; } else { - // imod == '00' && M == '0' --> UNPREDICTABLE - Inst.setOpcode(ARM::t2CPS1p); - Inst.addOperand(MCOperand::CreateImm(mode)); - S = MCDisassembler::SoftFail; + // imod == '00' && M == '0' --> this is a HINT instruction + int imm = fieldFromInstruction(Insn, 0, 8); + // HINT are defined only for immediate in [0..4] + if(imm > 4) return MCDisassembler::Fail; + Inst.setOpcode(ARM::t2HINT); + Inst.addOperand(MCOperand::CreateImm(imm)); } return S; @@ -1996,9 +2000,10 @@ static DecodeStatus DecodeArmMOVTWInstruction(MCInst &Inst, unsigned Insn, imm |= (fieldFromInstruction(Insn, 16, 4) << 12); if (Inst.getOpcode() == ARM::MOVTi16) - if (!Check(S, DecoderGPRRegisterClass(Inst, Rd, Address, Decoder))) + if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rd, Address, Decoder))) return MCDisassembler::Fail; - if (!Check(S, DecoderGPRRegisterClass(Inst, Rd, Address, Decoder))) + + if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rd, Address, Decoder))) return MCDisassembler::Fail; if (!tryAddingSymbolicOperand(Address, imm, false, 4, Inst, Decoder)) @@ -3049,9 +3054,9 @@ static DecodeStatus DecodeT2BROperand(MCInst &Inst, unsigned Val, static DecodeStatus DecodeThumbCmpBROperand(MCInst &Inst, unsigned Val, uint64_t Address, const void *Decoder) { - if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<7>(Val<<1) + 4, + if (!tryAddingSymbolicOperand(Address, Address + (Val<<1) + 4, true, 2, Inst, Decoder)) - Inst.addOperand(MCOperand::CreateImm(SignExtend32<7>(Val << 1))); + Inst.addOperand(MCOperand::CreateImm(Val << 1)); return MCDisassembler::Success; } @@ -3278,7 +3283,7 @@ static DecodeStatus DecodeT2LdStPre(MCInst &Inst, unsigned Insn, return MCDisassembler::Fail; } - if (!Check(S, DecoderGPRRegisterClass(Inst, Rt, Address, Decoder))) + if (!Check(S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder))) return MCDisassembler::Fail; if (load) { @@ -3570,7 +3575,7 @@ static DecodeStatus DecodeDoubleRegStore(MCInst &Inst, unsigned Insn, unsigned Rn = fieldFromInstruction(Insn, 16, 4); unsigned pred = fieldFromInstruction(Insn, 28, 4); - if 
(!Check(S, DecoderGPRRegisterClass(Inst, Rd, Address, Decoder))) + if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rd, Address, Decoder))) return MCDisassembler::Fail; if ((Rt & 1) || Rt == 0xE || Rn == 0xF) return MCDisassembler::Fail; @@ -4496,6 +4501,15 @@ static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn, return S; } +static DecodeStatus DecodeImm0_4(MCInst &Inst, unsigned Insn, uint64_t Address, + const void *Decoder) +{ + unsigned Imm = fieldFromInstruction(Insn, 0, 3); + if (Imm > 4) return MCDisassembler::Fail; + Inst.addOperand(MCOperand::CreateImm(Imm)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val, uint64_t Address, const void *Decoder) { DecodeStatus S = MCDisassembler::Success; diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp index 2afb20d..3bcd083 100644 --- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp +++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp @@ -490,7 +490,8 @@ void ARMInstPrinter::printAM3PostIndexOp(const MCInst *MI, unsigned Op, } void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op, - raw_ostream &O) { + raw_ostream &O, + bool AlwaysPrintImm0) { const MCOperand &MO1 = MI->getOperand(Op); const MCOperand &MO2 = MI->getOperand(Op+1); const MCOperand &MO3 = MI->getOperand(Op+2); @@ -509,7 +510,7 @@ void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op, unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()); ARM_AM::AddrOpc op = ARM_AM::getAM3Op(MO3.getImm()); - if (ImmOffs || (op == ARM_AM::sub)) { + if (AlwaysPrintImm0 || ImmOffs || (op == ARM_AM::sub)) { O << ", " << markup("<imm:") << "#" @@ -520,6 +521,7 @@ void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op, O << ']' << markup(">"); } +template <bool AlwaysPrintImm0> void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned Op, raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(Op); @@ -535,7 +537,7 @@ void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned Op, printAM3PostIndexOp(MI, Op, O); return; } - printAM3PreOrOffsetIndexOp(MI, Op, O); + printAM3PreOrOffsetIndexOp(MI, Op, O, AlwaysPrintImm0); } void ARMInstPrinter::printAddrMode3OffsetOperand(const MCInst *MI, @@ -593,6 +595,7 @@ void ARMInstPrinter::printLdStmModeOperand(const MCInst *MI, unsigned OpNum, O << ARM_AM::getAMSubModeStr(Mode); } +template <bool AlwaysPrintImm0> void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(OpNum); @@ -608,7 +611,7 @@ void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum, unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm()); unsigned Op = ARM_AM::getAM5Op(MO2.getImm()); - if (ImmOffs || Op == ARM_AM::sub) { + if (AlwaysPrintImm0 || ImmOffs || Op == ARM_AM::sub) { O << ", " << markup("<imm:") << "#" @@ -1022,6 +1025,7 @@ void ARMInstPrinter::printT2SOOperand(const MCInst *MI, unsigned OpNum, ARM_AM::getSORegOffset(MO2.getImm()), UseMarkup); } +template <bool AlwaysPrintImm0> void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(OpNum); @@ -1042,13 +1046,13 @@ void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum, OffImm = 0; if (isSub) { O << ", " - << markup("<imm:") + << markup("<imm:") << "#-" << -OffImm << markup(">"); } - else if (OffImm > 0) { + else if (AlwaysPrintImm0 || OffImm 
> 0) { O << ", " - << markup("<imm:") + << markup("<imm:") << "#" << OffImm << markup(">"); } diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h index edff75d..344104e 100644 --- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h +++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h @@ -47,12 +47,13 @@ public: raw_ostream &O); void printAddrMode2OffsetOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); - + template <bool AlwaysPrintImm0> void printAddrMode3Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printAddrMode3OffsetOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printAM3PostIndexOp(const MCInst *MI, unsigned Op, raw_ostream &O); - void printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op,raw_ostream &O); + void printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op, raw_ostream &O, + bool AlwaysPrintImm0); void printPostIdxImm8Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printPostIdxRegOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); @@ -60,6 +61,7 @@ public: raw_ostream &O); void printLdStmModeOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + template <bool AlwaysPrintImm0> void printAddrMode5Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printAddrMode6Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printAddrMode7Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); @@ -91,6 +93,7 @@ public: raw_ostream &O); void printT2SOOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + template<bool AlwaysPrintImm0> void printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printT2AddrModeImm8Operand(const MCInst *MI, unsigned OpNum, diff --git a/lib/Target/ARM/LICENSE.TXT b/lib/Target/ARM/LICENSE.TXT index 68afea1..68afea1 100755..100644 --- a/lib/Target/ARM/LICENSE.TXT +++ b/lib/Target/ARM/LICENSE.TXT diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp index 418971d..6c3d247 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp @@ -13,7 +13,9 @@ // //===----------------------------------------------------------------------===// +#include "ARMRegisterInfo.h" #include "ARMUnwindOp.h" +#include "ARMUnwindOpAsm.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Twine.h" #include "llvm/MC/MCAsmBackend.h" @@ -26,6 +28,7 @@ #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCObjectStreamer.h" +#include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCStreamer.h" @@ -33,11 +36,15 @@ #include "llvm/MC/MCValue.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ELF.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; +static std::string GetAEABIUnwindPersonalityName(unsigned Index) { + assert(Index < NUM_PERSONALITY_INDEX && "Invalid personality index"); + return (Twine("__aeabi_unwind_cpp_pr") + Twine(Index)).str(); +} + namespace { /// Extend the generic ELFStreamer class so that it can emit mapping symbols at @@ -57,8 +64,9 @@ public: ARMELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS, MCCodeEmitter *Emitter, bool IsThumb) : MCELFStreamer(SK_ARMELFStreamer, Context, TAB, OS, Emitter), - IsThumb(IsThumb), MappingSymbolCounter(0), LastEMS(EMS_None), ExTab(0), - FnStart(0), Personality(0), CantUnwind(false) {} + 
IsThumb(IsThumb), MappingSymbolCounter(0), LastEMS(EMS_None) { + Reset(); + } ~ARMELFStreamer() {} @@ -75,14 +83,15 @@ public: virtual void EmitRegSave(const SmallVectorImpl<unsigned> &RegList, bool isVector); - virtual void ChangeSection(const MCSection *Section) { + virtual void ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { // We have to keep track of the mapping symbol state of any sections we // use. Each one should start off as EMS_None, which is provided as the // default constructor by DenseMap::lookup. - LastMappingSymbols[getPreviousSection()] = LastEMS; + LastMappingSymbols[getPreviousSection().first] = LastEMS; LastEMS = LastMappingSymbols.lookup(Section); - MCELFStreamer::ChangeSection(Section); + MCELFStreamer::ChangeSection(Section, Subsection); } /// This function is the one used to emit instruction data into the ELF @@ -175,7 +184,7 @@ private: MCELF::SetType(SD, ELF::STT_NOTYPE); MCELF::SetBinding(SD, ELF::STB_LOCAL); SD.setExternal(false); - Symbol->setSection(*getCurrentSection()); + Symbol->setSection(*getCurrentSection().first); const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext()); Symbol->setVariableValue(Value); @@ -194,6 +203,7 @@ private: void Reset(); void EmitPersonalityFixup(StringRef Name); + void CollectUnwindOpcodes(); void SwitchToEHSection(const char *Prefix, unsigned Type, unsigned Flags, SectionKind Kind, const MCSymbol &Fn); @@ -210,9 +220,16 @@ private: MCSymbol *ExTab; MCSymbol *FnStart; const MCSymbol *Personality; + uint32_t VFPRegSave; // Register mask for {d31-d0} + uint32_t RegSave; // Register mask for {r15-r0} + int64_t SPOffset; + uint16_t FPReg; + int64_t FPOffset; + bool UsedFP; bool CantUnwind; + UnwindOpcodeAssembler UnwindOpAsm; }; -} +} // end anonymous namespace inline void ARMELFStreamer::SwitchToEHSection(const char *Prefix, unsigned Type, @@ -238,7 +255,7 @@ inline void ARMELFStreamer::SwitchToEHSection(const char *Prefix, } else { EHSection = getContext().getELFSection(EHSecName, Type, Flags, Kind); } - assert(EHSection); + assert(EHSection && "Failed to get the required EH section"); // Switch to .ARM.extab or .ARM.exidx section SwitchSection(EHSection); @@ -262,10 +279,20 @@ inline void ARMELFStreamer::SwitchToExIdxSection(const MCSymbol &FnStart) { } void ARMELFStreamer::Reset() { + const MCRegisterInfo &MRI = getContext().getRegisterInfo(); + ExTab = NULL; FnStart = NULL; Personality = NULL; + VFPRegSave = 0; + RegSave = 0; + FPReg = MRI.getEncodingValue(ARM::SP); + FPOffset = 0; + SPOffset = 0; + UsedFP = false; CantUnwind = false; + + UnwindOpAsm.Reset(); } // Add the R_ARM_NONE fixup at the same position @@ -284,6 +311,18 @@ void ARMELFStreamer::EmitPersonalityFixup(StringRef Name) { MCFixup::getKindForSize(4, false))); } +void ARMELFStreamer::CollectUnwindOpcodes() { + if (UsedFP) { + UnwindOpAsm.EmitSetFP(FPReg); + UnwindOpAsm.EmitSPOffset(-FPOffset); + } else { + UnwindOpAsm.EmitSPOffset(SPOffset); + } + UnwindOpAsm.EmitVFPRegSave(VFPRegSave); + UnwindOpAsm.EmitRegSave(RegSave); + UnwindOpAsm.Finalize(); +} + void ARMELFStreamer::EmitFnStart() { assert(FnStart == 0); FnStart = getContext().CreateTempSymbol(); @@ -294,35 +333,29 @@ void ARMELFStreamer::EmitFnEnd() { assert(FnStart && ".fnstart must preceeds .fnend"); // Emit unwind opcodes if there is no .handlerdata directive - int PersonalityIndex = -1; if (!ExTab && !CantUnwind) { - // For __aeabi_unwind_cpp_pr1, we have to emit opcodes in .ARM.extab. 
- SwitchToExTabSection(*FnStart); - - // Create .ARM.extab label for offset in .ARM.exidx - ExTab = getContext().CreateTempSymbol(); - EmitLabel(ExTab); - - PersonalityIndex = 1; - - uint32_t Entry = 0; - uint32_t NumExtraEntryWords = 0; - Entry |= NumExtraEntryWords << 24; - Entry |= (EHT_COMPACT | PersonalityIndex) << 16; - - // TODO: This should be generated according to .save, .vsave, .setfp - // directives. Currently, we are simply generating FINISH opcode. - Entry |= UNWIND_OPCODE_FINISH << 8; - Entry |= UNWIND_OPCODE_FINISH; - - EmitIntValue(Entry, 4, 0); + CollectUnwindOpcodes(); + + unsigned PersonalityIndex = UnwindOpAsm.getPersonalityIndex(); + if (PersonalityIndex == AEABI_UNWIND_CPP_PR1 || + PersonalityIndex == AEABI_UNWIND_CPP_PR2) { + // For the __aeabi_unwind_cpp_pr1 and __aeabi_unwind_cpp_pr2, we have to + // emit the unwind opcodes in the corresponding ".ARM.extab" section, and + // then emit a reference to these unwind opcodes in the second word of + // the exception index table entry. + SwitchToExTabSection(*FnStart); + ExTab = getContext().CreateTempSymbol(); + EmitLabel(ExTab); + EmitBytes(UnwindOpAsm.data(), 0); + } } // Emit the exception index table entry SwitchToExIdxSection(*FnStart); - if (PersonalityIndex == 1) - EmitPersonalityFixup("__aeabi_unwind_cpp_pr1"); + unsigned PersonalityIndex = UnwindOpAsm.getPersonalityIndex(); + if (PersonalityIndex < NUM_PERSONALITY_INDEX) + EmitPersonalityFixup(GetAEABIUnwindPersonalityName(PersonalityIndex)); const MCSymbolRefExpr *FnStartRef = MCSymbolRefExpr::Create(FnStart, @@ -333,12 +366,22 @@ void ARMELFStreamer::EmitFnEnd() { if (CantUnwind) { EmitIntValue(EXIDX_CANTUNWIND, 4, 0); - } else { + } else if (ExTab) { + // Emit a reference to the unwind opcodes in the ".ARM.extab" section. const MCSymbolRefExpr *ExTabEntryRef = MCSymbolRefExpr::Create(ExTab, MCSymbolRefExpr::VK_ARM_PREL31, getContext()); EmitValue(ExTabEntryRef, 4, 0); + } else { + // For the __aeabi_unwind_cpp_pr0, we have to emit the unwind opcodes in + // the second word of exception index table entry. The size of the unwind + // opcodes should always be 4 bytes. + assert(PersonalityIndex == AEABI_UNWIND_CPP_PR0 && + "Compact model must use __aeabi_cpp_unwind_pr0 as personality"); + assert(UnwindOpAsm.size() == 4u && + "Unwind opcode size for __aeabi_cpp_unwind_pr0 must be equal to 4"); + EmitBytes(UnwindOpAsm.data(), 0); } // Clean exception handling frame information @@ -368,36 +411,54 @@ void ARMELFStreamer::EmitHandlerData() { EmitValue(PersonalityRef, 4, 0); // Emit unwind opcodes - uint32_t Entry = 0; - uint32_t NumExtraEntryWords = 0; - - // TODO: This should be generated according to .save, .vsave, .setfp - // directives. Currently, we are simply generating FINISH opcode. 
- Entry |= NumExtraEntryWords << 24; - Entry |= UNWIND_OPCODE_FINISH << 16; - Entry |= UNWIND_OPCODE_FINISH << 8; - Entry |= UNWIND_OPCODE_FINISH; - - EmitIntValue(Entry, 4, 0); + CollectUnwindOpcodes(); + EmitBytes(UnwindOpAsm.data(), 0); } void ARMELFStreamer::EmitPersonality(const MCSymbol *Per) { Personality = Per; + UnwindOpAsm.setPersonality(Per); } -void ARMELFStreamer::EmitSetFP(unsigned NewFpReg, - unsigned NewSpReg, +void ARMELFStreamer::EmitSetFP(unsigned NewFPReg, + unsigned NewSPReg, int64_t Offset) { - // TODO: Not implemented + assert(SPOffset == 0 && + "Current implementation assumes .setfp precedes .pad"); + + const MCRegisterInfo &MRI = getContext().getRegisterInfo(); + + uint16_t NewFPRegEncVal = MRI.getEncodingValue(NewFPReg); +#ifndef NDEBUG + uint16_t NewSPRegEncVal = MRI.getEncodingValue(NewSPReg); +#endif + + assert((NewSPReg == ARM::SP || NewSPRegEncVal == FPReg) && + "the operand of .setfp directive should be either $sp or $fp"); + + UsedFP = true; + FPReg = NewFPRegEncVal; + FPOffset = Offset; } void ARMELFStreamer::EmitPad(int64_t Offset) { - // TODO: Not implemented + SPOffset += Offset; } void ARMELFStreamer::EmitRegSave(const SmallVectorImpl<unsigned> &RegList, bool IsVector) { - // TODO: Not implemented + const MCRegisterInfo &MRI = getContext().getRegisterInfo(); + +#ifndef NDEBUG + unsigned Max = IsVector ? 32 : 16; +#endif + uint32_t &RegMask = IsVector ? VFPRegSave : RegSave; + + for (size_t i = 0; i < RegList.size(); ++i) { + unsigned Reg = MRI.getEncodingValue(RegList[i]); + assert(Reg < Max && "Register encoded value out of range"); + RegMask |= 1u << Reg; + } } namespace llvm { diff --git a/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h b/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h index dad5576..fa4add6 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h +++ b/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h @@ -107,6 +107,19 @@ namespace llvm { UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D8 = 0xd0 }; + /// ARM-defined Personality Routine Index + enum ARMPersonalityRoutineIndex { + // To make the exception handling table become more compact, ARM defined + // several personality routines in EHABI. There are 3 different + // personality routines in ARM EHABI currently. It is possible to have 16 + // pre-defined personality routines at most. + AEABI_UNWIND_CPP_PR0 = 0, + AEABI_UNWIND_CPP_PR1 = 1, + AEABI_UNWIND_CPP_PR2 = 2, + + NUM_PERSONALITY_INDEX + }; + } #endif // ARM_UNWIND_OP_H diff --git a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp new file mode 100644 index 0000000..191db69 --- /dev/null +++ b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp @@ -0,0 +1,198 @@ +//===-- ARMUnwindOpAsm.cpp - ARM Unwind Opcodes Assembler -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the unwind opcode assmebler for ARM exception handling +// table. 
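As background for the ARMPersonalityRoutineIndex values added just above: in the EHABI compact model the personality index is folded into the high byte of the first table word together with the compact-entry bit, and the remaining three bytes carry either inline unwind opcodes (pr0) or a word count plus the first two opcodes (pr1/pr2). The helper below only illustrates that bit layout as I read the EHABI compact model; it is not part of the new UnwindOpcodeAssembler API.

#include <cstdint>

// First word of a compact-model entry:
//   bit 31      : 1  (compact entry, EHT_COMPACT)
//   bits 27..24 : personality routine index (0, 1 or 2)
//   bits 23..0  : for pr0, three inline unwind opcode bytes;
//                 for pr1/pr2, an extra-word count followed by two opcodes.
uint32_t makeCompactEntryWord(unsigned PersonalityIndex,
                              uint8_t Byte2, uint8_t Byte1, uint8_t Byte0) {
  return (0x80u | PersonalityIndex) << 24 |
         uint32_t(Byte2) << 16 | uint32_t(Byte1) << 8 | uint32_t(Byte0);
}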
+// +//===----------------------------------------------------------------------===// + +#include "ARMUnwindOpAsm.h" + +#include "ARMUnwindOp.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LEB128.h" + +using namespace llvm; + +void UnwindOpcodeAssembler::EmitRegSave(uint32_t RegSave) { + if (RegSave == 0u) + return; + + // One byte opcode to save register r14 and r11-r4 + if (RegSave & (1u << 4)) { + // The one byte opcode will always save r4, thus we can't use the one byte + // opcode when r4 is not in .save directive. + + // Compute the consecutive registers from r4 to r11. + uint32_t Range = 0; + uint32_t Mask = (1u << 4); + for (uint32_t Bit = (1u << 5); Bit < (1u << 12); Bit <<= 1) { + if ((RegSave & Bit) == 0u) + break; + ++Range; + Mask |= Bit; + } + + // Emit this opcode when the mask covers every registers. + uint32_t UnmaskedReg = RegSave & 0xfff0u & (~Mask); + if (UnmaskedReg == 0u) { + // Pop r[4 : (4 + n)] + Ops.push_back(UNWIND_OPCODE_POP_REG_RANGE_R4 | Range); + RegSave &= 0x000fu; + } else if (UnmaskedReg == (1u << 14)) { + // Pop r[14] + r[4 : (4 + n)] + Ops.push_back(UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | Range); + RegSave &= 0x000fu; + } + } + + // Two bytes opcode to save register r15-r4 + if ((RegSave & 0xfff0u) != 0) { + uint32_t Op = UNWIND_OPCODE_POP_REG_MASK_R4 | (RegSave >> 4); + Ops.push_back(static_cast<uint8_t>(Op >> 8)); + Ops.push_back(static_cast<uint8_t>(Op & 0xff)); + } + + // Opcode to save register r3-r0 + if ((RegSave & 0x000fu) != 0) { + uint32_t Op = UNWIND_OPCODE_POP_REG_MASK | (RegSave & 0x000fu); + Ops.push_back(static_cast<uint8_t>(Op >> 8)); + Ops.push_back(static_cast<uint8_t>(Op & 0xff)); + } +} + +/// Emit unwind opcodes for .vsave directives +void UnwindOpcodeAssembler::EmitVFPRegSave(uint32_t VFPRegSave) { + size_t i = 32; + + while (i > 16) { + uint32_t Bit = 1u << (i - 1); + if ((VFPRegSave & Bit) == 0u) { + --i; + continue; + } + + uint32_t Range = 0; + + --i; + Bit >>= 1; + + while (i > 16 && (VFPRegSave & Bit)) { + --i; + ++Range; + Bit >>= 1; + } + + uint32_t Op = + UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 | ((i - 16) << 4) | Range; + Ops.push_back(static_cast<uint8_t>(Op >> 8)); + Ops.push_back(static_cast<uint8_t>(Op & 0xff)); + } + + while (i > 0) { + uint32_t Bit = 1u << (i - 1); + if ((VFPRegSave & Bit) == 0u) { + --i; + continue; + } + + uint32_t Range = 0; + + --i; + Bit >>= 1; + + while (i > 0 && (VFPRegSave & Bit)) { + --i; + ++Range; + Bit >>= 1; + } + + uint32_t Op = UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD | (i << 4) | Range; + Ops.push_back(static_cast<uint8_t>(Op >> 8)); + Ops.push_back(static_cast<uint8_t>(Op & 0xff)); + } +} + +/// Emit unwind opcodes for .setfp directives +void UnwindOpcodeAssembler::EmitSetFP(uint16_t FPReg) { + Ops.push_back(UNWIND_OPCODE_SET_VSP | FPReg); +} + +/// Emit unwind opcodes to update stack pointer +void UnwindOpcodeAssembler::EmitSPOffset(int64_t Offset) { + if (Offset > 0x200) { + uint8_t Buff[10]; + size_t Size = encodeULEB128((Offset - 0x204) >> 2, Buff); + Ops.push_back(UNWIND_OPCODE_INC_VSP_ULEB128); + Ops.append(Buff, Buff + Size); + } else if (Offset > 0) { + if (Offset > 0x100) { + Ops.push_back(UNWIND_OPCODE_INC_VSP | 0x3fu); + Offset -= 0x100; + } + Ops.push_back(UNWIND_OPCODE_INC_VSP | + static_cast<uint8_t>((Offset - 4) >> 2)); + } else if (Offset < 0) { + while (Offset < -0x100) { + Ops.push_back(UNWIND_OPCODE_DEC_VSP | 0x3fu); + Offset += 0x100; + } + Ops.push_back(UNWIND_OPCODE_DEC_VSP | + static_cast<uint8_t>(((-Offset) - 4) >> 2)); + } +} + +void 
UnwindOpcodeAssembler::AddOpcodeSizePrefix(size_t Pos) { + size_t SizeInWords = (size() + 3) / 4; + assert(SizeInWords <= 0x100u && + "Only 256 additional words are allowed for unwind opcodes"); + Ops[Pos] = static_cast<uint8_t>(SizeInWords - 1); +} + +void UnwindOpcodeAssembler::AddPersonalityIndexPrefix(size_t Pos, unsigned PI) { + assert(PI < NUM_PERSONALITY_INDEX && "Invalid personality prefix"); + Ops[Pos] = EHT_COMPACT | PI; +} + +void UnwindOpcodeAssembler::EmitFinishOpcodes() { + for (size_t i = (0x4u - (size() & 0x3u)) & 0x3u; i > 0; --i) + Ops.push_back(UNWIND_OPCODE_FINISH); +} + +void UnwindOpcodeAssembler::Finalize() { + if (HasPersonality) { + // Personality specified by .personality directive + Offset = 1; + AddOpcodeSizePrefix(1); + } else { + if (getOpcodeSize() <= 3) { + // __aeabi_unwind_cpp_pr0: [ 0x80 , OP1 , OP2 , OP3 ] + Offset = 1; + PersonalityIndex = AEABI_UNWIND_CPP_PR0; + AddPersonalityIndexPrefix(Offset, PersonalityIndex); + } else { + // __aeabi_unwind_cpp_pr1: [ 0x81 , SIZE , OP1 , OP2 , ... ] + Offset = 0; + PersonalityIndex = AEABI_UNWIND_CPP_PR1; + AddPersonalityIndexPrefix(Offset, PersonalityIndex); + AddOpcodeSizePrefix(1); + } + } + + // Emit the padding finish opcodes if the size() is not multiple of 4. + EmitFinishOpcodes(); + + // Swap the byte order + uint8_t *Ptr = Ops.begin() + Offset; + assert(size() % 4 == 0 && "Final unwind opcodes should align to 4"); + for (size_t i = 0, n = size(); i < n; i += 4) { + std::swap(Ptr[i], Ptr[i + 3]); + std::swap(Ptr[i + 1], Ptr[i + 2]); + } +} diff --git a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h new file mode 100644 index 0000000..f6ecaeb --- /dev/null +++ b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h @@ -0,0 +1,114 @@ +//===-- ARMUnwindOpAsm.h - ARM Unwind Opcodes Assembler ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the unwind opcode assmebler for ARM exception handling +// table. +// +//===----------------------------------------------------------------------===// + +#ifndef ARM_UNWIND_OP_ASM_H +#define ARM_UNWIND_OP_ASM_H + +#include "ARMUnwindOp.h" + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +class MCSymbol; + +class UnwindOpcodeAssembler { +private: + llvm::SmallVector<uint8_t, 8> Ops; + + unsigned Offset; + unsigned PersonalityIndex; + bool HasPersonality; + + enum { + // The number of bytes to be preserved for the size and personality index + // prefix of unwind opcodes. + NUM_PRESERVED_PREFIX_BUF = 2 + }; + +public: + UnwindOpcodeAssembler() + : Ops(NUM_PRESERVED_PREFIX_BUF), Offset(NUM_PRESERVED_PREFIX_BUF), + PersonalityIndex(NUM_PERSONALITY_INDEX), HasPersonality(0) { + } + + /// Reset the unwind opcode assembler. 
+ void Reset() { + Ops.resize(NUM_PRESERVED_PREFIX_BUF); + Offset = NUM_PRESERVED_PREFIX_BUF; + PersonalityIndex = NUM_PERSONALITY_INDEX; + HasPersonality = 0; + } + + /// Get the size of the payload (including the size byte) + size_t size() const { + return Ops.size() - Offset; + } + + /// Get the beginning of the payload + const uint8_t *begin() const { + return Ops.begin() + Offset; + } + + /// Get the payload + StringRef data() const { + return StringRef(reinterpret_cast<const char *>(begin()), size()); + } + + /// Set the personality index + void setPersonality(const MCSymbol *Per) { + HasPersonality = 1; + } + + /// Get the personality index + unsigned getPersonalityIndex() const { + return PersonalityIndex; + } + + /// Emit unwind opcodes for .save directives + void EmitRegSave(uint32_t RegSave); + + /// Emit unwind opcodes for .vsave directives + void EmitVFPRegSave(uint32_t VFPRegSave); + + /// Emit unwind opcodes for .setfp directives + void EmitSetFP(uint16_t FPReg); + + /// Emit unwind opcodes to update stack pointer + void EmitSPOffset(int64_t Offset); + + /// Finalize the unwind opcode sequence for EmitBytes() + void Finalize(); + +private: + /// Get the size of the opcodes in bytes. + size_t getOpcodeSize() const { + return Ops.size() - NUM_PRESERVED_PREFIX_BUF; + } + + /// Add the length prefix to the payload + void AddOpcodeSizePrefix(size_t Pos); + + /// Add personality index prefix in some compact format + void AddPersonalityIndexPrefix(size_t Pos, unsigned PersonalityIndex); + + /// Fill the words with finish opcode if it is not aligned + void EmitFinishOpcodes(); +}; + +} // namespace llvm + +#endif // ARM_UNWIND_OP_ASM_H diff --git a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt index e17eb4d..a7ac5ca 100644 --- a/lib/Target/ARM/MCTargetDesc/CMakeLists.txt +++ b/lib/Target/ARM/MCTargetDesc/CMakeLists.txt @@ -8,6 +8,7 @@ add_llvm_library(LLVMARMDesc ARMMCTargetDesc.cpp ARMMachObjectWriter.cpp ARMELFObjectWriter.cpp + ARMUnwindOpAsm.cpp ) add_dependencies(LLVMARMDesc ARMCommonTableGen) diff --git a/lib/Target/ARM/README-Thumb.txt b/lib/Target/ARM/README-Thumb.txt index 463c440..a64707e 100644 --- a/lib/Target/ARM/README-Thumb.txt +++ b/lib/Target/ARM/README-Thumb.txt @@ -173,7 +173,6 @@ GCC is doing a couple of clever things here: mov r1, #1 lsl r1, r1, #8 tst r2, r1 - //===---------------------------------------------------------------------===// @@ -196,7 +195,6 @@ This is especially bad when dynamic alloca is used. The all fixed size stack objects are referenced off the frame pointer with negative offsets. See oggenc for an example. - //===---------------------------------------------------------------------===// Poor codegen test/CodeGen/ARM/select.ll f7: diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp index 2c3388c..1e2a8b0 100644 --- a/lib/Target/ARM/Thumb1FrameLowering.cpp +++ b/lib/Target/ARM/Thumb1FrameLowering.cpp @@ -88,7 +88,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const { const Thumb1InstrInfo &TII = *static_cast<const Thumb1InstrInfo*>(MF.getTarget().getInstrInfo()); - unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize(); + unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(); unsigned NumBytes = MFI->getStackSize(); const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); DebugLoc dl = MBBI != MBB.end() ? 
MBBI->getDebugLoc() : DebugLoc(); @@ -104,8 +104,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const { unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0; int FramePtrSpillFI = 0; - if (VARegSaveSize) - emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -VARegSaveSize, + if (ArgRegsSaveSize) + emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -ArgRegsSaveSize, MachineInstr::FrameSetup); if (!AFI->hasStackFrame()) { @@ -249,7 +249,7 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF, const Thumb1InstrInfo &TII = *static_cast<const Thumb1InstrInfo*>(MF.getTarget().getInstrInfo()); - unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize(); + unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(); int NumBytes = (int)MFI->getStackSize(); const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(); unsigned FramePtr = RegInfo->getFrameRegister(MF); @@ -300,7 +300,7 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF, } } - if (VARegSaveSize) { + if (ArgRegsSaveSize) { // Unlike T2 and ARM mode, the T1 pop instruction cannot restore // to LR, and we can't pop the value directly to the PC since // we need to update the SP after popping the value. Therefore, we @@ -313,7 +313,7 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF, AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP))) .addReg(ARM::R3, RegState::Define); - emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, VARegSaveSize); + emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, ArgRegsSaveSize); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg)) @@ -376,7 +376,7 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB, ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); - bool isVarArg = AFI->getVarArgsRegSaveSize() > 0; + bool isVarArg = AFI->getArgRegsSaveSize() > 0; DebugLoc DL = MI->getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MF, DL, TII.get(ARM::tPOP)); AddDefaultPred(MIB); diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp index 609d502..7452fb7 100644 --- a/lib/Target/ARM/Thumb1RegisterInfo.cpp +++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp @@ -588,7 +588,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // means the stack pointer cannot be used to access the emergency spill slot // when !hasReservedCallFrame(). 
#ifndef NDEBUG - if (RS && FrameReg == ARM::SP && FrameIndex == RS->getScavengingFrameIndex()){ + if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){ assert(MF.getTarget().getFrameLowering()->hasReservedCallFrame(MF) && "Cannot use SP to access the emergency spill slot in " "functions without a reserved call frame"); diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp index 67e8ec7..a1b48c2 100644 --- a/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -19,6 +19,7 @@ #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/MC/MCInst.h" #include "llvm/Support/CommandLine.h" @@ -126,25 +127,41 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { + DebugLoc DL; + if (I != MBB.end()) DL = I->getDebugLoc(); + + MachineFunction &MF = *MBB.getParent(); + MachineFrameInfo &MFI = *MF.getFrameInfo(); + MachineMemOperand *MMO = + MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), + MachineMemOperand::MOStore, + MFI.getObjectSize(FI), + MFI.getObjectAlignment(FI)); + if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass || RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass || RC == &ARM::GPRnopcRegClass) { - DebugLoc DL; - if (I != MBB.end()) DL = I->getDebugLoc(); - - MachineFunction &MF = *MBB.getParent(); - MachineFrameInfo &MFI = *MF.getFrameInfo(); - MachineMemOperand *MMO = - MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), - MachineMemOperand::MOStore, - MFI.getObjectSize(FI), - MFI.getObjectAlignment(FI)); AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2STRi12)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); return; } + if (ARM::GPRPairRegClass.hasSubClassEq(RC)) { + // Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for + // gsub_0, but needs an extra constraint for gsub_1 (which could be sp + // otherwise). 
+ MachineRegisterInfo *MRI = &MF.getRegInfo(); + MRI->constrainRegClass(SrcReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass); + + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8)); + AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI); + AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI); + MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO); + AddDefaultPred(MIB); + return; + } + ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI); } @@ -153,24 +170,42 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { + MachineFunction &MF = *MBB.getParent(); + MachineFrameInfo &MFI = *MF.getFrameInfo(); + MachineMemOperand *MMO = + MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), + MachineMemOperand::MOLoad, + MFI.getObjectSize(FI), + MFI.getObjectAlignment(FI)); + DebugLoc DL; + if (I != MBB.end()) DL = I->getDebugLoc(); + if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass || RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass || RC == &ARM::GPRnopcRegClass) { - DebugLoc DL; - if (I != MBB.end()) DL = I->getDebugLoc(); - - MachineFunction &MF = *MBB.getParent(); - MachineFrameInfo &MFI = *MF.getFrameInfo(); - MachineMemOperand *MMO = - MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), - MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), - MFI.getObjectAlignment(FI)); AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); return; } + if (ARM::GPRPairRegClass.hasSubClassEq(RC)) { + // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for + // gsub_0, but needs an extra constraint for gsub_1 (which could be sp + // otherwise). + MachineRegisterInfo *MRI = &MF.getRegInfo(); + MRI->constrainRegClass(DestReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass); + + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8)); + AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI); + AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI); + MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO); + AddDefaultPred(MIB); + + if (TargetRegisterInfo::isPhysicalRegister(DestReg)) + MIB.addReg(DestReg, RegState::ImplicitDefine); + return; + } + ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI); } @@ -514,6 +549,15 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Offset = -Offset; isSub = true; } + } else if (AddrMode == ARMII::AddrModeT2_i8s4) { + Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4; + NumBits = 8; + // MCInst operand has already scaled value. 
+ Scale = 1; + if (Offset < 0) { + isSub = true; + Offset = -Offset; + } } else { llvm_unreachable("Unsupported addressing mode!"); } diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp index 567bc05..4795aae 100644 --- a/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -15,6 +15,7 @@ #include "MCTargetDesc/ARMAddressingModes.h" #include "Thumb2InstrInfo.h" #include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/Statistic.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstr.h" @@ -79,11 +80,8 @@ namespace { { ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0,0, 1,0,1 }, { ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 }, { ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0,0, 1,0,1 }, - // FIXME: tMOVi8 and tMVN also partially update CPSR but they are less - // likely to cause issue in the loop. As a size / performance workaround, - // they are not marked as such. - { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 0,0,0 }, - { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 0,1,0 }, + { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,0,0 }, + { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,1,0 }, // FIXME: Do we need the 16-bit 'S' variant? { ARM::t2MOVr,ARM::tMOVr, 0, 0, 0, 0, 0, 1,0, 0,0,0 }, { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 1,0,0 }, @@ -149,8 +147,7 @@ namespace { /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable. DenseMap<unsigned, unsigned> ReduceOpcodeMap; - bool canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use, - bool IsSelfLoop); + bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop); bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry, bool is2Addr, ARMCC::CondCodes Pred, @@ -160,33 +157,46 @@ namespace { const ReduceEntry &Entry); bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI, - const ReduceEntry &Entry, bool LiveCPSR, - MachineInstr *CPSRDef, bool IsSelfLoop); + const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop); /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address /// instruction. bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, - const ReduceEntry &Entry, - bool LiveCPSR, MachineInstr *CPSRDef, + const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop); /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit /// non-two-address instruction. bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI, - const ReduceEntry &Entry, - bool LiveCPSR, MachineInstr *CPSRDef, + const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop); /// ReduceMI - Attempt to reduce MI, return true on success. bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI, - bool LiveCPSR, MachineInstr *CPSRDef, - bool IsSelfLoop); + bool LiveCPSR, bool IsSelfLoop); /// ReduceMBB - Reduce width of instructions in the specified basic block. bool ReduceMBB(MachineBasicBlock &MBB); bool OptimizeSize; bool MinimizeSize; + + // Last instruction to define CPSR in the current block. + MachineInstr *CPSRDef; + // Was CPSR last defined by a high latency instruction? + // When CPSRDef is null, this refers to CPSR defs in predecessors. + bool HighLatencyCPSR; + + struct MBBInfo { + // The flags leaving this block have high latency. + bool HighLatencyCPSR; + // Has this block been visited yet? 
+ bool Visited; + + MBBInfo() : HighLatencyCPSR(false), Visited(false) {} + }; + + SmallVector<MBBInfo, 8> BlockInfo; }; char Thumb2SizeReduce::ID = 0; } @@ -207,6 +217,16 @@ static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) { return false; } +// Check for a likely high-latency flag def. +static bool isHighLatencyCPSR(MachineInstr *Def) { + switch(Def->getOpcode()) { + case ARM::FMSTAT: + case ARM::tMUL: + return true; + } + return false; +} + /// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations, /// the 's' 16-bit instruction partially update CPSR. Abort the /// transformation to avoid adding false dependency on last CPSR setting @@ -225,20 +245,19 @@ static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) { /// In this case it would have been ok to narrow the mul.w to muls since there /// are indirect RAW dependency between the muls and the mul.w bool -Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use, - bool FirstInSelfLoop) { +Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) { // Disable the check for -Oz (aka OptimizeForSizeHarder). if (MinimizeSize || !STI->avoidCPSRPartialUpdate()) return false; - if (!Def) + if (!CPSRDef) // If this BB loops back to itself, conservatively avoid narrowing the // first instruction that does partial flag update. - return FirstInSelfLoop; + return HighLatencyCPSR || FirstInSelfLoop; SmallSet<unsigned, 2> Defs; - for (unsigned i = 0, e = Def->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = Def->getOperand(i); + for (unsigned i = 0, e = CPSRDef->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = CPSRDef->getOperand(i); if (!MO.isReg() || MO.isUndef() || MO.isUse()) continue; unsigned Reg = MO.getReg(); @@ -256,6 +275,16 @@ Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use, return false; } + // If the current CPSR has high latency, try to avoid the false dependency. + if (HighLatencyCPSR) + return true; + + // tMOVi8 usually doesn't start long dependency chains, and there are a lot + // of them, so always shrink them when CPSR doesn't have high latency. + if (Use->getOpcode() == ARM::t2MOVi || + Use->getOpcode() == ARM::t2MOVi16) + return false; + // No read-after-write dependency. The narrowing will add false dependency. return true; } @@ -498,16 +527,15 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, bool Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI, const ReduceEntry &Entry, - bool LiveCPSR, MachineInstr *CPSRDef, - bool IsSelfLoop) { + bool LiveCPSR, bool IsSelfLoop) { unsigned Opc = MI->getOpcode(); if (Opc == ARM::t2ADDri) { // If the source register is SP, try to reduce to tADDrSPi, otherwise // it's a normal reduce. if (MI->getOperand(1).getReg() != ARM::SP) { - if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop)) + if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop)) return true; - return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop); + return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop); } // Try to reduce to tADDrSPi. 
unsigned Imm = MI->getOperand(2).getImm(); @@ -557,12 +585,12 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI, switch (Opc) { default: break; case ARM::t2ADDSri: { - if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop)) + if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop)) return true; // fallthrough } case ARM::t2ADDSrr: - return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop); + return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop); } } break; @@ -574,13 +602,13 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI, case ARM::t2UXTB: case ARM::t2UXTH: if (MI->getOperand(2).getImm() == 0) - return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop); + return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop); break; case ARM::t2MOVi16: // Can convert only 'pure' immediate operands, not immediates obtained as // globals' addresses. if (MI->getOperand(1).isImm()) - return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop); + return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop); break; case ARM::t2CMPrr: { // Try to reduce to the lo-reg only version first. Why there are two @@ -590,9 +618,9 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI, // source insn opcode. So for now, we hack a local entry record to use. static const ReduceEntry NarrowEntry = { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 }; - if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, CPSRDef, IsSelfLoop)) + if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop)) return true; - return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop); + return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop); } } return false; @@ -601,8 +629,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI, bool Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, const ReduceEntry &Entry, - bool LiveCPSR, MachineInstr *CPSRDef, - bool IsSelfLoop) { + bool LiveCPSR, bool IsSelfLoop) { if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr)) return false; @@ -683,7 +710,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, // Avoid adding a false dependency on partial flag update by some 16-bit // instructions which has the 's' bit set. if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC && - canAddPseudoFlagDep(CPSRDef, MI, IsSelfLoop)) + canAddPseudoFlagDep(MI, IsSelfLoop)) return false; // Add the 16-bit instruction. @@ -720,8 +747,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, bool Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI, const ReduceEntry &Entry, - bool LiveCPSR, MachineInstr *CPSRDef, - bool IsSelfLoop) { + bool LiveCPSR, bool IsSelfLoop) { if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit)) return false; @@ -780,7 +806,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI, // Avoid adding a false dependency on partial flag update by some 16-bit // instructions which has the 's' bit set. if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC && - canAddPseudoFlagDep(CPSRDef, MI, IsSelfLoop)) + canAddPseudoFlagDep(MI, IsSelfLoop)) return false; // Add the 16-bit instruction. 
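The Thumb2SizeReduce changes in the surrounding hunks boil down to one heuristic: narrowing to an 's'-suffixed 16-bit form is skipped when the last CPSR definition is expensive (FMSTAT, tMUL), while the very common immediate moves are always narrowed when the flags are cheap. A condensed standalone model of that decision follows, with the pass state flattened into a small struct; the names are illustrative, not the pass's real interface, and the real code additionally checks for a genuine read-after-write dependency before giving up.

// Opcodes relevant to the heuristic; the enum values are placeholders.
enum Opc { FMSTAT, tMUL, t2MOVi, t2MOVi16, OTHER };

struct BlockState {
  bool HaveCPSRDef;      // a CPSR-defining instruction was seen in this block
  bool HighLatencyCPSR;  // that def (or an incoming one) is slow, e.g. FMSTAT
};

// True when a CPSR def is a likely long-latency instruction.
bool isHighLatencyCPSRDef(Opc Def) {
  return Def == FMSTAT || Def == tMUL;
}

// Returns true when narrowing 'Use' to a flag-setting 16-bit form would add
// a false dependency worth avoiding (so the reduction should be skipped).
bool wouldAddHarmfulFlagDep(const BlockState &S, Opc Use, bool FirstInSelfLoop) {
  if (!S.HaveCPSRDef)
    // No local def: stay conservative only for self-loops or slow incoming flags.
    return S.HighLatencyCPSR || FirstInSelfLoop;
  if (S.HighLatencyCPSR)
    return true;   // a false dependency on FMSTAT/tMUL is expensive
  if (Use == t2MOVi || Use == t2MOVi16)
    return false;  // immediate moves rarely start long chains; always shrink
  return true;     // otherwise assume the false dependency is not worth it
}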
@@ -865,8 +891,7 @@ static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) { } bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI, - bool LiveCPSR, MachineInstr *CPSRDef, - bool IsSelfLoop) { + bool LiveCPSR, bool IsSelfLoop) { unsigned Opcode = MI->getOpcode(); DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode); if (OPI == ReduceOpcodeMap.end()) @@ -875,16 +900,16 @@ bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI, // Don't attempt normal reductions on "special" cases for now. if (Entry.Special) - return ReduceSpecial(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop); + return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop); // Try to transform to a 16-bit two-address instruction. if (Entry.NarrowOpc2 && - ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop)) + ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop)) return true; // Try to transform to a 16-bit non-two-address instruction. if (Entry.NarrowOpc1 && - ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop)) + ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop)) return true; return false; @@ -895,9 +920,25 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { // Yes, CPSR could be livein. bool LiveCPSR = MBB.isLiveIn(ARM::CPSR); - MachineInstr *CPSRDef = 0; MachineInstr *BundleMI = 0; + CPSRDef = 0; + HighLatencyCPSR = false; + + // Check predecessors for the latest CPSRDef. + for (MachineBasicBlock::pred_iterator + I = MBB.pred_begin(), E = MBB.pred_end(); I != E; ++I) { + const MBBInfo &PInfo = BlockInfo[(*I)->getNumber()]; + if (!PInfo.Visited) { + // Since blocks are visited in RPO, this must be a back-edge. + continue; + } + if (PInfo.HighLatencyCPSR) { + HighLatencyCPSR = true; + break; + } + } + // If this BB loops back to itself, conservatively avoid narrowing the // first instruction that does partial flag update. bool IsSelfLoop = MBB.isSuccessor(&MBB); @@ -911,13 +952,15 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { BundleMI = MI; continue; } + if (MI->isDebugValue()) + continue; LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR); // Does NextMII belong to the same bundle as MI? bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred(); - if (ReduceMI(MBB, MI, LiveCPSR, CPSRDef, IsSelfLoop)) { + if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) { Modified = true; MachineBasicBlock::instr_iterator I = prior(NextMII); MI = &*I; @@ -944,14 +987,19 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { if (MI->isCall()) { // Calls don't really set CPSR. CPSRDef = 0; + HighLatencyCPSR = false; IsSelfLoop = false; } else if (DefCPSR) { // This is the last CPSR defining instruction. CPSRDef = MI; + HighLatencyCPSR = isHighLatencyCPSR(CPSRDef); IsSelfLoop = false; } } + MBBInfo &Info = BlockInfo[MBB.getNumber()]; + Info.HighLatencyCPSR = HighLatencyCPSR; + Info.Visited = true; return Modified; } @@ -967,9 +1015,16 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) { MinimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize); + BlockInfo.clear(); + BlockInfo.resize(MF.getNumBlockIDs()); + + // Visit blocks in reverse post-order so LastCPSRDef is known for all + // predecessors. 
+ ReversePostOrderTraversal<MachineFunction*> RPOT(&MF); bool Modified = false; - for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) - Modified |= ReduceMBB(*I); + for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator + I = RPOT.begin(), E = RPOT.end(); I != E; ++I) + Modified |= ReduceMBB(**I); return Modified; } diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp index 604abf9..3e69098 100644 --- a/lib/Target/CppBackend/CPPBackend.cpp +++ b/lib/Target/CppBackend/CPPBackend.cpp @@ -518,7 +518,6 @@ void CppWriter::printAttributes(const AttributeSet &PAL, attrs.removeAttribute(Attribute::StackAlignment); } - assert(!attrs.hasAttributes() && "Unhandled attribute!"); Out << "PAS = AttributeSet::get(mod->getContext(), "; if (index == ~0U) Out << "~0U,"; diff --git a/lib/Target/Hexagon/Hexagon.td b/lib/Target/Hexagon/Hexagon.td index 8a5ee40..af1c56b 100644 --- a/lib/Target/Hexagon/Hexagon.td +++ b/lib/Target/Hexagon/Hexagon.td @@ -89,7 +89,7 @@ def getPredOpcode : InstrMapping { // def getPredNewOpcode : InstrMapping { let FilterClass = "PredNewRel"; - let RowFields = ["BaseOpcode", "PredSense", "isNVStore"]; + let RowFields = ["BaseOpcode", "PredSense", "isNVStore", "isBrTaken"]; let ColFields = ["PNewValue"]; let KeyCol = [""]; let ValueCols = [["new"]]; diff --git a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp index d4078ad..b6022ca 100644 --- a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp +++ b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp @@ -52,8 +52,8 @@ private: char HexagonCFGOptimizer::ID = 0; static bool IsConditionalBranch(int Opc) { - return (Opc == Hexagon::JMP_c) || (Opc == Hexagon::JMP_cNot) - || (Opc == Hexagon::JMP_cdnPt) || (Opc == Hexagon::JMP_cdnNotPt); + return (Opc == Hexagon::JMP_t) || (Opc == Hexagon::JMP_f) + || (Opc == Hexagon::JMP_tnew_t) || (Opc == Hexagon::JMP_fnew_t); } @@ -68,20 +68,20 @@ HexagonCFGOptimizer::InvertAndChangeJumpTarget(MachineInstr* MI, const HexagonInstrInfo *QII = QTM.getInstrInfo(); int NewOpcode = 0; switch(MI->getOpcode()) { - case Hexagon::JMP_c: - NewOpcode = Hexagon::JMP_cNot; + case Hexagon::JMP_t: + NewOpcode = Hexagon::JMP_f; break; - case Hexagon::JMP_cNot: - NewOpcode = Hexagon::JMP_c; + case Hexagon::JMP_f: + NewOpcode = Hexagon::JMP_t; break; - case Hexagon::JMP_cdnPt: - NewOpcode = Hexagon::JMP_cdnNotPt; + case Hexagon::JMP_tnew_t: + NewOpcode = Hexagon::JMP_fnew_t; break; - case Hexagon::JMP_cdnNotPt: - NewOpcode = Hexagon::JMP_cdnPt; + case Hexagon::JMP_fnew_t: + NewOpcode = Hexagon::JMP_tnew_t; break; default: @@ -156,8 +156,8 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) { // The target of the unconditional branch must be JumpAroundTarget. // TODO: If not, we should not invert the unconditional branch. MachineBasicBlock* CondBranchTarget = NULL; - if ((MI->getOpcode() == Hexagon::JMP_c) || - (MI->getOpcode() == Hexagon::JMP_cNot)) { + if ((MI->getOpcode() == Hexagon::JMP_t) || + (MI->getOpcode() == Hexagon::JMP_f)) { CondBranchTarget = MI->getOperand(1).getMBB(); } diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index d6a9329..de993ee8 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -189,7 +189,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF, // Replace 'jumpr r31' instruction with dealloc_return for V4 and higher // versions. 
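The HexagonCFGOptimizer hunk above renames the conditional-jump opcodes and keeps the same inversion mapping: true/false forms swap, and the "predict new, taken" variants swap with their opposite-sense forms. A compact restatement of that mapping; the enum is a placeholder so the sketch stands alone rather than using the generated Hexagon opcode values.

enum HexJmpOpc { JMP_t, JMP_f, JMP_tnew_t, JMP_fnew_t, JMP_INVALID };

// Invert the predicate sense of a conditional jump, keeping the
// "predict new, taken" hint paired with its opposite-sense form.
HexJmpOpc invertConditionalJump(HexJmpOpc Opc) {
  switch (Opc) {
  case JMP_t:      return JMP_f;
  case JMP_f:      return JMP_t;
  case JMP_tnew_t: return JMP_fnew_t;
  case JMP_fnew_t: return JMP_tnew_t;
  default:         return JMP_INVALID;  // not an invertible conditional jump
  }
}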
- if (STI.hasV4TOps() && MBBI->getOpcode() == Hexagon::JMPR + if (STI.hasV4TOps() && MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) { // Remove jumpr node. MBB.erase(MBBI); diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 62aed13..d002788 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -541,12 +541,6 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, case Hexagon::CMPEQrr: Cmp = !Negated ? Comparison::EQ : Comparison::NE; break; - case Hexagon::CMPLTrr: - Cmp = !Negated ? Comparison::LTs : Comparison::GEs; - break; - case Hexagon::CMPLTUrr: - Cmp = !Negated ? Comparison::LTu : Comparison::GEu; - break; case Hexagon::CMPGTUri: case Hexagon::CMPGTUrr: Cmp = !Negated ? Comparison::GTu : Comparison::LEu; @@ -701,7 +695,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, // If the induction variable bump is not a power of 2, quit. // Othwerise we'd need a general integer division. - if (!isPowerOf2_64(abs(IVBump))) + if (!isPowerOf2_64(abs64(IVBump))) return 0; MachineBasicBlock *PH = Loop->getLoopPreheader(); @@ -1125,8 +1119,8 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) { // The loop ends with either: // - a conditional branch followed by an unconditional branch, or // - a conditional branch to the loop start. - if (LastI->getOpcode() == Hexagon::JMP_c || - LastI->getOpcode() == Hexagon::JMP_cNot) { + if (LastI->getOpcode() == Hexagon::JMP_t || + LastI->getOpcode() == Hexagon::JMP_f) { // Delete one and change/add an uncond. branch to out of the loop. MachineBasicBlock *BranchTarget = LastI->getOperand(1).getMBB(); LastI = LastMBB->erase(LastI); @@ -1430,7 +1424,6 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( return 0; typedef MachineBasicBlock::instr_iterator instr_iterator; - typedef MachineBasicBlock::pred_iterator pred_iterator; // Verify that all existing predecessors have analyzable branches // (or no branches at all). diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 3a1c48b..ba6c100 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -113,6 +113,46 @@ public: SDNode *SelectAdd(SDNode *N); bool isConstExtProfitable(SDNode *N) const; +// XformMskToBitPosU5Imm - Returns the bit position which +// the single bit 32 bit mask represents. +// Used in Clr and Set bit immediate memops. +SDValue XformMskToBitPosU5Imm(uint32_t Imm) { + int32_t bitPos; + bitPos = Log2_32(Imm); + assert(bitPos >= 0 && bitPos < 32 && + "Constant out of range for 32 BitPos Memops"); + return CurDAG->getTargetConstant(bitPos, MVT::i32); +} + +// XformMskToBitPosU4Imm - Returns the bit position which the single bit 16 bit +// mask represents. Used in Clr and Set bit immediate memops. +SDValue XformMskToBitPosU4Imm(uint16_t Imm) { + return XformMskToBitPosU5Imm(Imm); +} + +// XformMskToBitPosU3Imm - Returns the bit position which the single bit 8 bit +// mask represents. Used in Clr and Set bit immediate memops. +SDValue XformMskToBitPosU3Imm(uint8_t Imm) { + return XformMskToBitPosU5Imm(Imm); +} + +// Return true if there is exactly one bit set in V, i.e., if V is one of the +// following integers: 2^0, 2^1, ..., 2^31. +bool ImmIsSingleBit(uint32_t v) const { + uint32_t c = CountPopulation_64(v); + // Only return true if we counted 1 bit. 
+ return c == 1; +} + +// XformM5ToU5Imm - Return a target constant with the specified value, of type +// i32 where the negative literal is transformed into a positive literal for +// use in -= memops. +inline SDValue XformM5ToU5Imm(signed Imm) { + assert( (Imm >= -31 && Imm <= -1) && "Constant out of range for Memops"); + return CurDAG->getTargetConstant( - Imm, MVT::i32); +} + + // XformU7ToU7M1Imm - Return a target constant decremented by 1, in range // [1..128], used in cmpb.gtu instructions. inline SDValue XformU7ToU7M1Imm(signed Imm) { @@ -120,6 +160,17 @@ inline SDValue XformU7ToU7M1Imm(signed Imm) { return CurDAG->getTargetConstant(Imm - 1, MVT::i8); } +// XformS8ToS8M1Imm - Return a target constant decremented by 1. +inline SDValue XformSToSM1Imm(signed Imm) { + return CurDAG->getTargetConstant(Imm - 1, MVT::i32); +} + +// XformU8ToU8M1Imm - Return a target constant decremented by 1. +inline SDValue XformUToUM1Imm(unsigned Imm) { + assert((Imm >= 1) && "Cannot decrement unsigned int less than 1"); + return CurDAG->getTargetConstant(Imm - 1, MVT::i32); +} + // Include the pieces autogenerated from the target description. #include "HexagonGenDAGISel.inc" }; @@ -657,7 +708,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) { // Build post increment store. SDNode* Result = CurDAG->getMachineNode(Opcode, dl, MVT::i32, - MVT::Other, Ops, 4); + MVT::Other, Ops); MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = ST->getMemOperand(); cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1); @@ -683,8 +734,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) { // Build regular store. SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32); - SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops, - 4); + SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops); // Build splitted incriment instruction. 
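
The XformMskToBitPos* selectors and ImmIsSingleBit added above reduce a power-of-two mask immediate to its bit index for the new setbit/clrbit memop patterns. A minimal standalone sketch of the same transform, using GCC/Clang builtins; the helper names here are hypothetical and not part of the patch:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for ImmIsSingleBit / XformMskToBitPosU5Imm above.
    static bool isSingleBit(uint32_t V) {
      return __builtin_popcount(V) == 1;      // exactly one bit set
    }

    static unsigned maskToBitPos(uint32_t Mask) {
      assert(isSingleBit(Mask) && "expected a power-of-two mask");
      return __builtin_ctz(Mask);             // e.g. 0x40 -> bit position 6
    }
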
SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32, Base, @@ -740,7 +790,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST, Value, Chain}; // build indexed store SDNode* Result = CurDAG->getMachineNode(Opcode, dl, - MVT::Other, Ops, 4); + MVT::Other, Ops); MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = ST->getMemOperand(); cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1); @@ -1190,8 +1240,7 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { } EVT ReturnValueVT = N->getValueType(0); SDNode *Result = CurDAG->getMachineNode(IntrinsicWithPred, dl, - ReturnValueVT, - Ops.data(), Ops.size()); + ReturnValueVT, Ops); ReplaceUses(N, Result); return Result; } diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 0a8b1af..0e5b8dc 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1002,14 +1002,6 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { return FrameAddr; } - -SDValue HexagonTargetLowering::LowerMEMBARRIER(SDValue Op, - SelectionDAG& DAG) const { - DebugLoc dl = Op.getDebugLoc(); - return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0)); -} - - SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const { DebugLoc dl = Op.getDebugLoc(); @@ -1361,7 +1353,6 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine } - setOperationAction(ISD::BRIND, MVT::Other, Expand); if (EmitJumpTables) { setOperationAction(ISD::BR_JT, MVT::Other, Custom); } else { @@ -1370,12 +1361,13 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine // Increase jump tables cutover to 5, was 4. setMinimumJumpTableEntries(5); + setOperationAction(ISD::BR_CC, MVT::Other, Expand); setOperationAction(ISD::BR_CC, MVT::f32, Expand); setOperationAction(ISD::BR_CC, MVT::f64, Expand); setOperationAction(ISD::BR_CC, MVT::i1, Expand); setOperationAction(ISD::BR_CC, MVT::i32, Expand); + setOperationAction(ISD::BR_CC, MVT::i64, Expand); - setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); setOperationAction(ISD::FSIN , MVT::f64, Expand); @@ -1442,7 +1434,7 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); - setOperationAction(ISD::EH_RETURN, MVT::Other, Expand); + setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); if (TM.getSubtargetImpl()->isSubtargetV2()) { setExceptionPointerRegister(Hexagon::R20); @@ -1497,6 +1489,7 @@ HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG"; case HexagonISD::BR_JT: return "HexagonISD::BR_JT"; case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN"; + case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN"; } } @@ -1518,16 +1511,43 @@ bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { } SDValue +HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { + SDValue Chain = Op.getOperand(0); + SDValue Offset = Op.getOperand(1); + SDValue Handler = Op.getOperand(2); + DebugLoc dl = Op.getDebugLoc(); + + // Mark function as containing a call to EH_RETURN. 
+ HexagonMachineFunctionInfo *FuncInfo = + DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>(); + FuncInfo->setHasEHReturn(); + + unsigned OffsetReg = Hexagon::R28; + + SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), + DAG.getRegister(Hexagon::R30, getPointerTy()), + DAG.getIntPtrConstant(4)); + Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), + false, false, 0); + Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset); + + // Not needed we already use it as explict input to EH_RETURN. + // MF.getRegInfo().addLiveOut(OffsetReg); + + return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain); +} + +SDValue HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { default: llvm_unreachable("Should not custom lower this!"); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); + case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); // Frame & Return address. Currently unimplemented. case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::GlobalTLSAddress: llvm_unreachable("TLS not implemented for Hexagon."); - case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG); case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG); case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG); case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index 3279cc6..bb1acc1 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -62,7 +62,8 @@ namespace llvm { WrapperShuffEH, WrapperShuffOB, WrapperShuffOH, - TC_RETURN + TC_RETURN, + EH_RETURN }; } @@ -101,6 +102,7 @@ namespace llvm { SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, @@ -122,7 +124,6 @@ namespace llvm { SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const; SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const; SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 96a252e..e0beab0 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -23,6 +23,7 @@ #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/PseudoSourceValue.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #define GET_INSTRINFO_CTOR #define GET_INSTRMAP_INFO @@ -118,16 +119,16 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, DebugLoc DL) const{ int BOpc = Hexagon::JMP; - int BccOpc = Hexagon::JMP_c; + int BccOpc = Hexagon::JMP_t; assert(TBB && "InsertBranch must not be told to insert a fallthrough"); int regPos = 0; // Check if ReverseBranchCondition has asked to reverse this branch // If we want to reverse the branch an odd number of times, we want - // JMP_cNot. + // JMP_f. 
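
For context on the LowerEH_RETURN hunk above: the new HexagonISD::EH_RETURN node backs the __builtin_eh_return builtin used by unwinder runtimes. Per the code above, the handler address is stored at FP+4 (apparently the saved return-address slot) and the stack-adjustment offset is copied into R28 before the jump. A hedged usage sketch; the function name is made up, the builtin is the standard GCC/Clang one:

    #include <cstddef>

    // Hypothetical unwinder hook: installs the landing-pad address and the
    // stack adjustment, then leaves through the EH_RETURN lowering shown
    // above. __builtin_eh_return never returns to its caller.
    extern "C" void hexagonResumeUnwind(std::ptrdiff_t Offset, void *Handler) {
      __builtin_eh_return(Offset, Handler);
    }
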
if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) { - BccOpc = Hexagon::JMP_cNot; + BccOpc = Hexagon::JMP_f; regPos = 1; } @@ -174,8 +175,8 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, FBB = NULL; // If the block has no terminators, it just falls into the block after it. - MachineBasicBlock::iterator I = MBB.end(); - if (I == MBB.begin()) + MachineBasicBlock::instr_iterator I = MBB.instr_end(); + if (I == MBB.instr_begin()) return false; // A basic block may looks like this: @@ -194,13 +195,24 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, --I; if (I->isEHLabel()) return true; - } while (I != MBB.begin()); + } while (I != MBB.instr_begin()); - I = MBB.end(); + I = MBB.instr_end(); --I; while (I->isDebugValue()) { - if (I == MBB.begin()) + if (I == MBB.instr_begin()) + return false; + --I; + } + + // Delete the JMP if it's equivalent to a fall-through. + if (AllowModify && I->getOpcode() == Hexagon::JMP && + MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { + DEBUG(dbgs()<< "\nErasing the jump to successor block\n";); + I->eraseFromParent(); + I = MBB.instr_end(); + if (I == MBB.instr_begin()) return false; --I; } @@ -209,23 +221,42 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, // Get the last instruction in the block. MachineInstr *LastInst = I; + MachineInstr *SecondLastInst = NULL; + // Find one more terminator if present. + do { + if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(I)) { + if (!SecondLastInst) + SecondLastInst = I; + else + // This is a third branch. + return true; + } + if (I == MBB.instr_begin()) + break; + --I; + } while(I); + + int LastOpcode = LastInst->getOpcode(); + + bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode); + bool LastOpcodeHasNot = PredOpcodeHasNot(LastOpcode); // If there is only one terminator instruction, process it. - if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { - if (LastInst->getOpcode() == Hexagon::JMP) { + if (LastInst && !SecondLastInst) { + if (LastOpcode == Hexagon::JMP) { TBB = LastInst->getOperand(0).getMBB(); return false; } - if (LastInst->getOpcode() == Hexagon::JMP_c) { - // Block ends with fall-through true condbranch. - TBB = LastInst->getOperand(1).getMBB(); + if (LastOpcode == Hexagon::ENDLOOP0) { + TBB = LastInst->getOperand(0).getMBB(); Cond.push_back(LastInst->getOperand(0)); return false; } - if (LastInst->getOpcode() == Hexagon::JMP_cNot) { - // Block ends with fall-through false condbranch. + if (LastOpcodeHasJMP_c) { TBB = LastInst->getOperand(1).getMBB(); - Cond.push_back(MachineOperand::CreateImm(0)); + if (LastOpcodeHasNot) { + Cond.push_back(MachineOperand::CreateImm(0)); + } Cond.push_back(LastInst->getOperand(0)); return false; } @@ -233,29 +264,14 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, return true; } - // Get the instruction before it if it's a terminator. - MachineInstr *SecondLastInst = I; - - // If there are three terminators, we don't know what sort of block this is. - if (SecondLastInst && I != MBB.begin() && - isUnpredicatedTerminator(--I)) - return true; - - // If the block ends with Hexagon::BRCOND and Hexagon:JMP, handle it. 
- if (((SecondLastInst->getOpcode() == Hexagon::BRCOND) || - (SecondLastInst->getOpcode() == Hexagon::JMP_c)) && - LastInst->getOpcode() == Hexagon::JMP) { - TBB = SecondLastInst->getOperand(1).getMBB(); - Cond.push_back(SecondLastInst->getOperand(0)); - FBB = LastInst->getOperand(0).getMBB(); - return false; - } + int SecLastOpcode = SecondLastInst->getOpcode(); - // If the block ends with Hexagon::JMP_cNot and Hexagon:JMP, handle it. - if ((SecondLastInst->getOpcode() == Hexagon::JMP_cNot) && - LastInst->getOpcode() == Hexagon::JMP) { + bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode); + bool SecLastOpcodeHasNot = PredOpcodeHasNot(SecLastOpcode); + if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::JMP)) { TBB = SecondLastInst->getOperand(1).getMBB(); - Cond.push_back(MachineOperand::CreateImm(0)); + if (SecLastOpcodeHasNot) + Cond.push_back(MachineOperand::CreateImm(0)); Cond.push_back(SecondLastInst->getOperand(0)); FBB = LastInst->getOperand(0).getMBB(); return false; @@ -263,8 +279,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, // If the block ends with two Hexagon:JMPs, handle it. The second one is not // executed, so remove it. - if (SecondLastInst->getOpcode() == Hexagon::JMP && - LastInst->getOpcode() == Hexagon::JMP) { + if (SecLastOpcode == Hexagon::JMP && LastOpcode == Hexagon::JMP) { TBB = SecondLastInst->getOperand(0).getMBB(); I = LastInst; if (AllowModify) @@ -272,6 +287,15 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, return false; } + // If the block ends with an ENDLOOP, and JMP, handle it. + if (SecLastOpcode == Hexagon::ENDLOOP0 && + LastOpcode == Hexagon::JMP) { + TBB = SecondLastInst->getOperand(0).getMBB(); + Cond.push_back(SecondLastInst->getOperand(0)); + FBB = LastInst->getOperand(0).getMBB(); + return false; + } + // Otherwise, can't handle this. 
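
A note on the convention shared by the AnalyzeBranch and InsertBranch hunks above: the branch condition travels in the Cond operand vector as the predicate register, optionally preceded by an immediate 0 when the sense is inverted (the PredOpcodeHasNot opcodes). A simplified consumer mirroring the check InsertBranch performs; this is a sketch, not the in-tree API:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineOperand.h"
    using namespace llvm;

    // A leading immediate 0 in Cond means "branch when the predicate is
    // false" (JMP_f); otherwise branch when it is true (JMP_t).
    static unsigned pickCondJump(const SmallVectorImpl<MachineOperand> &Cond,
                                 unsigned JmpTrueOpc, unsigned JmpFalseOpc) {
      bool Inverted = !Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0;
      return Inverted ? JmpFalseOpc : JmpTrueOpc;
    }
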
return true; } @@ -279,8 +303,8 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { int BOpc = Hexagon::JMP; - int BccOpc = Hexagon::JMP_c; - int BccOpcNot = Hexagon::JMP_cNot; + int BccOpc = Hexagon::JMP_t; + int BccOpcNot = Hexagon::JMP_f; MachineBasicBlock::iterator I = MBB.end(); if (I == MBB.begin()) return 0; @@ -325,8 +349,6 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI, case Hexagon::CMPGTUrr: case Hexagon::CMPGTri: case Hexagon::CMPGTrr: - case Hexagon::CMPLTUrr: - case Hexagon::CMPLTrr: SrcReg = MI->getOperand(1).getReg(); Mask = ~0; break; @@ -366,8 +388,6 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI, case Hexagon::CMPhEQrr_xor_V4: case Hexagon::CMPhGTUrr_V4: case Hexagon::CMPhGTrr_shl_V4: - case Hexagon::CMPLTUrr: - case Hexagon::CMPLTrr: SrcReg2 = MI->getOperand(2).getReg(); return true; @@ -537,6 +557,15 @@ MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, return(0); } +MachineInstr* +HexagonInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, + int FrameIx, uint64_t Offset, + const MDNode *MDPtr, + DebugLoc DL) const { + MachineInstrBuilder MIB = BuildMI(MF, DL, get(Hexagon::DBG_VALUE)) + .addImm(0).addImm(Offset).addMetadata(MDPtr); + return &*MIB; +} unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { @@ -737,11 +766,6 @@ bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const { case Hexagon::STrib_abs_cdnPt_nv_V4: case Hexagon::STrib_abs_cNotPt_nv_V4: case Hexagon::STrib_abs_cdnNotPt_nv_V4: - case Hexagon::STrib_imm_abs_nv_V4: - case Hexagon::STrib_imm_abs_cPt_nv_V4: - case Hexagon::STrib_imm_abs_cdnPt_nv_V4: - case Hexagon::STrib_imm_abs_cNotPt_nv_V4: - case Hexagon::STrib_imm_abs_cdnNotPt_nv_V4: // Store Halfword case Hexagon::STrih_nv_V4: @@ -775,11 +799,6 @@ bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const { case Hexagon::STrih_abs_cdnPt_nv_V4: case Hexagon::STrih_abs_cNotPt_nv_V4: case Hexagon::STrih_abs_cdnNotPt_nv_V4: - case Hexagon::STrih_imm_abs_nv_V4: - case Hexagon::STrih_imm_abs_cPt_nv_V4: - case Hexagon::STrih_imm_abs_cdnPt_nv_V4: - case Hexagon::STrih_imm_abs_cNotPt_nv_V4: - case Hexagon::STrih_imm_abs_cdnNotPt_nv_V4: // Store Word case Hexagon::STriw_nv_V4: @@ -813,11 +832,6 @@ bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const { case Hexagon::STriw_abs_cdnPt_nv_V4: case Hexagon::STriw_abs_cNotPt_nv_V4: case Hexagon::STriw_abs_cdnNotPt_nv_V4: - case Hexagon::STriw_imm_abs_nv_V4: - case Hexagon::STriw_imm_abs_cPt_nv_V4: - case Hexagon::STriw_imm_abs_cdnPt_nv_V4: - case Hexagon::STriw_imm_abs_cNotPt_nv_V4: - case Hexagon::STriw_imm_abs_cdnNotPt_nv_V4: return true; } } @@ -994,9 +1008,6 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const { case Hexagon::ZXTB: case Hexagon::ZXTH: return Subtarget.hasV4TOps(); - - case Hexagon::JMPR: - return false; } return true; @@ -1033,10 +1044,10 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const { case Hexagon::TFRI_cNotPt: return Hexagon::TFRI_cPt; - case Hexagon::JMP_c: - return Hexagon::JMP_cNot; - case Hexagon::JMP_cNot: - return Hexagon::JMP_c; + case Hexagon::JMP_t: + return Hexagon::JMP_f; + case Hexagon::JMP_f: + return Hexagon::JMP_t; case Hexagon::ADD_ri_cPt: return Hexagon::ADD_ri_cNotPt; @@ -1104,10 +1115,10 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const { return Hexagon::ZXTH_cPt_V4; - case Hexagon::JMPR_cPt: - return 
Hexagon::JMPR_cNotPt; - case Hexagon::JMPR_cNotPt: - return Hexagon::JMPR_cPt; + case Hexagon::JMPR_t: + return Hexagon::JMPR_f; + case Hexagon::JMPR_f: + return Hexagon::JMPR_t; // V4 indexed+scaled load. case Hexagon::LDrid_indexed_shl_cPt_V4: @@ -1490,8 +1501,8 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { return !invertPredicate ? Hexagon::TFRI_cPt : Hexagon::TFRI_cNotPt; case Hexagon::JMP: - return !invertPredicate ? Hexagon::JMP_c : - Hexagon::JMP_cNot; + return !invertPredicate ? Hexagon::JMP_t : + Hexagon::JMP_f; case Hexagon::JMP_EQrrPt_nv_V4: return !invertPredicate ? Hexagon::JMP_EQrrPt_nv_V4 : Hexagon::JMP_EQrrNotPt_nv_V4; @@ -1521,8 +1532,8 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { Hexagon::ZXTH_cNotPt_V4; case Hexagon::JMPR: - return !invertPredicate ? Hexagon::JMPR_cPt : - Hexagon::JMPR_cNotPt; + return !invertPredicate ? Hexagon::JMPR_t : + Hexagon::JMPR_f; // V4 indexed+scaled load. case Hexagon::LDrid_indexed_shl_V4: @@ -1821,11 +1832,15 @@ PredicateInstruction(MachineInstr *MI, // It is better to have an assert here to check this. But I don't know how // to write this assert because findFirstPredOperandIdx() would return -1 if (oper < -1) oper = -1; + MI->getOperand(oper+1).ChangeToRegister(PredMO.getReg(), PredMO.isDef(), - PredMO.isImplicit(), PredMO.isKill(), + PredMO.isImplicit(), false, PredMO.isDead(), PredMO.isUndef(), PredMO.isDebug()); + MachineRegisterInfo &RegInfo = MI->getParent()->getParent()->getRegInfo(); + RegInfo.clearKillFlags(PredMO.getReg()); + if (hasGAOpnd) { unsigned int i; @@ -1881,6 +1896,13 @@ bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const { return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask); } +bool HexagonInstrInfo::isPredicatedNew(const MachineInstr *MI) const { + const uint64_t F = MI->getDesc().TSFlags; + + assert(isPredicated(MI)); + return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask); +} + bool HexagonInstrInfo::DefinesPredicate(MachineInstr *MI, std::vector<MachineOperand> &Pred) const { @@ -1991,46 +2013,28 @@ isValidOffset(const int Opcode, const int Offset) const { return (Offset >= Hexagon_ADDI_OFFSET_MIN) && (Offset <= Hexagon_ADDI_OFFSET_MAX); - case Hexagon::MEMw_ADDi_indexed_MEM_V4 : - case Hexagon::MEMw_SUBi_indexed_MEM_V4 : - case Hexagon::MEMw_ADDr_indexed_MEM_V4 : - case Hexagon::MEMw_SUBr_indexed_MEM_V4 : - case Hexagon::MEMw_ANDr_indexed_MEM_V4 : - case Hexagon::MEMw_ORr_indexed_MEM_V4 : - case Hexagon::MEMw_ADDi_MEM_V4 : - case Hexagon::MEMw_SUBi_MEM_V4 : - case Hexagon::MEMw_ADDr_MEM_V4 : - case Hexagon::MEMw_SUBr_MEM_V4 : - case Hexagon::MEMw_ANDr_MEM_V4 : - case Hexagon::MEMw_ORr_MEM_V4 : + case Hexagon::MemOPw_ADDi_V4 : + case Hexagon::MemOPw_SUBi_V4 : + case Hexagon::MemOPw_ADDr_V4 : + case Hexagon::MemOPw_SUBr_V4 : + case Hexagon::MemOPw_ANDr_V4 : + case Hexagon::MemOPw_ORr_V4 : return (0 <= Offset && Offset <= 255); - case Hexagon::MEMh_ADDi_indexed_MEM_V4 : - case Hexagon::MEMh_SUBi_indexed_MEM_V4 : - case Hexagon::MEMh_ADDr_indexed_MEM_V4 : - case Hexagon::MEMh_SUBr_indexed_MEM_V4 : - case Hexagon::MEMh_ANDr_indexed_MEM_V4 : - case Hexagon::MEMh_ORr_indexed_MEM_V4 : - case Hexagon::MEMh_ADDi_MEM_V4 : - case Hexagon::MEMh_SUBi_MEM_V4 : - case Hexagon::MEMh_ADDr_MEM_V4 : - case Hexagon::MEMh_SUBr_MEM_V4 : - case Hexagon::MEMh_ANDr_MEM_V4 : - case Hexagon::MEMh_ORr_MEM_V4 : + case Hexagon::MemOPh_ADDi_V4 : + case Hexagon::MemOPh_SUBi_V4 : + case Hexagon::MemOPh_ADDr_V4 : + case Hexagon::MemOPh_SUBr_V4 : + 
case Hexagon::MemOPh_ANDr_V4 : + case Hexagon::MemOPh_ORr_V4 : return (0 <= Offset && Offset <= 127); - case Hexagon::MEMb_ADDi_indexed_MEM_V4 : - case Hexagon::MEMb_SUBi_indexed_MEM_V4 : - case Hexagon::MEMb_ADDr_indexed_MEM_V4 : - case Hexagon::MEMb_SUBr_indexed_MEM_V4 : - case Hexagon::MEMb_ANDr_indexed_MEM_V4 : - case Hexagon::MEMb_ORr_indexed_MEM_V4 : - case Hexagon::MEMb_ADDi_MEM_V4 : - case Hexagon::MEMb_SUBi_MEM_V4 : - case Hexagon::MEMb_ADDr_MEM_V4 : - case Hexagon::MEMb_SUBr_MEM_V4 : - case Hexagon::MEMb_ANDr_MEM_V4 : - case Hexagon::MEMb_ORr_MEM_V4 : + case Hexagon::MemOPb_ADDi_V4 : + case Hexagon::MemOPb_SUBi_V4 : + case Hexagon::MemOPb_ADDr_V4 : + case Hexagon::MemOPb_SUBr_V4 : + case Hexagon::MemOPb_ANDr_V4 : + case Hexagon::MemOPb_ORr_V4 : return (0 <= Offset && Offset <= 63); // LDri_pred and STriw_pred are pseudo operations, so it has to take offset of @@ -2086,44 +2090,33 @@ isMemOp(const MachineInstr *MI) const { switch (MI->getOpcode()) { default: return false; - case Hexagon::MEMw_ADDi_indexed_MEM_V4 : - case Hexagon::MEMw_SUBi_indexed_MEM_V4 : - case Hexagon::MEMw_ADDr_indexed_MEM_V4 : - case Hexagon::MEMw_SUBr_indexed_MEM_V4 : - case Hexagon::MEMw_ANDr_indexed_MEM_V4 : - case Hexagon::MEMw_ORr_indexed_MEM_V4 : - case Hexagon::MEMw_ADDi_MEM_V4 : - case Hexagon::MEMw_SUBi_MEM_V4 : - case Hexagon::MEMw_ADDr_MEM_V4 : - case Hexagon::MEMw_SUBr_MEM_V4 : - case Hexagon::MEMw_ANDr_MEM_V4 : - case Hexagon::MEMw_ORr_MEM_V4 : - case Hexagon::MEMh_ADDi_indexed_MEM_V4 : - case Hexagon::MEMh_SUBi_indexed_MEM_V4 : - case Hexagon::MEMh_ADDr_indexed_MEM_V4 : - case Hexagon::MEMh_SUBr_indexed_MEM_V4 : - case Hexagon::MEMh_ANDr_indexed_MEM_V4 : - case Hexagon::MEMh_ORr_indexed_MEM_V4 : - case Hexagon::MEMh_ADDi_MEM_V4 : - case Hexagon::MEMh_SUBi_MEM_V4 : - case Hexagon::MEMh_ADDr_MEM_V4 : - case Hexagon::MEMh_SUBr_MEM_V4 : - case Hexagon::MEMh_ANDr_MEM_V4 : - case Hexagon::MEMh_ORr_MEM_V4 : - case Hexagon::MEMb_ADDi_indexed_MEM_V4 : - case Hexagon::MEMb_SUBi_indexed_MEM_V4 : - case Hexagon::MEMb_ADDr_indexed_MEM_V4 : - case Hexagon::MEMb_SUBr_indexed_MEM_V4 : - case Hexagon::MEMb_ANDr_indexed_MEM_V4 : - case Hexagon::MEMb_ORr_indexed_MEM_V4 : - case Hexagon::MEMb_ADDi_MEM_V4 : - case Hexagon::MEMb_SUBi_MEM_V4 : - case Hexagon::MEMb_ADDr_MEM_V4 : - case Hexagon::MEMb_SUBr_MEM_V4 : - case Hexagon::MEMb_ANDr_MEM_V4 : - case Hexagon::MEMb_ORr_MEM_V4 : - return true; + case Hexagon::MemOPw_ADDi_V4 : + case Hexagon::MemOPw_SUBi_V4 : + case Hexagon::MemOPw_ADDr_V4 : + case Hexagon::MemOPw_SUBr_V4 : + case Hexagon::MemOPw_ANDr_V4 : + case Hexagon::MemOPw_ORr_V4 : + case Hexagon::MemOPh_ADDi_V4 : + case Hexagon::MemOPh_SUBi_V4 : + case Hexagon::MemOPh_ADDr_V4 : + case Hexagon::MemOPh_SUBr_V4 : + case Hexagon::MemOPh_ANDr_V4 : + case Hexagon::MemOPh_ORr_V4 : + case Hexagon::MemOPb_ADDi_V4 : + case Hexagon::MemOPb_SUBi_V4 : + case Hexagon::MemOPb_ADDr_V4 : + case Hexagon::MemOPb_SUBr_V4 : + case Hexagon::MemOPb_ANDr_V4 : + case Hexagon::MemOPb_ORr_V4 : + case Hexagon::MemOPb_SETBITi_V4: + case Hexagon::MemOPh_SETBITi_V4: + case Hexagon::MemOPw_SETBITi_V4: + case Hexagon::MemOPb_CLRBITi_V4: + case Hexagon::MemOPh_CLRBITi_V4: + case Hexagon::MemOPw_CLRBITi_V4: + return true; } + return false; } @@ -2142,14 +2135,10 @@ bool HexagonInstrInfo::isNewValueJumpCandidate(const MachineInstr *MI) const { default: return false; case Hexagon::CMPEQrr: case Hexagon::CMPEQri: - case Hexagon::CMPLTrr: case Hexagon::CMPGTrr: case Hexagon::CMPGTri: - case Hexagon::CMPLTUrr: case Hexagon::CMPGTUrr: case 
Hexagon::CMPGTUri: - case Hexagon::CMPGEri: - case Hexagon::CMPGEUri: return true; } } @@ -2382,6 +2371,13 @@ isConditionalStore (const MachineInstr* MI) const { } } +// Returns true, if any one of the operands is a dot new +// insn, whether it is predicated dot new or register dot new. +bool HexagonInstrInfo::isDotNewInst (const MachineInstr* MI) const { + return (isNewValueInst(MI) || + (isPredicated(MI) && isPredicatedNew(MI))); +} + unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const { const uint64_t F = MI->getDesc().TSFlags; @@ -2476,6 +2472,34 @@ bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const { return (ImmValue < MinValue || ImmValue > MaxValue); } +// Returns the opcode to use when converting MI, which is a conditional jump, +// into a conditional instruction which uses the .new value of the predicate. +// We also use branch probabilities to add a hint to the jump. +int +HexagonInstrInfo::getDotNewPredJumpOp(MachineInstr *MI, + const + MachineBranchProbabilityInfo *MBPI) const { + + // We assume that block can have at most two successors. + bool taken = false; + MachineBasicBlock *Src = MI->getParent(); + MachineOperand *BrTarget = &MI->getOperand(1); + MachineBasicBlock *Dst = BrTarget->getMBB(); + + const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst); + if (Prediction >= BranchProbability(1,2)) + taken = true; + + switch (MI->getOpcode()) { + case Hexagon::JMP_t: + return taken ? Hexagon::JMP_tnew_t : Hexagon::JMP_tnew_nt; + case Hexagon::JMP_f: + return taken ? Hexagon::JMP_fnew_t : Hexagon::JMP_fnew_nt; + + default: + llvm_unreachable("Unexpected jump instruction."); + } +} // Returns true if a particular operand is extendable for an instruction. bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI, unsigned short OperandNum) const { @@ -2580,3 +2604,18 @@ short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const { } return -1; } + +bool HexagonInstrInfo::PredOpcodeHasJMP_c(Opcode_t Opcode) const { + return (Opcode == Hexagon::JMP_t) || + (Opcode == Hexagon::JMP_f) || + (Opcode == Hexagon::JMP_tnew_t) || + (Opcode == Hexagon::JMP_fnew_t) || + (Opcode == Hexagon::JMP_tnew_nt) || + (Opcode == Hexagon::JMP_fnew_nt); +} + +bool HexagonInstrInfo::PredOpcodeHasNot(Opcode_t Opcode) const { + return (Opcode == Hexagon::JMP_f) || + (Opcode == Hexagon::JMP_fnew_t) || + (Opcode == Hexagon::JMP_fnew_nt); +} diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h index d2f059a..e0bec04 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.h +++ b/lib/Target/Hexagon/HexagonInstrInfo.h @@ -16,9 +16,9 @@ #include "HexagonRegisterInfo.h" #include "MCTargetDesc/HexagonBaseInfo.h" -#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetInstrInfo.h" - +#include "llvm/Target/TargetFrameLowering.h" +#include "llvm/CodeGen/MachineBranchProbabilityInfo.h" #define GET_INSTRINFO_HEADER #include "HexagonGenInstrInfo.inc" @@ -28,6 +28,8 @@ namespace llvm { class HexagonInstrInfo : public HexagonGenInstrInfo { const HexagonRegisterInfo RI; const HexagonSubtarget& Subtarget; + typedef unsigned Opcode_t; + public: explicit HexagonInstrInfo(HexagonSubtarget &ST); @@ -127,6 +129,7 @@ public: const BranchProbability &Probability) const; virtual bool isPredicated(const MachineInstr *MI) const; + virtual bool isPredicatedNew(const MachineInstr *MI) const; virtual bool DefinesPredicate(MachineInstr *MI, std::vector<MachineOperand> &Pred) const; virtual bool @@ -140,6 +143,11 @@ public: 
isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumCycles, const BranchProbability &Probability) const; + virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, + int FrameIx, + uint64_t Offset, + const MDNode *MDPtr, + DebugLoc DL) const; virtual DFAPacketizer* CreateTargetScheduleState(const TargetMachine *TM, const ScheduleDAG *DAG) const; @@ -170,6 +178,7 @@ public: bool isConditionalLoad (const MachineInstr* MI) const; bool isConditionalStore(const MachineInstr* MI) const; bool isNewValueInst(const MachineInstr* MI) const; + bool isDotNewInst(const MachineInstr* MI) const; bool isDeallocRet(const MachineInstr *MI) const; unsigned getInvertedPredicatedOpcode(const int Opc) const; bool isExtendable(const MachineInstr* MI) const; @@ -182,6 +191,8 @@ public: void immediateExtend(MachineInstr *MI) const; bool isConstExtended(MachineInstr *MI) const; + int getDotNewPredJumpOp(MachineInstr *MI, + const MachineBranchProbabilityInfo *MBPI) const; unsigned getAddrMode(const MachineInstr* MI) const; bool isOperandExtended(const MachineInstr *MI, unsigned short OperandNum) const; @@ -190,6 +201,9 @@ public: int getMaxValue(const MachineInstr *MI) const; bool NonExtEquivalentExists (const MachineInstr *MI) const; short getNonExtOpcode(const MachineInstr *MI) const; + bool PredOpcodeHasJMP_c(Opcode_t Opcode) const; + bool PredOpcodeHasNot(Opcode_t Opcode) const; + private: int getMatchingCondBranchOpcode(int Opc, bool sense) const; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td index d7bab20..2a4b17b 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.td +++ b/lib/Target/Hexagon/HexagonInstrInfo.td @@ -14,6 +14,8 @@ include "HexagonInstrFormats.td" include "HexagonOperands.td" +//===----------------------------------------------------------------------===// + // Multi-class for logical operators. 
multiclass ALU32_rr_ri<string OpcStr, SDNode OpNode> { def rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c), @@ -34,12 +36,6 @@ multiclass CMP64_rr<string OpcStr, PatFrag OpNode> { [(set (i1 PredRegs:$dst), (OpNode (i64 DoubleRegs:$b), (i64 DoubleRegs:$c)))]>; } -multiclass CMP32_rr<string OpcStr, PatFrag OpNode> { - def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), - !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set (i1 PredRegs:$dst), - (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>; -} multiclass CMP32_rr_ri_s10<string OpcStr, string CextOp, PatFrag OpNode> { let CextOpcode = CextOp in { @@ -75,14 +71,6 @@ multiclass CMP32_rr_ri_u9<string OpcStr, string CextOp, PatFrag OpNode> { } } -multiclass CMP32_ri_u8<string OpcStr, PatFrag OpNode> { -let isExtendable = 1, opExtendable = 2, isExtentSigned = 0, opExtentBits = 8 in - def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u8Ext:$c), - !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b), - u8ExtPred:$c))]>; -} - multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> { let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8 in def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Ext:$c), @@ -95,22 +83,30 @@ let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8 in //===----------------------------------------------------------------------===// // ALU32/ALU (Instructions with register-register form) //===----------------------------------------------------------------------===// -multiclass ALU32_Pbase<string mnemonic, bit isNot, - bit isPredNew> { +def SDTHexagonI64I32I32 : SDTypeProfile<1, 2, + [SDTCisVT<0, i64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>; + +def HexagonWrapperCombineII : + SDNode<"HexagonISD::WrapperCombineII", SDTHexagonI64I32I32>; + +def HexagonWrapperCombineRR : + SDNode<"HexagonISD::WrapperCombineRR", SDTHexagonI64I32I32>; - let PNewValue = !if(isPredNew, "new", "") in - def NAME : ALU32_rr<(outs IntRegs:$dst), +multiclass ALU32_Pbase<string mnemonic, RegisterClass RC, bit isNot, + bit isPredNew> { + let isPredicatedNew = isPredNew in + def NAME : ALU32_rr<(outs RC:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs: $src3), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ", ") $dst = ")#mnemonic#"($src2, $src3)", []>; } -multiclass ALU32_Pred<string mnemonic, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { - defm _c#NAME : ALU32_Pbase<mnemonic, PredNot, 0>; +multiclass ALU32_Pred<string mnemonic, RegisterClass RC, bit PredNot> { + let isPredicatedFalse = PredNot in { + defm _c#NAME : ALU32_Pbase<mnemonic, RC, PredNot, 0>; // Predicate new - defm _cdn#NAME : ALU32_Pbase<mnemonic, PredNot, 1>; + defm _cdn#NAME : ALU32_Pbase<mnemonic, RC, PredNot, 1>; } } @@ -125,8 +121,8 @@ multiclass ALU32_base<string mnemonic, string CextOp, SDNode OpNode> { (i32 IntRegs:$src2)))]>; let neverHasSideEffects = 1, isPredicated = 1 in { - defm Pt : ALU32_Pred<mnemonic, 0>; - defm NotPt : ALU32_Pred<mnemonic, 1>; + defm Pt : ALU32_Pred<mnemonic, IntRegs, 0>; + defm NotPt : ALU32_Pred<mnemonic, IntRegs, 1>; } } } @@ -140,11 +136,42 @@ let isCommutable = 1 in { defm SUB_rr : ALU32_base<"sub", "SUB", sub>, ImmRegRel, PredNewRel; +// Combines the two integer registers SRC1 and SRC2 into a double register. 
+let isPredicable = 1 in +class T_Combine : ALU32_rr<(outs DoubleRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = combine($src1, $src2)", + [(set (i64 DoubleRegs:$dst), + (i64 (HexagonWrapperCombineRR (i32 IntRegs:$src1), + (i32 IntRegs:$src2))))]>; + +multiclass Combine_base { + let BaseOpcode = "combine" in { + def NAME : T_Combine; + let neverHasSideEffects = 1, isPredicated = 1 in { + defm Pt : ALU32_Pred<"combine", DoubleRegs, 0>; + defm NotPt : ALU32_Pred<"combine", DoubleRegs, 1>; + } + } +} + +defm COMBINE_rr : Combine_base, PredNewRel; + +// Combines the two immediates SRC1 and SRC2 into a double register. +class COMBINE_imm<Operand imm1, Operand imm2, PatLeaf pat1, PatLeaf pat2> : + ALU32_ii<(outs DoubleRegs:$dst), (ins imm1:$src1, imm2:$src2), + "$dst = combine(#$src1, #$src2)", + [(set (i64 DoubleRegs:$dst), + (i64 (HexagonWrapperCombineII (i32 pat1:$src1), (i32 pat2:$src2))))]>; + +let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 8 in +def COMBINE_Ii : COMBINE_imm<s8Ext, s8Imm, s8ExtPred, s8ImmPred>; + //===----------------------------------------------------------------------===// // ALU32/ALU (ADD with register-immediate form) //===----------------------------------------------------------------------===// multiclass ALU32ri_Pbase<string mnemonic, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, s8Ext: $src3), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ", @@ -153,7 +180,7 @@ multiclass ALU32ri_Pbase<string mnemonic, bit isNot, bit isPredNew> { } multiclass ALU32ri_Pred<string mnemonic, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ALU32ri_Pbase<mnemonic, PredNot, 0>; // Predicate new defm _cdn#NAME : ALU32ri_Pbase<mnemonic, PredNot, 1>; @@ -189,11 +216,6 @@ def OR_ri : ALU32_ri<(outs IntRegs:$dst), [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), s10ExtPred:$src2))]>, ImmRegRel; -def NOT_rr : ALU32_rr<(outs IntRegs:$dst), - (ins IntRegs:$src1), - "$dst = not($src1)", - [(set (i32 IntRegs:$dst), (not (i32 IntRegs:$src1)))]>; - let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 10, InputType = "imm", CextOpcode = "AND" in def AND_ri : ALU32_ri<(outs IntRegs:$dst), @@ -201,10 +223,7 @@ def AND_ri : ALU32_ri<(outs IntRegs:$dst), "$dst = and($src1, #$src2)", [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), s10ExtPred:$src2))]>, ImmRegRel; -// Negate. -def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), - "$dst = neg($src1)", - [(set (i32 IntRegs:$dst), (ineg (i32 IntRegs:$src1)))]>; + // Nop. let neverHasSideEffects = 1 in def NOP : ALU32_rr<(outs), (ins), @@ -220,15 +239,21 @@ def SUB_ri : ALU32_ri<(outs IntRegs:$dst), [(set IntRegs:$dst, (sub s10ExtPred:$src1, IntRegs:$src2))]>, ImmRegRel; +// Rd = not(Rs) gets mapped to Rd=sub(#-1, Rs). +def : Pat<(not (i32 IntRegs:$src1)), + (SUB_ri -1, (i32 IntRegs:$src1))>; + +// Rd = neg(Rs) gets mapped to Rd=sub(#0, Rs). +// Pattern definition for 'neg' was not necessary. 
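
The two Pat rewrites above rely on the two's-complement identities not(x) = -1 - x and neg(x) = 0 - x, which is why the dedicated NOT_rr/NEG definitions can be dropped in favour of sub(#-1, Rs) and sub(#0, Rs). A quick self-contained check of the identities:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t Vals[] = {0, 1, -1, 42, 0x12345678};
      for (int32_t X : Vals) {
        assert(~X == -1 - X);   // not(Rs) == sub(#-1, Rs)
        assert(-X == 0 - X);    // neg(Rs) == sub(#0, Rs)
      }
      return 0;
    }
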
multiclass TFR_Pred<bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { def _c#NAME : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2), !if(PredNot, "if (!$src1", "if ($src1")#") $dst = $src2", []>; // Predicate new - let PNewValue = "new" in + let isPredicatedNew = 1 in def _cdn#NAME : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2), !if(PredNot, "if (!$src1", "if ($src1")#".new) $dst = $src2", @@ -274,10 +299,10 @@ class T_TFR64_Pred<bit PredNot, bit isPredNew> } multiclass TFR64_Pred<bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { def _c#NAME : T_TFR64_Pred<PredNot, 0>; - let PNewValue = "new" in + let isPredicatedNew = 1 in def _cdn#NAME : T_TFR64_Pred<PredNot, 1>; // Predicate new } } @@ -309,14 +334,14 @@ multiclass TFR64_base<string BaseName> { } multiclass TFRI_Pred<bit PredNot> { - let isMoveImm = 1, PredSense = !if(PredNot, "false", "true") in { + let isMoveImm = 1, isPredicatedFalse = PredNot in { def _c#NAME : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Ext:$src2), !if(PredNot, "if (!$src1", "if ($src1")#") $dst = #$src2", []>; // Predicate new - let PNewValue = "new" in + let isPredicatedNew = 1 in def _cdn#NAME : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Ext:$src2), !if(PredNot, "if (!$src1", "if ($src1")#".new) $dst = #$src2", @@ -359,52 +384,6 @@ def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1), // ALU32/PERM + //===----------------------------------------------------------------------===// -// Combine. - -def SDTHexagonI64I32I32 : SDTypeProfile<1, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>; - -def HexagonWrapperCombineII : - SDNode<"HexagonISD::WrapperCombineII", SDTHexagonI64I32I32>; -def HexagonWrapperCombineRR : - SDNode<"HexagonISD::WrapperCombineRR", SDTHexagonI64I32I32>; - -// Combines the two integer registers SRC1 and SRC2 into a double register. -let isPredicable = 1 in -def COMBINE_rr : ALU32_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, - IntRegs:$src2), - "$dst = combine($src1, $src2)", - [(set (i64 DoubleRegs:$dst), - (i64 (HexagonWrapperCombineRR (i32 IntRegs:$src1), - (i32 IntRegs:$src2))))]>; - -// Rd=combine(Rt.[HL], Rs.[HL]) -class COMBINE_halves<string A, string B>: ALU32_rr<(outs IntRegs:$dst), - (ins IntRegs:$src1, - IntRegs:$src2), - "$dst = combine($src1."# A #", $src2."# B #")", []>; - -let isPredicable = 1 in { - def COMBINE_hh : COMBINE_halves<"H", "H">; - def COMBINE_hl : COMBINE_halves<"H", "L">; - def COMBINE_lh : COMBINE_halves<"L", "H">; - def COMBINE_ll : COMBINE_halves<"L", "L">; -} - -def : Pat<(i32 (trunc (i64 (srl (i64 DoubleRegs:$a), (i32 16))))), - (COMBINE_lh (EXTRACT_SUBREG (i64 DoubleRegs:$a), subreg_hireg), - (EXTRACT_SUBREG (i64 DoubleRegs:$a), subreg_loreg))>; - -// Combines the two immediates SRC1 and SRC2 into a double register. -class COMBINE_imm<Operand imm1, Operand imm2, PatLeaf pat1, PatLeaf pat2> : - ALU32_ii<(outs DoubleRegs:$dst), (ins imm1:$src1, imm2:$src2), - "$dst = combine(#$src1, #$src2)", - [(set (i64 DoubleRegs:$dst), - (i64 (HexagonWrapperCombineII (i32 pat1:$src1), (i32 pat2:$src2))))]>; - -let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 8 in -def COMBINE_Ii : COMBINE_imm<s8Ext, s8Imm, s8ExtPred, s8ImmPred>; - // Mux. 
def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2, @@ -446,38 +425,58 @@ def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2, s8ExtPred:$src2, s8ImmPred:$src3)))]>; -// Shift halfword. -let isPredicable = 1 in -def ASLH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), - "$dst = aslh($src1)", - [(set (i32 IntRegs:$dst), (shl 16, (i32 IntRegs:$src1)))]>; +// ALU32 - aslh, asrh, sxtb, sxth, zxtb, zxth +multiclass ALU32_2op_Pbase<string mnemonic, bit isNot, bit isPredNew> { + let isPredicatedNew = isPredNew in + def NAME : ALU32Inst<(outs IntRegs:$dst), + (ins PredRegs:$src1, IntRegs:$src2), + !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ", + ") $dst = ")#mnemonic#"($src2)">, + Requires<[HasV4T]>; +} -let isPredicable = 1 in -def ASRH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), - "$dst = asrh($src1)", - [(set (i32 IntRegs:$dst), (sra 16, (i32 IntRegs:$src1)))]>; +multiclass ALU32_2op_Pred<string mnemonic, bit PredNot> { + let isPredicatedFalse = PredNot in { + defm _c#NAME : ALU32_2op_Pbase<mnemonic, PredNot, 0>; + // Predicate new + defm _cdn#NAME : ALU32_2op_Pbase<mnemonic, PredNot, 1>; + } +} -// Sign extend. -let isPredicable = 1 in -def SXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), - "$dst = sxtb($src1)", - [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i8))]>; +multiclass ALU32_2op_base<string mnemonic> { + let BaseOpcode = mnemonic in { + let isPredicable = 1, neverHasSideEffects = 1 in + def NAME : ALU32Inst<(outs IntRegs:$dst), + (ins IntRegs:$src1), + "$dst = "#mnemonic#"($src1)">; -let isPredicable = 1 in -def SXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), - "$dst = sxth($src1)", - [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i16))]>; - -// Zero extend. -let isPredicable = 1, neverHasSideEffects = 1 in -def ZXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), - "$dst = zxtb($src1)", - []>; + let Predicates = [HasV4T], validSubTargets = HasV4SubT, isPredicated = 1, + neverHasSideEffects = 1 in { + defm Pt_V4 : ALU32_2op_Pred<mnemonic, 0>; + defm NotPt_V4 : ALU32_2op_Pred<mnemonic, 1>; + } + } +} + +defm ASLH : ALU32_2op_base<"aslh">, PredNewRel; +defm ASRH : ALU32_2op_base<"asrh">, PredNewRel; +defm SXTB : ALU32_2op_base<"sxtb">, PredNewRel; +defm SXTH : ALU32_2op_base<"sxth">, PredNewRel; +defm ZXTB : ALU32_2op_base<"zxtb">, PredNewRel; +defm ZXTH : ALU32_2op_base<"zxth">, PredNewRel; + +def : Pat <(shl (i32 IntRegs:$src1), (i32 16)), + (ASLH IntRegs:$src1)>; + +def : Pat <(sra (i32 IntRegs:$src1), (i32 16)), + (ASRH IntRegs:$src1)>; + +def : Pat <(sext_inreg (i32 IntRegs:$src1), i8), + (SXTB IntRegs:$src1)>; + +def : Pat <(sext_inreg (i32 IntRegs:$src1), i16), + (SXTH IntRegs:$src1)>; -let isPredicable = 1, neverHasSideEffects = 1 in -def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), - "$dst = zxth($src1)", - []>; //===----------------------------------------------------------------------===// // ALU32/PERM - //===----------------------------------------------------------------------===// @@ -487,39 +486,24 @@ def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), // ALU32/PRED + //===----------------------------------------------------------------------===// -// Conditional combine. 
-let neverHasSideEffects = 1, isPredicated = 1 in -def COMBINE_rr_cPt : ALU32_rr<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1) $dst = combine($src2, $src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def COMBINE_rr_cNotPt : ALU32_rr<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1) $dst = combine($src2, $src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def COMBINE_rr_cdnPt : ALU32_rr<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1.new) $dst = combine($src2, $src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def COMBINE_rr_cdnNotPt : ALU32_rr<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1.new) $dst = combine($src2, $src3)", - []>; - // Compare. defm CMPGTU : CMP32_rr_ri_u9<"cmp.gtu", "CMPGTU", setugt>, ImmRegRel; defm CMPGT : CMP32_rr_ri_s10<"cmp.gt", "CMPGT", setgt>, ImmRegRel; -defm CMPLT : CMP32_rr<"cmp.lt", setlt>; -defm CMPLTU : CMP32_rr<"cmp.ltu", setult>; defm CMPEQ : CMP32_rr_ri_s10<"cmp.eq", "CMPEQ", seteq>, ImmRegRel; -defm CMPGE : CMP32_ri_s8<"cmp.ge", setge>; -defm CMPGEU : CMP32_ri_u8<"cmp.geu", setuge>; + +// SDNode for converting immediate C to C-1. +def DEC_CONST_SIGNED : SDNodeXForm<imm, [{ + // Return the byte immediate const-1 as an SDNode. + int32_t imm = N->getSExtValue(); + return XformSToSM1Imm(imm); +}]>; + +// SDNode for converting immediate C to C-1. +def DEC_CONST_UNSIGNED : SDNodeXForm<imm, [{ + // Return the byte immediate const-1 as an SDNode. + uint32_t imm = N->getZExtValue(); + return XformUToUM1Imm(imm); +}]>; def CTLZ_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = cl0($src1)", @@ -753,112 +737,153 @@ def XOR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2), // CR - //===----------------------------------------------------------------------===// +def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def eh_return: SDNode<"HexagonISD::EH_RETURN", SDTNone, + [SDNPHasChain]>; -//===----------------------------------------------------------------------===// -// J + -//===----------------------------------------------------------------------===// -// Jump to address. 
-let isBranch = 1, isTerminator=1, isBarrier = 1, isPredicable = 1 in { - def JMP : JInst< (outs), - (ins brtarget:$offset), - "jump $offset", - [(br bb:$offset)]>; -} +def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; +def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>; -// if (p0) jump -let isBranch = 1, isTerminator=1, Defs = [PC], - isPredicated = 1 in { - def JMP_c : JInst< (outs), - (ins PredRegs:$src, brtarget:$offset), - "if ($src) jump $offset", - [(brcond (i1 PredRegs:$src), bb:$offset)]>; -} +let InputType = "imm", isBarrier = 1, isPredicable = 1, +Defs = [PC], isExtendable = 1, opExtendable = 0, isExtentSigned = 1, +opExtentBits = 24 in +class T_JMP <dag InsDag, list<dag> JumpList = []> + : JInst<(outs), InsDag, + "jump $dst" , JumpList> { + bits<24> dst; + + let IClass = 0b0101; + + let Inst{27-25} = 0b100; + let Inst{24-16} = dst{23-15}; + let Inst{13-1} = dst{14-2}; +} + +let InputType = "imm", isExtendable = 1, opExtendable = 1, isExtentSigned = 1, +Defs = [PC], isPredicated = 1, opExtentBits = 17 in +class T_JMP_c <bit PredNot, bit isPredNew, bit isTaken>: + JInst<(outs ), (ins PredRegs:$src, brtarget:$dst), + !if(PredNot, "if (!$src", "if ($src")# + !if(isPredNew, ".new) ", ") ")#"jump"# + !if(isPredNew, !if(isTaken, ":t ", ":nt "), " ")#"$dst"> { + + let isBrTaken = !if(isPredNew, !if(isTaken, "true", "false"), ""); + let isPredicatedFalse = PredNot; + let isPredicatedNew = isPredNew; + bits<2> src; + bits<17> dst; + + let IClass = 0b0101; + + let Inst{27-24} = 0b1100; + let Inst{21} = PredNot; + let Inst{12} = !if(isPredNew, isTaken, zero); + let Inst{11} = isPredNew; + let Inst{9-8} = src; + let Inst{23-22} = dst{16-15}; + let Inst{20-16} = dst{14-10}; + let Inst{13} = dst{9}; + let Inst{7-1} = dst{8-2}; + } -// if (!p0) jump -let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC], - isPredicated = 1 in { - def JMP_cNot : JInst< (outs), - (ins PredRegs:$src, brtarget:$offset), - "if (!$src) jump $offset", - []>; +let isBarrier = 1, Defs = [PC], isPredicable = 1, InputType = "reg" in +class T_JMPr<dag InsDag = (ins IntRegs:$dst)> + : JRInst<(outs ), InsDag, + "jumpr $dst" , + []> { + bits<5> dst; + + let IClass = 0b0101; + let Inst{27-21} = 0b0010100; + let Inst{20-16} = dst; } -let isTerminator = 1, isBranch = 1, neverHasSideEffects = 1, Defs = [PC], - isPredicated = 1 in { - def BRCOND : JInst < (outs), (ins PredRegs:$pred, brtarget:$dst), - "if ($pred) jump $dst", - []>; +let Defs = [PC], isPredicated = 1, InputType = "reg" in +class T_JMPr_c <bit PredNot, bit isPredNew, bit isTaken>: + JRInst <(outs ), (ins PredRegs:$src, IntRegs:$dst), + !if(PredNot, "if (!$src", "if ($src")# + !if(isPredNew, ".new) ", ") ")#"jumpr"# + !if(isPredNew, !if(isTaken, ":t ", ":nt "), " ")#"$dst"> { + + let isBrTaken = !if(isPredNew, !if(isTaken, "true", "false"), ""); + let isPredicatedFalse = PredNot; + let isPredicatedNew = isPredNew; + bits<2> src; + bits<5> dst; + + let IClass = 0b0101; + + let Inst{27-22} = 0b001101; + let Inst{21} = PredNot; + let Inst{20-16} = dst; + let Inst{12} = !if(isPredNew, isTaken, zero); + let Inst{11} = isPredNew; + let Inst{9-8} = src; + let Predicates = !if(isPredNew, [HasV3T], [HasV2T]); + let validSubTargets = !if(isPredNew, HasV3SubT, HasV2SubT); } -// Jump to address conditioned on new predicate. 
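
The T_JMP/T_JMP_c classes above also carry explicit encodings. A hedged sketch of how the unconditional-jump fields would pack, assuming (as in the other Hexagon formats) that IClass occupies Inst{31-28}; the low two bits of the 24-bit byte offset are not encoded because targets are word-aligned:

    #include <cstdint>

    // Packs a 24-bit PC-relative byte offset the way the T_JMP class above
    // lays out its fields (illustrative only).
    static uint32_t encodeJMP(int32_t Dst) {
      uint32_t D = static_cast<uint32_t>(Dst) & 0xFFFFFF;  // bits<24> dst
      uint32_t Insn = 0;
      Insn |= 0x5u << 28;                    // IClass = 0b0101 (assumed at 31-28)
      Insn |= 0x4u << 25;                    // Inst{27-25} = 0b100
      Insn |= ((D >> 15) & 0x1FFu)  << 16;   // Inst{24-16} = dst{23-15}
      Insn |= ((D >> 2)  & 0x1FFFu) << 1;    // Inst{13-1}  = dst{14-2}
      return Insn;
    }
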
-// if (p0) jump:t -let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC], - isPredicated = 1 in { - def JMP_cdnPt : JInst< (outs), - (ins PredRegs:$src, brtarget:$offset), - "if ($src.new) jump:t $offset", - []>; +multiclass JMP_Pred<bit PredNot> { + def _#NAME : T_JMP_c<PredNot, 0, 0>; + // Predicate new + def _#NAME#new_t : T_JMP_c<PredNot, 1, 1>; // taken + def _#NAME#new_nt : T_JMP_c<PredNot, 1, 0>; // not taken } -// if (!p0) jump:t -let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC], - isPredicated = 1 in { - def JMP_cdnNotPt : JInst< (outs), - (ins PredRegs:$src, brtarget:$offset), - "if (!$src.new) jump:t $offset", - []>; +multiclass JMP_base<string BaseOp> { + let BaseOpcode = BaseOp in { + def NAME : T_JMP<(ins brtarget:$dst), [(br bb:$dst)]>; + defm t : JMP_Pred<0>; + defm f : JMP_Pred<1>; + } } -// Not taken. -let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC], - isPredicated = 1 in { - def JMP_cdnPnt : JInst< (outs), - (ins PredRegs:$src, brtarget:$offset), - "if ($src.new) jump:nt $offset", - []>; +multiclass JMPR_Pred<bit PredNot> { + def NAME: T_JMPr_c<PredNot, 0, 0>; + // Predicate new + def NAME#new_tV3 : T_JMPr_c<PredNot, 1, 1>; // taken + def NAME#new_ntV3 : T_JMPr_c<PredNot, 1, 0>; // not taken } -// Not taken. -let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC], - isPredicated = 1 in { - def JMP_cdnNotPnt : JInst< (outs), - (ins PredRegs:$src, brtarget:$offset), - "if (!$src.new) jump:nt $offset", - []>; +multiclass JMPR_base<string BaseOp> { + let BaseOpcode = BaseOp in { + def NAME : T_JMPr; + defm _t : JMPR_Pred<0>; + defm _f : JMPR_Pred<1>; + } } -//===----------------------------------------------------------------------===// -// J - -//===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// JR + -//===----------------------------------------------------------------------===// -def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone, - [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +let isTerminator = 1, neverHasSideEffects = 1 in { +let isBranch = 1 in +defm JMP : JMP_base<"JMP">, PredNewRel; -// Jump to address from register. -let isPredicable =1, isReturn = 1, isTerminator = 1, isBarrier = 1, - Defs = [PC], Uses = [R31] in { - def JMPR: JRInst<(outs), (ins), - "jumpr r31", - [(retflag)]>; -} +let isBranch = 1, isIndirectBranch = 1 in +defm JMPR : JMPR_base<"JMPr">, PredNewRel; -// Jump to address from register. -let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1, - Defs = [PC], Uses = [R31] in { - def JMPR_cPt: JRInst<(outs), (ins PredRegs:$src1), - "if ($src1) jumpr r31", - []>; +let isReturn = 1, isCodeGenOnly = 1 in +defm JMPret : JMPR_base<"JMPret">, PredNewRel; } -// Jump to address from register. -let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1, - Defs = [PC], Uses = [R31] in { - def JMPR_cNotPt: JRInst<(outs), (ins PredRegs:$src1), - "if (!$src1) jumpr r31", - []>; -} +def : Pat<(retflag), + (JMPret (i32 R31))>; + +def : Pat <(brcond (i1 PredRegs:$src1), bb:$offset), + (JMP_t (i1 PredRegs:$src1), bb:$offset)>; + +// A return through builtin_eh_return. 
+let isReturn = 1, isTerminator = 1, isBarrier = 1, neverHasSideEffects = 1, +isCodeGenOnly = 1, Defs = [PC], Uses = [R28], isPredicable = 0 in +def EH_RETURN_JMPR : T_JMPr; + +def : Pat<(eh_return), + (EH_RETURN_JMPR (i32 R31))>; + +def : Pat<(HexagonBR_JT (i32 IntRegs:$dst)), + (JMPR (i32 IntRegs:$dst))>; + +def : Pat<(brind (i32 IntRegs:$dst)), + (JMPR (i32 IntRegs:$dst))>; //===----------------------------------------------------------------------===// // JR - @@ -871,7 +896,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1, // Load -- MEMri operand multiclass LD_MEMri_Pbase<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : LDInst2<(outs RC:$dst), (ins PredRegs:$src1, MEMri:$addr), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -880,7 +905,7 @@ multiclass LD_MEMri_Pbase<string mnemonic, RegisterClass RC, } multiclass LD_MEMri_Pred<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : LD_MEMri_Pbase<mnemonic, RC, PredNot, 0>; // Predicate new defm _cdn#NAME : LD_MEMri_Pbase<mnemonic, RC, PredNot, 1>; @@ -937,7 +962,7 @@ def : Pat < (i64 (load ADDRriS11_3:$addr)), // Load - Base with Immediate offset addressing mode multiclass LD_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : LDInst2<(outs RC:$dst), (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -947,7 +972,7 @@ multiclass LD_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp, multiclass LD_Idxd_Pred<string mnemonic, RegisterClass RC, Operand predImmOp, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : LD_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 0>; // Predicate new defm _cdn#NAME : LD_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 1>; @@ -1009,20 +1034,6 @@ def : Pat < (i64 (load (add IntRegs:$src1, s11_3ExtPred:$offset))), (LDrid_indexed IntRegs:$src1, s11_3ExtPred:$offset) >; } -let neverHasSideEffects = 1 in -def LDrid_GP : LDInst2<(outs DoubleRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst = memd(#$global+$offset)", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def LDd_GP : LDInst2<(outs DoubleRegs:$dst), - (ins globaladdress:$global), - "$dst = memd(#$global)", - []>, - Requires<[NoV4T]>; - //===----------------------------------------------------------------------===// // Post increment load // Make sure that in post increment load, the first operand is always the post @@ -1031,7 +1042,7 @@ def LDd_GP : LDInst2<(outs DoubleRegs:$dst), multiclass LD_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : LDInst2PI<(outs RC:$dst, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1042,7 +1053,7 @@ multiclass LD_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp, multiclass LD_PostInc_Pred<string mnemonic, RegisterClass RC, Operand ImmOp, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { 
+ let isPredicatedFalse = PredNot in { defm _c#NAME : LD_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>; // Predicate new let Predicates = [HasV4T], validSubTargets = HasV4SubT in @@ -1095,27 +1106,6 @@ let AddedComplexity = 20 in def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))), (i32 (LDrib_indexed IntRegs:$src1, s11_0ImmPred:$offset)) >; -let neverHasSideEffects = 1 in -def LDrib_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst = memb(#$global+$offset)", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def LDb_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst = memb(#$global)", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def LDub_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst = memub(#$global)", - []>, - Requires<[NoV4T]>; - def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)), (i32 (LDrih ADDRriS11_1:$addr))>; @@ -1123,27 +1113,6 @@ let AddedComplexity = 20 in def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))), (i32 (LDrih_indexed IntRegs:$src1, s11_1ImmPred:$offset)) >; -let neverHasSideEffects = 1 in -def LDrih_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst = memh(#$global+$offset)", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def LDh_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst = memh(#$global)", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def LDuh_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst = memuh(#$global)", - []>, - Requires<[NoV4T]>; - let AddedComplexity = 10 in def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)), (i32 (LDriub ADDRriS11_0:$addr))>; @@ -1152,21 +1121,6 @@ let AddedComplexity = 20 in def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))), (i32 (LDriub_indexed IntRegs:$src1, s11_0ImmPred:$offset))>; -let neverHasSideEffects = 1 in -def LDriub_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst = memub(#$global+$offset)", - []>, - Requires<[NoV4T]>; - -// Load unsigned halfword. -let neverHasSideEffects = 1 in -def LDriuh_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst = memuh(#$global+$offset)", - []>, - Requires<[NoV4T]>; - // Load predicate. let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13, isPseudo = 1, Defs = [R10,R11,D5], neverHasSideEffects = 1 in @@ -1175,21 +1129,6 @@ def LDriw_pred : LDInst2<(outs PredRegs:$dst), "Error; should not emit", []>; -// Indexed load. -let neverHasSideEffects = 1 in -def LDriw_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst = memw(#$global+$offset)", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def LDw_GP : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst = memw(#$global)", - []>, - Requires<[NoV4T]>; - // Deallocate stack frame. let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in { def DEALLOCFRAME : LDInst2<(outs), (ins), @@ -1423,35 +1362,15 @@ def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, // ST + //===----------------------------------------------------------------------===// /// -/// Assumptions::: ****** DO NOT IGNORE ******** -/// 1. 
Make sure that in post increment store, the zero'th operand is always the -/// post increment operand. -/// 2. Make sure that the store value operand(Rt/Rtt) in a store is always the -/// last operand. -/// // Store doubleword. -let neverHasSideEffects = 1 in -def STrid_GP : STInst2<(outs), - (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src), - "memd(#$global+$offset) = $src", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def STd_GP : STInst2<(outs), - (ins globaladdress:$global, DoubleRegs:$src), - "memd(#$global) = $src", - []>, - Requires<[NoV4T]>; - //===----------------------------------------------------------------------===// // Post increment store //===----------------------------------------------------------------------===// multiclass ST_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, RC:$src3), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1462,7 +1381,7 @@ multiclass ST_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp, multiclass ST_PostInc_Pred<string mnemonic, RegisterClass RC, Operand ImmOp, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME# : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>; // Predicate new let Predicates = [HasV4T], validSubTargets = HasV4SubT in @@ -1516,7 +1435,7 @@ def : Pat<(post_store (i64 DoubleRegs:$src1), IntRegs:$src2, //===----------------------------------------------------------------------===// multiclass ST_MEMri_Pbase<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, RC: $src2), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1525,7 +1444,7 @@ multiclass ST_MEMri_Pbase<string mnemonic, RegisterClass RC, bit isNot, } multiclass ST_MEMri_Pred<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_MEMri_Pbase<mnemonic, RC, PredNot, 0>; // Predicate new @@ -1582,7 +1501,7 @@ def : Pat<(store (i64 DoubleRegs:$src1), ADDRriS11_3:$addr), //===----------------------------------------------------------------------===// multiclass ST_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC: $src4), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1592,7 +1511,7 @@ multiclass ST_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp, multiclass ST_Idxd_Pred<string mnemonic, RegisterClass RC, Operand predImmOp, bit PredNot> { - let PredSense = !if(PredNot, "false", "true"), isPredicated = 1 in { + let isPredicatedFalse = PredNot, isPredicated = 1 in { defm _c#NAME : ST_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 0>; // Predicate new @@ -1655,36 +1574,6 @@ def : Pat<(store (i64 DoubleRegs:$src1), (add IntRegs:$src2, (i64 DoubleRegs:$src1))>; } -// memb(gp+#u16:0)=Rt -let neverHasSideEffects = 1 in -def STrib_GP : STInst2<(outs), - (ins globaladdress:$global, 
u16Imm:$offset, IntRegs:$src), - "memb(#$global+$offset) = $src", - []>, - Requires<[NoV4T]>; - -// memb(#global)=Rt -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def STb_GP : STInst2<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memb(#$global) = $src", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1 in -def STrih_GP : STInst2<(outs), - (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), - "memh(#$global+$offset) = $src", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def STh_GP : STInst2<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memh(#$global) = $src", - []>, - Requires<[NoV4T]>; - // memh(Rx++#s4:1)=Rt.H // Store word. @@ -1695,20 +1584,6 @@ def STriw_pred : STInst2<(outs), "Error; should not emit", []>; -let neverHasSideEffects = 1 in -def STriw_GP : STInst2<(outs), - (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), - "memw(#$global+$offset) = $src", - []>, - Requires<[NoV4T]>; - -let neverHasSideEffects = 1, validSubTargets = NoV4SubT in -def STw_GP : STInst2<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memw(#$global) = $src", - []>, - Requires<[NoV4T]>; - // Allocate stack frame. let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in { def ALLOCFRAME : STInst2<(outs), @@ -2152,20 +2027,18 @@ let isCall = 1, neverHasSideEffects = 1, []>; } -// Tail Calls. -let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in { - def TCRETURNtg : JInst<(outs), (ins calltarget:$dst), - "jump $dst // TAILCALL", []>; -} -let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in { - def TCRETURNtext : JInst<(outs), (ins calltarget:$dst), - "jump $dst // TAILCALL", []>; -} -let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in { - def TCRETURNR : JInst<(outs), (ins IntRegs:$dst), - "jumpr $dst // TAILCALL", []>; +// Indirect tail-call. +let isCodeGenOnly = 1, isCall = 1, isReturn = 1 in +def TCRETURNR : T_JMPr; + +// Direct tail-calls. +let isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0, +isTerminator = 1, isCodeGenOnly = 1 in { + def TCRETURNtg : T_JMP<(ins calltarget:$dst)>; + def TCRETURNtext : T_JMP<(ins calltarget:$dst)>; } + // Map call instruction. 
def : Pat<(call (i32 IntRegs:$dst)), (CALLR (i32 IntRegs:$dst))>, Requires<[HasV2TOnly]>; @@ -2183,68 +2056,26 @@ def : Pat<(HexagonTCRet (i32 IntRegs:$dst)), // Atomic load and store support // 8 bit atomic load -def : Pat<(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDub_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - def : Pat<(atomic_load_8 ADDRriS11_0:$src1), (i32 (LDriub ADDRriS11_0:$src1))>; def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)), (i32 (LDriub_indexed (i32 IntRegs:$src1), s11_0ImmPred:$offset))>; - - // 16 bit atomic load -def : Pat<(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDuh_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - def : Pat<(atomic_load_16 ADDRriS11_1:$src1), (i32 (LDriuh ADDRriS11_1:$src1))>; def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)), (i32 (LDriuh_indexed (i32 IntRegs:$src1), s11_1ImmPred:$offset))>; - - -// 32 bit atomic load -def : Pat<(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDw_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - def : Pat<(atomic_load_32 ADDRriS11_2:$src1), (i32 (LDriw ADDRriS11_2:$src1))>; def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)), (i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>; - // 64 bit atomic load -def : Pat<(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)), - (i64 (LDd_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - def : Pat<(atomic_load_64 ADDRriS11_3:$src1), (i64 (LDrid ADDRriS11_3:$src1))>; @@ -2252,30 +2083,6 @@ def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)), (i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>; -// 64 bit atomic store -def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global), - (i64 DoubleRegs:$src1)), - (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset), - (i64 DoubleRegs:$src1)), - (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, - (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>; - -// 8 bit atomic store -def : Pat<(atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global), - (i32 IntRegs:$src1)), - (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset), - (i32 IntRegs:$src1)), - (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, - (i32 IntRegs:$src1))>, Requires<[NoV4T]>; - def : Pat<(atomic_store_8 ADDRriS11_0:$src2, (i32 IntRegs:$src1)), (STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>; @@ -2285,18 +2092,6 @@ def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset), (i32 IntRegs:$src1))>; -// 16 bit atomic store 
-def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global), - (i32 IntRegs:$src1)), - (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset), - (i32 IntRegs:$src1)), - (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, - (i32 IntRegs:$src1))>, Requires<[NoV4T]>; - def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)), (STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>; @@ -2305,20 +2100,6 @@ def : Pat<(atomic_store_16 (i32 IntRegs:$src1), (STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset, (i32 IntRegs:$src1))>; - -// 32 bit atomic store -def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global), - (i32 IntRegs:$src1)), - (STw_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset), - (i32 IntRegs:$src1)), - (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, - (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)), (STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>; @@ -2354,10 +2135,11 @@ def : Pat <(add (i1 PredRegs:$src1), -1), // Map from p0 = setlt(r0, r1) r2 = mux(p0, r3, r4) => // p0 = cmp.lt(r0, r1), r0 = mux(p0, r2, r1). +// cmp.lt(r0, r1) -> cmp.gt(r1, r0) def : Pat <(select (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), (i32 IntRegs:$src3), (i32 IntRegs:$src4)), - (i32 (TFR_condset_rr (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + (i32 (TFR_condset_rr (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)), (i32 IntRegs:$src4), (i32 IntRegs:$src3)))>, Requires<[HasV2TOnly]>; @@ -2375,210 +2157,27 @@ def : Pat <(select (not (i1 PredRegs:$src1)), s12ImmPred:$src2, // Map from p0 = pnot(p0); r0 = mux(p0, r1, #i) // => r0 = TFR_condset_ir(p0, #i, r1) -def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, s12ImmPred:$src3), +def : Pat <(select (not (i1 PredRegs:$src1)), IntRegs:$src2, s12ImmPred:$src3), (i32 (TFR_condset_ir (i1 PredRegs:$src1), s12ImmPred:$src3, (i32 IntRegs:$src2)))>; // Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump. -def : Pat <(brcond (not PredRegs:$src1), bb:$offset), - (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>; +def : Pat <(brcond (not (i1 PredRegs:$src1)), bb:$offset), + (JMP_f (i1 PredRegs:$src1), bb:$offset)>; // Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2). -def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)), +def : Pat <(and (i1 PredRegs:$src1), (not (i1 PredRegs:$src2))), (i1 (AND_pnotp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>; -// Map from store(globaladdress + x) -> memd(#foo + x). -let AddedComplexity = 100 in -def : Pat <(store (i64 DoubleRegs:$src1), - (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, - (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>; - -// Map from store(globaladdress) -> memd(#foo). -let AddedComplexity = 100 in -def : Pat <(store (i64 DoubleRegs:$src1), - (HexagonCONST32_GP tglobaladdr:$global)), - (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>, - Requires<[NoV4T]>; - -// Map from store(globaladdress + x) -> memw(#foo + x). 
-let AddedComplexity = 100 in -def : Pat <(store (i32 IntRegs:$src1), - (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -// Map from store(globaladdress) -> memw(#foo + 0). -let AddedComplexity = 100 in -def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), - (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>; - -// Map from store(globaladdress) -> memw(#foo). -let AddedComplexity = 100 in -def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), - (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -// Map from store(globaladdress + x) -> memh(#foo + x). -let AddedComplexity = 100 in -def : Pat <(truncstorei16 (i32 IntRegs:$src1), - (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -// Map from store(globaladdress) -> memh(#foo). -let AddedComplexity = 100 in -def : Pat <(truncstorei16 (i32 IntRegs:$src1), - (HexagonCONST32_GP tglobaladdr:$global)), - (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -// Map from store(globaladdress + x) -> memb(#foo + x). -let AddedComplexity = 100 in -def : Pat <(truncstorei8 (i32 IntRegs:$src1), - (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -// Map from store(globaladdress) -> memb(#foo). -let AddedComplexity = 100 in -def : Pat <(truncstorei8 (i32 IntRegs:$src1), - (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memw(#foo + x). -let AddedComplexity = 100 in -def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress) -> memw(#foo). -let AddedComplexity = 100 in -def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDw_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memd(#foo + x). -let AddedComplexity = 100 in -def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress) -> memw(#foo + 0). -let AddedComplexity = 100 in -def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i64 (LDd_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd. -let AddedComplexity = 100 in -def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i1 (TFR_PdRs (i32 (LDb_GP tglobaladdr:$global))))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memh(#foo + x). -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memh(#foo + x). 
-let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDrih_GP tglobaladdr:$global, 0))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memuh(#foo + x). -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress) -> memuh(#foo). -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDriuh_GP tglobaladdr:$global, 0))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress) -> memh(#foo). -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDh_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress) -> memuh(#foo). -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDuh_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memb(#foo + x). -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memb(#foo + x). -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress + x) -> memub(#foo + x). -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress) -> memb(#foo). -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -// Map from load(globaladdress) -> memb(#foo). -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; -// Map from load(globaladdress) -> memub(#foo). let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDub_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -// When the Interprocedural Global Variable optimizer realizes that a -// certain global variable takes only two constant values, it shrinks the -// global to a boolean. Catch those loads here in the following 3 patterns. -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP tglobaladdr:$global))>, - Requires<[NoV4T]>; - -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDub_GP tglobaladdr:$global))>, +def : Pat <(i64 (zextloadi1 (HexagonCONST32 tglobaladdr:$global))), + (i64 (COMBINE_rr (TFRI 0), + (LDriub_indexed (CONST32_set tglobaladdr:$global), 0)))>, Requires<[NoV4T]>; // Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned. 
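As an aside, the i1 zero-extending load patterns in this hunk (an AND with 1 on a byte load for the i32 case, and a zero high word combined with an unsigned byte load for the i64 case) compute roughly the following; this C++ sketch is illustrative only and not part of the commit:

// Illustrative only (not from the commit): what the i1 zero-extending load
// lowerings in this hunk compute.  A boolean in memory occupies a byte; the
// i32 form masks the loaded byte with 1, the i64 form pairs a zero high word
// with a zero-extended byte load.
#include <cassert>
#include <cstdint>

static uint32_t zextloadi1_32(const uint8_t *p) { return uint32_t(*p) & 0x1; }

static uint64_t zextloadi1_64(const uint8_t *p) {
  uint32_t hi = 0;                   // TFRI 0
  uint32_t lo = *p;                  // memub(...)
  return (uint64_t(hi) << 32) | lo;  // combine(hi, lo)
}

int main() {
  uint8_t flag = 1;
  assert(zextloadi1_32(&flag) == 1 && zextloadi1_64(&flag) == 1);
  return 0;
}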
+let AddedComplexity = 10 in def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)), (i32 (AND_rr (i32 (LDrib ADDRriS11_0:$addr)), (TFRI 0x1)))>; @@ -2597,43 +2196,46 @@ def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)), subreg_loreg))))))>; // We want to prevent emitting pnot's as much as possible. -// Map brcond with an unsupported setcc to a JMP_cNot. +// Map brcond with an unsupported setcc to a JMP_f. def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))), bb:$offset), - (JMP_cNot (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + (JMP_f (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), bb:$offset)>; def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)), bb:$offset), - (JMP_cNot (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>; + (JMP_f (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>; def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset), - (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>; + (JMP_f (i1 PredRegs:$src1), bb:$offset)>; def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset), - (JMP_c (i1 PredRegs:$src1), bb:$offset)>; + (JMP_t (i1 PredRegs:$src1), bb:$offset)>; +// cmp.lt(Rs, Imm) -> !cmp.ge(Rs, Imm) -> !cmp.gt(Rs, Imm-1) def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), bb:$offset), - (JMP_cNot (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2), bb:$offset)>; + (JMP_f (CMPGTri (i32 IntRegs:$src1), + (DEC_CONST_SIGNED s8ImmPred:$src2)), bb:$offset)>; +// cmp.lt(r0, r1) -> cmp.gt(r1, r0) def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), bb:$offset), - (JMP_c (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), bb:$offset)>; + (JMP_t (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)), bb:$offset)>; def : Pat <(brcond (i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), bb:$offset), - (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)), + (JMP_f (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)), bb:$offset)>; def : Pat <(brcond (i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), bb:$offset), - (JMP_cNot (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + (JMP_f (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), bb:$offset)>; def : Pat <(brcond (i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), bb:$offset), - (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), + (JMP_f (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), bb:$offset)>; // Map from a 64-bit select to an emulated 64-bit mux. @@ -2694,12 +2296,6 @@ def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), def : Pat<(store (i1 -1), ADDRriS11_2:$addr), (STrib ADDRriS11_2:$addr, (TFRI 1))>; -let AddedComplexity = 100 in -// Map from i1 = constant<-1>; memw(CONST32(#foo)) = i1 -> r0 = 1; -// memw(#foo) = r0 -def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP tglobaladdr:$global, (TFRI 1))>, - Requires<[NoV4T]>; // Map from i1 = constant<-1>; store i1 -> r0 = 1; store r0. def : Pat<(store (i1 -1), ADDRriS11_2:$addr), @@ -2717,8 +2313,8 @@ def : Pat<(i64 (anyext (i32 IntRegs:$src1))), // Map cmple -> cmpgt. // rs <= rt -> !(rs > rt). -def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ImmPred:$src2)), - (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ImmPred:$src2)))>; +def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ExtPred:$src2)), + (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ExtPred:$src2)))>; // rs <= rt -> !(rs > rt). 
def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))), @@ -2731,8 +2327,8 @@ def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), // Map cmpne -> cmpeq. // Hexagon_TODO: We should improve on this. // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)), - (i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2))))>; +def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ExtPred:$src2)), + (i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ExtPred:$src2))))>; // Map cmpne(Rs) -> !cmpeqe(Rs). // rs != rt -> !(rs == rt). @@ -2754,8 +2350,9 @@ def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), (i1 (NOT_p (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>; -def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ImmPred:$src2)), - (i1 (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2))>; +// cmpge(Rs, Imm) -> cmpgt(Rs, Imm-1) +def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ExtPred:$src2)), + (i1 (CMPGTri (i32 IntRegs:$src1), (DEC_CONST_SIGNED s8ExtPred:$src2)))>; // Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss). // rss >= rtt -> !(rtt > rss). @@ -2764,9 +2361,10 @@ def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), (i64 DoubleRegs:$src1)))))>; // Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm). +// !cmpge(Rs, Imm) -> !cmpgt(Rs, Imm-1). // rs < rt -> !(rs >= rt). -def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), - (i1 (NOT_p (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2)))>; +def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ExtPred:$src2)), + (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), (DEC_CONST_SIGNED s8ExtPred:$src2))))>; // Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs). // rs < rt -> rt > rs. @@ -2790,13 +2388,17 @@ def : Pat <(i1 (setult (i32 IntRegs:$src1), (i32 IntRegs:$src2))), def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), (i1 (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>; -// Generate cmpgeu(Rs, #u8) -def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ImmPred:$src2)), - (i1 (CMPGEUri (i32 IntRegs:$src1), u8ImmPred:$src2))>; +// Generate cmpgeu(Rs, #0) -> cmpeq(Rs, Rs) +def : Pat <(i1 (setuge (i32 IntRegs:$src1), 0)), + (i1 (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src1)))>; + +// Generate cmpgeu(Rs, #u8) -> cmpgtu(Rs, #u8 -1) +def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ExtPred:$src2)), + (i1 (CMPGTUri (i32 IntRegs:$src1), (DEC_CONST_UNSIGNED u8ExtPred:$src2)))>; // Generate cmpgtu(Rs, #u9) -def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ImmPred:$src2)), - (i1 (CMPGTUri (i32 IntRegs:$src1), u9ImmPred:$src2))>; +def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ExtPred:$src2)), + (i1 (CMPGTUri (i32 IntRegs:$src1), u9ExtPred:$src2))>; // Map from Rs >= Rt -> !(Rt > Rs). // rs >= rt -> !(rt > rs). @@ -2808,7 +2410,7 @@ def : Pat <(i1 (setuge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), def : Pat <(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1))))>; -// Map from cmpleu(Rs, Rs) -> !cmpgtu(Rs, Rs). +// Map from cmpleu(Rs, Rt) -> !cmpgtu(Rs, Rt). // Map from (Rs <= Rt) -> !(Rs > Rt). 
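As an aside on the compare re-mappings in this hunk (cmple, cmpne, cmpge, cmplt and their unsigned variants are all rewritten in terms of cmp.gt, cmp.gtu and cmp.eq with swapped operands or adjusted immediates), the following standalone C++ check of the underlying integer identities is illustrative only and not part of the commit:

// Illustrative only (not from the commit): the scalar identities behind the
// compare re-mappings in this hunk, checked in plain C++.
#include <cassert>
#include <cstdint>

int main() {
  int32_t a = -7, b = 3, imm = 5;
  assert((a <  b)   == (b > a));         // cmp.lt(Rs,Rt)    -> cmp.gt(Rt,Rs)
  assert((a <= b)   == !(a > b));        // cmple            -> !cmp.gt
  assert((a != b)   == !(a == b));       // cmpne            -> !cmp.eq
  assert((a >= imm) == (a > imm - 1));   // cmpge(Rs,#s8)    -> cmp.gt(Rs,#s8-1)
  assert((a <  imm) == !(a > imm - 1));  // cmplt(Rs,#s8)    -> !cmp.gt(Rs,#s8-1)

  uint32_t ua = 9, ub = 4, uimm = 4;
  assert((ua <= ub)   == !(ua > ub));       // cmpleu         -> !cmp.gtu
  assert((ua >= uimm) == (ua > uimm - 1));  // cmpgeu(Rs,#u8) -> cmp.gtu(Rs,#u8-1),
                                            // the #u8 == 0 case is handled
                                            // separately via cmp.eq(Rs,Rs)
  return 0;
}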
def : Pat <(i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>; @@ -2904,6 +2506,13 @@ def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)), (i64 (COMBINE_rr (TFRI 0), (LDriw ADDRriS11_2:$src1)))>, Requires<[NoV4T]>; +let AddedComplexity = 100 in +def: Pat <(i64 (zextloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))), + (i64 (COMBINE_rr (TFRI 0), (LDriw_indexed IntRegs:$src1, + s11_2ExtPred:$offset)))>, + Requires<[NoV4T]>; + +let AddedComplexity = 10 in def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)), (i32 (LDriw ADDRriS11_0:$src1))>; @@ -2920,6 +2529,48 @@ def : Pat <(i64 (anyext (i1 PredRegs:$src1))), (i64 (SXTW (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))))>; +let AddedComplexity = 100 in +def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), + (i32 32))), + (i64 (zextloadi32 (i32 (add IntRegs:$src2, + s11_2ExtPred:$offset2)))))), + (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), + (LDriw_indexed IntRegs:$src2, + s11_2ExtPred:$offset2)))>; + +def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), + (i32 32))), + (i64 (zextloadi32 ADDRriS11_2:$srcLow)))), + (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), + (LDriw ADDRriS11_2:$srcLow)))>; + +def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), + (i32 32))), + (i64 (zext (i32 IntRegs:$srcLow))))), + (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), + IntRegs:$srcLow))>; + +let AddedComplexity = 100 in +def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), + (i32 32))), + (i64 (zextloadi32 (i32 (add IntRegs:$src2, + s11_2ExtPred:$offset2)))))), + (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), + (LDriw_indexed IntRegs:$src2, + s11_2ExtPred:$offset2)))>; + +def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), + (i32 32))), + (i64 (zextloadi32 ADDRriS11_2:$srcLow)))), + (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), + (LDriw ADDRriS11_2:$srcLow)))>; + +def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), + (i32 32))), + (i64 (zext (i32 IntRegs:$srcLow))))), + (i64 (COMBINE_rr (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), + IntRegs:$srcLow))>; + // Any extended 64-bit load. 
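As an aside on the shl/or folds added just above: they recognise a 64-bit value assembled from a high word and a zero-extended 32-bit load (or register) and emit a single combine of the two words. A minimal C++ sketch of the equivalence, illustrative only and not part of the commit:

// Illustrative only (not from the commit): the value produced by the
// shl/or-to-combine folds above.  combine_words(hi, lo) stands for the
// Hexagon combine-words operation: hi in bits 63..32, lo in bits 31..0.
#include <cassert>
#include <cstdint>

static uint64_t combine_words(uint32_t hi, uint32_t lo) {
  return (uint64_t(hi) << 32) | lo;
}

int main() {
  uint64_t src_high   = 0xdeadbeef12345678ULL;  // only its low word survives the shl by 32
  uint32_t loaded_low = 0x9abcdef0u;            // result of the zero-extending i32 load

  uint64_t dag_value = (src_high << 32) | uint64_t(loaded_low);
  assert(dag_value == combine_words(uint32_t(src_high), loaded_low));
  return 0;
}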
// anyext i32 -> i64 def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)), @@ -3054,19 +2705,6 @@ let AddedComplexity = 100 in def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)), (COPY (i32 IntRegs:$src1))>; -def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; -def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>; - -let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in -def BR_JT : JRInst<(outs), (ins IntRegs:$src), - "jumpr $src", - [(HexagonBR_JT (i32 IntRegs:$src))]>; - -let isBranch=1, isIndirectBranch=1, isTerminator=1 in -def BRIND : JRInst<(outs), (ins IntRegs:$src), - "jumpr $src", - [(brind (i32 IntRegs:$src))]>; - def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>; def : Pat<(HexagonWrapperJT tjumptable:$dst), diff --git a/lib/Target/Hexagon/HexagonInstrInfoV3.td b/lib/Target/Hexagon/HexagonInstrInfoV3.td index 157ab3d..7e75554 100644 --- a/lib/Target/Hexagon/HexagonInstrInfoV3.td +++ b/lib/Target/Hexagon/HexagonInstrInfoV3.td @@ -11,6 +11,11 @@ // //===----------------------------------------------------------------------===// +def callv3 : SDNode<"HexagonISD::CALLv3", SDT_SPCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>; + +def callv3nr : SDNode<"HexagonISD::CALLv3nr", SDT_SPCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>; //===----------------------------------------------------------------------===// // J + @@ -40,41 +45,6 @@ let isCall = 1, neverHasSideEffects = 1, []>, Requires<[HasV3TOnly]>; } - -// Jump to address from register -// if(p?.new) jumpr:t r? -let isReturn = 1, isTerminator = 1, isBarrier = 1, - Defs = [PC], Uses = [R31] in { - def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) jumpr:t $src2", - []>, Requires<[HasV3T]>; -} - -// if (!p?.new) jumpr:t r? -let isReturn = 1, isTerminator = 1, isBarrier = 1, - Defs = [PC], Uses = [R31] in { - def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) jumpr:t $src2", - []>, Requires<[HasV3T]>; -} - -// Not taken. -// if(p?.new) jumpr:nt r? -let isReturn = 1, isTerminator = 1, isBarrier = 1, - Defs = [PC], Uses = [R31] in { - def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) jumpr:nt $src2", - []>, Requires<[HasV3T]>; -} - -// if (!p?.new) jumpr:nt r? -let isReturn = 1, isTerminator = 1, isBarrier = 1, - Defs = [PC], Uses = [R31] in { - def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) jumpr:nt $src2", - []>, Requires<[HasV3T]>; -} - //===----------------------------------------------------------------------===// // JR - //===----------------------------------------------------------------------===// diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td index 1d0643d..744efe8 100644 --- a/lib/Target/Hexagon/HexagonInstrInfoV4.td +++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td @@ -95,164 +95,6 @@ def NumUsesBelowThresCONST32 : PatFrag<(ops node:$addr), //===----------------------------------------------------------------------===// // ALU32 + //===----------------------------------------------------------------------===// - -// Shift halfword. 
- -let isPredicated = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in { -def ASLH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1) $dst = aslh($src2)", - []>, - Requires<[HasV4T]>; - -def ASLH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1) $dst = aslh($src2)", - []>, - Requires<[HasV4T]>; - -def ASLH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) $dst = aslh($src2)", - []>, - Requires<[HasV4T]>; - -def ASLH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) $dst = aslh($src2)", - []>, - Requires<[HasV4T]>; - -def ASRH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1) $dst = asrh($src2)", - []>, - Requires<[HasV4T]>; - -def ASRH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1) $dst = asrh($src2)", - []>, - Requires<[HasV4T]>; - -def ASRH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) $dst = asrh($src2)", - []>, - Requires<[HasV4T]>; - -def ASRH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) $dst = asrh($src2)", - []>, - Requires<[HasV4T]>; -} - -// Sign extend. - -let isPredicated = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in { -def SXTB_cPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1) $dst = sxtb($src2)", - []>, - Requires<[HasV4T]>; - -def SXTB_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1) $dst = sxtb($src2)", - []>, - Requires<[HasV4T]>; - -def SXTB_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) $dst = sxtb($src2)", - []>, - Requires<[HasV4T]>; - -def SXTB_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) $dst = sxtb($src2)", - []>, - Requires<[HasV4T]>; - - -def SXTH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1) $dst = sxth($src2)", - []>, - Requires<[HasV4T]>; - -def SXTH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1) $dst = sxth($src2)", - []>, - Requires<[HasV4T]>; - -def SXTH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) $dst = sxth($src2)", - []>, - Requires<[HasV4T]>; - -def SXTH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) $dst = sxth($src2)", - []>, - Requires<[HasV4T]>; -} - -// Zero exten. 
- -let neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in { -def ZXTB_cPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1) $dst = zxtb($src2)", - []>, - Requires<[HasV4T]>; - -def ZXTB_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1) $dst = zxtb($src2)", - []>, - Requires<[HasV4T]>; - -def ZXTB_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) $dst = zxtb($src2)", - []>, - Requires<[HasV4T]>; - -def ZXTB_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) $dst = zxtb($src2)", - []>, - Requires<[HasV4T]>; - -def ZXTH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1) $dst = zxth($src2)", - []>, - Requires<[HasV4T]>; - -def ZXTH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1) $dst = zxth($src2)", - []>, - Requires<[HasV4T]>; - -def ZXTH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if ($src1.new) $dst = zxth($src2)", - []>, - Requires<[HasV4T]>; - -def ZXTH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2), - "if (!$src1.new) $dst = zxth($src2)", - []>, - Requires<[HasV4T]>; -} - // Generate frame index addresses. let neverHasSideEffects = 1, isReMaterializable = 1, isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT in @@ -367,105 +209,31 @@ def COMBINE_iI_V4 : ALU32_ii<(outs DoubleRegs:$dst), //===----------------------------------------------------------------------===// // LD + //===----------------------------------------------------------------------===// -// -// These absolute set addressing mode instructions accept immediate as -// an operand. We have duplicated these patterns to take global address. - +//===----------------------------------------------------------------------===// +// Template class for load instructions with Absolute set addressing mode. 
+//===----------------------------------------------------------------------===// let isExtended = 1, opExtendable = 2, neverHasSideEffects = 1, -validSubTargets = HasV4SubT in { -def LDrid_abs_setimm_V4 : LDInst2<(outs DoubleRegs:$dst1, IntRegs:$dst2), - (ins u0AlwaysExt:$addr), - "$dst1 = memd($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memb(Re=#U6) -def LDrib_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u0AlwaysExt:$addr), - "$dst1 = memb($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memh(Re=#U6) -def LDrih_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u0AlwaysExt:$addr), - "$dst1 = memh($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memub(Re=#U6) -def LDriub_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u0AlwaysExt:$addr), - "$dst1 = memub($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memuh(Re=#U6) -def LDriuh_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), +validSubTargets = HasV4SubT in +class T_LD_abs_set<string mnemonic, RegisterClass RC>: + LDInst2<(outs RC:$dst1, IntRegs:$dst2), (ins u0AlwaysExt:$addr), - "$dst1 = memuh($dst2=##$addr)", + "$dst1 = "#mnemonic#"($dst2=##$addr)", []>, Requires<[HasV4T]>; -// Rd=memw(Re=#U6) -def LDriw_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u0AlwaysExt:$addr), - "$dst1 = memw($dst2=##$addr)", - []>, - Requires<[HasV4T]>; -} +def LDrid_abs_set_V4 : T_LD_abs_set <"memd", DoubleRegs>; +def LDrib_abs_set_V4 : T_LD_abs_set <"memb", IntRegs>; +def LDriub_abs_set_V4 : T_LD_abs_set <"memub", IntRegs>; +def LDrih_abs_set_V4 : T_LD_abs_set <"memh", IntRegs>; +def LDriw_abs_set_V4 : T_LD_abs_set <"memw", IntRegs>; +def LDriuh_abs_set_V4 : T_LD_abs_set <"memuh", IntRegs>; -// Following patterns are defined for absolute set addressing mode -// instruction which take global address as operand. 
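As an aside, the absolute-set forms produced by T_LD_abs_set ("$dst1 = mem*($dst2=##addr)") both load from the constant-extended address and leave that address in $dst2. A rough C++ model of the memw variant, illustrative only and not part of the commit (the helper name and memory layout are invented):

// Illustrative only (not from the commit): one absolute-set load,
// "Rd = memw(Re=##addr)", performs the load and the address write in a
// single instruction.
#include <cassert>
#include <cstdint>

static uint32_t memw_abs_set(uint32_t addr, uint32_t &re, const uint32_t *mem) {
  re = addr;             // Re = ##addr  (constant-extended absolute address)
  return mem[addr / 4];  // Rd = memw(Re)
}

int main() {
  uint32_t mem[4] = {10, 20, 30, 40};  // pretend this array sits at address 0
  uint32_t re = 0;
  uint32_t rd = memw_abs_set(/*##addr=*/8, re, mem);
  assert(rd == 30 && re == 8);
  return 0;
}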
-let isExtended = 1, opExtendable = 2, neverHasSideEffects = 1, -validSubTargets = HasV4SubT in { -def LDrid_abs_set_V4 : LDInst2<(outs DoubleRegs:$dst1, IntRegs:$dst2), - (ins globaladdressExt:$addr), - "$dst1 = memd($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memb(Re=#U6) -def LDrib_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdressExt:$addr), - "$dst1 = memb($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memh(Re=#U6) -def LDrih_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdressExt:$addr), - "$dst1 = memh($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memub(Re=#U6) -def LDriub_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdressExt:$addr), - "$dst1 = memub($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memuh(Re=#U6) -def LDriuh_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdressExt:$addr), - "$dst1 = memuh($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memw(Re=#U6) -def LDriw_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdressExt:$addr), - "$dst1 = memw($dst2=##$addr)", - []>, - Requires<[HasV4T]>; -} // multiclass for load instructions with base + register offset // addressing mode multiclass ld_idxd_shl_pbase<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : LDInst2<(outs RC:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -474,7 +242,7 @@ multiclass ld_idxd_shl_pbase<string mnemonic, RegisterClass RC, bit isNot, } multiclass ld_idxd_shl_pred<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ld_idxd_shl_pbase<mnemonic, RC, PredNot, 0>; // Predicate new defm _cdn#NAME : ld_idxd_shl_pbase<mnemonic, RC, PredNot, 1>; @@ -596,329 +364,6 @@ def : Pat <(i32 (load (add IntRegs:$src1, IntRegs:$src2))), Requires<[HasV4T]>; } -let isPredicable = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in -def LDd_GP_V4 : LDInst2<(outs DoubleRegs:$dst), - (ins globaladdress:$global), - "$dst=memd(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rtt=memd(##global) -let neverHasSideEffects = 1, isPredicated = 1, isExtended = 1, opExtendable = 2, -validSubTargets = HasV4SubT in { -def LDd_GP_cPt_V4 : LDInst2<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rtt=memd(##global) -def LDd_GP_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rtt=memd(##global) -def LDd_GP_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rtt=memd(##global) -def LDd_GP_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; -} - -let isPredicable = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in -def LDb_GP_V4 : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memb(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memb(##global) -let neverHasSideEffects 
= 1, isPredicated = 1, isExtended = 1, opExtendable = 2, -validSubTargets = HasV4SubT in { -def LDb_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memb(##global) -def LDb_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memb(##global) -def LDb_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memb(##global) -def LDb_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; -} - -let isPredicable = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in -def LDub_GP_V4 : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memub(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memub(##global) -let neverHasSideEffects = 1, isPredicated = 1, isExtended = 1, opExtendable = 2, -validSubTargets = HasV4SubT in { -def LDub_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memub(##global) -def LDub_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memub(##global) -def LDub_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memub(##global) -def LDub_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; -} - -let isPredicable = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in -def LDh_GP_V4 : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memh(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memh(##global) -let neverHasSideEffects = 1, isPredicated = 1, isExtended = 1, opExtendable = 2, -validSubTargets = HasV4SubT in { -def LDh_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memh(##global) -def LDh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memh(##global) -def LDh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memh(##global) -def LDh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; -} - -let isPredicable = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in -def LDuh_GP_V4 : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memuh(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memuh(##global) -let neverHasSideEffects = 1, isPredicated = 1, isExtended = 1, opExtendable = 2, -validSubTargets = HasV4SubT in { -def LDuh_GP_cPt_V4 : LDInst2<(outs 
IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memuh(##global) -def LDuh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memuh(##global) -def LDuh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memuh(##global) -def LDuh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; -} - -let isPredicable = 1, neverHasSideEffects = 1, validSubTargets = HasV4SubT in -def LDw_GP_V4 : LDInst2<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memw(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memw(##global) -let neverHasSideEffects = 1, isPredicated = 1, isExtended = 1, opExtendable = 2, -validSubTargets = HasV4SubT in { -def LDw_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memw(##global) -def LDw_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memw(##global) -def LDw_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memw(##global) -def LDw_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; -} - - -def : Pat <(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)), - (i64 (LDd_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDw_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDuh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDub_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memw(#foo + 0) -let AddedComplexity = 100 in -def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i64 (LDd_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd -let AddedComplexity = 100 in -def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>, - Requires<[HasV4T]>; - -// When the Interprocedural Global Variable optimizer realizes that a certain -// global variable takes only two constant values, it shrinks the global to -// a boolean. Catch those loads here in the following 3 patterns. 
-let AddedComplexity = 100 in -def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memb(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memb(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDub_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memub(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDub_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memh(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memh(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memuh(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDuh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memw(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDw_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - // zext i1->i64 def : Pat <(i64 (zext (i1 PredRegs:$src1))), (i64 (COMBINE_Ir_V4 0, (MUX_ii (i1 PredRegs:$src1), 1, 0)))>, @@ -1008,78 +453,29 @@ def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))), // ST + //===----------------------------------------------------------------------===// /// -/// Assumptions::: ****** DO NOT IGNORE ******** -/// 1. Make sure that in post increment store, the zero'th operand is always the -/// post increment operand. -/// 2. Make sure that the store value operand(Rt/Rtt) in a store is always the -/// last operand. 
-/// - -// memd(Re=#U)=Rtt -let isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT in { -def STrid_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), - (ins DoubleRegs:$src1, u0AlwaysExt:$src2), - "memd($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memb(Re=#U)=Rs -def STrib_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), - (ins IntRegs:$src1, u0AlwaysExt:$src2), - "memb($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memh(Re=#U)=Rs -def STrih_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), - (ins IntRegs:$src1, u0AlwaysExt:$src2), - "memh($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memw(Re=#U)=Rs -def STriw_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), - (ins IntRegs:$src1, u0AlwaysExt:$src2), - "memw($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; -} - -// memd(Re=#U)=Rtt -let isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT in { -def STrid_abs_set_V4 : STInst2<(outs IntRegs:$dst1), - (ins DoubleRegs:$src1, globaladdressExt:$src2), - "memd($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memb(Re=#U)=Rs -def STrib_abs_set_V4 : STInst2<(outs IntRegs:$dst1), - (ins IntRegs:$src1, globaladdressExt:$src2), - "memb($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memh(Re=#U)=Rs -def STrih_abs_set_V4 : STInst2<(outs IntRegs:$dst1), - (ins IntRegs:$src1, globaladdressExt:$src2), - "memh($dst1=##$src2) = $src1", +//===----------------------------------------------------------------------===// +// Template class for store instructions with Absolute set addressing mode. +//===----------------------------------------------------------------------===// +let isExtended = 1, opExtendable = 2, validSubTargets = HasV4SubT in +class T_ST_abs_set<string mnemonic, RegisterClass RC>: + STInst2<(outs IntRegs:$dst1), + (ins RC:$src1, u0AlwaysExt:$src2), + mnemonic#"($dst1=##$src2) = $src1", []>, Requires<[HasV4T]>; -// memw(Re=#U)=Rs -def STriw_abs_set_V4 : STInst2<(outs IntRegs:$dst1), - (ins IntRegs:$src1, globaladdressExt:$src2), - "memw($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; -} +def STrid_abs_set_V4 : T_ST_abs_set <"memd", DoubleRegs>; +def STrib_abs_set_V4 : T_ST_abs_set <"memb", IntRegs>; +def STrih_abs_set_V4 : T_ST_abs_set <"memh", IntRegs>; +def STriw_abs_set_V4 : T_ST_abs_set <"memw", IntRegs>; +//===----------------------------------------------------------------------===// // multiclass for store instructions with base + register offset addressing // mode +//===----------------------------------------------------------------------===// multiclass ST_Idxd_shl_Pbase<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, RC:$src5), @@ -1090,7 +486,7 @@ multiclass ST_Idxd_shl_Pbase<string mnemonic, RegisterClass RC, bit isNot, } multiclass ST_Idxd_shl_Pred<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_Idxd_shl_Pbase<mnemonic, RC, PredNot, 0>; // Predicate new defm _cdn#NAME : ST_Idxd_shl_Pbase<mnemonic, RC, PredNot, 1>; @@ -1118,7 +514,7 @@ multiclass ST_Idxd_shl<string mnemonic, string CextOp, RegisterClass RC> { // addressing mode. 
multiclass ST_Idxd_shl_Pbase_nv<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME#_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, RC:$src5), @@ -1129,7 +525,7 @@ multiclass ST_Idxd_shl_Pbase_nv<string mnemonic, RegisterClass RC, bit isNot, } multiclass ST_Idxd_shl_Pred_nv<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_Idxd_shl_Pbase_nv<mnemonic, RC, PredNot, 0>; // Predicate new defm _cdn#NAME : ST_Idxd_shl_Pbase_nv<mnemonic, RC, PredNot, 1>; @@ -1192,17 +588,59 @@ def : Pat<(store (i64 DoubleRegs:$src4), u2ImmPred:$src3, DoubleRegs:$src4)>; } -// memd(Ru<<#u2+#U6)=Rtt -let isExtended = 1, opExtendable = 2, AddedComplexity = 10, -validSubTargets = HasV4SubT in -def STrid_shl_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, DoubleRegs:$src4), - "memd($src1<<#$src2+#$src3) = $src4", - [(store (i64 DoubleRegs:$src4), +let isExtended = 1, opExtendable = 2 in +class T_ST_LongOff <string mnemonic, PatFrag stOp, RegisterClass RC, ValueType VT> : + STInst<(outs), + (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, RC:$src4), + mnemonic#"($src1<<#$src2+##$src3) = $src4", + [(stOp (VT RC:$src4), (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), u0AlwaysExtPred:$src3))]>, Requires<[HasV4T]>; +let isExtended = 1, opExtendable = 2, mayStore = 1, isNVStore = 1 in +class T_ST_LongOff_nv <string mnemonic> : + NVInst_V4<(outs), + (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4), + mnemonic#"($src1<<#$src2+##$src3) = $src4.new", + []>, + Requires<[HasV4T]>; + +multiclass ST_LongOff <string mnemonic, string BaseOp, PatFrag stOp> { + let BaseOpcode = BaseOp#"_shl" in { + let isNVStorable = 1 in + def NAME#_V4 : T_ST_LongOff<mnemonic, stOp, IntRegs, i32>; + + def NAME#_nv_V4 : T_ST_LongOff_nv<mnemonic>; + } +} + +let AddedComplexity = 10, validSubTargets = HasV4SubT in { + def STrid_shl_V4 : T_ST_LongOff<"memd", store, DoubleRegs, i64>; + defm STrib_shl : ST_LongOff <"memb", "STrib", truncstorei8>, NewValueRel; + defm STrih_shl : ST_LongOff <"memh", "Strih", truncstorei16>, NewValueRel; + defm STriw_shl : ST_LongOff <"memw", "STriw", store>, NewValueRel; +} + +let AddedComplexity = 40 in +multiclass T_ST_LOff_Pats <InstHexagon I, RegisterClass RC, ValueType VT, + PatFrag stOp> { + def : Pat<(stOp (VT RC:$src4), + (add (shl IntRegs:$src1, u2ImmPred:$src2), + (NumUsesBelowThresCONST32 tglobaladdr:$src3))), + (I IntRegs:$src1, u2ImmPred:$src2, tglobaladdr:$src3, RC:$src4)>; + + def : Pat<(stOp (VT RC:$src4), + (add IntRegs:$src1, + (NumUsesBelowThresCONST32 tglobaladdr:$src3))), + (I IntRegs:$src1, 0, tglobaladdr:$src3, RC:$src4)>; +} + +defm : T_ST_LOff_Pats<STrid_shl_V4, DoubleRegs, i64, store>; +defm : T_ST_LOff_Pats<STriw_shl_V4, IntRegs, i32, store>; +defm : T_ST_LOff_Pats<STrib_shl_V4, IntRegs, i32, truncstorei8>; +defm : T_ST_LOff_Pats<STrih_shl_V4, IntRegs, i32, truncstorei16>; + // memd(Rx++#s4:3)=Rtt // memd(Rx++#s4:3:circ(Mu))=Rtt // memd(Rx++I:circ(Mu))=Rtt @@ -1222,7 +660,7 @@ def STrid_shl_V4 : STInst<(outs), //===----------------------------------------------------------------------===// multiclass ST_Imm_Pbase<string mnemonic, Operand OffsetOp, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : STInst2<(outs), (ins 
PredRegs:$src1, IntRegs:$src2, OffsetOp:$src3, s6Ext:$src4), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1232,7 +670,7 @@ multiclass ST_Imm_Pbase<string mnemonic, Operand OffsetOp, bit isNot, } multiclass ST_Imm_Pred<string mnemonic, Operand OffsetOp, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_Imm_Pbase<mnemonic, OffsetOp, PredNot, 0>; // Predicate new defm _cdn#NAME : ST_Imm_Pbase<mnemonic, OffsetOp, PredNot, 1>; @@ -1280,17 +718,6 @@ def : Pat <(truncstorei8 s8ExtPred:$src2, (i32 IntRegs:$src1)), (STrib_imm_V4 IntRegs:$src1, 0, s8ExtPred:$src2)>, Requires<[HasV4T]>; -// memb(Ru<<#u2+#U6)=Rt -let isExtended = 1, opExtendable = 2, AddedComplexity = 10, isNVStorable = 1, -validSubTargets = HasV4SubT in -def STrib_shl_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4), - "memb($src1<<#$src2+#$src3) = $src4", - [(truncstorei8 (i32 IntRegs:$src4), - (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), - u0AlwaysExtPred:$src3))]>, - Requires<[HasV4T]>; - // memb(Rx++#s4:0:circ(Mu))=Rt // memb(Rx++I:circ(Mu))=Rt // memb(Rx++Mu)=Rt @@ -1311,17 +738,6 @@ def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)), // TODO: needs to be implemented. // memh(Ru<<#u2+#U6)=Rt.H -// memh(Ru<<#u2+#U6)=Rt -let isExtended = 1, opExtendable = 2, AddedComplexity = 10, isNVStorable = 1, -validSubTargets = HasV4SubT in -def STrih_shl_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4), - "memh($src1<<#$src2+#$src3) = $src4", - [(truncstorei16 (i32 IntRegs:$src4), - (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), - u0AlwaysExtPred:$src3))]>, - Requires<[HasV4T]>; - // memh(Rx++#s4:1:circ(Mu))=Rt.H // memh(Rx++#s4:1:circ(Mu))=Rt // memh(Rx++I:circ(Mu))=Rt.H @@ -1358,241 +774,11 @@ def : Pat <(store s8ExtPred:$src2, (i32 IntRegs:$src1)), (STriw_imm_V4 IntRegs:$src1, 0, s8ExtPred:$src2)>, Requires<[HasV4T]>; -// memw(Ru<<#u2+#U6)=Rt -let isExtended = 1, opExtendable = 2, AddedComplexity = 10, isNVStorable = 1, -validSubTargets = HasV4SubT in -def STriw_shl_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4), - "memw($src1<<#$src2+#$src3) = $src4", - [(store (i32 IntRegs:$src4), - (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), - u0AlwaysExtPred:$src3))]>, - Requires<[HasV4T]>; - // memw(Rx++#s4:2)=Rt // memw(Rx++#s4:2:circ(Mu))=Rt // memw(Rx++I:circ(Mu))=Rt // memw(Rx++Mu)=Rt // memw(Rx++Mu:brev)=Rt -// memw(gp+#u16:2)=Rt - - -// memd(#global)=Rtt -let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1, -validSubTargets = HasV4SubT in -def STd_GP_V4 : STInst2<(outs), - (ins globaladdress:$global, DoubleRegs:$src), - "memd(#$global) = $src", - []>, - Requires<[HasV4T]>; - -// if (Pv) memd(##global) = Rtt -let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1, -isExtended = 1, opExtendable = 1, validSubTargets = HasV4SubT in { -def STd_GP_cPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), - "if ($src1) memd(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memd(##global) = Rtt -def STd_GP_cNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), - "if (!$src1) memd(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (Pv) memd(##global) = Rtt -def STd_GP_cdnPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), - "if ($src1.new) memd(##$global) = $src2", - []>, - 
Requires<[HasV4T]>; - -// if (!Pv) memd(##global) = Rtt -def STd_GP_cdnNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), - "if (!$src1.new) memd(##$global) = $src2", - []>, - Requires<[HasV4T]>; -} - -// memb(#global)=Rt -let isPredicable = 1, neverHasSideEffects = 1, isNVStorable = 1, -validSubTargets = HasV4SubT in -def STb_GP_V4 : STInst2<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memb(#$global) = $src", - []>, - Requires<[HasV4T]>; - -// if (Pv) memb(##global) = Rt -let neverHasSideEffects = 1, isPredicated = 1, isNVStorable = 1, -isExtended = 1, opExtendable = 1, validSubTargets = HasV4SubT in { -def STb_GP_cPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1) memb(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memb(##global) = Rt -def STb_GP_cNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1) memb(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (Pv) memb(##global) = Rt -def STb_GP_cdnPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1.new) memb(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memb(##global) = Rt -def STb_GP_cdnNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1.new) memb(##$global) = $src2", - []>, - Requires<[HasV4T]>; -} - -// memh(#global)=Rt -let isPredicable = 1, neverHasSideEffects = 1, isNVStorable = 1, -validSubTargets = HasV4SubT in -def STh_GP_V4 : STInst2<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memh(#$global) = $src", - []>, - Requires<[HasV4T]>; - -// if (Pv) memh(##global) = Rt -let neverHasSideEffects = 1, isPredicated = 1, isNVStorable = 1, -isExtended = 1, opExtendable = 1, validSubTargets = HasV4SubT in { -def STh_GP_cPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1) memh(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memh(##global) = Rt -def STh_GP_cNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1) memh(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (Pv) memh(##global) = Rt -def STh_GP_cdnPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1.new) memh(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memh(##global) = Rt -def STh_GP_cdnNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1.new) memh(##$global) = $src2", - []>, - Requires<[HasV4T]>; -} - -// memw(#global)=Rt -let isPredicable = 1, neverHasSideEffects = 1, isNVStorable = 1, -validSubTargets = HasV4SubT in -def STw_GP_V4 : STInst2<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memw(#$global) = $src", - []>, - Requires<[HasV4T]>; - -// if (Pv) memw(##global) = Rt -let neverHasSideEffects = 1, isPredicated = 1, isNVStorable = 1, -isExtended = 1, opExtendable = 1, validSubTargets = HasV4SubT in { -def STw_GP_cPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1) memw(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memw(##global) = Rt -def STw_GP_cNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1) memw(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (Pv) memw(##global) = Rt -def STw_GP_cdnPt_V4 : STInst2<(outs), 
- (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1.new) memw(##$global) = $src2", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memw(##global) = Rt -def STw_GP_cdnNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1.new) memw(##$global) = $src2", - []>, - Requires<[HasV4T]>; -} - -// 64 bit atomic store -def : Pat <(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global), - (i64 DoubleRegs:$src1)), - (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>, - Requires<[HasV4T]>; - -// Map from store(globaladdress) -> memd(#foo) -let AddedComplexity = 100 in -def : Pat <(store (i64 DoubleRegs:$src1), - (HexagonCONST32_GP tglobaladdr:$global)), - (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>, - Requires<[HasV4T]>; - -// 8 bit atomic store -def : Pat < (atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global), - (i32 IntRegs:$src1)), - (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[HasV4T]>; - -// Map from store(globaladdress) -> memb(#foo) -let AddedComplexity = 100 in -def : Pat<(truncstorei8 (i32 IntRegs:$src1), - (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[HasV4T]>; - -// Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1" -// to "r0 = 1; memw(#foo) = r0" -let AddedComplexity = 100 in -def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>, - Requires<[HasV4T]>; - -def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global), - (i32 IntRegs:$src1)), - (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[HasV4T]>; - -// Map from store(globaladdress) -> memh(#foo) -let AddedComplexity = 100 in -def : Pat<(truncstorei16 (i32 IntRegs:$src1), - (HexagonCONST32_GP tglobaladdr:$global)), - (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[HasV4T]>; - -// 32 bit atomic store -def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global), - (i32 IntRegs:$src1)), - (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[HasV4T]>; - -// Map from store(globaladdress) -> memw(#foo) -let AddedComplexity = 100 in -def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), - (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, - Requires<[HasV4T]>; //===----------------------------------------------------------------------=== // ST - @@ -1607,7 +793,7 @@ def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), // multiclass ST_Idxd_Pbase_nv<string mnemonic, RegisterClass RC, Operand predImmOp, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME#_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC: $src4), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1618,7 +804,7 @@ multiclass ST_Idxd_Pbase_nv<string mnemonic, RegisterClass RC, multiclass ST_Idxd_Pred_nv<string mnemonic, RegisterClass RC, Operand predImmOp, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_Idxd_Pbase_nv<mnemonic, RC, predImmOp, PredNot, 0>; // Predicate new defm _cdn#NAME : ST_Idxd_Pbase_nv<mnemonic, RC, predImmOp, PredNot, 1>; @@ -1660,7 +846,7 @@ let addrMode = BaseImmOffset, validSubTargets = HasV4SubT in { // and MEMri operand. 
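
A note on the new-value store multiclasses in these hunks: the ".new" suffix on the stored register (distinct from ".new" on a predicate) marks a Hexagon V4 new-value store, where the value written to memory is produced by another instruction in the same packet, roughly as in { r2 = add(r3, r4); memw(r5+#0) = r2.new }. The NVInst_V4 class and the isNVStore/isNVStorable flags used throughout this file identify the instructions that can take part in that pairing.
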
multiclass ST_MEMri_Pbase_nv<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME#_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, RC: $src2), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1670,7 +856,7 @@ multiclass ST_MEMri_Pbase_nv<string mnemonic, RegisterClass RC, bit isNot, } multiclass ST_MEMri_Pred_nv<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_MEMri_Pbase_nv<mnemonic, RC, PredNot, 0>; // Predicate new @@ -1706,15 +892,6 @@ mayStore = 1 in { defm STriw: ST_MEMri_nv<"memw", "STriw", IntRegs, 13, 8>, AddrModeRel; } -// memb(Ru<<#u2+#U6)=Nt.new -let isExtended = 1, opExtendable = 2, mayStore = 1, AddedComplexity = 10, -isNVStore = 1, validSubTargets = HasV4SubT in -def STrib_shl_nv_V4 : NVInst_V4<(outs), - (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4), - "memb($src1<<#$src2+#$src3) = $src4.new", - []>, - Requires<[HasV4T]>; - //===----------------------------------------------------------------------===// // Post increment store // mem[bhwd](Rx++#s4:[0123])=Nt.new @@ -1722,7 +899,7 @@ def STrib_shl_nv_V4 : NVInst_V4<(outs), multiclass ST_PostInc_Pbase_nv<string mnemonic, RegisterClass RC, Operand ImmOp, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME#_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, RC:$src3), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", @@ -1734,7 +911,7 @@ multiclass ST_PostInc_Pbase_nv<string mnemonic, RegisterClass RC, Operand ImmOp, multiclass ST_PostInc_Pred_nv<string mnemonic, RegisterClass RC, Operand ImmOp, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_PostInc_Pbase_nv<mnemonic, RC, ImmOp, PredNot, 0>; // Predicate new let Predicates = [HasV4T], validSubTargets = HasV4SubT in @@ -1772,146 +949,15 @@ defm POST_STwri: ST_PostInc_nv <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel; // memb(Rx++I:circ(Mu))=Nt.new // memb(Rx++Mu)=Nt.new // memb(Rx++Mu:brev)=Nt.new - -// memb(#global)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in -def STb_GP_nv_V4 : NVInst_V4<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memb(#$global) = $src.new", - []>, - Requires<[HasV4T]>; - -// memh(Ru<<#u2+#U6)=Nt.new -let isExtended = 1, opExtendable = 2, mayStore = 1, AddedComplexity = 10, -isNVStore = 1, validSubTargets = HasV4SubT in -def STrih_shl_nv_V4 : NVInst_V4<(outs), - (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4), - "memh($src1<<#$src2+#$src3) = $src4.new", - []>, - Requires<[HasV4T]>; - // memh(Rx++#s4:1:circ(Mu))=Nt.new // memh(Rx++I:circ(Mu))=Nt.new // memh(Rx++Mu)=Nt.new // memh(Rx++Mu:brev)=Nt.new -// memh(#global)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in -def STh_GP_nv_V4 : NVInst_V4<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memh(#$global) = $src.new", - []>, - Requires<[HasV4T]>; - -// memw(Ru<<#u2+#U6)=Nt.new -let isExtended = 1, opExtendable = 2, mayStore = 1, AddedComplexity = 10, -isNVStore = 1, validSubTargets = HasV4SubT in -def STriw_shl_nv_V4 : NVInst_V4<(outs), - (ins IntRegs:$src1, u2Imm:$src2, u0AlwaysExt:$src3, IntRegs:$src4), - "memw($src1<<#$src2+#$src3) = $src4.new", - []>, - Requires<[HasV4T]>; - // 
memw(Rx++#s4:2:circ(Mu))=Nt.new // memw(Rx++I:circ(Mu))=Nt.new // memw(Rx++Mu)=Nt.new // memw(Rx++Mu:brev)=Nt.new -// memw(gp+#u16:2)=Nt.new - -let mayStore = 1, neverHasSideEffects = 1, isNVStore = 1, -validSubTargets = HasV4SubT in -def STw_GP_nv_V4 : NVInst_V4<(outs), - (ins globaladdress:$global, IntRegs:$src), - "memw(#$global) = $src.new", - []>, - Requires<[HasV4T]>; - -// if (Pv) memb(##global) = Rt -let mayStore = 1, neverHasSideEffects = 1, isNVStore = 1, -isExtended = 1, opExtendable = 1, validSubTargets = HasV4SubT in { -def STb_GP_cPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1) memb(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memb(##global) = Rt -def STb_GP_cNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1) memb(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (Pv) memb(##global) = Rt -def STb_GP_cdnPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1.new) memb(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memb(##global) = Rt -def STb_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1.new) memb(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (Pv) memh(##global) = Rt -def STh_GP_cPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1) memh(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memh(##global) = Rt -def STh_GP_cNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1) memh(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (Pv) memh(##global) = Rt -def STh_GP_cdnPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1.new) memh(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memh(##global) = Rt -def STh_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1.new) memh(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (Pv) memw(##global) = Rt -def STw_GP_cPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1) memw(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memw(##global) = Rt -def STw_GP_cNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1) memw(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (Pv) memw(##global) = Rt -def STw_GP_cdnPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if ($src1.new) memw(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; - -// if (!Pv) memw(##global) = Rt -def STw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), - "if (!$src1.new) memw(##$global) = $src2.new", - []>, - Requires<[HasV4T]>; -} //===----------------------------------------------------------------------===// // NV/ST - @@ -2658,414 +1704,367 @@ def LSRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), // MEMOP: Word, Half, Byte //===----------------------------------------------------------------------===// +def MEMOPIMM : SDNodeXForm<imm, [{ + // Call the transformation function XformM5ToU5Imm to get the negative + // immediate's positive counterpart. 
+ int32_t imm = N->getSExtValue(); + return XformM5ToU5Imm(imm); +}]>; + +def MEMOPIMM_HALF : SDNodeXForm<imm, [{ + // -1 .. -31 represented as 65535..65515 + // assigning to a short restores our desired signed value. + // Call the transformation function XformM5ToU5Imm to get the negative + // immediate's positive counterpart. + int16_t imm = N->getSExtValue(); + return XformM5ToU5Imm(imm); +}]>; + +def MEMOPIMM_BYTE : SDNodeXForm<imm, [{ + // -1 .. -31 represented as 255..235 + // assigning to a char restores our desired signed value. + // Call the transformation function XformM5ToU5Imm to get the negative + // immediate's positive counterpart. + int8_t imm = N->getSExtValue(); + return XformM5ToU5Imm(imm); +}]>; + +def SETMEMIMM : SDNodeXForm<imm, [{ + // Return the bit position we will set [0-31]. + // As an SDNode. + int32_t imm = N->getSExtValue(); + return XformMskToBitPosU5Imm(imm); +}]>; + +def CLRMEMIMM : SDNodeXForm<imm, [{ + // Return the bit position we will clear [0-31]. + // As an SDNode. + // we bit negate the value first + int32_t imm = ~(N->getSExtValue()); + return XformMskToBitPosU5Imm(imm); +}]>; + +def SETMEMIMM_SHORT : SDNodeXForm<imm, [{ + // Return the bit position we will set [0-15]. + // As an SDNode. + int16_t imm = N->getSExtValue(); + return XformMskToBitPosU4Imm(imm); +}]>; + +def CLRMEMIMM_SHORT : SDNodeXForm<imm, [{ + // Return the bit position we will clear [0-15]. + // As an SDNode. + // we bit negate the value first + int16_t imm = ~(N->getSExtValue()); + return XformMskToBitPosU4Imm(imm); +}]>; + +def SETMEMIMM_BYTE : SDNodeXForm<imm, [{ + // Return the bit position we will set [0-7]. + // As an SDNode. + int8_t imm = N->getSExtValue(); + return XformMskToBitPosU3Imm(imm); +}]>; + +def CLRMEMIMM_BYTE : SDNodeXForm<imm, [{ + // Return the bit position we will clear [0-7]. + // As an SDNode. + // we bit negate the value first + int8_t imm = ~(N->getSExtValue()); + return XformMskToBitPosU3Imm(imm); +}]>; + //===----------------------------------------------------------------------===// -// MEMOP: Word -// -// Implemented: -// MEMw_ADDi_indexed_V4 : memw(Rs+#u6:2)+=#U5 -// MEMw_SUBi_indexed_V4 : memw(Rs+#u6:2)-=#U5 -// MEMw_ADDr_indexed_V4 : memw(Rs+#u6:2)+=Rt -// MEMw_SUBr_indexed_V4 : memw(Rs+#u6:2)-=Rt -// MEMw_CLRr_indexed_V4 : memw(Rs+#u6:2)&=Rt -// MEMw_SETr_indexed_V4 : memw(Rs+#u6:2)|=Rt -// MEMw_ADDi_V4 : memw(Rs+#u6:2)+=#U5 -// MEMw_SUBi_V4 : memw(Rs+#u6:2)-=#U5 -// MEMw_ADDr_V4 : memw(Rs+#u6:2)+=Rt -// MEMw_SUBr_V4 : memw(Rs+#u6:2)-=Rt -// MEMw_CLRr_V4 : memw(Rs+#u6:2)&=Rt -// MEMw_SETr_V4 : memw(Rs+#u6:2)|=Rt -// -// Not implemented: -// MEMw_CLRi_indexed_V4 : memw(Rs+#u6:2)=clrbit(#U5) -// MEMw_SETi_indexed_V4 : memw(Rs+#u6:2)=setbit(#U5) -// MEMw_CLRi_V4 : memw(Rs+#u6:2)=clrbit(#U5) -// MEMw_SETi_V4 : memw(Rs+#u6:2)=setbit(#U5) +// Template class for MemOp instructions with the register value. 
//===----------------------------------------------------------------------===// +class MemOp_rr_base <string opc, bits<2> opcBits, Operand ImmOp, + string memOp, bits<2> memOpBits> : + MEMInst_V4<(outs), + (ins IntRegs:$base, ImmOp:$offset, IntRegs:$delta), + opc#"($base+#$offset)"#memOp#"$delta", + []>, + Requires<[HasV4T, UseMEMOP]> { + + bits<5> base; + bits<5> delta; + bits<32> offset; + bits<6> offsetBits; // memb - u6:0 , memh - u6:1, memw - u6:2 + + let offsetBits = !if (!eq(opcBits, 0b00), offset{5-0}, + !if (!eq(opcBits, 0b01), offset{6-1}, + !if (!eq(opcBits, 0b10), offset{7-2},0))); + + let IClass = 0b0011; + let Inst{27-24} = 0b1110; + let Inst{22-21} = opcBits; + let Inst{20-16} = base; + let Inst{13} = 0b0; + let Inst{12-7} = offsetBits; + let Inst{6-5} = memOpBits; + let Inst{4-0} = delta; +} +//===----------------------------------------------------------------------===// +// Template class for MemOp instructions with the immediate value. +//===----------------------------------------------------------------------===// +class MemOp_ri_base <string opc, bits<2> opcBits, Operand ImmOp, + string memOp, bits<2> memOpBits> : + MEMInst_V4 <(outs), + (ins IntRegs:$base, ImmOp:$offset, u5Imm:$delta), + opc#"($base+#$offset)"#memOp#"#$delta" + #!if(memOpBits{1},")", ""), // clrbit, setbit - include ')' + []>, + Requires<[HasV4T, UseMEMOP]> { + bits<5> base; + bits<5> delta; + bits<32> offset; + bits<6> offsetBits; // memb - u6:0 , memh - u6:1, memw - u6:2 -// memw(Rs+#u6:2) += #U5 -let AddedComplexity = 30 in -def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$addend), - "memw($base+#$offset) += #$addend", - []>, - Requires<[HasV4T, UseMEMOP]>; + let offsetBits = !if (!eq(opcBits, 0b00), offset{5-0}, + !if (!eq(opcBits, 0b01), offset{6-1}, + !if (!eq(opcBits, 0b10), offset{7-2},0))); -// memw(Rs+#u6:2) -= #U5 -let AddedComplexity = 30 in -def MEMw_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$subend), - "memw($base+#$offset) -= #$subend", - []>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) += Rt -let AddedComplexity = 30 in -def MEMw_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$addend), - "memw($base+#$offset) += $addend", - [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$addend)), - (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) -= Rt -let AddedComplexity = 30 in -def MEMw_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$subend), - "memw($base+#$offset) -= $subend", - [(store (sub (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$subend)), - (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) &= Rt -let AddedComplexity = 30 in -def MEMw_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$andend), - "memw($base+#$offset) &= $andend", - [(store (and (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$andend)), - (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) |= Rt -let AddedComplexity = 30 in -def MEMw_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$orend), - "memw($base+#$offset) |= $orend", - [(store (or (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$orend)), - (add 
(i32 IntRegs:$base), u6_2ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) += #U5 -let AddedComplexity = 30 in -def MEMw_ADDi_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, u5Imm:$addend), - "memw($addr) += $addend", - []>, - Requires<[HasV4T, UseMEMOP]>; + let IClass = 0b0011; + let Inst{27-24} = 0b1111; + let Inst{22-21} = opcBits; + let Inst{20-16} = base; + let Inst{13} = 0b0; + let Inst{12-7} = offsetBits; + let Inst{6-5} = memOpBits; + let Inst{4-0} = delta; +} -// memw(Rs+#u6:2) -= #U5 -let AddedComplexity = 30 in -def MEMw_SUBi_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, u5Imm:$subend), - "memw($addr) -= $subend", - []>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) += Rt -let AddedComplexity = 30 in -def MEMw_ADDr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$addend), - "memw($addr) += $addend", - [(store (add (load ADDRriU6_2:$addr), (i32 IntRegs:$addend)), - ADDRriU6_2:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) -= Rt -let AddedComplexity = 30 in -def MEMw_SUBr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$subend), - "memw($addr) -= $subend", - [(store (sub (load ADDRriU6_2:$addr), (i32 IntRegs:$subend)), - ADDRriU6_2:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) &= Rt -let AddedComplexity = 30 in -def MEMw_ANDr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$andend), - "memw($addr) &= $andend", - [(store (and (load ADDRriU6_2:$addr), (i32 IntRegs:$andend)), - ADDRriU6_2:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memw(Rs+#u6:2) |= Rt -let AddedComplexity = 30 in -def MEMw_ORr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$orend), - "memw($addr) |= $orend", - [(store (or (load ADDRriU6_2:$addr), (i32 IntRegs:$orend)), - ADDRriU6_2:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; +// multiclass to define MemOp instructions with register operand. +multiclass MemOp_rr<string opc, bits<2> opcBits, Operand ImmOp> { + def _ADD#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " += ", 0b00>; // add + def _SUB#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " -= ", 0b01>; // sub + def _AND#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " &= ", 0b10>; // and + def _OR#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " |= ", 0b11>; // or +} + +// multiclass to define MemOp instructions with immediate Operand. +multiclass MemOp_ri<string opc, bits<2> opcBits, Operand ImmOp> { + def _ADD#NAME#_V4 : MemOp_ri_base <opc, opcBits, ImmOp, " += ", 0b00 >; + def _SUB#NAME#_V4 : MemOp_ri_base <opc, opcBits, ImmOp, " -= ", 0b01 >; + def _CLRBIT#NAME#_V4 : MemOp_ri_base<opc, opcBits, ImmOp, " =clrbit(", 0b10>; + def _SETBIT#NAME#_V4 : MemOp_ri_base<opc, opcBits, ImmOp, " =setbit(", 0b11>; +} + +multiclass MemOp_base <string opc, bits<2> opcBits, Operand ImmOp> { + defm r : MemOp_rr <opc, opcBits, ImmOp>; + defm i : MemOp_ri <opc, opcBits, ImmOp>; +} + +// Define MemOp instructions. 
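
The defm MemOPb/MemOPh/MemOPw lines that follow instantiate the MemOp_rr/MemOp_ri multiclasses above, yielding the MemOPw_ADDi_V4-style names used by the selection patterns further down. As a rough C++ sketch of the read-modify-write a single memop folds (illustration only, not code from this patch; the offset models the scaled u6:2 immediate):

#include <cstdint>

// What "memw(Rs+#u6:2) += Rt" replaces: a load, an ALU op, and a store
// back to the same address, matching the (load (add base, off)) /
// (store ..., (add base, off)) patterns in this file.
void memop_word_add(int32_t *rs, unsigned byte_off /* multiple of 4 */,
                    int32_t rt) {
  int32_t *addr = reinterpret_cast<int32_t *>(
      reinterpret_cast<char *>(rs) + byte_off);   // Rs + #u6:2
  *addr = *addr + rt;                             // one memop on V4
}

The immediate forms (" += #U5", " -= #U5") work the same way with a 5-bit constant in place of Rt.
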
+let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, +validSubTargets =HasV4SubT in { + let opExtentBits = 6, accessSize = ByteAccess in + defm MemOPb : MemOp_base <"memb", 0b00, u6_0Ext>; + + let opExtentBits = 7, accessSize = HalfWordAccess in + defm MemOPh : MemOp_base <"memh", 0b01, u6_1Ext>; + + let opExtentBits = 8, accessSize = WordAccess in + defm MemOPw : MemOp_base <"memw", 0b10, u6_2Ext>; +} //===----------------------------------------------------------------------===// -// MEMOP: Halfword -// -// Implemented: -// MEMh_ADDi_indexed_V4 : memw(Rs+#u6:2)+=#U5 -// MEMh_SUBi_indexed_V4 : memw(Rs+#u6:2)-=#U5 -// MEMh_ADDr_indexed_V4 : memw(Rs+#u6:2)+=Rt -// MEMh_SUBr_indexed_V4 : memw(Rs+#u6:2)-=Rt -// MEMh_CLRr_indexed_V4 : memw(Rs+#u6:2)&=Rt -// MEMh_SETr_indexed_V4 : memw(Rs+#u6:2)|=Rt -// MEMh_ADDi_V4 : memw(Rs+#u6:2)+=#U5 -// MEMh_SUBi_V4 : memw(Rs+#u6:2)-=#U5 -// MEMh_ADDr_V4 : memw(Rs+#u6:2)+=Rt -// MEMh_SUBr_V4 : memw(Rs+#u6:2)-=Rt -// MEMh_CLRr_V4 : memw(Rs+#u6:2)&=Rt -// MEMh_SETr_V4 : memw(Rs+#u6:2)|=Rt -// -// Not implemented: -// MEMh_CLRi_indexed_V4 : memw(Rs+#u6:2)=clrbit(#U5) -// MEMh_SETi_indexed_V4 : memw(Rs+#u6:2)=setbit(#U5) -// MEMh_CLRi_V4 : memw(Rs+#u6:2)=clrbit(#U5) -// MEMh_SETi_V4 : memw(Rs+#u6:2)=setbit(#U5) +// Multiclass to define 'Def Pats' for ALU operations on the memory +// Here value used for the ALU operation is an immediate value. +// mem[bh](Rs+#0) += #U5 +// mem[bh](Rs+#u6) += #U5 //===----------------------------------------------------------------------===// +multiclass MemOpi_u5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf ExtPred, + InstHexagon MI, SDNode OpNode> { + let AddedComplexity = 180 in + def : Pat < (stOp (OpNode (ldOp IntRegs:$addr), u5ImmPred:$addend), + IntRegs:$addr), + (MI IntRegs:$addr, #0, u5ImmPred:$addend )>; -// memh(Rs+#u6:1) += #U5 -let AddedComplexity = 30 in -def MEMh_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_1Imm:$offset, u5Imm:$addend), - "memh($base+#$offset) += $addend", - []>, - Requires<[HasV4T, UseMEMOP]>; + let AddedComplexity = 190 in + def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, ExtPred:$offset)), + u5ImmPred:$addend), + (add IntRegs:$base, ExtPred:$offset)), + (MI IntRegs:$base, ExtPred:$offset, u5ImmPred:$addend)>; +} -// memh(Rs+#u6:1) -= #U5 -let AddedComplexity = 30 in -def MEMh_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_1Imm:$offset, u5Imm:$subend), - "memh($base+#$offset) -= $subend", - []>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) += Rt -let AddedComplexity = 30 in -def MEMh_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$addend), - "memh($base+#$offset) += $addend", - [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base), - u6_1ImmPred:$offset)), - (i32 IntRegs:$addend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) -= Rt -let AddedComplexity = 30 in -def MEMh_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$subend), - "memh($base+#$offset) -= $subend", - [(truncstorei16 (sub (sextloadi16 (add (i32 IntRegs:$base), - u6_1ImmPred:$offset)), - (i32 IntRegs:$subend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) &= Rt -let AddedComplexity = 30 in -def MEMh_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$andend), - "memh($base+#$offset) += $andend", - [(truncstorei16 (and (sextloadi16 (add 
(i32 IntRegs:$base), - u6_1ImmPred:$offset)), - (i32 IntRegs:$andend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) |= Rt -let AddedComplexity = 30 in -def MEMh_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$orend), - "memh($base+#$offset) |= $orend", - [(truncstorei16 (or (sextloadi16 (add (i32 IntRegs:$base), - u6_1ImmPred:$offset)), - (i32 IntRegs:$orend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) += #U5 -let AddedComplexity = 30 in -def MEMh_ADDi_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, u5Imm:$addend), - "memh($addr) += $addend", - []>, - Requires<[HasV4T, UseMEMOP]>; +multiclass MemOpi_u5ALUOp<PatFrag ldOp, PatFrag stOp, PatLeaf ExtPred, + InstHexagon addMI, InstHexagon subMI> { + defm : MemOpi_u5Pats<ldOp, stOp, ExtPred, addMI, add>; + defm : MemOpi_u5Pats<ldOp, stOp, ExtPred, subMI, sub>; +} -// memh(Rs+#u6:1) -= #U5 -let AddedComplexity = 30 in -def MEMh_SUBi_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, u5Imm:$subend), - "memh($addr) -= $subend", - []>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) += Rt -let AddedComplexity = 30 in -def MEMh_ADDr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$addend), - "memh($addr) += $addend", - [(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$addend)), ADDRriU6_1:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) -= Rt -let AddedComplexity = 30 in -def MEMh_SUBr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$subend), - "memh($addr) -= $subend", - [(truncstorei16 (sub (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$subend)), ADDRriU6_1:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) &= Rt -let AddedComplexity = 30 in -def MEMh_ANDr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$andend), - "memh($addr) &= $andend", - [(truncstorei16 (and (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$andend)), ADDRriU6_1:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memh(Rs+#u6:1) |= Rt -let AddedComplexity = 30 in -def MEMh_ORr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$orend), - "memh($addr) |= $orend", - [(truncstorei16 (or (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$orend)), ADDRriU6_1:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; +multiclass MemOpi_u5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > { + // Half Word + defm : MemOpi_u5ALUOp <ldOpHalf, truncstorei16, u6_1ExtPred, + MemOPh_ADDi_V4, MemOPh_SUBi_V4>; + // Byte + defm : MemOpi_u5ALUOp <ldOpByte, truncstorei8, u6ExtPred, + MemOPb_ADDi_V4, MemOPb_SUBi_V4>; +} + +let Predicates = [HasV4T, UseMEMOP] in { + defm : MemOpi_u5ExtType<zextloadi8, zextloadi16>; // zero extend + defm : MemOpi_u5ExtType<sextloadi8, sextloadi16>; // sign extend + defm : MemOpi_u5ExtType<extloadi8, extloadi16>; // any extend + // Word + defm : MemOpi_u5ALUOp <load, store, u6_2ExtPred, MemOPw_ADDi_V4, + MemOPw_SUBi_V4>; +} //===----------------------------------------------------------------------===// -// MEMOP: Byte -// -// Implemented: -// MEMb_ADDi_indexed_V4 : memb(Rs+#u6:0)+=#U5 -// MEMb_SUBi_indexed_V4 : memb(Rs+#u6:0)-=#U5 -// MEMb_ADDr_indexed_V4 : memb(Rs+#u6:0)+=Rt -// MEMb_SUBr_indexed_V4 : memb(Rs+#u6:0)-=Rt -// MEMb_CLRr_indexed_V4 : memb(Rs+#u6:0)&=Rt -// MEMb_SETr_indexed_V4 : memb(Rs+#u6:0)|=Rt -// MEMb_ADDi_V4 : memb(Rs+#u6:0)+=#U5 -// MEMb_SUBi_V4 : memb(Rs+#u6:0)-=#U5 -// MEMb_ADDr_V4 : memb(Rs+#u6:0)+=Rt -// MEMb_SUBr_V4 : 
memb(Rs+#u6:0)-=Rt -// MEMb_CLRr_V4 : memb(Rs+#u6:0)&=Rt -// MEMb_SETr_V4 : memb(Rs+#u6:0)|=Rt -// -// Not implemented: -// MEMb_CLRi_indexed_V4 : memb(Rs+#u6:0)=clrbit(#U5) -// MEMb_SETi_indexed_V4 : memb(Rs+#u6:0)=setbit(#U5) -// MEMb_CLRi_V4 : memb(Rs+#u6:0)=clrbit(#U5) -// MEMb_SETi_V4 : memb(Rs+#u6:0)=setbit(#U5) +// multiclass to define 'Def Pats' for ALU operations on the memory. +// Here value used for the ALU operation is a negative value. +// mem[bh](Rs+#0) += #m5 +// mem[bh](Rs+#u6) += #m5 //===----------------------------------------------------------------------===// -// memb(Rs+#u6:0) += #U5 -let AddedComplexity = 30 in -def MEMb_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_0Imm:$offset, u5Imm:$addend), - "memb($base+#$offset) += $addend", - []>, - Requires<[HasV4T, UseMEMOP]>; +multiclass MemOpi_m5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf extPred, + PatLeaf immPred, ComplexPattern addrPred, + SDNodeXForm xformFunc, InstHexagon MI> { + let AddedComplexity = 190 in + def : Pat <(stOp (add (ldOp IntRegs:$addr), immPred:$subend), + IntRegs:$addr), + (MI IntRegs:$addr, #0, (xformFunc immPred:$subend) )>; -// memb(Rs+#u6:0) -= #U5 -let AddedComplexity = 30 in -def MEMb_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_0Imm:$offset, u5Imm:$subend), - "memb($base+#$offset) -= $subend", - []>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) += Rt -let AddedComplexity = 30 in -def MEMb_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$addend), - "memb($base+#$offset) += $addend", - [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base), - u6_0ImmPred:$offset)), - (i32 IntRegs:$addend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) -= Rt -let AddedComplexity = 30 in -def MEMb_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$subend), - "memb($base+#$offset) -= $subend", - [(truncstorei8 (sub (sextloadi8 (add (i32 IntRegs:$base), - u6_0ImmPred:$offset)), - (i32 IntRegs:$subend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) &= Rt -let AddedComplexity = 30 in -def MEMb_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$andend), - "memb($base+#$offset) += $andend", - [(truncstorei8 (and (sextloadi8 (add (i32 IntRegs:$base), - u6_0ImmPred:$offset)), - (i32 IntRegs:$andend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) |= Rt -let AddedComplexity = 30 in -def MEMb_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), - (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$orend), - "memb($base+#$offset) |= $orend", - [(truncstorei8 (or (sextloadi8 (add (i32 IntRegs:$base), - u6_0ImmPred:$offset)), - (i32 IntRegs:$orend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) += #U5 -let AddedComplexity = 30 in -def MEMb_ADDi_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, u5Imm:$addend), - "memb($addr) += $addend", - []>, - Requires<[HasV4T, UseMEMOP]>; + let AddedComplexity = 195 in + def : Pat<(stOp (add (ldOp (add IntRegs:$base, extPred:$offset)), + immPred:$subend), + (add IntRegs:$base, extPred:$offset)), + (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$subend))>; +} -// memb(Rs+#u6:0) -= #U5 -let AddedComplexity = 30 in -def MEMb_SUBi_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, u5Imm:$subend), - 
"memb($addr) -= $subend", - []>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) += Rt -let AddedComplexity = 30 in -def MEMb_ADDr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$addend), - "memb($addr) += $addend", - [(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$addend)), ADDRriU6_0:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) -= Rt -let AddedComplexity = 30 in -def MEMb_SUBr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$subend), - "memb($addr) -= $subend", - [(truncstorei8 (sub (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$subend)), ADDRriU6_0:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) &= Rt -let AddedComplexity = 30 in -def MEMb_ANDr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$andend), - "memb($addr) &= $andend", - [(truncstorei8 (and (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$andend)), ADDRriU6_0:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; - -// memb(Rs+#u6:0) |= Rt -let AddedComplexity = 30 in -def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs), - (ins MEMri:$addr, IntRegs:$orend), - "memb($addr) |= $orend", - [(truncstorei8 (or (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$orend)), ADDRriU6_0:$addr)]>, - Requires<[HasV4T, UseMEMOP]>; +multiclass MemOpi_m5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > { + // Half Word + defm : MemOpi_m5Pats <ldOpHalf, truncstorei16, u6_1ExtPred, m5HImmPred, + ADDRriU6_1, MEMOPIMM_HALF, MemOPh_SUBi_V4>; + // Byte + defm : MemOpi_m5Pats <ldOpByte, truncstorei8, u6ExtPred, m5BImmPred, + ADDRriU6_0, MEMOPIMM_BYTE, MemOPb_SUBi_V4>; +} +let Predicates = [HasV4T, UseMEMOP] in { + defm : MemOpi_m5ExtType<zextloadi8, zextloadi16>; // zero extend + defm : MemOpi_m5ExtType<sextloadi8, sextloadi16>; // sign extend + defm : MemOpi_m5ExtType<extloadi8, extloadi16>; // any extend + + // Word + defm : MemOpi_m5Pats <load, store, u6_2ExtPred, m5ImmPred, + ADDRriU6_2, MEMOPIMM, MemOPw_SUBi_V4>; +} + +//===----------------------------------------------------------------------===// +// Multiclass to define 'def Pats' for bit operations on the memory. 
+// mem[bhw](Rs+#0) = [clrbit|setbit](#U5) +// mem[bhw](Rs+#u6) = [clrbit|setbit](#U5) +//===----------------------------------------------------------------------===// + +multiclass MemOpi_bitPats <PatFrag ldOp, PatFrag stOp, PatLeaf immPred, + PatLeaf extPred, ComplexPattern addrPred, + SDNodeXForm xformFunc, InstHexagon MI, SDNode OpNode> { + + // mem[bhw](Rs+#u6:[012]) = [clrbit|setbit](#U5) + let AddedComplexity = 250 in + def : Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)), + immPred:$bitend), + (add IntRegs:$base, extPred:$offset)), + (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$bitend))>; + + // mem[bhw](Rs+#0) = [clrbit|setbit](#U5) + let AddedComplexity = 225 in + def : Pat <(stOp (OpNode (ldOp addrPred:$addr), immPred:$bitend), + addrPred:$addr), + (MI IntRegs:$addr, #0, (xformFunc immPred:$bitend))>; +} + +multiclass MemOpi_bitExtType<PatFrag ldOpByte, PatFrag ldOpHalf > { + // Byte - clrbit + defm : MemOpi_bitPats<ldOpByte, truncstorei8, Clr3ImmPred, u6ExtPred, + ADDRriU6_0, CLRMEMIMM_BYTE, MemOPb_CLRBITi_V4, and>; + // Byte - setbit + defm : MemOpi_bitPats<ldOpByte, truncstorei8, Set3ImmPred, u6ExtPred, + ADDRriU6_0, SETMEMIMM_BYTE, MemOPb_SETBITi_V4, or>; + // Half Word - clrbit + defm : MemOpi_bitPats<ldOpHalf, truncstorei16, Clr4ImmPred, u6_1ExtPred, + ADDRriU6_1, CLRMEMIMM_SHORT, MemOPh_CLRBITi_V4, and>; + // Half Word - setbit + defm : MemOpi_bitPats<ldOpHalf, truncstorei16, Set4ImmPred, u6_1ExtPred, + ADDRriU6_1, SETMEMIMM_SHORT, MemOPh_SETBITi_V4, or>; +} + +let Predicates = [HasV4T, UseMEMOP] in { + // mem[bh](Rs+#0) = [clrbit|setbit](#U5) + // mem[bh](Rs+#u6:[01]) = [clrbit|setbit](#U5) + defm : MemOpi_bitExtType<zextloadi8, zextloadi16>; // zero extend + defm : MemOpi_bitExtType<sextloadi8, sextloadi16>; // sign extend + defm : MemOpi_bitExtType<extloadi8, extloadi16>; // any extend + + // memw(Rs+#0) = [clrbit|setbit](#U5) + // memw(Rs+#u6:2) = [clrbit|setbit](#U5) + defm : MemOpi_bitPats<load, store, Clr5ImmPred, u6_2ExtPred, ADDRriU6_2, + CLRMEMIMM, MemOPw_CLRBITi_V4, and>; + defm : MemOpi_bitPats<load, store, Set5ImmPred, u6_2ExtPred, ADDRriU6_2, + SETMEMIMM, MemOPw_SETBITi_V4, or>; +} + +//===----------------------------------------------------------------------===// +// Multiclass to define 'def Pats' for ALU operations on the memory +// where addend is a register. 
+// mem[bhw](Rs+#0) [+-&|]= Rt +// mem[bhw](Rs+#U6:[012]) [+-&|]= Rt +//===----------------------------------------------------------------------===// + +multiclass MemOpr_Pats <PatFrag ldOp, PatFrag stOp, ComplexPattern addrPred, + PatLeaf extPred, InstHexagon MI, SDNode OpNode> { + let AddedComplexity = 141 in + // mem[bhw](Rs+#0) [+-&|]= Rt + def : Pat <(stOp (OpNode (ldOp addrPred:$addr), (i32 IntRegs:$addend)), + addrPred:$addr), + (MI IntRegs:$addr, #0, (i32 IntRegs:$addend) )>; + + // mem[bhw](Rs+#U6:[012]) [+-&|]= Rt + let AddedComplexity = 150 in + def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)), + (i32 IntRegs:$orend)), + (add IntRegs:$base, extPred:$offset)), + (MI IntRegs:$base, extPred:$offset, (i32 IntRegs:$orend) )>; +} + +multiclass MemOPr_ALUOp<PatFrag ldOp, PatFrag stOp, + ComplexPattern addrPred, PatLeaf extPred, + InstHexagon addMI, InstHexagon subMI, + InstHexagon andMI, InstHexagon orMI > { + + defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, addMI, add>; + defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, subMI, sub>; + defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, andMI, and>; + defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, orMI, or>; +} + +multiclass MemOPr_ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > { + // Half Word + defm : MemOPr_ALUOp <ldOpHalf, truncstorei16, ADDRriU6_1, u6_1ExtPred, + MemOPh_ADDr_V4, MemOPh_SUBr_V4, + MemOPh_ANDr_V4, MemOPh_ORr_V4>; + // Byte + defm : MemOPr_ALUOp <ldOpByte, truncstorei8, ADDRriU6_0, u6ExtPred, + MemOPb_ADDr_V4, MemOPb_SUBr_V4, + MemOPb_ANDr_V4, MemOPb_ORr_V4>; +} + +// Define 'def Pats' for MemOps with register addend. +let Predicates = [HasV4T, UseMEMOP] in { + // Byte, Half Word + defm : MemOPr_ExtType<zextloadi8, zextloadi16>; // zero extend + defm : MemOPr_ExtType<sextloadi8, sextloadi16>; // sign extend + defm : MemOPr_ExtType<extloadi8, extloadi16>; // any extend + // Word + defm : MemOPr_ALUOp <load, store, ADDRriU6_2, u6_2ExtPred, MemOPw_ADDr_V4, + MemOPw_SUBr_V4, MemOPw_ANDr_V4, MemOPw_ORr_V4 >; +} //===----------------------------------------------------------------------===// // XTYPE/PRED + @@ -3146,7 +2145,7 @@ def CMPbEQri_V4 : MInst<(outs PredRegs:$dst), def : Pat <(brcond (i1 (setne (and (i32 IntRegs:$src1), 255), u8ImmPred:$src2)), bb:$offset), - (JMP_cNot (CMPbEQri_V4 (i32 IntRegs:$src1), u8ImmPred:$src2), + (JMP_f (CMPbEQri_V4 (i32 IntRegs:$src1), u8ImmPred:$src2), bb:$offset)>, Requires<[HasV4T]>; @@ -3629,9 +2628,9 @@ let isReturn = 1, isTerminator = 1, multiclass ST_Abs_Predbase<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME#_V4 : STInst2<(outs), - (ins PredRegs:$src1, globaladdressExt:$absaddr, RC: $src2), + (ins PredRegs:$src1, u0AlwaysExt:$absaddr, RC: $src2), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", ") ")#mnemonic#"(##$absaddr) = $src2", []>, @@ -3639,7 +2638,7 @@ multiclass ST_Abs_Predbase<string mnemonic, RegisterClass RC, bit isNot, } multiclass ST_Abs_Pred<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_Abs_Predbase<mnemonic, RC, PredNot, 0>; // Predicate new defm _cdn#NAME : ST_Abs_Predbase<mnemonic, RC, PredNot, 1>; @@ -3651,7 +2650,7 @@ multiclass ST_Abs<string mnemonic, string CextOp, RegisterClass RC> { let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in { let opExtendable = 0, isPredicable = 1 in def NAME#_V4 : 
STInst2<(outs), - (ins globaladdressExt:$absaddr, RC:$src), + (ins u0AlwaysExt:$absaddr, RC:$src), mnemonic#"(##$absaddr) = $src", []>, Requires<[HasV4T]>; @@ -3665,9 +2664,9 @@ multiclass ST_Abs<string mnemonic, string CextOp, RegisterClass RC> { multiclass ST_Abs_Predbase_nv<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME#_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, globaladdressExt:$absaddr, RC: $src2), + (ins PredRegs:$src1, u0AlwaysExt:$absaddr, RC: $src2), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", ") ")#mnemonic#"(##$absaddr) = $src2.new", []>, @@ -3675,7 +2674,7 @@ multiclass ST_Abs_Predbase_nv<string mnemonic, RegisterClass RC, bit isNot, } multiclass ST_Abs_Pred_nv<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : ST_Abs_Predbase_nv<mnemonic, RC, PredNot, 0>; // Predicate new defm _cdn#NAME : ST_Abs_Predbase_nv<mnemonic, RC, PredNot, 1>; @@ -3687,7 +2686,7 @@ multiclass ST_Abs_nv<string mnemonic, string CextOp, RegisterClass RC> { let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in { let opExtendable = 0, isPredicable = 1 in def NAME#_nv_V4 : NVInst_V4<(outs), - (ins globaladdressExt:$absaddr, RC:$src), + (ins u0AlwaysExt:$absaddr, RC:$src), mnemonic#"(##$absaddr) = $src.new", []>, Requires<[HasV4T]>; @@ -3700,16 +2699,19 @@ multiclass ST_Abs_nv<string mnemonic, string CextOp, RegisterClass RC> { } let addrMode = Absolute in { + let accessSize = ByteAccess in defm STrib_abs : ST_Abs<"memb", "STrib", IntRegs>, ST_Abs_nv<"memb", "STrib", IntRegs>, AddrModeRel; + let accessSize = HalfWordAccess in defm STrih_abs : ST_Abs<"memh", "STrih", IntRegs>, ST_Abs_nv<"memh", "STrih", IntRegs>, AddrModeRel; + let accessSize = WordAccess in defm STriw_abs : ST_Abs<"memw", "STriw", IntRegs>, ST_Abs_nv<"memw", "STriw", IntRegs>, AddrModeRel; - let isNVStorable = 0 in + let accessSize = DoubleWordAccess, isNVStorable = 0 in defm STrid_abs : ST_Abs<"memd", "STrid", DoubleRegs>, AddrModeRel; } @@ -3730,11 +2732,115 @@ def : Pat<(store (i64 DoubleRegs:$src1), (STrid_abs_V4 tglobaladdr: $absaddr, DoubleRegs: $src1)>; } +//===----------------------------------------------------------------------===// +// multiclass for store instructions with GP-relative addressing mode. +// mem[bhwd](#global)=Rt +// if ([!]Pv[.new]) mem[bhwd](##global) = Rt +//===----------------------------------------------------------------------===// +let mayStore = 1, isNVStorable = 1 in +multiclass ST_GP<string mnemonic, string BaseOp, RegisterClass RC> { + let BaseOpcode = BaseOp, isPredicable = 1 in + def NAME#_V4 : STInst2<(outs), + (ins globaladdress:$global, RC:$src), + mnemonic#"(#$global) = $src", + []>; + + // When GP-relative instructions are predicated, their addressing mode is + // changed to absolute and they are always constant extended. 
+ let BaseOpcode = BaseOp, isExtended = 1, opExtendable = 1, + isPredicated = 1 in { + defm Pt : ST_Abs_Pred <mnemonic, RC, 0>; + defm NotPt : ST_Abs_Pred <mnemonic, RC, 1>; + } +} + +let mayStore = 1, isNVStore = 1 in +multiclass ST_GP_nv<string mnemonic, string BaseOp, RegisterClass RC> { + let BaseOpcode = BaseOp, isPredicable = 1 in + def NAME#_nv_V4 : NVInst_V4<(outs), + (ins u0AlwaysExt:$global, RC:$src), + mnemonic#"(#$global) = $src.new", + []>, + Requires<[HasV4T]>; + + // When GP-relative instructions are predicated, their addressing mode is + // changed to absolute and they are always constant extended. + let BaseOpcode = BaseOp, isExtended = 1, opExtendable = 1, + isPredicated = 1 in { + defm Pt : ST_Abs_Pred_nv<mnemonic, RC, 0>; + defm NotPt : ST_Abs_Pred_nv<mnemonic, RC, 1>; + } +} + +let validSubTargets = HasV4SubT, neverHasSideEffects = 1 in { + let isNVStorable = 0 in + defm STd_GP : ST_GP <"memd", "STd_GP", DoubleRegs>, PredNewRel; + + defm STb_GP : ST_GP<"memb", "STb_GP", IntRegs>, + ST_GP_nv<"memb", "STb_GP", IntRegs>, NewValueRel; + defm STh_GP : ST_GP<"memh", "STh_GP", IntRegs>, + ST_GP_nv<"memh", "STh_GP", IntRegs>, NewValueRel; + defm STw_GP : ST_GP<"memw", "STw_GP", IntRegs>, + ST_GP_nv<"memw", "STw_GP", IntRegs>, NewValueRel; +} + +// 64 bit atomic store +def : Pat <(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global), + (i64 DoubleRegs:$src1)), + (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress) -> memd(#foo) +let AddedComplexity = 100 in +def : Pat <(store (i64 DoubleRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>; + +// 8 bit atomic store +def : Pat < (atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>; + +// Map from store(globaladdress) -> memb(#foo) +let AddedComplexity = 100 in +def : Pat<(truncstorei8 (i32 IntRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>; + +// Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1" +// to "r0 = 1; memw(#foo) = r0" +let AddedComplexity = 100 in +def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)), + (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>; + +def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>; + +// Map from store(globaladdress) -> memh(#foo) +let AddedComplexity = 100 in +def : Pat<(truncstorei16 (i32 IntRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>; + +// 32 bit atomic store +def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>; + +// Map from store(globaladdress) -> memw(#foo) +let AddedComplexity = 100 in +def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), + (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>; + +//===----------------------------------------------------------------------===// +// Multiclass for the load instructions with absolute addressing mode. 
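
Taken together, the ST_GP multiclass and the AddedComplexity = 100 patterns above make an ordinary (or atomic) store to a global select straight to a GP-relative store. A source-level sketch, with `g` standing in for any such global:

// C/C++ shape covered by the store(globaladdress) patterns above.
int g;
void set_g(int x) {
  g = x;   // selected as STw_GP_V4, i.e. "memw(#g) = Rt"
}
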
+//===----------------------------------------------------------------------===// multiclass LD_Abs_Predbase<string mnemonic, RegisterClass RC, bit isNot, bit isPredNew> { - let PNewValue = !if(isPredNew, "new", "") in + let isPredicatedNew = isPredNew in def NAME : LDInst2<(outs RC:$dst), - (ins PredRegs:$src1, globaladdressExt:$absaddr), + (ins PredRegs:$src1, u0AlwaysExt:$absaddr), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", ") ")#"$dst = "#mnemonic#"(##$absaddr)", []>, @@ -3742,7 +2848,7 @@ multiclass LD_Abs_Predbase<string mnemonic, RegisterClass RC, bit isNot, } multiclass LD_Abs_Pred<string mnemonic, RegisterClass RC, bit PredNot> { - let PredSense = !if(PredNot, "false", "true") in { + let isPredicatedFalse = PredNot in { defm _c#NAME : LD_Abs_Predbase<mnemonic, RC, PredNot, 0>; // Predicate new defm _cdn#NAME : LD_Abs_Predbase<mnemonic, RC, PredNot, 1>; @@ -3754,7 +2860,7 @@ multiclass LD_Abs<string mnemonic, string CextOp, RegisterClass RC> { let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in { let opExtendable = 1, isPredicable = 1 in def NAME#_V4 : LDInst2<(outs RC:$dst), - (ins globaladdressExt:$absaddr), + (ins u0AlwaysExt:$absaddr), "$dst = "#mnemonic#"(##$absaddr)", []>, Requires<[HasV4T]>; @@ -3767,33 +2873,138 @@ multiclass LD_Abs<string mnemonic, string CextOp, RegisterClass RC> { } let addrMode = Absolute in { + let accessSize = ByteAccess in { defm LDrib_abs : LD_Abs<"memb", "LDrib", IntRegs>, AddrModeRel; defm LDriub_abs : LD_Abs<"memub", "LDriub", IntRegs>, AddrModeRel; + } + let accessSize = HalfWordAccess in { defm LDrih_abs : LD_Abs<"memh", "LDrih", IntRegs>, AddrModeRel; defm LDriuh_abs : LD_Abs<"memuh", "LDriuh", IntRegs>, AddrModeRel; + } + let accessSize = WordAccess in defm LDriw_abs : LD_Abs<"memw", "LDriw", IntRegs>, AddrModeRel; + + let accessSize = DoubleWordAccess in defm LDrid_abs : LD_Abs<"memd", "LDrid", DoubleRegs>, AddrModeRel; } -let Predicates = [HasV4T], AddedComplexity = 30 in +let Predicates = [HasV4T], AddedComplexity = 30 in { def : Pat<(i32 (load (HexagonCONST32 tglobaladdr:$absaddr))), (LDriw_abs_V4 tglobaladdr: $absaddr)>; -let Predicates = [HasV4T], AddedComplexity=30 in def : Pat<(i32 (sextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))), (LDrib_abs_V4 tglobaladdr:$absaddr)>; -let Predicates = [HasV4T], AddedComplexity=30 in def : Pat<(i32 (zextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))), (LDriub_abs_V4 tglobaladdr:$absaddr)>; -let Predicates = [HasV4T], AddedComplexity=30 in def : Pat<(i32 (sextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))), (LDrih_abs_V4 tglobaladdr:$absaddr)>; -let Predicates = [HasV4T], AddedComplexity=30 in def : Pat<(i32 (zextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))), (LDriuh_abs_V4 tglobaladdr:$absaddr)>; +} + +//===----------------------------------------------------------------------===// +// multiclass for load instructions with GP-relative addressing mode. 
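
The absolute-addressing load patterns above, together with the LD_GP defs introduced next, are the load-side mirror of those stores: a load of a global becomes "Rd = memw(##g)" (LDriw_abs_V4) or "Rd = memw(#g)" (LDw_GP_V4), depending on how the address was lowered. Sketch, again with a hypothetical global `g`:

int g;
int get_g() {
  return g;   // selected per the LDriw_abs_V4 / LDw_GP_V4 patterns here
}
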
+// Rx=mem[bhwd](##global) +// if ([!]Pv[.new]) Rx=mem[bhwd](##global) +//===----------------------------------------------------------------------===// +let neverHasSideEffects = 1, validSubTargets = HasV4SubT in +multiclass LD_GP<string mnemonic, string BaseOp, RegisterClass RC> { + let BaseOpcode = BaseOp in { + let isPredicable = 1 in + def NAME#_V4 : LDInst2<(outs RC:$dst), + (ins globaladdress:$global), + "$dst = "#mnemonic#"(#$global)", + []>; + + let isExtended = 1, opExtendable = 2, isPredicated = 1 in { + defm Pt_V4 : LD_Abs_Pred<mnemonic, RC, 0>; + defm NotPt_V4 : LD_Abs_Pred<mnemonic, RC, 1>; + } + } +} + +defm LDd_GP : LD_GP<"memd", "LDd_GP", DoubleRegs>, PredNewRel; +defm LDb_GP : LD_GP<"memb", "LDb_GP", IntRegs>, PredNewRel; +defm LDub_GP : LD_GP<"memub", "LDub_GP", IntRegs>, PredNewRel; +defm LDh_GP : LD_GP<"memh", "LDh_GP", IntRegs>, PredNewRel; +defm LDuh_GP : LD_GP<"memuh", "LDuh_GP", IntRegs>, PredNewRel; +defm LDw_GP : LD_GP<"memw", "LDw_GP", IntRegs>, PredNewRel; + +def : Pat <(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)), + (i64 (LDd_GP_V4 tglobaladdr:$global))>; + +def : Pat <(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDw_GP_V4 tglobaladdr:$global))>; + +def : Pat <(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDuh_GP_V4 tglobaladdr:$global))>; + +def : Pat <(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDub_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memw(#foo + 0) +let AddedComplexity = 100 in +def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i64 (LDd_GP_V4 tglobaladdr:$global))>; + +// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd +let AddedComplexity = 100 in +def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>; + +// When the Interprocedural Global Variable optimizer realizes that a certain +// global variable takes only two constant values, it shrinks the global to +// a boolean. Catch those loads here in the following 3 patterns. 
+let AddedComplexity = 100 in +def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>; + +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memb(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memb(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>; + +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memub(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memh(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (extloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDh_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memh(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDh_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memuh(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDuh_GP_V4 tglobaladdr:$global))>; + +// Map from load(globaladdress) -> memw(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDw_GP_V4 tglobaladdr:$global))>; + // Transfer global address into a register let AddedComplexity=50, isMoveImm = 1, isReMaterializable = 1 in @@ -3842,19 +3053,21 @@ def : Pat<(HexagonCONST32_GP tglobaladdr:$src1), // Load - Indirect with long offset: These instructions take global address // as an operand -let AddedComplexity = 10 in +let isExtended = 1, opExtendable = 3, AddedComplexity = 40, +validSubTargets = HasV4SubT in def LDrid_ind_lo_V4 : LDInst<(outs DoubleRegs:$dst), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset), + (ins IntRegs:$src1, u2Imm:$src2, globaladdressExt:$offset), "$dst=memd($src1<<#$src2+##$offset)", [(set (i64 DoubleRegs:$dst), (load (add (shl IntRegs:$src1, u2ImmPred:$src2), (HexagonCONST32 tglobaladdr:$offset))))]>, Requires<[HasV4T]>; -let AddedComplexity = 10 in +let AddedComplexity = 40 in multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> { +let isExtended = 1, opExtendable = 3, validSubTargets = HasV4SubT in def _lo_V4 : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset), + (ins IntRegs:$src1, u2Imm:$src2, globaladdressExt:$offset), !strconcat("$dst = ", !strconcat(OpcStr, "($src1<<#$src2+##$offset)")), [(set IntRegs:$dst, @@ -3865,202 +3078,53 @@ multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> { defm LDrib_ind : LD_indirect_lo<"memb", sextloadi8>; defm LDriub_ind : LD_indirect_lo<"memub", zextloadi8>; +defm LDriub_ind_anyext : LD_indirect_lo<"memub", extloadi8>; defm LDrih_ind : LD_indirect_lo<"memh", sextloadi16>; defm LDriuh_ind : LD_indirect_lo<"memuh", zextloadi16>; +defm LDriuh_ind_anyext : LD_indirect_lo<"memuh", extloadi16>; defm LDriw_ind : LD_indirect_lo<"memw", load>; -// Store - Indirect with long offset: These instructions take global address -// as an operand -let 
AddedComplexity = 10 in -def STrid_ind_lo_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3, - DoubleRegs:$src4), - "memd($src1<<#$src2+#$src3) = $src4", - [(store (i64 DoubleRegs:$src4), - (add (shl IntRegs:$src1, u2ImmPred:$src2), - (HexagonCONST32 tglobaladdr:$src3)))]>, - Requires<[HasV4T]>; - -let AddedComplexity = 10 in -multiclass ST_indirect_lo<string OpcStr, PatFrag OpNode> { - def _lo_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3, - IntRegs:$src4), - !strconcat(OpcStr, "($src1<<#$src2+##$src3) = $src4"), - [(OpNode (i32 IntRegs:$src4), - (add (shl IntRegs:$src1, u2ImmPred:$src2), - (HexagonCONST32 tglobaladdr:$src3)))]>, - Requires<[HasV4T]>; -} - -defm STrib_ind : ST_indirect_lo<"memb", truncstorei8>; -defm STrih_ind : ST_indirect_lo<"memh", truncstorei16>; -defm STriw_ind : ST_indirect_lo<"memw", store>; - -// Store - absolute addressing mode: These instruction take constant -// value as the extended operand. -multiclass ST_absimm<string OpcStr> { -let isExtended = 1, opExtendable = 0, isPredicable = 1, -validSubTargets = HasV4SubT in - def _abs_V4 : STInst2<(outs), - (ins u0AlwaysExt:$src1, IntRegs:$src2), - !strconcat(OpcStr, "(##$src1) = $src2"), - []>, - Requires<[HasV4T]>; - -let isExtended = 1, opExtendable = 1, isPredicated = 1, -validSubTargets = HasV4SubT in { - def _abs_cPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if ($src1)", !strconcat(OpcStr, "(##$src2) = $src3")), - []>, - Requires<[HasV4T]>; - - def _abs_cNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if (!$src1)", !strconcat(OpcStr, "(##$src2) = $src3")), - []>, - Requires<[HasV4T]>; - - def _abs_cdnPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if ($src1.new)", - !strconcat(OpcStr, "(##$src2) = $src3")), - []>, - Requires<[HasV4T]>; - - def _abs_cdnNotPt_V4 : STInst2<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if (!$src1.new)", - !strconcat(OpcStr, "(##$src2) = $src3")), - []>, - Requires<[HasV4T]>; -} - -let isExtended = 1, opExtendable = 0, mayStore = 1, isNVStore = 1, -validSubTargets = HasV4SubT in - def _abs_nv_V4 : NVInst_V4<(outs), - (ins u0AlwaysExt:$src1, IntRegs:$src2), - !strconcat(OpcStr, "(##$src1) = $src2.new"), - []>, - Requires<[HasV4T]>; - -let isExtended = 1, opExtendable = 1, mayStore = 1, isPredicated = 1, -isNVStore = 1, validSubTargets = HasV4SubT in { - def _abs_cPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if ($src1)", - !strconcat(OpcStr, "(##$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; - - def _abs_cNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if (!$src1)", - !strconcat(OpcStr, "(##$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; - - def _abs_cdnPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if ($src1.new)", - !strconcat(OpcStr, "(##$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; - - def _abs_cdnNotPt_nv_V4 : NVInst_V4<(outs), - (ins PredRegs:$src1, u0AlwaysExt:$src2, IntRegs:$src3), - !strconcat("if (!$src1.new)", - !strconcat(OpcStr, "(##$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; -} -} +let AddedComplexity = 40 in +def : Pat <(i32 (sextloadi8 (add IntRegs:$src1, + (NumUsesBelowThresCONST32 tglobaladdr:$offset)))), + (i32 (LDrib_ind_lo_V4 IntRegs:$src1, 0, 
tglobaladdr:$offset))>, + Requires<[HasV4T]>; -defm STrib_imm : ST_absimm<"memb">; -defm STrih_imm : ST_absimm<"memh">; -defm STriw_imm : ST_absimm<"memw">; +let AddedComplexity = 40 in +def : Pat <(i32 (zextloadi8 (add IntRegs:$src1, + (NumUsesBelowThresCONST32 tglobaladdr:$offset)))), + (i32 (LDriub_ind_lo_V4 IntRegs:$src1, 0, tglobaladdr:$offset))>, + Requires<[HasV4T]>; let Predicates = [HasV4T], AddedComplexity = 30 in { def : Pat<(truncstorei8 (i32 IntRegs:$src1), u0AlwaysExtPred:$src2), - (STrib_imm_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>; + (STrib_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>; def : Pat<(truncstorei16 (i32 IntRegs:$src1), u0AlwaysExtPred:$src2), - (STrih_imm_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>; + (STrih_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>; def : Pat<(store (i32 IntRegs:$src1), u0AlwaysExtPred:$src2), - (STriw_imm_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>; -} - -// Load - absolute addressing mode: These instruction take constant -// value as the extended operand - -multiclass LD_absimm<string OpcStr> { -let isExtended = 1, opExtendable = 1, isPredicable = 1, -validSubTargets = HasV4SubT in - def _abs_V4 : LDInst2<(outs IntRegs:$dst), - (ins u0AlwaysExt:$src), - !strconcat("$dst = ", - !strconcat(OpcStr, "(##$src)")), - []>, - Requires<[HasV4T]>; - -let isExtended = 1, opExtendable = 2, isPredicated = 1, -validSubTargets = HasV4SubT in { - def _abs_cPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, u0AlwaysExt:$src2), - !strconcat("if ($src1) $dst = ", - !strconcat(OpcStr, "(##$src2)")), - []>, - Requires<[HasV4T]>; - - def _abs_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, u0AlwaysExt:$src2), - !strconcat("if (!$src1) $dst = ", - !strconcat(OpcStr, "(##$src2)")), - []>, - Requires<[HasV4T]>; - - def _abs_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, u0AlwaysExt:$src2), - !strconcat("if ($src1.new) $dst = ", - !strconcat(OpcStr, "(##$src2)")), - []>, - Requires<[HasV4T]>; - - def _abs_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), - (ins PredRegs:$src1, u0AlwaysExt:$src2), - !strconcat("if (!$src1.new) $dst = ", - !strconcat(OpcStr, "(##$src2)")), - []>, - Requires<[HasV4T]>; + (STriw_abs_V4 u0AlwaysExtPred:$src2, IntRegs: $src1)>; } -} - -defm LDrib_imm : LD_absimm<"memb">; -defm LDriub_imm : LD_absimm<"memub">; -defm LDrih_imm : LD_absimm<"memh">; -defm LDriuh_imm : LD_absimm<"memuh">; -defm LDriw_imm : LD_absimm<"memw">; let Predicates = [HasV4T], AddedComplexity = 30 in { def : Pat<(i32 (load u0AlwaysExtPred:$src)), - (LDriw_imm_abs_V4 u0AlwaysExtPred:$src)>; + (LDriw_abs_V4 u0AlwaysExtPred:$src)>; def : Pat<(i32 (sextloadi8 u0AlwaysExtPred:$src)), - (LDrib_imm_abs_V4 u0AlwaysExtPred:$src)>; + (LDrib_abs_V4 u0AlwaysExtPred:$src)>; def : Pat<(i32 (zextloadi8 u0AlwaysExtPred:$src)), - (LDriub_imm_abs_V4 u0AlwaysExtPred:$src)>; + (LDriub_abs_V4 u0AlwaysExtPred:$src)>; def : Pat<(i32 (sextloadi16 u0AlwaysExtPred:$src)), - (LDrih_imm_abs_V4 u0AlwaysExtPred:$src)>; + (LDrih_abs_V4 u0AlwaysExtPred:$src)>; def : Pat<(i32 (zextloadi16 u0AlwaysExtPred:$src)), - (LDriuh_imm_abs_V4 u0AlwaysExtPred:$src)>; + (LDriuh_abs_V4 u0AlwaysExtPred:$src)>; } -// Indexed store double word - global address. +// Indexed store word - global address. 
// memw(Rs+#u6:2)=#S8 let AddedComplexity = 10 in def STriw_offset_ext_V4 : STInst<(outs), diff --git a/lib/Target/Hexagon/HexagonMCInst.h b/lib/Target/Hexagon/HexagonMCInst.h deleted file mode 100644 index e16636e..0000000 --- a/lib/Target/Hexagon/HexagonMCInst.h +++ /dev/null @@ -1,41 +0,0 @@ -//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This class extends MCInst to allow some VLIW annotation. -// -//===----------------------------------------------------------------------===// - -#ifndef HEXAGONMCINST_H -#define HEXAGONMCINST_H - -#include "llvm/CodeGen/MachineInstr.h" -#include "llvm/MC/MCInst.h" - -namespace llvm { - class HexagonMCInst: public MCInst { - // Packet start and end markers - unsigned startPacket: 1, endPacket: 1; - const MachineInstr *MachineI; - public: - explicit HexagonMCInst(): MCInst(), - startPacket(0), endPacket(0) {} - - const MachineInstr* getMI() const { return MachineI; } - - void setMI(const MachineInstr *MI) { MachineI = MI; } - - bool isStartPacket() const { return (startPacket); } - bool isEndPacket() const { return (endPacket); } - - void setStartPacket(bool yes) { startPacket = yes; } - void setEndPacket(bool yes) { endPacket = yes; } - }; -} - -#endif diff --git a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h index 0318c51..bd7b26a 100644 --- a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h +++ b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h @@ -29,15 +29,18 @@ class HexagonMachineFunctionInfo : public MachineFunctionInfo { std::vector<MachineInstr*> AllocaAdjustInsts; int VarArgsFrameIndex; bool HasClobberLR; + bool HasEHReturn; std::map<const MachineInstr*, unsigned> PacketInfo; public: - HexagonMachineFunctionInfo() : SRetReturnReg(0), HasClobberLR(0) {} + HexagonMachineFunctionInfo() : SRetReturnReg(0), HasClobberLR(0), + HasEHReturn(false) {} HexagonMachineFunctionInfo(MachineFunction &MF) : SRetReturnReg(0), - HasClobberLR(0) {} + HasClobberLR(0), + HasEHReturn(false) {} unsigned getSRetReturnReg() const { return SRetReturnReg; } void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } @@ -69,6 +72,8 @@ public: void setHasClobberLR(bool v) { HasClobberLR = v; } bool hasClobberLR() const { return HasClobberLR; } + bool hasEHReturn() const { return HasEHReturn; }; + void setHasEHReturn(bool H = true) { HasEHReturn = H; }; }; } // End llvm namespace diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp index cd3d289..72af876 100644 --- a/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -68,6 +68,7 @@ namespace { HexagonNewValueJump() : MachineFunctionPass(ID) { } virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<MachineBranchProbabilityInfo>(); MachineFunctionPass::getAnalysisUsage(AU); } @@ -78,6 +79,8 @@ namespace { virtual bool runOnMachineFunction(MachineFunction &Fn); private: + /// \brief A handle to the branch probability pass. + const MachineBranchProbabilityInfo *MBPI; }; @@ -208,19 +211,15 @@ static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII, // range specified by the arch. 
if (!secondReg) { int64_t v = MI->getOperand(2).getImm(); - if (MI->getOpcode() == Hexagon::CMPGEri || - (MI->getOpcode() == Hexagon::CMPGEUri && v > 0)) - --v; if (!(isUInt<5>(v) || ((MI->getOpcode() == Hexagon::CMPEQri || - MI->getOpcode() == Hexagon::CMPGTri || - MI->getOpcode() == Hexagon::CMPGEri) && + MI->getOpcode() == Hexagon::CMPGTri) && (v == -1)))) return false; } - unsigned cmpReg1, cmpOp2 = 0; // cmpOp2 assignment silences compiler warning. + unsigned cmpReg1, cmpOp2; cmpReg1 = MI->getOperand(1).getReg(); if (secondReg) { @@ -271,58 +270,58 @@ static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII, // Given a compare operator, return a matching New Value Jump // compare operator. Make sure that MI here is included in // HexagonInstrInfo.cpp::isNewValueJumpCandidate -static unsigned getNewValueJumpOpcode(const MachineInstr *MI, int reg, - bool secondRegNewified) { +static unsigned getNewValueJumpOpcode(MachineInstr *MI, int reg, + bool secondRegNewified, + MachineBasicBlock *jmpTarget, + const MachineBranchProbabilityInfo + *MBPI) { + bool taken = false; + MachineBasicBlock *Src = MI->getParent(); + const BranchProbability Prediction = + MBPI->getEdgeProbability(Src, jmpTarget); + + if (Prediction >= BranchProbability(1,2)) + taken = true; + switch (MI->getOpcode()) { case Hexagon::CMPEQrr: - return Hexagon::JMP_EQrrPt_nv_V4; + return taken ? Hexagon::JMP_EQrrPt_nv_V4 : Hexagon::JMP_EQrrPnt_nv_V4; case Hexagon::CMPEQri: { if (reg >= 0) - return Hexagon::JMP_EQriPt_nv_V4; + return taken ? Hexagon::JMP_EQriPt_nv_V4 : Hexagon::JMP_EQriPnt_nv_V4; else - return Hexagon::JMP_EQriPtneg_nv_V4; + return taken ? Hexagon::JMP_EQriPtneg_nv_V4 + : Hexagon::JMP_EQriPntneg_nv_V4; } - case Hexagon::CMPLTrr: case Hexagon::CMPGTrr: { if (secondRegNewified) - return Hexagon::JMP_GTrrdnPt_nv_V4; + return taken ? Hexagon::JMP_GTrrdnPt_nv_V4 + : Hexagon::JMP_GTrrdnPnt_nv_V4; else - return Hexagon::JMP_GTrrPt_nv_V4; - } - - case Hexagon::CMPGEri: { - if (reg >= 1) - return Hexagon::JMP_GTriPt_nv_V4; - else - return Hexagon::JMP_GTriPtneg_nv_V4; + return taken ? Hexagon::JMP_GTrrPt_nv_V4 + : Hexagon::JMP_GTrrPnt_nv_V4; } case Hexagon::CMPGTri: { if (reg >= 0) - return Hexagon::JMP_GTriPt_nv_V4; + return taken ? Hexagon::JMP_GTriPt_nv_V4 : Hexagon::JMP_GTriPnt_nv_V4; else - return Hexagon::JMP_GTriPtneg_nv_V4; + return taken ? Hexagon::JMP_GTriPtneg_nv_V4 + : Hexagon::JMP_GTriPntneg_nv_V4; } - case Hexagon::CMPLTUrr: case Hexagon::CMPGTUrr: { if (secondRegNewified) - return Hexagon::JMP_GTUrrdnPt_nv_V4; + return taken ? Hexagon::JMP_GTUrrdnPt_nv_V4 + : Hexagon::JMP_GTUrrdnPnt_nv_V4; else - return Hexagon::JMP_GTUrrPt_nv_V4; + return taken ? Hexagon::JMP_GTUrrPt_nv_V4 : Hexagon::JMP_GTUrrPnt_nv_V4; } case Hexagon::CMPGTUri: - return Hexagon::JMP_GTUriPt_nv_V4; - - case Hexagon::CMPGEUri: { - if (reg == 0) - return Hexagon::JMP_EQrrPt_nv_V4; - else - return Hexagon::JMP_GTUriPt_nv_V4; - } + return taken ? 
Hexagon::JMP_GTUriPt_nv_V4 : Hexagon::JMP_GTUriPnt_nv_V4; default: llvm_unreachable("Could not find matching New Value Jump instruction."); @@ -346,6 +345,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { QII = static_cast<const HexagonInstrInfo *>(MF.getTarget().getInstrInfo()); QRI = static_cast<const HexagonRegisterInfo *>(MF.getTarget().getRegisterInfo()); + MBPI = &getAnalysis<MachineBranchProbabilityInfo>(); if (!QRI->Subtarget.hasV4TOps() || DisableNewValueJumps) { @@ -393,12 +393,12 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "Instr: "; MI->dump(); dbgs() << "\n"); if (!foundJump && - (MI->getOpcode() == Hexagon::JMP_c || - MI->getOpcode() == Hexagon::JMP_cNot || - MI->getOpcode() == Hexagon::JMP_cdnPt || - MI->getOpcode() == Hexagon::JMP_cdnPnt || - MI->getOpcode() == Hexagon::JMP_cdnNotPt || - MI->getOpcode() == Hexagon::JMP_cdnNotPnt)) { + (MI->getOpcode() == Hexagon::JMP_t || + MI->getOpcode() == Hexagon::JMP_f || + MI->getOpcode() == Hexagon::JMP_tnew_t || + MI->getOpcode() == Hexagon::JMP_tnew_nt || + MI->getOpcode() == Hexagon::JMP_fnew_t || + MI->getOpcode() == Hexagon::JMP_fnew_nt)) { // This is where you would insert your compare and // instr that feeds compare jmpPos = MII; @@ -434,9 +434,9 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { jmpTarget = MI->getOperand(1).getMBB(); foundJump = true; - if (MI->getOpcode() == Hexagon::JMP_cNot || - MI->getOpcode() == Hexagon::JMP_cdnNotPt || - MI->getOpcode() == Hexagon::JMP_cdnNotPnt) { + if (MI->getOpcode() == Hexagon::JMP_f || + MI->getOpcode() == Hexagon::JMP_fnew_t || + MI->getOpcode() == Hexagon::JMP_fnew_nt) { invertPredicate = true; } continue; @@ -525,10 +525,8 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { if (isSecondOpReg) { // In case of CMPLT, or CMPLTU, or EQ with the second register // to newify, swap the operands. - if (cmpInstr->getOpcode() == Hexagon::CMPLTrr || - cmpInstr->getOpcode() == Hexagon::CMPLTUrr || - (cmpInstr->getOpcode() == Hexagon::CMPEQrr && - feederReg == (unsigned) cmpOp2)) { + if (cmpInstr->getOpcode() == Hexagon::CMPEQrr && + feederReg == (unsigned) cmpOp2) { unsigned tmp = cmpReg1; bool tmpIsKill = MO1IsKill; cmpReg1 = cmpOp2; @@ -582,42 +580,34 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { assert((QII->isNewValueJumpCandidate(cmpInstr)) && "This compare is not a New Value Jump candidate."); unsigned opc = getNewValueJumpOpcode(cmpInstr, cmpOp2, - isSecondOpNewified); + isSecondOpNewified, + jmpTarget, MBPI); if (invertPredicate) opc = QII->getInvertedPredicatedOpcode(opc); - // Manage the conversions from CMPGEUri to either CMPEQrr - // or CMPGTUri properly. See Arch spec for CMPGEUri instructions. - // This has to be after the getNewValueJumpOpcode function call as - // second operand of the compare could be modified in this logic. - if (cmpInstr->getOpcode() == Hexagon::CMPGEUri) { - if (cmpOp2 == 0) { - cmpOp2 = cmpReg1; - MO2IsKill = MO1IsKill; - isSecondOpReg = true; - } else - --cmpOp2; - } - - // Manage the conversions from CMPGEri to CMPGTUri properly. - // See Arch spec for CMPGEri instructions. 
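The net effect of threading MachineBranchProbabilityInfo through this pass is that each new-value jump now carries a static taken/not-taken hint: the ...Pt... (predicted-taken) opcode is chosen when the edge to the jump target is at least 50% likely, and the ...Pnt... form otherwise. A minimal sketch of that decision in isolation (the helper name is illustrative; the calls are the same ones the patch uses):

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/Support/BranchProbability.h"

// Mirrors the `Prediction >= BranchProbability(1,2)` test added above: returns
// true when the Src->Dst edge should use a predicted-taken (Pt) encoding.
static bool predictTaken(const llvm::MachineBranchProbabilityInfo *MBPI,
                         llvm::MachineBasicBlock *Src,
                         llvm::MachineBasicBlock *Dst) {
  return MBPI->getEdgeProbability(Src, Dst) >= llvm::BranchProbability(1, 2);
}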
- if (cmpInstr->getOpcode() == Hexagon::CMPGEri) - --cmpOp2; - - if (isSecondOpReg) { + if (isSecondOpReg) NewMI = BuildMI(*MBB, jmpPos, dl, QII->get(opc)) .addReg(cmpReg1, getKillRegState(MO1IsKill)) .addReg(cmpOp2, getKillRegState(MO2IsKill)) .addMBB(jmpTarget); - } - else { + + else if ((cmpInstr->getOpcode() == Hexagon::CMPEQri || + cmpInstr->getOpcode() == Hexagon::CMPGTri) && + cmpOp2 == -1 ) + // Corresponding new-value compare jump instructions don't have the + // operand for -1 immediate value. + NewMI = BuildMI(*MBB, jmpPos, dl, + QII->get(opc)) + .addReg(cmpReg1, getKillRegState(MO1IsKill)) + .addMBB(jmpTarget); + + else NewMI = BuildMI(*MBB, jmpPos, dl, QII->get(opc)) .addReg(cmpReg1, getKillRegState(MO1IsKill)) .addImm(cmpOp2) .addMBB(jmpTarget); - } assert(NewMI && "New Value Jump Instruction Not created!"); if (cmpInstr->getOperand(0).isReg() && diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp index 576f1d7..6c4eb7e 100644 --- a/lib/Target/Hexagon/HexagonPeephole.cpp +++ b/lib/Target/Hexagon/HexagonPeephole.cpp @@ -73,6 +73,10 @@ static cl::opt<bool> DisableOptSZExt("disable-hexagon-optszext", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Disable Optimization of Sign/Zero Extends")); +static cl::opt<bool> DisableOptExtTo64("disable-hexagon-opt-ext-to-64", + cl::Hidden, cl::ZeroOrMore, cl::init(false), + cl::desc("Disable Optimization of extensions to i64.")); + namespace { struct HexagonPeephole : public MachineFunctionPass { const HexagonInstrInfo *QII; @@ -142,6 +146,21 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { } } + // Look for %vreg170<def> = COMBINE_ir_V4 (0, %vreg169) + // %vreg170:DoublRegs, %vreg169:IntRegs + if (!DisableOptExtTo64 && + MI->getOpcode () == Hexagon::COMBINE_Ir_V4) { + assert (MI->getNumOperands() == 3); + MachineOperand &Dst = MI->getOperand(0); + MachineOperand &Src1 = MI->getOperand(1); + MachineOperand &Src2 = MI->getOperand(2); + if (Src1.getImm() != 0) + continue; + unsigned DstReg = Dst.getReg(); + unsigned SrcReg = Src2.getReg(); + PeepholeMap[DstReg] = SrcReg; + } + // Look for this sequence below // %vregDoubleReg1 = LSRd_ri %vregDoubleReg0, 32 // %vregIntReg = COPY %vregDoubleReg1:subreg_loreg. 
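The new COMBINE_Ir_V4 case follows the same bookkeeping scheme as the existing sub-register peepholes around it: when a 64-bit vreg is formed as combine(#0, Rs), the pass records a mapping from the double register back to its 32-bit source so that later extracts of the low word can be folded. A distilled sketch of just that bookkeeping (a plain std::map and the helper name stand in for the pass's own PeepholeMap state; this is an illustration, not code from the patch):

#include <map>

// Record Rdd = combine(#0, Rs): the low word of Rdd is exactly Rs, so note
// Rdd -> Rs; the pass's existing COPY-of-subreg handling can then reuse Rs.
static void notePeepholeCandidate(std::map<unsigned, unsigned> &PeepholeMap,
                                  unsigned DstDoubleReg, int64_t Imm,
                                  unsigned SrcIntReg) {
  if (Imm != 0)
    return;                      // only the zero-extension form is of interest
  PeepholeMap[DstDoubleReg] = SrcIntReg;
}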
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp index f947dfc..d8b4e2f 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -14,25 +14,26 @@ #include "HexagonRegisterInfo.h" #include "Hexagon.h" -#include "HexagonMachineFunctionInfo.h" #include "HexagonSubtarget.h" #include "HexagonTargetMachine.h" +#include "HexagonMachineFunctionInfo.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/STLExtras.h" -#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/PseudoSourceValue.h" #include "llvm/CodeGen/RegisterScavenging.h" #include "llvm/IR/Function.h" #include "llvm/IR/Type.h" #include "llvm/MC/MachineLocation.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/ErrorHandling.h" using namespace llvm; @@ -215,28 +216,41 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, MI.getOperand(FIOperandNum).ChangeToRegister(resReg, false, false,true); MI.getOperand(FIOperandNum+1).ChangeToImmediate(0); } else if (TII.isMemOp(&MI)) { - unsigned resReg = HEXAGON_RESERVED_REG_1; - if (!MFI.hasVarSizedObjects() && - TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset))) { - MI.getOperand(FIOperandNum).ChangeToRegister(getStackRegister(), - false, false, true); - MI.getOperand(FIOperandNum+1).ChangeToImmediate(FrameSize+Offset); - } else if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) { - BuildMI(*MI.getParent(), II, MI.getDebugLoc(), - TII.get(Hexagon::CONST32_Int_Real), resReg).addImm(Offset); - BuildMI(*MI.getParent(), II, MI.getDebugLoc(), - TII.get(Hexagon::ADD_rr), - resReg).addReg(FrameReg).addReg(resReg); - MI.getOperand(FIOperandNum).ChangeToRegister(resReg, false, false, - true); - MI.getOperand(FIOperandNum+1).ChangeToImmediate(0); + // use the constant extender if the instruction provides it + // and we are V4TOps. + if (Subtarget.hasV4TOps()) { + if (TII.isConstExtended(&MI)) { + MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false); + MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset); + TII.immediateExtend(&MI); + } else { + llvm_unreachable("Need to implement for memops"); + } } else { - BuildMI(*MI.getParent(), II, MI.getDebugLoc(), - TII.get(Hexagon::ADD_ri), - resReg).addReg(FrameReg).addImm(Offset); - MI.getOperand(FIOperandNum).ChangeToRegister(resReg, false, false, - true); - MI.getOperand(FIOperandNum+1).ChangeToImmediate(0); + // Only V3 and older instructions here. + unsigned ResReg = HEXAGON_RESERVED_REG_1; + if (!MFI.hasVarSizedObjects() && + TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset))) { + MI.getOperand(FIOperandNum).ChangeToRegister(getStackRegister(), + false, false, false); + MI.getOperand(FIOperandNum+1).ChangeToImmediate(FrameSize+Offset); + } else if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) { + BuildMI(*MI.getParent(), II, MI.getDebugLoc(), + TII.get(Hexagon::CONST32_Int_Real), ResReg).addImm(Offset); + BuildMI(*MI.getParent(), II, MI.getDebugLoc(), + TII.get(Hexagon::ADD_rr), ResReg).addReg(FrameReg). 
+ addReg(ResReg); + MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false, + true); + MI.getOperand(FIOperandNum+1).ChangeToImmediate(0); + } else { + BuildMI(*MI.getParent(), II, MI.getDebugLoc(), + TII.get(Hexagon::ADD_ri), ResReg).addReg(FrameReg). + addImm(Offset); + MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false, + true); + MI.getOperand(FIOperandNum+1).ChangeToImmediate(0); + } } } else { unsigned dstReg = MI.getOperand(0).getReg(); diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp index 4bacb8f..07d5ce1 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -29,8 +29,16 @@ EnableV3("enable-hexagon-v3", cl::Hidden, static cl::opt<bool> EnableMemOps( "enable-hexagon-memops", - cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, - cl::desc("Generate V4 memop instructions.")); + cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, cl::init(true), + cl::desc( + "Generate V4 MEMOP in code generation for Hexagon target")); + +static cl::opt<bool> +DisableMemOps( + "disable-hexagon-memops", + cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, cl::init(false), + cl::desc( + "Do not generate V4 MEMOP in code generation for Hexagon target")); static cl::opt<bool> EnableIEEERndNear( @@ -64,7 +72,10 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS): // Initialize scheduling itinerary for the specified CPU. InstrItins = getInstrItineraryForCPU(CPUString); - if (EnableMemOps) + // UseMemOps on by default unless disabled explicitly + if (DisableMemOps) + UseMemOps = false; + else if (EnableMemOps) UseMemOps = true; else UseMemOps = false; diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index d9fef3e..ce45c62 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -35,6 +35,10 @@ opt<bool> DisableHexagonMISched("disable-hexagon-misched", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Disable Hexagon MI Scheduling")); +static cl::opt<bool> DisableHexagonCFGOpt("disable-hexagon-cfgopt", + cl::Hidden, cl::ZeroOrMore, cl::init(false), + cl::desc("Disable Hexagon CFG Optimization")); + /// HexagonTargetMachineModule - Note that this is used on hosts that /// cannot link in a library unless there are references into the /// library. In particular, it seems that it is not possible to get @@ -75,19 +79,20 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT, TSInfo(*this), FrameLowering(Subtarget), InstrItins(&Subtarget.getInstrItineraryData()) { - setMCUseCFI(false); + setMCUseCFI(false); } // addPassesForOptimizations - Allow the backend (target) to add Target // Independent Optimization passes to the Pass Manager. 
bool HexagonTargetMachine::addPassesForOptimizations(PassManagerBase &PM) { - - PM.add(createConstantPropagationPass()); - PM.add(createLoopSimplifyPass()); - PM.add(createDeadCodeEliminationPass()); - PM.add(createConstantPropagationPass()); - PM.add(createLoopUnrollPass()); - PM.add(createLoopStrengthReducePass()); + if (getOptLevel() != CodeGenOpt::None) { + PM.add(createConstantPropagationPass()); + PM.add(createLoopSimplifyPass()); + PM.add(createDeadCodeEliminationPass()); + PM.add(createConstantPropagationPass()); + PM.add(createLoopUnrollPass()); + PM.add(createLoopStrengthReducePass()); + } return true; } @@ -121,38 +126,45 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) { } bool HexagonPassConfig::addInstSelector() { - addPass(createHexagonRemoveExtendOps(getHexagonTargetMachine())); + + if (getOptLevel() != CodeGenOpt::None) + addPass(createHexagonRemoveExtendOps(getHexagonTargetMachine())); + addPass(createHexagonISelDag(getHexagonTargetMachine(), getOptLevel())); - addPass(createHexagonPeephole()); + + if (getOptLevel() != CodeGenOpt::None) + addPass(createHexagonPeephole()); + return false; } bool HexagonPassConfig::addPreRegAlloc() { - if (!DisableHardwareLoops) { + if (!DisableHardwareLoops && getOptLevel() != CodeGenOpt::None) addPass(createHexagonHardwareLoops()); - } return false; } bool HexagonPassConfig::addPostRegAlloc() { - addPass(createHexagonCFGOptimizer(getHexagonTargetMachine())); + if (!DisableHexagonCFGOpt && getOptLevel() != CodeGenOpt::None) + addPass(createHexagonCFGOptimizer(getHexagonTargetMachine())); return true; } bool HexagonPassConfig::addPreSched2() { - addPass(&IfConverterID); + if (getOptLevel() != CodeGenOpt::None) + addPass(&IfConverterID); return true; } bool HexagonPassConfig::addPreEmitPass() { - if (!DisableHardwareLoops) { + if (!DisableHardwareLoops && getOptLevel() != CodeGenOpt::None) addPass(createHexagonFixupHwLoops()); - } - addPass(createHexagonNewValueJump()); + if (getOptLevel() != CodeGenOpt::None) + addPass(createHexagonNewValueJump()); // Expand Spill code for predicate registers. addPass(createHexagonExpandPredSpillCode(getHexagonTargetMachine())); @@ -161,7 +173,8 @@ bool HexagonPassConfig::addPreEmitPass() { addPass(createHexagonSplitTFRCondSets(getHexagonTargetMachine())); // Create Packets. 
- addPass(createHexagonPacketizer()); + if (getOptLevel() != CodeGenOpt::None) + addPass(createHexagonPacketizer()); return false; } diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index 866beb1..e592df9 100644 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -48,19 +48,35 @@ #include "HexagonMachineFunctionInfo.h" #include <map> +#include <vector> using namespace llvm; +static cl::opt<bool> PacketizeVolatiles("hexagon-packetize-volatiles", + cl::ZeroOrMore, cl::Hidden, cl::init(true), + cl::desc("Allow non-solo packetization of volatile memory references")); + +extern cl::opt<bool> ScheduleInlineAsm; +extern cl::opt<bool> CountDeadOutput; + +namespace llvm { + void initializeHexagonPacketizerPass(PassRegistry&); +} + + namespace { class HexagonPacketizer : public MachineFunctionPass { public: static char ID; - HexagonPacketizer() : MachineFunctionPass(ID) {} + HexagonPacketizer() : MachineFunctionPass(ID) { + initializeHexagonPacketizerPass(*PassRegistry::getPassRegistry()); + } void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); AU.addRequired<MachineDominatorTree>(); + AU.addRequired<MachineBranchProbabilityInfo>(); AU.addPreserved<MachineDominatorTree>(); AU.addRequired<MachineLoopInfo>(); AU.addPreserved<MachineLoopInfo>(); @@ -96,10 +112,17 @@ namespace { // schedule this instruction. bool FoundSequentialDependence; + /// \brief A handle to the branch probability pass. + const MachineBranchProbabilityInfo *MBPI; + + // Track MIs with ignored dependece. + std::vector<MachineInstr*> IgnoreDepMIs; + public: // Ctor. HexagonPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI, - MachineDominatorTree &MDT); + MachineDominatorTree &MDT, + const MachineBranchProbabilityInfo *MBPI); // initPacketizerState - initialize some internal flags. 
void initPacketizerState(); @@ -123,20 +146,20 @@ namespace { private: bool IsCallDependent(MachineInstr* MI, SDep::Kind DepType, unsigned DepReg); bool PromoteToDotNew(MachineInstr* MI, SDep::Kind DepType, - MachineBasicBlock::iterator &MII, - const TargetRegisterClass* RC); + MachineBasicBlock::iterator &MII, + const TargetRegisterClass* RC); bool CanPromoteToDotNew(MachineInstr* MI, SUnit* PacketSU, - unsigned DepReg, - std::map <MachineInstr*, SUnit*> MIToSUnit, - MachineBasicBlock::iterator &MII, - const TargetRegisterClass* RC); + unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit, + MachineBasicBlock::iterator &MII, + const TargetRegisterClass* RC); bool CanPromoteToNewValue(MachineInstr* MI, SUnit* PacketSU, - unsigned DepReg, - std::map <MachineInstr*, SUnit*> MIToSUnit, - MachineBasicBlock::iterator &MII); + unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit, + MachineBasicBlock::iterator &MII); bool CanPromoteToNewValueStore(MachineInstr* MI, MachineInstr* PacketMI, - unsigned DepReg, - std::map <MachineInstr*, SUnit*> MIToSUnit); + unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit); bool DemoteToDotOld(MachineInstr* MI); bool ArePredicatesComplements(MachineInstr* MI1, MachineInstr* MI2, std::map <MachineInstr*, SUnit*> MIToSUnit); @@ -149,23 +172,34 @@ namespace { bool canReserveResourcesForConstExt(MachineInstr *MI); void reserveResourcesForConstExt(MachineInstr* MI); bool isNewValueInst(MachineInstr* MI); - bool isDotNewInst(MachineInstr* MI); }; } +INITIALIZE_PASS_BEGIN(HexagonPacketizer, "packets", "Hexagon Packetizer", + false, false) +INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) +INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo) +INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) +INITIALIZE_PASS_END(HexagonPacketizer, "packets", "Hexagon Packetizer", + false, false) + + // HexagonPacketizerList Ctor. HexagonPacketizerList::HexagonPacketizerList( - MachineFunction &MF, MachineLoopInfo &MLI,MachineDominatorTree &MDT) + MachineFunction &MF, MachineLoopInfo &MLI,MachineDominatorTree &MDT, + const MachineBranchProbabilityInfo *MBPI) : VLIWPacketizerList(MF, MLI, MDT, true){ + this->MBPI = MBPI; } bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) { const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo(); MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>(); MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>(); - + const MachineBranchProbabilityInfo *MBPI = + &getAnalysis<MachineBranchProbabilityInfo>(); // Instantiate the packetizer. - HexagonPacketizerList Packetizer(Fn, MLI, MDT); + HexagonPacketizerList Packetizer(Fn, MLI, MDT, MBPI); // DFA state table should not be empty. 
assert(Packetizer.getResourceTracker() && "Empty DFA table!"); @@ -711,8 +745,10 @@ static int GetDotNewOp(const int opc) { } // Return .new predicate version for an instruction -static int GetDotNewPredOp(const int opc) { - switch (opc) { +static int GetDotNewPredOp(MachineInstr *MI, + const MachineBranchProbabilityInfo *MBPI, + const HexagonInstrInfo *QII) { + switch (MI->getOpcode()) { default: llvm_unreachable("Unknown .new type"); // Conditional stores // Store byte conditionally @@ -858,17 +894,15 @@ static int GetDotNewPredOp(const int opc) { return Hexagon::STw_GP_cdnNotPt_V4; // Condtional Jumps - case Hexagon::JMP_c: - return Hexagon::JMP_cdnPt; - - case Hexagon::JMP_cNot: - return Hexagon::JMP_cdnNotPt; + case Hexagon::JMP_t: + case Hexagon::JMP_f: + return QII->getDotNewPredJumpOp(MI, MBPI); - case Hexagon::JMPR_cPt: - return Hexagon::JMPR_cdnPt_V3; + case Hexagon::JMPR_t: + return Hexagon::JMPR_tnew_tV3; - case Hexagon::JMPR_cNotPt: - return Hexagon::JMPR_cdnNotPt_V3; + case Hexagon::JMPR_f: + return Hexagon::JMPR_fnew_tV3; // Conditional Transfers case Hexagon::TFR_cPt: @@ -1262,7 +1296,7 @@ bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI, int NewOpcode; if (RC == &Hexagon::PredRegsRegClass) - NewOpcode = GetDotNewPredOp(MI->getOpcode()); + NewOpcode = GetDotNewPredOp(MI, MBPI, QII); else NewOpcode = GetDotNewOp(MI->getOpcode()); MI->setDesc(QII->get(NewOpcode)); @@ -1307,17 +1341,17 @@ static int GetDotOldOp(const int opc) { case Hexagon::TFRI_cdnNotPt: return Hexagon::TFRI_cNotPt; - case Hexagon::JMP_cdnPt: - return Hexagon::JMP_c; + case Hexagon::JMP_tnew_t: + return Hexagon::JMP_t; - case Hexagon::JMP_cdnNotPt: - return Hexagon::JMP_cNot; + case Hexagon::JMP_fnew_t: + return Hexagon::JMP_f; - case Hexagon::JMPR_cdnPt_V3: - return Hexagon::JMPR_cPt; + case Hexagon::JMPR_tnew_tV3: + return Hexagon::JMPR_t; - case Hexagon::JMPR_cdnNotPt_V3: - return Hexagon::JMPR_cNotPt; + case Hexagon::JMPR_fnew_tV3: + return Hexagon::JMPR_f; // Load double word @@ -1913,7 +1947,7 @@ static bool GetPredicateSense(MachineInstr* MI, case Hexagon::STrih_imm_cdnPt_V4 : case Hexagon::STriw_imm_cPt_V4 : case Hexagon::STriw_imm_cdnPt_V4 : - case Hexagon::JMP_cdnPt : + case Hexagon::JMP_tnew_t : case Hexagon::LDrid_cPt : case Hexagon::LDrid_cdnPt : case Hexagon::LDrid_indexed_cPt : @@ -2052,7 +2086,7 @@ static bool GetPredicateSense(MachineInstr* MI, case Hexagon::STrih_imm_cdnNotPt_V4 : case Hexagon::STriw_imm_cNotPt_V4 : case Hexagon::STriw_imm_cdnNotPt_V4 : - case Hexagon::JMP_cdnNotPt : + case Hexagon::JMP_fnew_t : case Hexagon::LDrid_cNotPt : case Hexagon::LDrid_cdnNotPt : case Hexagon::LDrid_indexed_cNotPt : @@ -2154,172 +2188,6 @@ static bool GetPredicateSense(MachineInstr* MI, return false; } -bool HexagonPacketizerList::isDotNewInst(MachineInstr* MI) { - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - if (QII->isNewValueInst(MI)) - return true; - - switch (MI->getOpcode()) { - case Hexagon::TFR_cdnNotPt: - case Hexagon::TFR_cdnPt: - case Hexagon::TFRI_cdnNotPt: - case Hexagon::TFRI_cdnPt: - case Hexagon::LDrid_cdnPt : - case Hexagon::LDrid_cdnNotPt : - case Hexagon::LDrid_indexed_cdnPt : - case Hexagon::LDrid_indexed_cdnNotPt : - case Hexagon::POST_LDrid_cdnPt_V4 : - case Hexagon::POST_LDrid_cdnNotPt_V4 : - case Hexagon::LDriw_cdnPt : - case Hexagon::LDriw_cdnNotPt : - case Hexagon::LDriw_indexed_cdnPt : - case Hexagon::LDriw_indexed_cdnNotPt : - case Hexagon::POST_LDriw_cdnPt_V4 : - case Hexagon::POST_LDriw_cdnNotPt_V4 : - case Hexagon::LDrih_cdnPt : - case 
Hexagon::LDrih_cdnNotPt : - case Hexagon::LDrih_indexed_cdnPt : - case Hexagon::LDrih_indexed_cdnNotPt : - case Hexagon::POST_LDrih_cdnPt_V4 : - case Hexagon::POST_LDrih_cdnNotPt_V4 : - case Hexagon::LDrib_cdnPt : - case Hexagon::LDrib_cdnNotPt : - case Hexagon::LDrib_indexed_cdnPt : - case Hexagon::LDrib_indexed_cdnNotPt : - case Hexagon::POST_LDrib_cdnPt_V4 : - case Hexagon::POST_LDrib_cdnNotPt_V4 : - case Hexagon::LDriuh_cdnPt : - case Hexagon::LDriuh_cdnNotPt : - case Hexagon::LDriuh_indexed_cdnPt : - case Hexagon::LDriuh_indexed_cdnNotPt : - case Hexagon::POST_LDriuh_cdnPt_V4 : - case Hexagon::POST_LDriuh_cdnNotPt_V4 : - case Hexagon::LDriub_cdnPt : - case Hexagon::LDriub_cdnNotPt : - case Hexagon::LDriub_indexed_cdnPt : - case Hexagon::LDriub_indexed_cdnNotPt : - case Hexagon::POST_LDriub_cdnPt_V4 : - case Hexagon::POST_LDriub_cdnNotPt_V4 : - - case Hexagon::LDrid_indexed_shl_cdnPt_V4 : - case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDrib_indexed_shl_cdnPt_V4 : - case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriub_indexed_shl_cdnPt_V4 : - case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDrih_indexed_shl_cdnPt_V4 : - case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriuh_indexed_shl_cdnPt_V4 : - case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriw_indexed_shl_cdnPt_V4 : - case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 : - -// Coditional add - case Hexagon::ADD_ri_cdnPt: - case Hexagon::ADD_ri_cdnNotPt: - case Hexagon::ADD_rr_cdnPt: - case Hexagon::ADD_rr_cdnNotPt: - - // Conditional logical operations - case Hexagon::XOR_rr_cdnPt : - case Hexagon::XOR_rr_cdnNotPt : - case Hexagon::AND_rr_cdnPt : - case Hexagon::AND_rr_cdnNotPt : - case Hexagon::OR_rr_cdnPt : - case Hexagon::OR_rr_cdnNotPt : - - // Conditonal subtract - case Hexagon::SUB_rr_cdnPt : - case Hexagon::SUB_rr_cdnNotPt : - - // Conditional combine - case Hexagon::COMBINE_rr_cdnPt : - case Hexagon::COMBINE_rr_cdnNotPt : - - // Conditional shift operations - case Hexagon::ASLH_cdnPt_V4: - case Hexagon::ASLH_cdnNotPt_V4: - case Hexagon::ASRH_cdnPt_V4: - case Hexagon::ASRH_cdnNotPt_V4: - case Hexagon::SXTB_cdnPt_V4: - case Hexagon::SXTB_cdnNotPt_V4: - case Hexagon::SXTH_cdnPt_V4: - case Hexagon::SXTH_cdnNotPt_V4: - case Hexagon::ZXTB_cdnPt_V4: - case Hexagon::ZXTB_cdnNotPt_V4: - case Hexagon::ZXTH_cdnPt_V4: - case Hexagon::ZXTH_cdnNotPt_V4: - - // Conditional stores - case Hexagon::STrib_imm_cdnPt_V4 : - case Hexagon::STrib_imm_cdnNotPt_V4 : - case Hexagon::STrib_cdnPt_V4 : - case Hexagon::STrib_cdnNotPt_V4 : - case Hexagon::STrib_indexed_cdnPt_V4 : - case Hexagon::STrib_indexed_cdnNotPt_V4 : - case Hexagon::POST_STbri_cdnPt_V4 : - case Hexagon::POST_STbri_cdnNotPt_V4 : - case Hexagon::STrib_indexed_shl_cdnPt_V4 : - case Hexagon::STrib_indexed_shl_cdnNotPt_V4 : - - // Store doubleword conditionally - case Hexagon::STrid_indexed_cdnPt_V4 : - case Hexagon::STrid_indexed_cdnNotPt_V4 : - case Hexagon::STrid_indexed_shl_cdnPt_V4 : - case Hexagon::STrid_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_STdri_cdnPt_V4 : - case Hexagon::POST_STdri_cdnNotPt_V4 : - - // Store halfword conditionally - case Hexagon::STrih_cdnPt_V4 : - case Hexagon::STrih_cdnNotPt_V4 : - case Hexagon::STrih_indexed_cdnPt_V4 : - case Hexagon::STrih_indexed_cdnNotPt_V4 : - case Hexagon::STrih_imm_cdnPt_V4 : - case Hexagon::STrih_imm_cdnNotPt_V4 : - case Hexagon::STrih_indexed_shl_cdnPt_V4 : - case Hexagon::STrih_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_SThri_cdnPt_V4 : - case 
Hexagon::POST_SThri_cdnNotPt_V4 : - - // Store word conditionally - case Hexagon::STriw_cdnPt_V4 : - case Hexagon::STriw_cdnNotPt_V4 : - case Hexagon::STriw_indexed_cdnPt_V4 : - case Hexagon::STriw_indexed_cdnNotPt_V4 : - case Hexagon::STriw_imm_cdnPt_V4 : - case Hexagon::STriw_imm_cdnNotPt_V4 : - case Hexagon::STriw_indexed_shl_cdnPt_V4 : - case Hexagon::STriw_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_STwri_cdnPt_V4 : - case Hexagon::POST_STwri_cdnNotPt_V4 : - - case Hexagon::LDd_GP_cdnPt_V4: - case Hexagon::LDd_GP_cdnNotPt_V4: - case Hexagon::LDb_GP_cdnPt_V4: - case Hexagon::LDb_GP_cdnNotPt_V4: - case Hexagon::LDub_GP_cdnPt_V4: - case Hexagon::LDub_GP_cdnNotPt_V4: - case Hexagon::LDh_GP_cdnPt_V4: - case Hexagon::LDh_GP_cdnNotPt_V4: - case Hexagon::LDuh_GP_cdnPt_V4: - case Hexagon::LDuh_GP_cdnNotPt_V4: - case Hexagon::LDw_GP_cdnPt_V4: - case Hexagon::LDw_GP_cdnNotPt_V4: - - case Hexagon::STd_GP_cdnPt_V4: - case Hexagon::STd_GP_cdnNotPt_V4: - case Hexagon::STb_GP_cdnPt_V4: - case Hexagon::STb_GP_cdnNotPt_V4: - case Hexagon::STh_GP_cdnPt_V4: - case Hexagon::STh_GP_cdnNotPt_V4: - case Hexagon::STw_GP_cdnPt_V4: - case Hexagon::STw_GP_cdnNotPt_V4: - return true; - } - return false; -} - static MachineOperand& GetPostIncrementOperand(MachineInstr *MI, const HexagonInstrInfo *QII) { assert(QII->isPostIncrement(MI) && "Not a post increment operation."); @@ -2490,7 +2358,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI, // sense, i.e, either both should be negated or both should be none negated. if (( predRegNumDst != predRegNumSrc) || - isDotNewInst(PacketMI) != isDotNewInst(MI) || + QII->isDotNewInst(PacketMI) != QII->isDotNewInst(MI) || GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) { return false; } @@ -2600,8 +2468,9 @@ bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI, MachineBasicBlock::iterator &MII, const TargetRegisterClass* RC ) { - // already a dot new instruction - if (isDotNewInst(MI) && !IsNewifyStore(MI)) + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + // Already a dot new instruction. + if (QII->isDotNewInst(MI) && !IsNewifyStore(MI)) return false; if (!isNewifiable(MI)) @@ -2616,7 +2485,6 @@ bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI, else { // Create a dot new machine instruction to see if resources can be // allocated. If not, bail out now. 
- const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; int NewOpcode = GetDotNewOp(MI->getOpcode()); const MCInstrDesc &desc = QII->get(NewOpcode); DebugLoc dl; @@ -2759,7 +2627,7 @@ bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1, // !p0 is not complimentary to p0.new return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) && (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) && - (isDotNewInst(MI1) == isDotNewInst(MI2))); + (QII->isDotNewInst(MI1) == QII->isDotNewInst(MI2))); } // initPacketizerState - Initialize packetizer flags diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp index 86f75d1..3deb8d1 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp @@ -31,6 +31,7 @@ HexagonMCAsmInfo::HexagonMCAsmInfo(const Target &T, StringRef TT) { AscizDirective = "\t.string\t"; WeakRefDirective = "\t.weak\t"; + SupportsDebugInformation = true; UsesELFSectionDirectiveForBSS = true; ExceptionsType = ExceptionHandling::DwarfCFI; } diff --git a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp b/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp index 78ad24d..34e33fd 100644 --- a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp +++ b/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp @@ -237,7 +237,7 @@ SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) { // Use load to get GOT target SDValue Ops[] = { Callee, GPReg, Chain }; SDValue Load = SDValue(CurDAG->getMachineNode(MBlaze::LW, dl, - MVT::i32, MVT::Other, Ops, 3), 0); + MVT::i32, MVT::Other, Ops), 0); Chain = Load.getValue(1); // Call target must be on T9 diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.td b/lib/Target/MBlaze/MBlazeInstrInfo.td index f86bc0b..d27cd39 100644 --- a/lib/Target/MBlaze/MBlazeInstrInfo.td +++ b/lib/Target/MBlaze/MBlazeInstrInfo.td @@ -724,8 +724,7 @@ let usesCustomInserter=1 in { [(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$val))]>; def MEMBARRIER : MBlazePseudo<(outs), (ins), - "# memory barrier", - [(membarrier (i32 imm), (i32 imm), (i32 imm), (i32 imm), (i32 imm))]>; + "# memory barrier", []>; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/Mangler.cpp b/lib/Target/Mangler.cpp index edfd421..d31efa8 100644 --- a/lib/Target/Mangler.cpp +++ b/lib/Target/Mangler.cpp @@ -188,7 +188,12 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName, // If this global has a name, handle it simply. if (GV->hasName()) { - getNameWithPrefix(OutName, GV->getName(), PrefixTy); + StringRef Name = GV->getName(); + getNameWithPrefix(OutName, Name, PrefixTy); + // No need to do anything else if the global has the special "do not mangle" + // flag in the name. + if (Name[0] == 1) + return; } else { // Get the ID for the global, assigning a new one if we haven't got one // already. 
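The early return in the Mangler hunk keys off LLVM's convention that a global whose IR name begins with the byte 0x01 (written \01 in textual IR) is emitted exactly as spelled, skipping the platform prefix and any further decoration the rest of the function would apply. A minimal sketch of checking for that marker (the function name is illustrative):

#include "llvm/ADT/StringRef.h"

// True if the symbol carries the "emit verbatim, do not mangle" marker,
// i.e. its first byte is \1 -- the same test the patch adds above.
static bool hasNoMangleMarker(llvm::StringRef Name) {
  return !Name.empty() && Name[0] == '\1';
}

For example, a global named "\01foo" in IR comes out as plain foo, with no leading underscore even on targets whose ABI normally adds one.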
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index ebe12c9..0795cb9 100644 --- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -63,7 +63,6 @@ class MipsAsmParser : public MCTargetAsmParser { MCAsmParser &Parser; MipsAssemblerOptions Options; - #define GET_ASSEMBLER_HEADER #include "MipsGenAsmMatcher.inc" @@ -101,6 +100,9 @@ class MipsAsmParser : public MCTargetAsmParser { MipsAsmParser::OperandMatchResultTy parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands); + bool searchSymbolAlias(SmallVectorImpl<MCParsedAsmOperand*> &Operands, + unsigned RegisterClass); + bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); @@ -119,11 +121,17 @@ class MipsAsmParser : public MCTargetAsmParser { SmallVectorImpl<MCInst> &Instructions); void expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions); + void expandMemInst(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl<MCInst> &Instructions, + bool isLoad,bool isImmOpnd); bool reportParseError(StringRef ErrorMsg); - bool parseMemOffset(const MCExpr *&Res); + bool parseMemOffset(const MCExpr *&Res, bool isParenExpr); bool parseRelocOperand(const MCExpr *&Res); + const MCExpr* evaluateRelocExpr(const MCExpr *Expr, StringRef RelocStr); + + bool isEvaluated(const MCExpr *Expr); bool parseDirectiveSet(); bool parseSetAtDirective(); @@ -133,6 +141,8 @@ class MipsAsmParser : public MCTargetAsmParser { bool parseSetReorderDirective(); bool parseSetNoReorderDirective(); + bool parseSetAssignment(); + bool parseDirectiveWord(unsigned Size, SMLoc L); MCSymbolRefExpr::VariantKind getVariantKind(StringRef Symbol); @@ -163,9 +173,12 @@ class MipsAsmParser : public MCTargetAsmParser { bool requestsDoubleOperand(StringRef Mnemonic); - unsigned getReg(int RC,int RegNo); + unsigned getReg(int RC, int RegNo); int getATReg(); + + bool processInstruction(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl<MCInst> &Instructions); public: MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser) : MCTargetAsmParser(), STI(sti), Parser(parser) { @@ -258,7 +271,7 @@ public: void addImmOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); const MCExpr *Expr = getImm(); - addExpr(Inst,Expr); + addExpr(Inst, Expr); } void addMemOperands(MCInst &Inst, unsigned N) const { @@ -267,7 +280,7 @@ public: Inst.addOperand(MCOperand::CreateReg(getMemBase())); const MCExpr *Expr = getMemOff(); - addExpr(Inst,Expr); + addExpr(Inst, Expr); } bool isReg() const { return Kind == k_Register; } @@ -380,42 +393,112 @@ public: } /// getStartLoc - Get the location of the first token of this operand. - SMLoc getStartLoc() const { return StartLoc; } + SMLoc getStartLoc() const { + return StartLoc; + } /// getEndLoc - Get the location of the last token of this operand. 
- SMLoc getEndLoc() const { return EndLoc; } + SMLoc getEndLoc() const { + return EndLoc; + } virtual void print(raw_ostream &OS) const { llvm_unreachable("unimplemented!"); } -}; +}; // class MipsOperand +} // namespace + +namespace llvm { +extern const MCInstrDesc MipsInsts[]; +} +static const MCInstrDesc &getInstDesc(unsigned Opcode) { + return MipsInsts[Opcode]; +} + +bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl<MCInst> &Instructions) { + const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); + Inst.setLoc(IDLoc); + if (MCID.hasDelaySlot() && Options.isReorder()) { + // If this instruction has a delay slot and .set reorder is active, + // emit a NOP after it. + Instructions.push_back(Inst); + MCInst NopInst; + NopInst.setOpcode(Mips::SLL); + NopInst.addOperand(MCOperand::CreateReg(Mips::ZERO)); + NopInst.addOperand(MCOperand::CreateReg(Mips::ZERO)); + NopInst.addOperand(MCOperand::CreateImm(0)); + Instructions.push_back(NopInst); + return false; + } + + if (MCID.mayLoad() || MCID.mayStore()) { + // Check the offset of memory operand, if it is a symbol + // reference or immediate we may have to expand instructions. + for (unsigned i = 0; i < MCID.getNumOperands(); i++) { + const MCOperandInfo &OpInfo = MCID.OpInfo[i]; + if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) + || (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) { + MCOperand &Op = Inst.getOperand(i); + if (Op.isImm()) { + int MemOffset = Op.getImm(); + if (MemOffset < -32768 || MemOffset > 32767) { + // Offset can't exceed 16bit value. + expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), true); + return false; + } + } else if (Op.isExpr()) { + const MCExpr *Expr = Op.getExpr(); + if (Expr->getKind() == MCExpr::SymbolRef) { + const MCSymbolRefExpr *SR = + static_cast<const MCSymbolRefExpr*>(Expr); + if (SR->getKind() == MCSymbolRefExpr::VK_None) { + // Expand symbol. 
+ expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), false); + return false; + } + } else if (!isEvaluated(Expr)) { + expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), false); + return false; + } + } + } + } // for + } // if load/store + + if (needsExpansion(Inst)) + expandInstruction(Inst, IDLoc, Instructions); + else + Instructions.push_back(Inst); + + return false; } bool MipsAsmParser::needsExpansion(MCInst &Inst) { - switch(Inst.getOpcode()) { - case Mips::LoadImm32Reg: - case Mips::LoadAddr32Imm: - case Mips::LoadAddr32Reg: - return true; - default: - return false; + switch (Inst.getOpcode()) { + case Mips::LoadImm32Reg: + case Mips::LoadAddr32Imm: + case Mips::LoadAddr32Reg: + return true; + default: + return false; } } void MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl<MCInst> &Instructions){ - switch(Inst.getOpcode()) { - case Mips::LoadImm32Reg: - return expandLoadImm(Inst, IDLoc, Instructions); - case Mips::LoadAddr32Imm: - return expandLoadAddressImm(Inst,IDLoc,Instructions); - case Mips::LoadAddr32Reg: - return expandLoadAddressReg(Inst,IDLoc,Instructions); - } + SmallVectorImpl<MCInst> &Instructions) { + switch (Inst.getOpcode()) { + case Mips::LoadImm32Reg: + return expandLoadImm(Inst, IDLoc, Instructions); + case Mips::LoadAddr32Imm: + return expandLoadAddressImm(Inst, IDLoc, Instructions); + case Mips::LoadAddr32Reg: + return expandLoadAddressReg(Inst, IDLoc, Instructions); + } } void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl<MCInst> &Instructions){ + SmallVectorImpl<MCInst> &Instructions) { MCInst tmpInst; const MCOperand &ImmOp = Inst.getOperand(1); assert(ImmOp.isImm() && "expected immediate operand kind"); @@ -424,26 +507,24 @@ void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc, int ImmValue = ImmOp.getImm(); tmpInst.setLoc(IDLoc); - if ( 0 <= ImmValue && ImmValue <= 65535) { - // for 0 <= j <= 65535. + if (0 <= ImmValue && ImmValue <= 65535) { + // For 0 <= j <= 65535. // li d,j => ori d,$zero,j tmpInst.setOpcode(Mips::ORi); tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); - tmpInst.addOperand( - MCOperand::CreateReg(Mips::ZERO)); + tmpInst.addOperand(MCOperand::CreateReg(Mips::ZERO)); tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); Instructions.push_back(tmpInst); - } else if ( ImmValue < 0 && ImmValue >= -32768) { - // for -32768 <= j < 0. + } else if (ImmValue < 0 && ImmValue >= -32768) { + // For -32768 <= j < 0. // li d,j => addiu d,$zero,j tmpInst.setOpcode(Mips::ADDiu); tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); - tmpInst.addOperand( - MCOperand::CreateReg(Mips::ZERO)); + tmpInst.addOperand(MCOperand::CreateReg(Mips::ZERO)); tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); Instructions.push_back(tmpInst); } else { - // for any other value of j that is representable as a 32-bit integer. + // For any other value of j that is representable as a 32-bit integer. 
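As a concrete instance of this last case (an arithmetic sketch only; the register name is illustrative): li $t0, 0x12345 fits neither 16-bit form, so the code below emits lui with the high half followed by ori with the low half. Because ori zero-extends its immediate, the high half needs no adjustment here; contrast this with the signed-offset split in expandMemInst further down.

#include <cstdint>

// hi16/lo16 split feeding the lui/ori pair emitted below.
static void splitForLuiOri(int32_t Imm, uint32_t &Hi, uint32_t &Lo) {
  Hi = (static_cast<uint32_t>(Imm) & 0xffff0000) >> 16;  // lui operand
  Lo = static_cast<uint32_t>(Imm) & 0x0000ffff;          // ori operand (zero-extended)
}
// Imm = 0x00012345  ->  Hi = 0x0001, Lo = 0x2345, i.e.
//   lui $t0, 0x1
//   ori $t0, $t0, 0x2345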
// li d,j => lui d,hi16(j) // ori d,d,lo16(j) tmpInst.setOpcode(Mips::LUi); @@ -461,7 +542,7 @@ void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc, } void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl<MCInst> &Instructions){ + SmallVectorImpl<MCInst> &Instructions) { MCInst tmpInst; const MCOperand &ImmOp = Inst.getOperand(2); assert(ImmOp.isImm() && "expected immediate operand kind"); @@ -470,19 +551,19 @@ void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc, const MCOperand &DstRegOp = Inst.getOperand(0); assert(DstRegOp.isReg() && "expected register operand kind"); int ImmValue = ImmOp.getImm(); - if ( -32768 <= ImmValue && ImmValue <= 65535) { - //for -32768 <= j <= 65535. - //la d,j(s) => addiu d,s,j + if (-32768 <= ImmValue && ImmValue <= 65535) { + // For -32768 <= j <= 65535. + // la d,j(s) => addiu d,s,j tmpInst.setOpcode(Mips::ADDiu); tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); tmpInst.addOperand(MCOperand::CreateReg(SrcRegOp.getReg())); tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); Instructions.push_back(tmpInst); } else { - //for any other value of j that is representable as a 32-bit integer. - //la d,j(s) => lui d,hi16(j) - // ori d,d,lo16(j) - // addu d,d,s + // For any other value of j that is representable as a 32-bit integer. + // la d,j(s) => lui d,hi16(j) + // ori d,d,lo16(j) + // addu d,d,s tmpInst.setOpcode(Mips::LUi); tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16)); @@ -503,26 +584,25 @@ void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc, } void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc, - SmallVectorImpl<MCInst> &Instructions){ + SmallVectorImpl<MCInst> &Instructions) { MCInst tmpInst; const MCOperand &ImmOp = Inst.getOperand(1); assert(ImmOp.isImm() && "expected immediate operand kind"); const MCOperand &RegOp = Inst.getOperand(0); assert(RegOp.isReg() && "expected register operand kind"); int ImmValue = ImmOp.getImm(); - if ( -32768 <= ImmValue && ImmValue <= 65535) { - //for -32768 <= j <= 65535. - //la d,j => addiu d,$zero,j + if (-32768 <= ImmValue && ImmValue <= 65535) { + // For -32768 <= j <= 65535. + // la d,j => addiu d,$zero,j tmpInst.setOpcode(Mips::ADDiu); tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); - tmpInst.addOperand( - MCOperand::CreateReg(Mips::ZERO)); + tmpInst.addOperand(MCOperand::CreateReg(Mips::ZERO)); tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); Instructions.push_back(tmpInst); } else { - //for any other value of j that is representable as a 32-bit integer. - //la d,j => lui d,hi16(j) - // ori d,d,lo16(j) + // For any other value of j that is representable as a 32-bit integer. + // la d,j => lui d,hi16(j) + // ori d,d,lo16(j) tmpInst.setOpcode(Mips::LUi); tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16)); @@ -536,28 +616,105 @@ void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc, } } +void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl<MCInst> &Instructions, bool isLoad, bool isImmOpnd) { + const MCSymbolRefExpr *SR; + MCInst TempInst; + unsigned ImmOffset, HiOffset, LoOffset; + const MCExpr *ExprOffset; + unsigned TmpRegNum; + unsigned AtRegNum = getReg((isMips64()) ? Mips::CPU64RegsRegClassID + : Mips::CPURegsRegClassID, getATReg()); + // 1st operand is either the source or destination register. 
+ assert(Inst.getOperand(0).isReg() && "expected register operand kind"); + unsigned RegOpNum = Inst.getOperand(0).getReg(); + // 2nd operand is the base register. + assert(Inst.getOperand(1).isReg() && "expected register operand kind"); + unsigned BaseRegNum = Inst.getOperand(1).getReg(); + // 3rd operand is either an immediate or expression. + if (isImmOpnd) { + assert(Inst.getOperand(2).isImm() && "expected immediate operand kind"); + ImmOffset = Inst.getOperand(2).getImm(); + LoOffset = ImmOffset & 0x0000ffff; + HiOffset = (ImmOffset & 0xffff0000) >> 16; + // If the msb of LoOffset is 1 (a negative number), we must increment HiOffset. + if (LoOffset & 0x8000) + HiOffset++; + } else + ExprOffset = Inst.getOperand(2).getExpr(); + // All instructions will have the same location. + TempInst.setLoc(IDLoc); + // 1st instruction in expansion is LUi. For load instructions we can use + // the dst register as a temporary if base and dst are different, + // but for stores we must use $at. + TmpRegNum = (isLoad && (BaseRegNum != RegOpNum)) ? RegOpNum : AtRegNum; + TempInst.setOpcode(Mips::LUi); + TempInst.addOperand(MCOperand::CreateReg(TmpRegNum)); + if (isImmOpnd) + TempInst.addOperand(MCOperand::CreateImm(HiOffset)); + else { + if (ExprOffset->getKind() == MCExpr::SymbolRef) { + SR = static_cast<const MCSymbolRefExpr*>(ExprOffset); + const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::Create( + SR->getSymbol().getName(), MCSymbolRefExpr::VK_Mips_ABS_HI, + getContext()); + TempInst.addOperand(MCOperand::CreateExpr(HiExpr)); + } else { + const MCExpr *HiExpr = evaluateRelocExpr(ExprOffset, "hi"); + TempInst.addOperand(MCOperand::CreateExpr(HiExpr)); + } + } + // Add the instruction to the list. + Instructions.push_back(TempInst); + // Prepare TempInst for next instruction. + TempInst.clear(); + // Add temp register to base. + TempInst.setOpcode(Mips::ADDu); + TempInst.addOperand(MCOperand::CreateReg(TmpRegNum)); + TempInst.addOperand(MCOperand::CreateReg(TmpRegNum)); + TempInst.addOperand(MCOperand::CreateReg(BaseRegNum)); + Instructions.push_back(TempInst); + TempInst.clear(); + // And finally, create the original instruction with the low part + // of the offset and the new base.
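The HiOffset/LoOffset split above differs from the lui/ori case: the low half lands in the final load/store's offset field, which the hardware sign-extends, so a set bit 15 in LoOffset effectively borrows 0x10000 from the high half unless HiOffset is pre-incremented. A small check of that arithmetic in plain C++ (helper name is illustrative):

  #include <cassert>
  #include <cstdint>

  // Split an offset whose low half will later be sign-extended; carry into
  // the high half when bit 15 of the low half is set.
  static void splitOffset(uint32_t Imm, uint32_t &Hi, uint32_t &Lo) {
    Lo = Imm & 0x0000ffff;
    Hi = (Imm & 0xffff0000) >> 16;
    if (Lo & 0x8000)
      ++Hi;
  }

  int main() {
    uint32_t Hi, Lo;
    splitOffset(0x1234abcd, Hi, Lo);   // Hi = 0x1235, Lo = 0xabcd
    // lui Hi, addu with base, then a sign-extended Lo restores the value.
    assert((Hi << 16) + int32_t(int16_t(Lo)) == 0x1234abcd);
    return 0;
  }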
+ TempInst.setOpcode(Inst.getOpcode()); + TempInst.addOperand(MCOperand::CreateReg(RegOpNum)); + TempInst.addOperand(MCOperand::CreateReg(TmpRegNum)); + if (isImmOpnd) + TempInst.addOperand(MCOperand::CreateImm(LoOffset)); + else { + if (ExprOffset->getKind() == MCExpr::SymbolRef) { + const MCSymbolRefExpr *LoExpr = MCSymbolRefExpr::Create( + SR->getSymbol().getName(), MCSymbolRefExpr::VK_Mips_ABS_LO, + getContext()); + TempInst.addOperand(MCOperand::CreateExpr(LoExpr)); + } else { + const MCExpr *LoExpr = evaluateRelocExpr(ExprOffset, "lo"); + TempInst.addOperand(MCOperand::CreateExpr(LoExpr)); + } + } + Instructions.push_back(TempInst); + TempInst.clear(); +} + bool MipsAsmParser:: MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl<MCParsedAsmOperand*> &Operands, MCStreamer &Out, unsigned &ErrorInfo, bool MatchingInlineAsm) { MCInst Inst; + SmallVector<MCInst, 8> Instructions; unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm); switch (MatchResult) { - default: break; + default: + break; case Match_Success: { - if (needsExpansion(Inst)) { - SmallVector<MCInst, 4> Instructions; - expandInstruction(Inst, IDLoc, Instructions); - for(unsigned i =0; i < Instructions.size(); i++){ - Out.EmitInstruction(Instructions[i]); - } - } else { - Inst.setLoc(IDLoc); - Out.EmitInstruction(Inst); - } + if (processInstruction(Inst, IDLoc, Instructions)) + return true; + for (unsigned i = 0; i < Instructions.size(); i++) + Out.EmitInstruction(Instructions[i]); return false; } case Match_MissingFeature: @@ -569,8 +726,9 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, if (ErrorInfo >= Operands.size()) return Error(IDLoc, "too few operands for instruction"); - ErrorLoc = ((MipsOperand*)Operands[ErrorInfo])->getStartLoc(); - if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; + ErrorLoc = ((MipsOperand*) Operands[ErrorInfo])->getStartLoc(); + if (ErrorLoc == SMLoc()) + ErrorLoc = IDLoc; } return Error(ErrorLoc, "invalid operand for instruction"); @@ -621,10 +779,10 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) { .Case("t9", 25) .Default(-1); - // Although SGI documentation just cut out t0-t3 for n32/n64, + // Although SGI documentation just cuts out t0-t3 for n32/n64, // GNU pushes the values of t0-t3 to override the o32/o64 values for t4-t7 // We are supporting both cases, so for t0-t3 we'll just push them to t4-t7. - if (isMips64() && 8 <= CC && CC <= 11) + if (isMips64() && 8 <= CC && CC <= 11) CC += 4; if (CC == -1 && isMips64()) @@ -640,19 +798,23 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) { return CC; } + int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) { + if (Name.equals("fcc0")) + return Mips::FCC0; + int CC; CC = matchCPURegisterName(Name); if (CC != -1) - return matchRegisterByNumber(CC,is64BitReg?Mips::CPU64RegsRegClassID: - Mips::CPURegsRegClassID); + return matchRegisterByNumber(CC, is64BitReg ? Mips::CPU64RegsRegClassID + : Mips::CPURegsRegClassID); if (Name[0] == 'f') { StringRef NumString = Name.substr(1); unsigned IntVal; - if( NumString.getAsInteger(10, IntVal)) - return -1; // not integer + if (NumString.getAsInteger(10, IntVal)) + return -1; // This is not an integer. 
if (IntVal > 31) return -1; @@ -661,18 +823,19 @@ int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) { if (Format == FP_FORMAT_S || Format == FP_FORMAT_W) return getReg(Mips::FGR32RegClassID, IntVal); if (Format == FP_FORMAT_D) { - if(isFP64()) { + if (isFP64()) { return getReg(Mips::FGR64RegClassID, IntVal); } - // only even numbers available as register pairs - if (( IntVal > 31) || (IntVal%2 != 0)) + // Only even numbers available as register pairs. + if ((IntVal > 31) || (IntVal % 2 != 0)) return -1; - return getReg(Mips::AFGR64RegClassID, IntVal/2); + return getReg(Mips::AFGR64RegClassID, IntVal / 2); } } return -1; } + void MipsAsmParser::setDefaultFpFormat() { if (isMips64() || isFP64()) @@ -692,6 +855,7 @@ bool MipsAsmParser::requestsDoubleOperand(StringRef Mnemonic){ return IsDouble; } + void MipsAsmParser::setFpFormat(StringRef Format) { FpFormat = StringSwitch<FpFormatTy>(Format.lower()) @@ -714,7 +878,7 @@ int MipsAsmParser::getATReg() { return Options.getATRegNum(); } -unsigned MipsAsmParser::getReg(int RC,int RegNo) { +unsigned MipsAsmParser::getReg(int RC, int RegNo) { return *(getContext().getRegisterInfo().getRegClass(RC).begin() + RegNo); } @@ -735,14 +899,12 @@ int MipsAsmParser::tryParseRegister(bool is64BitReg) { RegNum = matchRegisterName(lowerCase, is64BitReg); } else if (Tok.is(AsmToken::Integer)) RegNum = matchRegisterByNumber(static_cast<unsigned>(Tok.getIntVal()), - is64BitReg ? Mips::CPU64RegsRegClassID - : Mips::CPURegsRegClassID); + is64BitReg ? Mips::CPU64RegsRegClassID : Mips::CPURegsRegClassID); return RegNum; } -bool MipsAsmParser:: - tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, - bool is64BitReg){ +bool MipsAsmParser::tryParseRegisterOperand( + SmallVectorImpl<MCParsedAsmOperand*> &Operands, bool is64BitReg) { SMLoc S = Parser.getTok().getLoc(); int RegNo = -1; @@ -752,7 +914,7 @@ bool MipsAsmParser:: return true; Operands.push_back(MipsOperand::CreateReg(RegNo, S, - Parser.getTok().getLoc())); + Parser.getTok().getLoc())); Parser.Lex(); // Eat register token. return false; } @@ -775,19 +937,19 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands, Error(Parser.getTok().getLoc(), "unexpected token in operand"); return true; case AsmToken::Dollar: { - // parse register + // Parse the register. SMLoc S = Parser.getTok().getLoc(); Parser.Lex(); // Eat dollar token. - // parse register operand + // Parse the register operand. if (!tryParseRegisterOperand(Operands, isMips64())) { if (getLexer().is(AsmToken::LParen)) { - // check if it is indexed addressing operand + // Check if it is indexed addressing operand. Operands.push_back(MipsOperand::CreateToken("(", S)); - Parser.Lex(); // eat parenthesis + Parser.Lex(); // Eat the parenthesis. if (getLexer().isNot(AsmToken::Dollar)) return true; - Parser.Lex(); // eat dollar + Parser.Lex(); // Eat the dollar if (tryParseRegisterOperand(Operands, isMips64())) return true; @@ -800,7 +962,7 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands, } return false; } - // maybe it is a symbol reference + // Maybe it is a symbol reference. StringRef Identifier; if (Parser.parseIdentifier(Identifier)) return true; @@ -809,7 +971,7 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands, MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier); - // Otherwise create a symbol ref. + // Otherwise create a symbol reference. 
const MCExpr *Res = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, getContext()); @@ -817,12 +979,17 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands, return false; } case AsmToken::Identifier: + // Look for the existing symbol, we should check if + // we need to assigne the propper RegisterKind. + if (searchSymbolAlias(Operands, MipsOperand::Kind_None)) + return false; + // Else drop to expression parsing. case AsmToken::LParen: case AsmToken::Minus: case AsmToken::Plus: case AsmToken::Integer: case AsmToken::String: { - // quoted label names + // Quoted label names. const MCExpr *IdVal; SMLoc S = Parser.getTok().getLoc(); if (getParser().parseExpression(IdVal)) @@ -832,9 +999,9 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands, return false; } case AsmToken::Percent: { - // it is a symbol reference or constant expression + // It is a symbol reference or constant expression. const MCExpr *IdVal; - SMLoc S = Parser.getTok().getLoc(); // start location of the operand + SMLoc S = Parser.getTok().getLoc(); // Start location of the operand. if (parseRelocOperand(IdVal)) return true; @@ -847,129 +1014,200 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands, return true; } -bool MipsAsmParser::parseRelocOperand(const MCExpr *&Res) { +const MCExpr* MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr, + StringRef RelocStr) { + const MCExpr *Res; + // Check the type of the expression. + if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Expr)) { + // It's a constant, evaluate lo or hi value. + if (RelocStr == "lo") { + short Val = MCE->getValue(); + Res = MCConstantExpr::Create(Val, getContext()); + } else if (RelocStr == "hi") { + int Val = MCE->getValue(); + int LoSign = Val & 0x8000; + Val = (Val & 0xffff0000) >> 16; + // Lower part is treated as a signed int, so if it is negative + // we must add 1 to the hi part to compensate. + if (LoSign) + Val++; + Res = MCConstantExpr::Create(Val, getContext()); + } else { + llvm_unreachable("Invalid RelocStr value"); + } + return Res; + } + + if (const MCSymbolRefExpr *MSRE = dyn_cast<MCSymbolRefExpr>(Expr)) { + // It's a symbol, create a symbolic expression from the symbol. + StringRef Symbol = MSRE->getSymbol().getName(); + MCSymbolRefExpr::VariantKind VK = getVariantKind(RelocStr); + Res = MCSymbolRefExpr::Create(Symbol, VK, getContext()); + return Res; + } - Parser.Lex(); // eat % token - const AsmToken &Tok = Parser.getTok(); // get next token, operation + if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr)) { + const MCExpr *LExp = evaluateRelocExpr(BE->getLHS(), RelocStr); + const MCExpr *RExp = evaluateRelocExpr(BE->getRHS(), RelocStr); + Res = MCBinaryExpr::Create(BE->getOpcode(), LExp, RExp, getContext()); + return Res; + } + + if (const MCUnaryExpr *UN = dyn_cast<MCUnaryExpr>(Expr)) { + const MCExpr *UnExp = evaluateRelocExpr(UN->getSubExpr(), RelocStr); + Res = MCUnaryExpr::Create(UN->getOpcode(), UnExp, getContext()); + return Res; + } + // Just return the original expression. 
+ return Expr; +} + +bool MipsAsmParser::isEvaluated(const MCExpr *Expr) { + + switch (Expr->getKind()) { + case MCExpr::Constant: + return true; + case MCExpr::SymbolRef: + return (cast<MCSymbolRefExpr>(Expr)->getKind() != MCSymbolRefExpr::VK_None); + case MCExpr::Binary: + if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr)) { + if (!isEvaluated(BE->getLHS())) + return false; + return isEvaluated(BE->getRHS()); + } + case MCExpr::Unary: + return isEvaluated(cast<MCUnaryExpr>(Expr)->getSubExpr()); + default: + return false; + } + return false; +} + +bool MipsAsmParser::parseRelocOperand(const MCExpr *&Res) { + Parser.Lex(); // Eat the % token. + const AsmToken &Tok = Parser.getTok(); // Get next token, operation. if (Tok.isNot(AsmToken::Identifier)) return true; std::string Str = Tok.getIdentifier().str(); - Parser.Lex(); // eat identifier - // now make expression from the rest of the operand + Parser.Lex(); // Eat the identifier. + // Now make an expression from the rest of the operand. const MCExpr *IdVal; SMLoc EndLoc; if (getLexer().getKind() == AsmToken::LParen) { while (1) { - Parser.Lex(); // eat '(' token + Parser.Lex(); // Eat the '(' token. if (getLexer().getKind() == AsmToken::Percent) { - Parser.Lex(); // eat % token + Parser.Lex(); // Eat the % token. const AsmToken &nextTok = Parser.getTok(); if (nextTok.isNot(AsmToken::Identifier)) return true; Str += "(%"; Str += nextTok.getIdentifier(); - Parser.Lex(); // eat identifier + Parser.Lex(); // Eat the identifier. if (getLexer().getKind() != AsmToken::LParen) return true; } else break; } - if (getParser().parseParenExpression(IdVal,EndLoc)) + if (getParser().parseParenExpression(IdVal, EndLoc)) return true; while (getLexer().getKind() == AsmToken::RParen) - Parser.Lex(); // eat ')' token + Parser.Lex(); // Eat the ')' token. } else - return true; // parenthesis must follow reloc operand - - // Check the type of the expression - if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(IdVal)) { - // it's a constant, evaluate lo or hi value - int Val = MCE->getValue(); - if (Str == "lo") { - Val = Val & 0xffff; - } else if (Str == "hi") { - int LoSign = Val & 0x8000; - Val = (Val & 0xffff0000) >> 16; - //lower part is treated as signed int, so if it is negative - //we must add 1 to hi part to compensate - if (LoSign) - Val++; - } - Res = MCConstantExpr::Create(Val, getContext()); - return false; - } + return true; // Parenthesis must follow the relocation operand. 
- if (const MCSymbolRefExpr *MSRE = dyn_cast<MCSymbolRefExpr>(IdVal)) { - // it's a symbol, create symbolic expression from symbol - StringRef Symbol = MSRE->getSymbol().getName(); - MCSymbolRefExpr::VariantKind VK = getVariantKind(Str); - Res = MCSymbolRefExpr::Create(Symbol,VK,getContext()); - return false; - } - return true; + Res = evaluateRelocExpr(IdVal, Str); + return false; } bool MipsAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) { - StartLoc = Parser.getTok().getLoc(); RegNo = tryParseRegister(isMips64()); EndLoc = Parser.getTok().getLoc(); - return (RegNo == (unsigned)-1); + return (RegNo == (unsigned) -1); } -bool MipsAsmParser::parseMemOffset(const MCExpr *&Res) { - +bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) { SMLoc S; + bool Result = true; - switch(getLexer().getKind()) { + while (getLexer().getKind() == AsmToken::LParen) + Parser.Lex(); + + switch (getLexer().getKind()) { default: return true; + case AsmToken::Identifier: + case AsmToken::LParen: case AsmToken::Integer: case AsmToken::Minus: case AsmToken::Plus: - return (getParser().parseExpression(Res)); + if (isParenExpr) + Result = getParser().parseParenExpression(Res, S); + else + Result = (getParser().parseExpression(Res)); + while (getLexer().getKind() == AsmToken::RParen) + Parser.Lex(); + break; case AsmToken::Percent: - return parseRelocOperand(Res); - case AsmToken::LParen: - return false; // it's probably assuming 0 + Result = parseRelocOperand(Res); } - return true; + return Result; } MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand( - SmallVectorImpl<MCParsedAsmOperand*>&Operands) { + SmallVectorImpl<MCParsedAsmOperand*>&Operands) { const MCExpr *IdVal = 0; SMLoc S; - // first operand is the offset + bool isParenExpr = false; + // First operand is the offset. S = Parser.getTok().getLoc(); - if (parseMemOffset(IdVal)) - return MatchOperand_ParseFail; + if (getLexer().getKind() == AsmToken::LParen) { + Parser.Lex(); + isParenExpr = true; + } - const AsmToken &Tok = Parser.getTok(); // get next token - if (Tok.isNot(AsmToken::LParen)) { - MipsOperand *Mnemonic = static_cast<MipsOperand*>(Operands[0]); - if (Mnemonic->getToken() == "la") { - SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() -1); - Operands.push_back(MipsOperand::CreateImm(IdVal, S, E)); - return MatchOperand_Success; + if (getLexer().getKind() != AsmToken::Dollar) { + if (parseMemOffset(IdVal, isParenExpr)) + return MatchOperand_ParseFail; + + const AsmToken &Tok = Parser.getTok(); // Get the next token. + if (Tok.isNot(AsmToken::LParen)) { + MipsOperand *Mnemonic = static_cast<MipsOperand*>(Operands[0]); + if (Mnemonic->getToken() == "la") { + SMLoc E = SMLoc::getFromPointer( + Parser.getTok().getLoc().getPointer() - 1); + Operands.push_back(MipsOperand::CreateImm(IdVal, S, E)); + return MatchOperand_Success; + } + if (Tok.is(AsmToken::EndOfStatement)) { + SMLoc E = SMLoc::getFromPointer( + Parser.getTok().getLoc().getPointer() - 1); + + // Zero register assumed, add a memory operand with ZERO as its base. + Operands.push_back(MipsOperand::CreateMem(isMips64() ? Mips::ZERO_64 + : Mips::ZERO, + IdVal, S, E)); + return MatchOperand_Success; + } + Error(Parser.getTok().getLoc(), "'(' expected"); + return MatchOperand_ParseFail; } - Error(Parser.getTok().getLoc(), "'(' expected"); - return MatchOperand_ParseFail; - } - Parser.Lex(); // Eat '(' token. + Parser.Lex(); // Eat the '(' token. 
+ } - const AsmToken &Tok1 = Parser.getTok(); // get next token + const AsmToken &Tok1 = Parser.getTok(); // Get next token if (Tok1.is(AsmToken::Dollar)) { - Parser.Lex(); // Eat '$' token. + Parser.Lex(); // Eat the '$' token. if (tryParseRegisterOperand(Operands, isMips64())) { Error(Parser.getTok().getLoc(), "unexpected token in operand"); return MatchOperand_ParseFail; @@ -980,7 +1218,7 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand( return MatchOperand_ParseFail; } - const AsmToken &Tok2 = Parser.getTok(); // get next token + const AsmToken &Tok2 = Parser.getTok(); // Get next token. if (Tok2.isNot(AsmToken::RParen)) { Error(Parser.getTok().getLoc(), "')' expected"); return MatchOperand_ParseFail; @@ -988,17 +1226,26 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand( SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); - Parser.Lex(); // Eat ')' token. + Parser.Lex(); // Eat the ')' token. if (IdVal == 0) IdVal = MCConstantExpr::Create(0, getContext()); - // now replace register operand with the mem operand + // Replace the register operand with the memory operand. MipsOperand* op = static_cast<MipsOperand*>(Operands.back()); int RegNo = op->getReg(); - // remove register from operands + // Remove the register from the operands. Operands.pop_back(); - // and add memory operand + // Add the memory operand. + if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(IdVal)) { + int64_t Imm; + if (IdVal->EvaluateAsAbsolute(Imm)) + IdVal = MCConstantExpr::Create(Imm, getContext()); + else if (BE->getLHS()->getKind() != MCExpr::SymbolRef) + IdVal = MCBinaryExpr::Create(BE->getOpcode(), BE->getRHS(), BE->getLHS(), + getContext()); + } + Operands.push_back(MipsOperand::CreateMem(RegNo, IdVal, S, E)); delete op; return MatchOperand_Success; @@ -1009,13 +1256,18 @@ MipsAsmParser::parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { if (!isMips64()) return MatchOperand_NoMatch; - // if the first token is not '$' we have an error + if (getLexer().getKind() == AsmToken::Identifier) { + if (searchSymbolAlias(Operands, MipsOperand::Kind_CPU64Regs)) + return MatchOperand_Success; + return MatchOperand_NoMatch; + } + // If the first token is not '$', we have an error. if (Parser.getTok().isNot(AsmToken::Dollar)) return MatchOperand_NoMatch; Parser.Lex(); // Eat $ - if(!tryParseRegisterOperand(Operands, true)) { - // set the proper register kind + if (!tryParseRegisterOperand(Operands, true)) { + // Set the proper register kind. MipsOperand* op = static_cast<MipsOperand*>(Operands.back()); op->setRegKind(MipsOperand::Kind_CPU64Regs); return MatchOperand_Success; @@ -1023,16 +1275,59 @@ MipsAsmParser::parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { return MatchOperand_NoMatch; } +bool MipsAsmParser::searchSymbolAlias( + SmallVectorImpl<MCParsedAsmOperand*> &Operands, unsigned RegisterKind) { + + MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier()); + if (Sym) { + SMLoc S = Parser.getTok().getLoc(); + const MCExpr *Expr; + if (Sym->isVariable()) + Expr = Sym->getVariableValue(); + else + return false; + if (Expr->getKind() == MCExpr::SymbolRef) { + const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr); + const StringRef DefSymbol = Ref->getSymbol().getName(); + if (DefSymbol.startswith("$")) { + // Lookup for the register with the corresponding name. 
+ int RegNum = matchRegisterName(DefSymbol.substr(1), isMips64()); + if (RegNum > -1) { + Parser.Lex(); + MipsOperand *op = MipsOperand::CreateReg(RegNum, S, + Parser.getTok().getLoc()); + op->setRegKind((MipsOperand::RegisterKind) RegisterKind); + Operands.push_back(op); + return true; + } + } + } else if (Expr->getKind() == MCExpr::Constant) { + Parser.Lex(); + const MCConstantExpr *Const = static_cast<const MCConstantExpr*>(Expr); + MipsOperand *op = MipsOperand::CreateImm(Const, S, + Parser.getTok().getLoc()); + Operands.push_back(op); + return true; + } + } + return false; +} + MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseCPURegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - // if the first token is not '$' we have an error + if (getLexer().getKind() == AsmToken::Identifier) { + if (searchSymbolAlias(Operands, MipsOperand::Kind_CPURegs)) + return MatchOperand_Success; + return MatchOperand_NoMatch; + } + // If the first token is not '$' we have an error. if (Parser.getTok().isNot(AsmToken::Dollar)) return MatchOperand_NoMatch; Parser.Lex(); // Eat $ - if(!tryParseRegisterOperand(Operands, false)) { - // set the propper register kind + if (!tryParseRegisterOperand(Operands, false)) { + // Set the proper register kind. MipsOperand* op = static_cast<MipsOperand*>(Operands.back()); op->setRegKind(MipsOperand::Kind_CPURegs); return MatchOperand_Success; @@ -1046,87 +1341,88 @@ MipsAsmParser::parseHWRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { if (isMips64()) return MatchOperand_NoMatch; - // if the first token is not '$' we have error + // If the first token is not '$' we have error. if (Parser.getTok().isNot(AsmToken::Dollar)) return MatchOperand_NoMatch; SMLoc S = Parser.getTok().getLoc(); - Parser.Lex(); // Eat $ + Parser.Lex(); // Eat the '$'. - const AsmToken &Tok = Parser.getTok(); // get next token + const AsmToken &Tok = Parser.getTok(); // Get the next token. if (Tok.isNot(AsmToken::Integer)) return MatchOperand_NoMatch; unsigned RegNum = Tok.getIntVal(); - // at the moment only hwreg29 is supported + // At the moment only hwreg29 is supported. if (RegNum != 29) return MatchOperand_ParseFail; MipsOperand *op = MipsOperand::CreateReg(Mips::HWR29, S, - Parser.getTok().getLoc()); + Parser.getTok().getLoc()); op->setRegKind(MipsOperand::Kind_HWRegs); Operands.push_back(op); - Parser.Lex(); // Eat reg number + Parser.Lex(); // Eat the register number. return MatchOperand_Success; } MipsAsmParser::OperandMatchResultTy -MipsAsmParser::parseHW64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { +MipsAsmParser::parseHW64Regs( + SmallVectorImpl<MCParsedAsmOperand*> &Operands) { if (!isMips64()) return MatchOperand_NoMatch; - //if the first token is not '$' we have error + // If the first token is not '$' we have an error. if (Parser.getTok().isNot(AsmToken::Dollar)) return MatchOperand_NoMatch; SMLoc S = Parser.getTok().getLoc(); Parser.Lex(); // Eat $ - const AsmToken &Tok = Parser.getTok(); // get next token + const AsmToken &Tok = Parser.getTok(); // Get the next token. if (Tok.isNot(AsmToken::Integer)) return MatchOperand_NoMatch; unsigned RegNum = Tok.getIntVal(); - // at the moment only hwreg29 is supported + // At the moment only hwreg29 is supported. 
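searchSymbolAlias above lets a bare identifier operand stand in for a register or constant that an earlier .set assignment bound to it (roughly, after ".set gp_save, $s0" the operand "gp_save" parses as that register). A rough standalone model of the lookup, using a plain map where the real code walks MCContext symbols (names here are illustrative):

  #include <cstdio>
  #include <map>
  #include <string>

  // Bindings recorded by earlier ".set name, value" assignments.
  static const std::map<std::string, std::string> SetAliases = {
    {"gp_save", "$s0"},   // register alias
    {"bufsize", "64"},    // constant alias
  };

  // Resolve an identifier operand: a "$..." value becomes a register
  // operand, a number becomes an immediate, anything else falls back to
  // the ordinary symbol/expression path.
  static void resolveOperand(const std::string &Tok) {
    auto It = SetAliases.find(Tok);
    if (It == SetAliases.end())
      std::printf("%s: not an alias, parse as symbol/expression\n", Tok.c_str());
    else if (It->second[0] == '$')
      std::printf("%s: register operand %s\n", Tok.c_str(), It->second.c_str());
    else
      std::printf("%s: immediate operand %s\n", Tok.c_str(), It->second.c_str());
  }

  int main() {
    resolveOperand("gp_save");
    resolveOperand("bufsize");
    resolveOperand("mylabel");
    return 0;
  }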
if (RegNum != 29) return MatchOperand_ParseFail; MipsOperand *op = MipsOperand::CreateReg(Mips::HWR29_64, S, - Parser.getTok().getLoc()); + Parser.getTok().getLoc()); op->setRegKind(MipsOperand::Kind_HW64Regs); Operands.push_back(op); - Parser.Lex(); // Eat reg number + Parser.Lex(); // Eat the register number. return MatchOperand_Success; } MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { unsigned RegNum; - //if the first token is not '$' we have error + // If the first token is not '$' we have an error. if (Parser.getTok().isNot(AsmToken::Dollar)) return MatchOperand_NoMatch; SMLoc S = Parser.getTok().getLoc(); - Parser.Lex(); // Eat $ + Parser.Lex(); // Eat the '$' - const AsmToken &Tok = Parser.getTok(); // get next token + const AsmToken &Tok = Parser.getTok(); // Get next token. if (Tok.is(AsmToken::Integer)) { RegNum = Tok.getIntVal(); - // at the moment only fcc0 is supported + // At the moment only fcc0 is supported. if (RegNum != 0) return MatchOperand_ParseFail; } else if (Tok.is(AsmToken::Identifier)) { - // at the moment only fcc0 is supported + // At the moment only fcc0 is supported. if (Tok.getIdentifier() != "fcc0") return MatchOperand_ParseFail; } else return MatchOperand_NoMatch; MipsOperand *op = MipsOperand::CreateReg(Mips::FCC0, S, - Parser.getTok().getLoc()); + Parser.getTok().getLoc()); op->setRegKind(MipsOperand::Kind_CCRRegs); Operands.push_back(op); - Parser.Lex(); // Eat reg number + Parser.Lex(); // Eat the register number. return MatchOperand_Success; } @@ -1158,23 +1454,23 @@ MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) { static int ConvertCcString(StringRef CondString) { int CC = StringSwitch<unsigned>(CondString) - .Case(".f", 0) - .Case(".un", 1) - .Case(".eq", 2) - .Case(".ueq", 3) - .Case(".olt", 4) - .Case(".ult", 5) - .Case(".ole", 6) - .Case(".ule", 7) - .Case(".sf", 8) - .Case(".ngle", 9) - .Case(".seq", 10) - .Case(".ngl", 11) - .Case(".lt", 12) - .Case(".nge", 13) - .Case(".le", 14) - .Case(".ngt", 15) - .Default(-1); + .Case(".f", 0) + .Case(".un", 1) + .Case(".eq", 2) + .Case(".ueq", 3) + .Case(".olt", 4) + .Case(".ult", 5) + .Case(".ole", 6) + .Case(".ule", 7) + .Case(".sf", 8) + .Case(".ngle", 9) + .Case(".seq", 10) + .Case(".ngl", 11) + .Case(".lt", 12) + .Case(".nge", 13) + .Case(".le", 14) + .Case(".ngt", 15) + .Default(-1); return CC; } @@ -1182,16 +1478,16 @@ static int ConvertCcString(StringRef CondString) { bool MipsAsmParser:: parseMathOperation(StringRef Name, SMLoc NameLoc, SmallVectorImpl<MCParsedAsmOperand*> &Operands) { - // split the format + // Split the format. size_t Start = Name.find('.'), Next = Name.rfind('.'); StringRef Format1 = Name.slice(Start, Next); - // and add the first format to the operands + // Add the first format to the operands. Operands.push_back(MipsOperand::CreateToken(Format1, NameLoc)); - // now for the second format + // Now for the second format. StringRef Format2 = Name.slice(Next, StringRef::npos); Operands.push_back(MipsOperand::CreateToken(Format2, NameLoc)); - // set the format for the first register + // Set the format for the first register. setFpFormat(Format1); // Read the remaining operands. @@ -1207,11 +1503,10 @@ parseMathOperation(StringRef Name, SMLoc NameLoc, SMLoc Loc = getLexer().getLoc(); Parser.eatToEndOfStatement(); return Error(Loc, "unexpected token in argument list"); - } - Parser.Lex(); // Eat the comma. + Parser.Lex(); // Eat the comma. 
- //set the format for the first register + // Set the format for the first register setFpFormat(Format2); // Parse and remember the operand. @@ -1228,7 +1523,7 @@ parseMathOperation(StringRef Name, SMLoc NameLoc, return Error(Loc, "unexpected token in argument list"); } - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); // Consume the EndOfStatement. return false; } @@ -1236,13 +1531,12 @@ bool MipsAsmParser:: ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, SmallVectorImpl<MCParsedAsmOperand*> &Operands) { StringRef Mnemonic; - // floating point instructions: should register be treated as double? + // Floating point instructions: Should the register be treated as a double? if (requestsDoubleOperand(Name)) { setFpFormat(FP_FORMAT_D); - Operands.push_back(MipsOperand::CreateToken(Name, NameLoc)); - Mnemonic = Name; - } - else { + Operands.push_back(MipsOperand::CreateToken(Name, NameLoc)); + Mnemonic = Name; + } else { setDefaultFpFormat(); // Create the leading tokens for the mnemonic, split by '.' characters. size_t Start = 0, Next = Name.find('.'); @@ -1251,30 +1545,30 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, Operands.push_back(MipsOperand::CreateToken(Mnemonic, NameLoc)); if (Next != StringRef::npos) { - // there is a format token in mnemonic - // StringRef Rest = Name.slice(Next, StringRef::npos); - size_t Dot = Name.find('.', Next+1); + // There is a format token in mnemonic. + size_t Dot = Name.find('.', Next + 1); StringRef Format = Name.slice(Next, Dot); - if (Dot == StringRef::npos) //only one '.' in a string, it's a format + if (Dot == StringRef::npos) // Only one '.' in a string, it's a format. Operands.push_back(MipsOperand::CreateToken(Format, NameLoc)); else { - if (Name.startswith("c.")){ - // floating point compare, add '.' and immediate represent for cc + if (Name.startswith("c.")) { + // Floating point compare, add '.' and immediate represent for cc. Operands.push_back(MipsOperand::CreateToken(".", NameLoc)); int Cc = ConvertCcString(Format); if (Cc == -1) { return Error(NameLoc, "Invalid conditional code"); } SMLoc E = SMLoc::getFromPointer( - Parser.getTok().getLoc().getPointer() -1 ); - Operands.push_back(MipsOperand::CreateImm( - MCConstantExpr::Create(Cc, getContext()), NameLoc, E)); + Parser.getTok().getLoc().getPointer() - 1); + Operands.push_back( + MipsOperand::CreateImm(MCConstantExpr::Create(Cc, getContext()), + NameLoc, E)); } else { // trunc, ceil, floor ... return parseMathOperation(Name, NameLoc, Operands); } - // the rest is a format + // The rest is a format. Format = Name.slice(Dot, StringRef::npos); Operands.push_back(MipsOperand::CreateToken(Format, NameLoc)); } @@ -1292,8 +1586,8 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, return Error(Loc, "unexpected token in argument list"); } - while (getLexer().is(AsmToken::Comma) ) { - Parser.Lex(); // Eat the comma. + while (getLexer().is(AsmToken::Comma)) { + Parser.Lex(); // Eat the comma. // Parse and remember the operand. if (ParseOperand(Operands, Name)) { @@ -1310,48 +1604,47 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, return Error(Loc, "unexpected token in argument list"); } - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); // Consume the EndOfStatement. 
return false; } bool MipsAsmParser::reportParseError(StringRef ErrorMsg) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, ErrorMsg); + SMLoc Loc = getLexer().getLoc(); + Parser.eatToEndOfStatement(); + return Error(Loc, ErrorMsg); } bool MipsAsmParser::parseSetNoAtDirective() { - // line should look like: - // .set noat - // set at reg to 0 + // Line should look like: ".set noat". + // set at reg to 0. Options.setATReg(0); // eat noat Parser.Lex(); - // if this is not the end of the statement, report error + // If this is not the end of the statement, report an error. if (getLexer().isNot(AsmToken::EndOfStatement)) { reportParseError("unexpected token in statement"); return false; } - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); // Consume the EndOfStatement. return false; } + bool MipsAsmParser::parseSetAtDirective() { - // line can be - // .set at - defaults to $1 + // Line can be .set at - defaults to $1 // or .set at=$reg int AtRegNo; getParser().Lex(); if (getLexer().is(AsmToken::EndOfStatement)) { Options.setATReg(1); - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); // Consume the EndOfStatement. return false; } else if (getLexer().is(AsmToken::Equal)) { - getParser().Lex(); //eat '=' + getParser().Lex(); // Eat the '='. if (getLexer().isNot(AsmToken::Dollar)) { reportParseError("unexpected token in statement"); return false; } - Parser.Lex(); // eat '$' + Parser.Lex(); // Eat the '$'. const AsmToken &Reg = Parser.getTok(); if (Reg.is(AsmToken::Identifier)) { AtRegNo = matchCPURegisterName(Reg.getIdentifier()); @@ -1362,7 +1655,7 @@ bool MipsAsmParser::parseSetAtDirective() { return false; } - if ( AtRegNo < 1 || AtRegNo > 31) { + if (AtRegNo < 1 || AtRegNo > 31) { reportParseError("unexpected token in statement"); return false; } @@ -1371,13 +1664,13 @@ bool MipsAsmParser::parseSetAtDirective() { reportParseError("unexpected token in statement"); return false; } - getParser().Lex(); //eat reg + getParser().Lex(); // Eat the register. if (getLexer().isNot(AsmToken::EndOfStatement)) { reportParseError("unexpected token in statement"); return false; - } - Parser.Lex(); // Consume the EndOfStatement + } + Parser.Lex(); // Consume the EndOfStatement. return false; } else { reportParseError("unexpected token in statement"); @@ -1387,43 +1680,43 @@ bool MipsAsmParser::parseSetAtDirective() { bool MipsAsmParser::parseSetReorderDirective() { Parser.Lex(); - // if this is not the end of the statement, report error + // If this is not the end of the statement, report an error. if (getLexer().isNot(AsmToken::EndOfStatement)) { reportParseError("unexpected token in statement"); return false; } Options.setReorder(); - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); // Consume the EndOfStatement. return false; } bool MipsAsmParser::parseSetNoReorderDirective() { - Parser.Lex(); - // if this is not the end of the statement, report error - if (getLexer().isNot(AsmToken::EndOfStatement)) { - reportParseError("unexpected token in statement"); - return false; - } - Options.setNoreorder(); - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); + // If this is not the end of the statement, report an error. + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token in statement"); return false; + } + Options.setNoreorder(); + Parser.Lex(); // Consume the EndOfStatement. 
+ return false; } bool MipsAsmParser::parseSetMacroDirective() { Parser.Lex(); - // if this is not the end of the statement, report error + // If this is not the end of the statement, report an error. if (getLexer().isNot(AsmToken::EndOfStatement)) { reportParseError("unexpected token in statement"); return false; } Options.setMacro(); - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); // Consume the EndOfStatement. return false; } bool MipsAsmParser::parseSetNoMacroDirective() { Parser.Lex(); - // if this is not the end of the statement, report error + // If this is not the end of the statement, report an error. if (getLexer().isNot(AsmToken::EndOfStatement)) { reportParseError("`noreorder' must be set before `nomacro'"); return false; @@ -1433,12 +1726,37 @@ bool MipsAsmParser::parseSetNoMacroDirective() { return false; } Options.setNomacro(); - Parser.Lex(); // Consume the EndOfStatement + Parser.Lex(); // Consume the EndOfStatement. return false; } + +bool MipsAsmParser::parseSetAssignment() { + StringRef Name; + const MCExpr *Value; + + if (Parser.parseIdentifier(Name)) + reportParseError("expected identifier after .set"); + + if (getLexer().isNot(AsmToken::Comma)) + return reportParseError("unexpected token in .set directive"); + Lex(); // Eat comma + + if (Parser.parseExpression(Value)) + reportParseError("expected valid expression after comma"); + + // Check if the Name already exists as a symbol. + MCSymbol *Sym = getContext().LookupSymbol(Name); + if (Sym) + return reportParseError("symbol already defined"); + Sym = getContext().GetOrCreateSymbol(Name); + Sym->setVariableValue(Value); + + return false; +} + bool MipsAsmParser::parseDirectiveSet() { - // get next token + // Get the next token. const AsmToken &Tok = Parser.getTok(); if (Tok.getString() == "noat") { @@ -1454,13 +1772,17 @@ bool MipsAsmParser::parseDirectiveSet() { } else if (Tok.getString() == "nomacro") { return parseSetNoMacroDirective(); } else if (Tok.getString() == "nomips16") { - // ignore this directive for now + // Ignore this directive for now. Parser.eatToEndOfStatement(); return false; } else if (Tok.getString() == "nomicromips") { - // ignore this directive for now + // Ignore this directive for now. Parser.eatToEndOfStatement(); return false; + } else { + // It is just an identifier, look for an assignment. + parseSetAssignment(); + return false; } return true; @@ -1495,20 +1817,20 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { StringRef IDVal = DirectiveID.getString(); - if ( IDVal == ".ent") { - // ignore this directive for now + if (IDVal == ".ent") { + // Ignore this directive for now. Parser.Lex(); return false; } if (IDVal == ".end") { - // ignore this directive for now + // Ignore this directive for now. Parser.Lex(); return false; } if (IDVal == ".frame") { - // ignore this directive for now + // Ignore this directive for now. Parser.eatToEndOfStatement(); return false; } @@ -1518,19 +1840,19 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { } if (IDVal == ".fmask") { - // ignore this directive for now + // Ignore this directive for now. Parser.eatToEndOfStatement(); return false; } if (IDVal == ".mask") { - // ignore this directive for now + // Ignore this directive for now. Parser.eatToEndOfStatement(); return false; } if (IDVal == ".gpword") { - // ignore this directive for now + // Ignore this directive for now. 
Parser.eatToEndOfStatement(); return false; } diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt index cf8bb18..78a9f70 100644 --- a/lib/Target/Mips/CMakeLists.txt +++ b/lib/Target/Mips/CMakeLists.txt @@ -32,6 +32,8 @@ add_llvm_target(MipsCodeGen MipsLongBranch.cpp MipsMCInstLower.cpp MipsMachineFunction.cpp + MipsModuleISelDAGToDAG.cpp + MipsOs16.cpp MipsRegisterInfo.cpp MipsSEFrameLowering.cpp MipsSEInstrInfo.cpp diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp index 025a783..0dba33a 100644 --- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp +++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp @@ -138,10 +138,20 @@ static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst, uint64_t Address, const void *Decoder); -static DecodeStatus DecodeACRegsRegisterClass(MCInst &Inst, - unsigned RegNo, - uint64_t Address, - const void *Decoder); +static DecodeStatus DecodeACRegsDSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeHIRegsDSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeLORegsDSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); static DecodeStatus DecodeBranchTarget(MCInst &Inst, unsigned Offset, @@ -484,14 +494,38 @@ static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst, return MCDisassembler::Success; } -static DecodeStatus DecodeACRegsRegisterClass(MCInst &Inst, - unsigned RegNo, - uint64_t Address, - const void *Decoder) { +static DecodeStatus DecodeACRegsDSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= 4) + return MCDisassembler::Fail; + + unsigned Reg = getReg(Decoder, Mips::ACRegsDSPRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeHIRegsDSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= 4) + return MCDisassembler::Fail; + + unsigned Reg = getReg(Decoder, Mips::HIRegsDSPRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeLORegsDSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { if (RegNo >= 4) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::ACRegsRegClassID, RegNo); + unsigned Reg = getReg(Decoder, Mips::LORegsDSPRegClassID, RegNo); Inst.addOperand(MCOperand::CreateReg(Reg)); return MCDisassembler::Success; } diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp index 96f93a0..9460731 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp @@ -27,6 +27,9 @@ #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/Support/raw_ostream.h" +#define GET_INSTRMAP_INFO +#include "MipsGenInstrInfo.inc" + using namespace llvm; namespace { @@ -35,12 +38,13 @@ class MipsMCCodeEmitter : public MCCodeEmitter { void operator=(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION; const MCInstrInfo &MCII; MCContext &Ctx; + const MCSubtargetInfo &STI; bool IsLittleEndian; public: MipsMCCodeEmitter(const MCInstrInfo &mcii, MCContext &Ctx_, const MCSubtargetInfo &sti, bool IsLittle) : - MCII(mcii), Ctx(Ctx_), IsLittleEndian(IsLittle) {} + MCII(mcii), Ctx(Ctx_), 
STI (sti), IsLittleEndian(IsLittle) {} ~MipsMCCodeEmitter() {} @@ -88,6 +92,9 @@ public: unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; + unsigned + getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const; + }; // class MipsMCCodeEmitter } // namespace @@ -141,6 +148,15 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary) llvm_unreachable("unimplemented opcode in EncodeInstruction()"); + if (STI.getFeatureBits() & Mips::FeatureMicroMips) { + int NewOpcode = Mips::Std2MicroMips (Opcode, Mips::Arch_micromips); + if (NewOpcode != -1) { + Opcode = NewOpcode; + TmpInst.setOpcode (NewOpcode); + Binary = getBinaryCodeForInstr(TmpInst, Fixups); + } + } + const MCInstrDesc &Desc = MCII.get(TmpInst.getOpcode()); // Get byte count of instruction @@ -160,8 +176,9 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo, const MCOperand &MO = MI.getOperand(OpNo); - // If the destination is an immediate, we have nothing to do. - if (MO.isImm()) return MO.getImm(); + // If the destination is an immediate, divide by 4. + if (MO.isImm()) return MO.getImm() >> 2; + assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions or immediates"); @@ -179,8 +196,9 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); - // If the destination is an immediate, we have nothing to do. - if (MO.isImm()) return MO.getImm(); + // If the destination is an immediate, divide by 4. + if (MO.isImm()) return MO.getImm()>>2; + assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions or an immediate"); @@ -190,35 +208,24 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, return 0; } -/// getMachineOpValue - Return binary encoding of operand. If the machine -/// operand requires relocation, record the relocation and return zero. unsigned MipsMCCodeEmitter:: -getMachineOpValue(const MCInst &MI, const MCOperand &MO, - SmallVectorImpl<MCFixup> &Fixups) const { - if (MO.isReg()) { - unsigned Reg = MO.getReg(); - unsigned RegNo = Ctx.getRegisterInfo().getEncodingValue(Reg); - return RegNo; - } else if (MO.isImm()) { - return static_cast<unsigned>(MO.getImm()); - } else if (MO.isFPImm()) { - return static_cast<unsigned>(APFloat(MO.getFPImm()) - .bitcastToAPInt().getHiBits(32).getLimitedValue()); - } +getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const { + int64_t Res; - // MO must be an Expr. - assert(MO.isExpr()); + if (Expr->EvaluateAsAbsolute(Res)) + return Res; - const MCExpr *Expr = MO.getExpr(); MCExpr::ExprKind Kind = Expr->getKind(); + if (Kind == MCExpr::Constant) { + return cast<MCConstantExpr>(Expr)->getValue(); + } if (Kind == MCExpr::Binary) { - Expr = static_cast<const MCBinaryExpr*>(Expr)->getLHS(); - Kind = Expr->getKind(); + unsigned Res = getExprOpValue(cast<MCBinaryExpr>(Expr)->getLHS(), Fixups); + Res += getExprOpValue(cast<MCBinaryExpr>(Expr)->getRHS(), Fixups); + return Res; } - - assert (Kind == MCExpr::SymbolRef); - + if (Kind == MCExpr::SymbolRef) { Mips::Fixups FixupKind = Mips::Fixups(0); switch(cast<MCSymbolRefExpr>(Expr)->getKind()) { @@ -298,12 +305,32 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO, break; } // switch - Fixups.push_back(MCFixup::Create(0, MO.getExpr(), MCFixupKind(FixupKind))); - - // All of the information is in the fixup. 
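The getBranchTargetOpValue/getJumpTargetOpValue change above encodes an immediate target in units of 4-byte instructions rather than bytes, hence the shift right by two. A tiny illustration of that field/byte-offset relationship (assuming 4-byte instructions; helper names are illustrative):

  #include <cassert>
  #include <cstdint>

  // Branch/jump target fields count instructions, not bytes.
  static unsigned encodeTarget(int64_t ByteOffset) { return ByteOffset >> 2; }
  static int64_t decodeTarget(unsigned Field) { return int64_t(Field) << 2; }

  int main() {
    assert(encodeTarget(16) == 4);                 // four instructions ahead
    assert(decodeTarget(encodeTarget(64)) == 64);  // round-trips cleanly
    return 0;
  }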
+ Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(FixupKind))); + return 0; + } return 0; } +/// getMachineOpValue - Return binary encoding of operand. If the machine +/// operand requires relocation, record the relocation and return zero. +unsigned MipsMCCodeEmitter:: +getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups) const { + if (MO.isReg()) { + unsigned Reg = MO.getReg(); + unsigned RegNo = Ctx.getRegisterInfo().getEncodingValue(Reg); + return RegNo; + } else if (MO.isImm()) { + return static_cast<unsigned>(MO.getImm()); + } else if (MO.isFPImm()) { + return static_cast<unsigned>(APFloat(MO.getFPImm()) + .bitcastToAPInt().getHiBits(32).getLimitedValue()); + } + // MO must be an Expr. + assert(MO.isExpr()); + return getExprOpValue(MO.getExpr(),Fixups); +} + /// getMemEncoding - Return binary encoding of memory related operand. /// If the offset operand requires relocation, record the relocation. unsigned diff --git a/lib/Target/Mips/MicroMipsInstrFormats.td b/lib/Target/Mips/MicroMipsInstrFormats.td new file mode 100644 index 0000000..665b4d2 --- /dev/null +++ b/lib/Target/Mips/MicroMipsInstrFormats.td @@ -0,0 +1,112 @@ +class MMArch { + string Arch = "micromips"; + list<dag> Pattern = []; +} + +class ADD_FM_MM<bits<6> op, bits<10> funct> : MMArch { + bits<5> rt; + bits<5> rs; + bits<5> rd; + + bits<32> Inst; + + let Inst{31-26} = op; + let Inst{25-21} = rt; + let Inst{20-16} = rs; + let Inst{15-11} = rd; + let Inst{10} = 0; + let Inst{9-0} = funct; +} + +class ADDI_FM_MM<bits<6> op> : MMArch { + bits<5> rs; + bits<5> rt; + bits<16> imm16; + + bits<32> Inst; + + let Inst{31-26} = op; + let Inst{25-21} = rt; + let Inst{20-16} = rs; + let Inst{15-0} = imm16; +} + +class SLTI_FM_MM<bits<6> op> : MMArch { + bits<5> rt; + bits<5> rs; + bits<16> imm16; + + bits<32> Inst; + + let Inst{31-26} = op; + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-0} = imm16; +} + +class LUI_FM_MM : MMArch { + bits<5> rt; + bits<16> imm16; + + bits<32> Inst; + + let Inst{31-26} = 0x10; + let Inst{25-21} = 0xd; + let Inst{20-16} = rt; + let Inst{15-0} = imm16; +} + +class MULT_FM_MM<bits<10> funct> : MMArch { + bits<5> rs; + bits<5> rt; + + bits<32> Inst; + + let Inst{31-26} = 0x00; + let Inst{25-21} = rt; + let Inst{20-16} = rs; + let Inst{15-6} = funct; + let Inst{5-0} = 0x3c; +} + +class SRA_FM_MM<bits<10> funct, bit rotate> : MMArch { + bits<5> rd; + bits<5> rt; + bits<5> shamt; + + bits<32> Inst; + + let Inst{31-26} = 0; + let Inst{25-21} = rd; + let Inst{20-16} = rt; + let Inst{15-11} = shamt; + let Inst{10} = rotate; + let Inst{9-0} = funct; +} + +class SRLV_FM_MM<bits<10> funct, bit rotate> : MMArch { + bits<5> rd; + bits<5> rt; + bits<5> rs; + + bits<32> Inst; + + let Inst{31-26} = 0; + let Inst{25-21} = rt; + let Inst{20-16} = rs; + let Inst{15-11} = rd; + let Inst{10} = rotate; + let Inst{9-0} = funct; +} + +class LW_FM_MM<bits<6> op> : MMArch { + bits<5> rt; + bits<21> addr; + + bits<32> Inst; + + let Inst{31-26} = op; + let Inst{25-21} = rt; + let Inst{20-16} = addr{20-16}; + let Inst{15-0} = addr{15-0}; +} diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td new file mode 100644 index 0000000..74cdccd --- /dev/null +++ b/lib/Target/Mips/MicroMipsInstrInfo.td @@ -0,0 +1,67 @@ +let isCodeGenOnly = 1 in { + /// Arithmetic Instructions (ALU Immediate) + def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd>, + ADDI_FM_MM<0xc>; + def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>, + 
ADDI_FM_MM<0x4>; + def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>, + SLTI_FM_MM<0x24>; + def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>, + SLTI_FM_MM<0x2c>; + def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>, + ADDI_FM_MM<0x34>; + def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>, + ADDI_FM_MM<0x14>; + def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>, + ADDI_FM_MM<0x1c>; + def LUi_MM : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM_MM; + + /// Arithmetic Instructions (3-Operand, R-Type) + def ADDu_MM : MMRel, ArithLogicR<"addu", CPURegsOpnd>, ADD_FM_MM<0, 0x150>; + def SUBu_MM : MMRel, ArithLogicR<"subu", CPURegsOpnd>, ADD_FM_MM<0, 0x1d0>; + def MUL_MM : MMRel, ArithLogicR<"mul", CPURegsOpnd>, ADD_FM_MM<0, 0x210>; + def ADD_MM : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM_MM<0, 0x110>; + def SUB_MM : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM_MM<0, 0x190>; + def SLT_MM : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM_MM<0, 0x350>; + def SLTu_MM : MMRel, SetCC_R<"sltu", setult, CPURegs>, + ADD_FM_MM<0, 0x390>; + def AND_MM : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>, + ADD_FM_MM<0, 0x250>; + def OR_MM : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>, + ADD_FM_MM<0, 0x290>; + def XOR_MM : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>, + ADD_FM_MM<0, 0x310>; + def NOR_MM : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM_MM<0, 0x2d0>; + def MULT_MM : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>, + MULT_FM_MM<0x22c>; + def MULTu_MM : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>, + MULT_FM_MM<0x26c>; + + /// Shift Instructions + def SLL_MM : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd>, + SRA_FM_MM<0, 0>; + def SRL_MM : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd>, + SRA_FM_MM<0x40, 0>; + def SRA_MM : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd>, + SRA_FM_MM<0x80, 0>; + def SLLV_MM : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd>, + SRLV_FM_MM<0x10, 0>; + def SRLV_MM : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd>, + SRLV_FM_MM<0x50, 0>; + def SRAV_MM : MMRel, shift_rotate_reg<"srav", CPURegsOpnd>, + SRLV_FM_MM<0x90, 0>; + def ROTR_MM : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd>, + SRA_FM_MM<0xc0, 0>; + def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd>, + SRLV_FM_MM<0xd0, 0>; + + /// Load and Store Instructions - aligned + defm LB_MM : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM_MM<0x7>; + defm LBu_MM : LoadM<"lbu", CPURegs, zextloadi8>, MMRel, LW_FM_MM<0x5>; + defm LH_MM : LoadM<"lh", CPURegs, sextloadi16>, MMRel, LW_FM_MM<0xf>; + defm LHu_MM : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM_MM<0xd>; + defm LW_MM : LoadM<"lw", CPURegs>, MMRel, LW_FM_MM<0x3f>; + defm SB_MM : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM_MM<0x6>; + defm SH_MM : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM_MM<0xe>; + defm SW_MM : StoreM<"sw", CPURegs>, MMRel, LW_FM_MM<0x3e>; +} diff --git a/lib/Target/Mips/Mips16FrameLowering.h b/lib/Target/Mips/Mips16FrameLowering.h index 25f4ffb..54fdb78 100644 --- a/lib/Target/Mips/Mips16FrameLowering.h +++ b/lib/Target/Mips/Mips16FrameLowering.h @@ -20,7 +20,7 @@ namespace llvm { class Mips16FrameLowering : public MipsFrameLowering { public: explicit Mips16FrameLowering(const MipsSubtarget &STI) - : MipsFrameLowering(STI) {} + : MipsFrameLowering(STI, 8) {} /// emitProlog/emitEpilog - These methods insert prolog and epilog code into /// the function. 
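The MicroMipsInstrFormats.td classes added above fix each microMIPS field to a bit range; ADD_FM_MM, for instance, is op[31:26], rt[25:21], rs[20:16], rd[15:11], one zero bit, then a 10-bit function code. A quick sketch of that packing in plain C++ (the encoder helper is illustrative, not part of the patch):

  #include <cassert>
  #include <cstdint>

  // Pack the ADD_FM_MM layout into a 32-bit instruction word.
  static uint32_t packAddFmMM(unsigned Op, unsigned Rt, unsigned Rs,
                              unsigned Rd, unsigned Funct) {
    return (Op & 0x3f) << 26 | (Rt & 0x1f) << 21 | (Rs & 0x1f) << 16 |
           (Rd & 0x1f) << 11 | (Funct & 0x3ff);
  }

  int main() {
    // ADDu_MM above pairs ArithLogicR with ADD_FM_MM<0, 0x150>.
    uint32_t Word = packAddFmMM(0, /*rt=*/3, /*rs=*/4, /*rd=*/5, 0x150);
    assert((Word & 0x3ff) == 0x150);       // function field
    assert(((Word >> 11) & 0x1f) == 5);    // rd field
    return 0;
  }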
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp index 00b3449..c1c635c 100644 --- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp +++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp @@ -35,6 +35,11 @@ #include "llvm/Target/TargetMachine.h" using namespace llvm; +bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { + if (!Subtarget.inMips16Mode()) + return false; + return MipsDAGToDAGISel::runOnMachineFunction(MF); +} /// Select multiply instructions. std::pair<SDNode*, SDNode*> Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, DebugLoc DL, EVT Ty, @@ -267,7 +272,7 @@ std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) { EVT VT = LHS.getValueType(); unsigned Sltu_op = Mips::SltuRxRyRz16; - SDNode *Carry = CurDAG->getMachineNode(Sltu_op, DL, VT, Ops, 2); + SDNode *Carry = CurDAG->getMachineNode(Sltu_op, DL, VT, Ops); unsigned Addu_op = Mips::AdduRxRyRz16; SDNode *AddCarry = CurDAG->getMachineNode(Addu_op, DL, VT, SDValue(Carry,0), RHS); diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.h b/lib/Target/Mips/Mips16ISelDAGToDAG.h index baa8587..f05f9b7 100644 --- a/lib/Target/Mips/Mips16ISelDAGToDAG.h +++ b/lib/Target/Mips/Mips16ISelDAGToDAG.h @@ -28,6 +28,8 @@ private: SDValue getMips16SPAliasReg(); + virtual bool runOnMachineFunction(MachineFunction &MF); + void getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg); virtual bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base, diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp index 23eb537..f63318f 100644 --- a/lib/Target/Mips/Mips16ISelLowering.cpp +++ b/lib/Target/Mips/Mips16ISelLowering.cpp @@ -53,7 +53,6 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM) if (Mips16HardFloat) setMips16HardFloatLibCalls(); - setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); @@ -614,7 +613,8 @@ MachineBasicBlock unsigned regX = MI->getOperand(0).getReg(); unsigned regY = MI->getOperand(1).getReg(); MachineBasicBlock *target = MI->getOperand(2).getMBB(); - BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(CmpOpc)).addReg(regX).addReg(regY); + BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(CmpOpc)).addReg(regX) + .addReg(regY); BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(BtOpc)).addMBB(target); MI->eraseFromParent(); // The pseudo instruction is gone now. return BB; @@ -636,7 +636,8 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins( CmpOpc = CmpiXOpc; else llvm_unreachable("immediate field not usable"); - BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(CmpOpc)).addReg(regX).addImm(imm); + BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(CmpOpc)).addReg(regX) + .addImm(imm); BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(BtOpc)).addMBB(target); MI->eraseFromParent(); // The pseudo instruction is gone now. 
return BB; diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp index fd3cc8f..17dd2c0 100644 --- a/lib/Target/Mips/Mips16InstrInfo.cpp +++ b/lib/Target/Mips/Mips16InstrInfo.cpp @@ -98,10 +98,10 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB, } void Mips16InstrInfo:: -storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - unsigned SrcReg, bool isKill, int FI, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const { +storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned SrcReg, bool isKill, int FI, + const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, + int64_t Offset) const { DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore); @@ -110,14 +110,13 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Opc = Mips::SwRxSpImmX16; assert(Opc && "Register class not handled!"); BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)) - .addFrameIndex(FI).addImm(0).addMemOperand(MMO); + .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO); } void Mips16InstrInfo:: -loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - unsigned DestReg, int FI, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const { +loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned DestReg, int FI, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, int64_t Offset) const { DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad); @@ -126,7 +125,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, if (Mips::CPU16RegsRegClass.hasSubClassEq(RC)) Opc = Mips::LwRxSpImmX16; assert(Opc && "Register class not handled!"); - BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0) + BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset) .addMemOperand(MMO); } diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h index 1cb1dfe..a77a904 100644 --- a/lib/Target/Mips/Mips16InstrInfo.h +++ b/lib/Target/Mips/Mips16InstrInfo.h @@ -48,17 +48,19 @@ public: unsigned DestReg, unsigned SrcReg, bool KillSrc) const; - virtual void storeRegToStackSlot(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, - unsigned SrcReg, bool isKill, int FrameIndex, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const; - - virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, - unsigned DestReg, int FrameIndex, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const; + virtual void storeRegToStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + int64_t Offset) const; + + virtual void loadRegFromStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + int64_t Offset) const; virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const; diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td index 6293829..aa51aaf 100644 --- a/lib/Target/Mips/Mips16InstrInfo.td +++ b/lib/Target/Mips/Mips16InstrInfo.td @@ -1466,14 +1466,14 @@ def: Mips16Pat<(i32 
immZExt16:$in), (LiRxImmX16 immZExt16:$in)>; // MipsDivRem // def: Mips16Pat - <(MipsDivRem CPU16Regs:$rx, CPU16Regs:$ry), + <(MipsDivRem16 CPU16Regs:$rx, CPU16Regs:$ry), (DivRxRy16 CPU16Regs:$rx, CPU16Regs:$ry)>; // // MipsDivRemU // def: Mips16Pat - <(MipsDivRemU CPU16Regs:$rx, CPU16Regs:$ry), + <(MipsDivRemU16 CPU16Regs:$rx, CPU16Regs:$ry), (DivuRxRy16 CPU16Regs:$rx, CPU16Regs:$ry)>; // signed a,b diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp index 0ea9368..7ad18f2 100644 --- a/lib/Target/Mips/Mips16RegisterInfo.cpp +++ b/lib/Target/Mips/Mips16RegisterInfo.cpp @@ -1,5 +1,4 @@ - -//===-- Mips16RegisterInfo.cpp - MIPS16 Register Information -== ----------===// +//===-- Mips16RegisterInfo.cpp - MIPS16 Register Information --------------===// // // The LLVM Compiler Infrastructure // @@ -72,6 +71,12 @@ bool Mips16RegisterInfo::saveScavengerRegister return true; } +const TargetRegisterClass * +Mips16RegisterInfo::intRegClass(unsigned Size) const { + assert(Size == 4); + return &Mips::CPU16RegsRegClass; +} + void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, uint64_t StackSize, diff --git a/lib/Target/Mips/Mips16RegisterInfo.h b/lib/Target/Mips/Mips16RegisterInfo.h index b8f818a..2b3d2b1 100644 --- a/lib/Target/Mips/Mips16RegisterInfo.h +++ b/lib/Target/Mips/Mips16RegisterInfo.h @@ -37,6 +37,8 @@ public: const TargetRegisterClass *RC, unsigned Reg) const; + virtual const TargetRegisterClass *intRegClass(unsigned Size) const; + private: virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, uint64_t StackSize, diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td index 5903b9e..fc533fb 100644 --- a/lib/Target/Mips/Mips64InstrInfo.td +++ b/lib/Target/Mips/Mips64InstrInfo.td @@ -66,6 +66,12 @@ let usesCustomInserter = 1, Predicates = [HasStdEnc], defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64>; } +/// Pseudo instructions for loading and storing accumulator registers. 
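Editor's note: the LOAD_AC128/STORE_AC128 pseudos defined next give the register allocator a way to spill and reload the 128-bit HI64/LO64 accumulator pair like an ordinary register. Below is a minimal sketch of the kind of expansion such a spill pseudo implies, storing each 64-bit half of the accumulator into the frame slot; the helper, the operand layout, and the use of a 64-bit store opcode are assumptions for illustration only, not the backend's actual expansion code.

#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;

// Hypothetical expansion of STORE_AC128: write the LO and HI halves of the
// accumulator into a 16-byte spill slot (frame index FI) as two 64-bit words.
static void expandStoreAC128(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I,
                             const MCInstrDesc &StoreD, // e.g. a 64-bit store
                             unsigned LoReg, unsigned HiReg, int FI) {
  DebugLoc DL = I->getDebugLoc();
  BuildMI(MBB, I, DL, StoreD).addReg(LoReg).addFrameIndex(FI).addImm(0);
  BuildMI(MBB, I, DL, StoreD).addReg(HiReg).addFrameIndex(FI).addImm(8);
}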
+let isPseudo = 1 in { + defm LOAD_AC128 : LoadM<"load_ac128", ACRegs128>; + defm STORE_AC128 : StoreM<"store_ac128", ACRegs128>; +} + //===----------------------------------------------------------------------===// // Instruction definition //===----------------------------------------------------------------------===// @@ -179,10 +185,16 @@ def DMULT : Mult<"dmult", IIImul, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1c>; def DMULTu : Mult<"dmultu", IIImul, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1d>; -def DSDIV : Div<MipsDivRem, "ddiv", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, - MULT_FM<0, 0x1e>; -def DUDIV : Div<MipsDivRemU, "ddivu", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, - MULT_FM<0, 0x1f>; +def PseudoDMULT : MultDivPseudo<DMULT, ACRegs128, CPU64RegsOpnd, MipsMult, + IIImul>; +def PseudoDMULTu : MultDivPseudo<DMULTu, ACRegs128, CPU64RegsOpnd, MipsMultu, + IIImul>; +def DSDIV : Div<"ddiv", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1e>; +def DUDIV : Div<"ddivu", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1f>; +def PseudoDSDIV : MultDivPseudo<DSDIV, ACRegs128, CPU64RegsOpnd, MipsDivRem, + IIIdiv, 0>; +def PseudoDUDIV : MultDivPseudo<DUDIV, ACRegs128, CPU64RegsOpnd, MipsDivRemU, + IIIdiv, 0>; def MTHI64 : MoveToLOHI<"mthi", CPU64Regs, [HI64]>, MTLO_FM<0x11>; def MTLO64 : MoveToLOHI<"mtlo", CPU64Regs, [LO64]>, MTLO_FM<0x13>; @@ -306,6 +318,10 @@ def : MipsPat<(i64 (sext_inreg CPU64Regs:$src, i32)), // bswap MipsPattern def : MipsPat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>; +// mflo/hi patterns. +def : MipsPat<(i64 (ExtractLOHI ACRegs128:$ac, imm:$lohi_idx)), + (EXTRACT_SUBREG ACRegs128:$ac, imm:$lohi_idx)>; + //===----------------------------------------------------------------------===// // Instruction aliases //===----------------------------------------------------------------------===// @@ -332,13 +348,19 @@ def : InstAlias<"not $rt, $rs", def : InstAlias<"j $rs", (JR64 CPU64Regs:$rs), 0>, Requires<[HasMips64]>; def : InstAlias<"jalr $rs", (JALR64 RA_64, CPU64Regs:$rs)>, Requires<[HasMips64]>; +def : InstAlias<"jal $rs", (JALR64 RA_64, CPU64Regs:$rs), 0>, + Requires<[HasMips64]>; +def : InstAlias<"jal $rd,$rs", (JALR64 CPU64Regs:$rd, CPU64Regs:$rs), 0>, + Requires<[HasMips64]>; def : InstAlias<"daddu $rs, $rt, $imm", (DADDiu CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm), 1>; def : InstAlias<"dadd $rs, $rt, $imm", (DADDi CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm), 1>; - +def : InstAlias<"or $rs, $rt, $imm", + (ORi64 CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm), + 1>, Requires<[HasMips64]>; /// Move between CPU and coprocessor registers let DecoderNamespace = "Mips64" in { diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index 1876cb6..f4f71cb 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -46,6 +46,10 @@ using namespace llvm; bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) { + // Initialize TargetLoweringObjectFile. + if (Subtarget->allowMixed16_32()) + const_cast<TargetLoweringObjectFile&>(getObjFileLowering()) + .Initialize(OutContext, TM); MipsFI = MF.getInfo<MipsFunctionInfo>(); AsmPrinter::runOnMachineFunction(MF); return true; @@ -419,12 +423,18 @@ bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum, unsigned AsmVariant, const char *ExtraCode, raw_ostream &O) { - if (ExtraCode && ExtraCode[0]) - return true; // Unknown modifier. 
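Editor's note: the PrintAsmMemoryOperand replacement below recognizes a single optional 'D' inline-asm modifier and prints the memory operand with a 4-byte offset (typically the second word of a 64-bit operand); any other modifier is still reported as unknown. A small behavioural sketch under that reading, with a purely illustrative helper:

#include <string>
// Models the printing done by the hunk below: "0($reg)" with no modifier,
// "4($reg)" when the 'D' (second-word) modifier is used.
static std::string printMipsMemOperand(const std::string &Reg,
                                       const char *ExtraCode) {
  int Offset = (ExtraCode && ExtraCode[0] == 'D') ? 4 : 0;
  return std::to_string(Offset) + "($" + Reg + ")";
}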
+ int Offset = 0; + // Currently we are expecting either no ExtraCode or 'D' + if (ExtraCode) { + if (ExtraCode[0] == 'D') + Offset = 4; + else + return true; // Unknown modifier. + } const MachineOperand &MO = MI->getOperand(OpNum); assert(MO.isReg() && "unexpected inline asm memory operand"); - O << "0($" << MipsInstPrinter::getRegisterName(MO.getReg()) << ")"; + O << Offset << "($" << MipsInstPrinter::getRegisterName(MO.getReg()) << ")"; return false; } diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp index df877b6..3fc402b 100644 --- a/lib/Target/Mips/MipsCodeEmitter.cpp +++ b/lib/Target/Mips/MipsCodeEmitter.cpp @@ -115,6 +115,10 @@ private: void emitGlobalAddressUnaligned(const GlobalValue *GV, unsigned Reloc, int Offset) const; + /// Expand pseudo instructions with accumulator register operands. + void expandACCInstr(MachineBasicBlock::instr_iterator MI, + MachineBasicBlock &MBB, unsigned Opc) const; + /// \brief Expand pseudo instruction. Return true if MI was expanded. bool expandPseudos(MachineBasicBlock::instr_iterator &MI, MachineBasicBlock &MBB) const; @@ -298,6 +302,14 @@ void MipsCodeEmitter::emitWord(unsigned Word) { MCE.emitWordBE(Word); } +void MipsCodeEmitter::expandACCInstr(MachineBasicBlock::instr_iterator MI, + MachineBasicBlock &MBB, + unsigned Opc) const { + // Expand "pseudomult $ac0, $t0, $t1" to "mult $t0, $t1". + BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Opc)) + .addReg(MI->getOperand(1).getReg()).addReg(MI->getOperand(2).getReg()); +} + bool MipsCodeEmitter::expandPseudos(MachineBasicBlock::instr_iterator &MI, MachineBasicBlock &MBB) const { switch (MI->getOpcode()) { @@ -309,6 +321,30 @@ bool MipsCodeEmitter::expandPseudos(MachineBasicBlock::instr_iterator &MI, BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::JALR), Mips::RA) .addReg(MI->getOperand(0).getReg()); break; + case Mips::PseudoMULT: + expandACCInstr(MI, MBB, Mips::MULT); + break; + case Mips::PseudoMULTu: + expandACCInstr(MI, MBB, Mips::MULTu); + break; + case Mips::PseudoSDIV: + expandACCInstr(MI, MBB, Mips::SDIV); + break; + case Mips::PseudoUDIV: + expandACCInstr(MI, MBB, Mips::UDIV); + break; + case Mips::PseudoMADD: + expandACCInstr(MI, MBB, Mips::MADD); + break; + case Mips::PseudoMADDU: + expandACCInstr(MI, MBB, Mips::MADDU); + break; + case Mips::PseudoMSUB: + expandACCInstr(MI, MBB, Mips::MSUB); + break; + case Mips::PseudoMSUBU: + expandACCInstr(MI, MBB, Mips::MSUBU); + break; default: return false; } diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp index b5de1eb..1951324 100644 --- a/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -80,6 +80,10 @@ FunctionPass *llvm::createMipsConstantIslandPass(MipsTargetMachine &tm) { } bool MipsConstantIslands::runOnMachineFunction(MachineFunction &F) { - return true; + // The intention is for this to be a mips16 only pass for now + // FIXME: + // if (!TM.getSubtarget<MipsSubtarget>().inMips16Mode()) + // return false; + return false; } diff --git a/lib/Target/Mips/MipsDSPInstrFormats.td b/lib/Target/Mips/MipsDSPInstrFormats.td index a72a763..cf09113 100644 --- a/lib/Target/Mips/MipsDSPInstrFormats.td +++ b/lib/Target/Mips/MipsDSPInstrFormats.td @@ -219,6 +219,33 @@ class MULT_FMT<bits<6> opcode, bits<6> funct> : DSPInst { let Inst{5-0} = funct; } +// MFHI sub-class format. 
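Editor's note: the MFHI/MTHI format classes below simply add a 2-bit accumulator selector to the classic move-from/to-HI encodings: for mfhi/mflo the ac field sits in bits 22-21 and rd in bits 15-11, for mthi/mtlo rs sits in bits 25-21 and ac in bits 12-11. A quick sketch of how the MFHI-style layout packs into an instruction word, mirroring the field assignments in the class:

#include <cstdint>
// Packs the MFHI sub-class layout defined below: opcode 0, ac in bits 22-21,
// rd in bits 15-11, function code in bits 5-0, all other bits zero.
static uint32_t encodeMFHIClass(unsigned Rd, unsigned Ac, unsigned Funct) {
  uint32_t Inst = 0;
  Inst |= (Ac & 0x3u) << 21;
  Inst |= (Rd & 0x1fu) << 11;
  Inst |= (Funct & 0x3fu);
  return Inst;
}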
+class MFHI_FMT<bits<6> funct> : DSPInst { + bits<5> rd; + bits<2> ac; + + let Inst{31-26} = 0; + let Inst{25-23} = 0; + let Inst{22-21} = ac; + let Inst{20-16} = 0; + let Inst{15-11} = rd; + let Inst{10-6} = 0; + let Inst{5-0} = funct; +} + +// MTHI sub-class format. +class MTHI_FMT<bits<6> funct> : DSPInst { + bits<5> rs; + bits<2> ac; + + let Inst{31-26} = 0; + let Inst{25-21} = rs; + let Inst{20-13} = 0; + let Inst{12-11} = ac; + let Inst{10-6} = 0; + let Inst{5-0} = funct; +} + // EXTR.W sub-class format (type 1). class EXTR_W_TY1_FMT<bits<5> op> : DSPInst { bits<5> rt; diff --git a/lib/Target/Mips/MipsDSPInstrInfo.td b/lib/Target/Mips/MipsDSPInstrInfo.td index 9531b91..710b40d 100644 --- a/lib/Target/Mips/MipsDSPInstrInfo.td +++ b/lib/Target/Mips/MipsDSPInstrInfo.td @@ -20,17 +20,20 @@ def immZExt10 : ImmLeaf<i32, [{return isUInt<10>(Imm);}]>; def immSExt6 : ImmLeaf<i32, [{return isInt<6>(Imm);}]>; // Mips-specific dsp nodes -def SDT_MipsExtr : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>]>; -def SDT_MipsShilo : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; -def SDT_MipsDPA : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>]>; +def SDT_MipsExtr : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>, + SDTCisVT<2, untyped>]>; +def SDT_MipsShilo : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>, + SDTCisSameAs<0, 2>, SDTCisVT<1, i32>]>; +def SDT_MipsDPA : SDTypeProfile<1, 3, [SDTCisVT<0, untyped>, SDTCisSameAs<0, 3>, + SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>; +def SDT_MipsSHIFT_DSP : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>, + SDTCisVT<2, i32>]>; class MipsDSPBase<string Opc, SDTypeProfile Prof> : - SDNode<!strconcat("MipsISD::", Opc), Prof, - [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>; + SDNode<!strconcat("MipsISD::", Opc), Prof>; class MipsDSPSideEffectBase<string Opc, SDTypeProfile Prof> : - SDNode<!strconcat("MipsISD::", Opc), Prof, - [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPSideEffect]>; + SDNode<!strconcat("MipsISD::", Opc), Prof, [SDNPHasChain, SDNPSideEffect]>; def MipsEXTP : MipsDSPSideEffectBase<"EXTP", SDT_MipsExtr>; def MipsEXTPDP : MipsDSPSideEffectBase<"EXTPDP", SDT_MipsExtr>; @@ -40,7 +43,7 @@ def MipsEXTR_R_W : MipsDSPSideEffectBase<"EXTR_R_W", SDT_MipsExtr>; def MipsEXTR_RS_W : MipsDSPSideEffectBase<"EXTR_RS_W", SDT_MipsExtr>; def MipsSHILO : MipsDSPBase<"SHILO", SDT_MipsShilo>; -def MipsMTHLIP : MipsDSPBase<"MTHLIP", SDT_MipsShilo>; +def MipsMTHLIP : MipsDSPSideEffectBase<"MTHLIP", SDT_MipsShilo>; def MipsMULSAQ_S_W_PH : MipsDSPSideEffectBase<"MULSAQ_S_W_PH", SDT_MipsDPA>; def MipsMAQ_S_W_PHL : MipsDSPSideEffectBase<"MAQ_S_W_PHL", SDT_MipsDPA>; @@ -73,6 +76,11 @@ def MipsMADD_DSP : MipsDSPBase<"MADD_DSP", SDT_MipsDPA>; def MipsMADDU_DSP : MipsDSPBase<"MADDU_DSP", SDT_MipsDPA>; def MipsMSUB_DSP : MipsDSPBase<"MSUB_DSP", SDT_MipsDPA>; def MipsMSUBU_DSP : MipsDSPBase<"MSUBU_DSP", SDT_MipsDPA>; +def MipsSHLL_DSP : MipsDSPBase<"SHLL_DSP", SDT_MipsSHIFT_DSP>; +def MipsSHRA_DSP : MipsDSPBase<"SHRA_DSP", SDT_MipsSHIFT_DSP>; +def MipsSHRL_DSP : MipsDSPBase<"SHRL_DSP", SDT_MipsSHIFT_DSP>; +def MipsSETCC_DSP : MipsDSPBase<"SETCC_DSP", SDTSetCC>; +def MipsSELECT_CC_DSP : MipsDSPBase<"SELECT_CC_DSP", SDTSelectCC>; // Flags. 
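Editor's note: the SDTypeProfile changes above are the crux of this patch. The DSP accumulator is now an explicit untyped value that flows into and out of the multiply/accumulate, shilo, and extract nodes, instead of an implicit AC0 def chained through glue, which is also why the glue flags were dropped from MipsDSPBase. A rough sketch of what building such a node looks like after the change; the wrapper function is hypothetical, only the operand shape follows the SDT_MipsDPA profile above (two i32 inputs plus the incoming accumulator).

#include "llvm/CodeGen/SelectionDAG.h"
#include "MipsISelLowering.h" // for MipsISD, assuming a target-local build
using namespace llvm;

// (DPAQ_S_W_PH rs, rt, acin) returns a fresh untyped accumulator value, so
// back-to-back accumulations chain through ordinary data dependences.
static SDValue buildDSPAccumulate(SelectionDAG &DAG, DebugLoc DL,
                                  SDValue Rs, SDValue Rt, SDValue AcIn) {
  return DAG.getNode(MipsISD::DPAQ_S_W_PH, DL, MVT::Untyped, Rs, Rt, AcIn);
}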
class UseAC { @@ -144,6 +152,10 @@ class MAQ_S_W_PHL_ENC : DPA_W_PH_FMT<0b10100>; class MAQ_S_W_PHR_ENC : DPA_W_PH_FMT<0b10110>; class MAQ_SA_W_PHL_ENC : DPA_W_PH_FMT<0b10000>; class MAQ_SA_W_PHR_ENC : DPA_W_PH_FMT<0b10010>; +class MFHI_ENC : MFHI_FMT<0b010000>; +class MFLO_ENC : MFHI_FMT<0b010010>; +class MTHI_ENC : MTHI_FMT<0b010001>; +class MTLO_ENC : MTHI_FMT<0b010011>; class DPAU_H_QBL_ENC : DPA_W_PH_FMT<0b00011>; class DPAU_H_QBR_ENC : DPA_W_PH_FMT<0b00111>; class DPSU_H_QBL_ENC : DPA_W_PH_FMT<0b01011>; @@ -343,6 +355,7 @@ class SHLL_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode, list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, ImmPat:$rs_sa))]; InstrItinClass Itinerary = itin; list<Register> Defs = [DSPCtrl]; + bit hasSideEffects = 1; } class LX_DESC_BASE<string instr_asm, SDPatternOperator OpNode, @@ -383,7 +396,7 @@ class APPEND_DESC_BASE<string instr_asm, SDPatternOperator OpNode, class EXTR_W_TY1_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { dag OutOperandList = (outs CPURegs:$rt); - dag InOperandList = (ins ACRegs:$ac, CPURegs:$shift_rs); + dag InOperandList = (ins ACRegsDSP:$ac, CPURegs:$shift_rs); string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs"); InstrItinClass Itinerary = itin; list<Register> Defs = [DSPCtrl]; @@ -392,46 +405,40 @@ class EXTR_W_TY1_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode, class EXTR_W_TY1_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode, InstrItinClass itin> { dag OutOperandList = (outs CPURegs:$rt); - dag InOperandList = (ins ACRegs:$ac, uimm16:$shift_rs); + dag InOperandList = (ins ACRegsDSP:$ac, uimm16:$shift_rs); string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs"); InstrItinClass Itinerary = itin; list<Register> Defs = [DSPCtrl]; } -class SHILO_R1_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin, - Instruction realinst> : - PseudoDSP<(outs), (ins simm16:$shift), [(OpNode immSExt6:$shift)]>, - PseudoInstExpansion<(realinst AC0, simm16:$shift)> { - list<Register> Defs = [DSPCtrl, AC0]; - list<Register> Uses = [AC0]; - InstrItinClass Itinerary = itin; -} - -class SHILO_R1_DESC_BASE<string instr_asm> { - dag OutOperandList = (outs ACRegs:$ac); - dag InOperandList = (ins simm16:$shift); +class SHILO_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { + dag OutOperandList = (outs ACRegsDSP:$ac); + dag InOperandList = (ins simm16:$shift, ACRegsDSP:$acin); string AsmString = !strconcat(instr_asm, "\t$ac, $shift"); + list<dag> Pattern = [(set ACRegsDSP:$ac, + (OpNode immSExt6:$shift, ACRegsDSP:$acin))]; + list<Register> Defs = [DSPCtrl]; + string Constraints = "$acin = $ac"; } -class SHILO_R2_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin, - Instruction realinst> : - PseudoDSP<(outs), (ins CPURegs:$rs), [(OpNode CPURegs:$rs)]>, - PseudoInstExpansion<(realinst AC0, CPURegs:$rs)> { - list<Register> Defs = [DSPCtrl, AC0]; - list<Register> Uses = [AC0]; - InstrItinClass Itinerary = itin; -} - -class SHILO_R2_DESC_BASE<string instr_asm> { - dag OutOperandList = (outs ACRegs:$ac); - dag InOperandList = (ins CPURegs:$rs); +class SHILO_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { + dag OutOperandList = (outs ACRegsDSP:$ac); + dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin); string AsmString = !strconcat(instr_asm, "\t$ac, $rs"); + list<dag> Pattern = [(set ACRegsDSP:$ac, + (OpNode CPURegs:$rs, ACRegsDSP:$acin))]; + list<Register> Defs = [DSPCtrl]; + string Constraints = "$acin = $ac"; } -class MTHLIP_DESC_BASE<string 
instr_asm> { - dag OutOperandList = (outs ACRegs:$ac); - dag InOperandList = (ins CPURegs:$rs); +class MTHLIP_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { + dag OutOperandList = (outs ACRegsDSP:$ac); + dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin); string AsmString = !strconcat(instr_asm, "\t$rs, $ac"); + list<dag> Pattern = [(set ACRegsDSP:$ac, + (OpNode CPURegs:$rs, ACRegsDSP:$acin))]; + list<Register> Uses = [DSPCtrl]; + string Constraints = "$acin = $ac"; } class RDDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode, @@ -454,35 +461,51 @@ class WRDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode, list<Register> Defs = [DSPCtrl]; } -class DPA_W_PH_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin, - Instruction realinst> : - PseudoDSP<(outs), (ins CPURegs:$rs, CPURegs:$rt), - [(OpNode CPURegs:$rs, CPURegs:$rt)]>, - PseudoInstExpansion<(realinst AC0, CPURegs:$rs, CPURegs:$rt)> { - list<Register> Defs = [DSPCtrl, AC0]; - list<Register> Uses = [AC0]; - InstrItinClass Itinerary = itin; +class DPA_W_PH_DESC_BASE<string instr_asm, SDPatternOperator OpNode> { + dag OutOperandList = (outs ACRegsDSP:$ac); + dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin); + string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); + list<dag> Pattern = [(set ACRegsDSP:$ac, + (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))]; + list<Register> Defs = [DSPCtrl]; + string Constraints = "$acin = $ac"; } -class DPA_W_PH_DESC_BASE<string instr_asm> { - dag OutOperandList = (outs ACRegs:$ac); +class MULT_DESC_BASE<string instr_asm, SDPatternOperator OpNode, + InstrItinClass itin> { + dag OutOperandList = (outs ACRegsDSP:$ac); dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt); string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); + list<dag> Pattern = [(set ACRegsDSP:$ac, (OpNode CPURegs:$rs, CPURegs:$rt))]; + InstrItinClass Itinerary = itin; + int AddedComplexity = 20; + bit isCommutable = 1; } -class MULT_PSEUDO_BASE<SDPatternOperator OpNode, InstrItinClass itin, - Instruction realinst> : - PseudoDSP<(outs), (ins CPURegs:$rs, CPURegs:$rt), - [(OpNode CPURegs:$rs, CPURegs:$rt)]>, - PseudoInstExpansion<(realinst AC0, CPURegs:$rs, CPURegs:$rt)> { - list<Register> Defs = [DSPCtrl, AC0]; +class MADD_DESC_BASE<string instr_asm, SDPatternOperator OpNode, + InstrItinClass itin> { + dag OutOperandList = (outs ACRegsDSP:$ac); + dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin); + string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); + list<dag> Pattern = [(set ACRegsDSP:$ac, + (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))]; InstrItinClass Itinerary = itin; + int AddedComplexity = 20; + string Constraints = "$acin = $ac"; } -class MULT_DESC_BASE<string instr_asm> { - dag OutOperandList = (outs ACRegs:$ac); - dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt); - string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); +class MFHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> { + dag OutOperandList = (outs CPURegs:$rd); + dag InOperandList = (ins RC:$ac); + string AsmString = !strconcat(instr_asm, "\t$rd, $ac"); + InstrItinClass Itinerary = itin; +} + +class MTHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> { + dag OutOperandList = (outs RC:$ac); + dag InOperandList = (ins CPURegs:$rs); + string AsmString = !strconcat(instr_asm, "\t$rs, $ac"); + InstrItinClass Itinerary = itin; } class BPOSGE32_PSEUDO_DESC_BASE<SDPatternOperator OpNode, InstrItinClass itin> : @@ -518,27 +541,27 @@ 
class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode, //===----------------------------------------------------------------------===// // Addition/subtraction -class ADDU_QB_DESC : ADDU_QB_DESC_BASE<"addu.qb", int_mips_addu_qb, NoItinerary, +class ADDU_QB_DESC : ADDU_QB_DESC_BASE<"addu.qb", null_frag, NoItinerary, DSPRegs, DSPRegs>, IsCommutable; class ADDU_S_QB_DESC : ADDU_QB_DESC_BASE<"addu_s.qb", int_mips_addu_s_qb, NoItinerary, DSPRegs, DSPRegs>, IsCommutable; -class SUBU_QB_DESC : ADDU_QB_DESC_BASE<"subu.qb", int_mips_subu_qb, NoItinerary, +class SUBU_QB_DESC : ADDU_QB_DESC_BASE<"subu.qb", null_frag, NoItinerary, DSPRegs, DSPRegs>; class SUBU_S_QB_DESC : ADDU_QB_DESC_BASE<"subu_s.qb", int_mips_subu_s_qb, NoItinerary, DSPRegs, DSPRegs>; -class ADDQ_PH_DESC : ADDU_QB_DESC_BASE<"addq.ph", int_mips_addq_ph, NoItinerary, +class ADDQ_PH_DESC : ADDU_QB_DESC_BASE<"addq.ph", null_frag, NoItinerary, DSPRegs, DSPRegs>, IsCommutable; class ADDQ_S_PH_DESC : ADDU_QB_DESC_BASE<"addq_s.ph", int_mips_addq_s_ph, NoItinerary, DSPRegs, DSPRegs>, IsCommutable; -class SUBQ_PH_DESC : ADDU_QB_DESC_BASE<"subq.ph", int_mips_subq_ph, NoItinerary, +class SUBQ_PH_DESC : ADDU_QB_DESC_BASE<"subq.ph", null_frag, NoItinerary, DSPRegs, DSPRegs>; class SUBQ_S_PH_DESC : ADDU_QB_DESC_BASE<"subq_s.ph", int_mips_subq_s_ph, @@ -551,10 +574,10 @@ class ADDQ_S_W_DESC : ADDU_QB_DESC_BASE<"addq_s.w", int_mips_addq_s_w, class SUBQ_S_W_DESC : ADDU_QB_DESC_BASE<"subq_s.w", int_mips_subq_s_w, NoItinerary, CPURegs, CPURegs>; -class ADDSC_DESC : ADDU_QB_DESC_BASE<"addsc", int_mips_addsc, NoItinerary, +class ADDSC_DESC : ADDU_QB_DESC_BASE<"addsc", null_frag, NoItinerary, CPURegs, CPURegs>, IsCommutable; -class ADDWC_DESC : ADDU_QB_DESC_BASE<"addwc", int_mips_addwc, NoItinerary, +class ADDWC_DESC : ADDU_QB_DESC_BASE<"addwc", null_frag, NoItinerary, CPURegs, CPURegs>, IsCommutable, UseDSPCtrl; @@ -644,19 +667,19 @@ class PRECEU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbra", ClearDefs; // Shift -class SHLL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shll.qb", int_mips_shll_qb, immZExt3, +class SHLL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shll.qb", null_frag, immZExt3, NoItinerary, DSPRegs>; class SHLLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shllv.qb", int_mips_shll_qb, NoItinerary, DSPRegs>; -class SHRL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shrl.qb", int_mips_shrl_qb, immZExt3, +class SHRL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shrl.qb", null_frag, immZExt3, NoItinerary, DSPRegs>, ClearDefs; class SHRLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.qb", int_mips_shrl_qb, NoItinerary, DSPRegs>, ClearDefs; -class SHLL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll.ph", int_mips_shll_ph, immZExt4, +class SHLL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll.ph", null_frag, immZExt4, NoItinerary, DSPRegs>; class SHLLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv.ph", int_mips_shll_ph, @@ -668,7 +691,7 @@ class SHLL_S_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.ph", int_mips_shll_s_ph, class SHLLV_S_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.ph", int_mips_shll_s_ph, NoItinerary, DSPRegs>; -class SHRA_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra.ph", int_mips_shra_ph, immZExt4, +class SHRA_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra.ph", null_frag, immZExt4, NoItinerary, DSPRegs>, ClearDefs; class SHRAV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav.ph", int_mips_shra_ph, @@ -717,44 +740,46 @@ class MULQ_RS_PH_DESC : ADDU_QB_DESC_BASE<"mulq_rs.ph", int_mips_mulq_rs_ph, NoItinerary, DSPRegs, DSPRegs>, IsCommutable; -class MULSAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsaq_s.w.ph">; +class MULSAQ_S_W_PH_DESC : 
DPA_W_PH_DESC_BASE<"mulsaq_s.w.ph", + MipsMULSAQ_S_W_PH>; -class MAQ_S_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phl">; +class MAQ_S_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phl", MipsMAQ_S_W_PHL>; -class MAQ_S_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phr">; +class MAQ_S_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phr", MipsMAQ_S_W_PHR>; -class MAQ_SA_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phl">; +class MAQ_SA_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phl", MipsMAQ_SA_W_PHL>; -class MAQ_SA_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phr">; - -// Dot product with accumulate/subtract -class DPAU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbl">; +class MAQ_SA_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phr", MipsMAQ_SA_W_PHR>; -class DPAU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbr">; +// Move from/to hi/lo. +class MFHI_DESC : MFHI_DESC_BASE<"mfhi", HIRegsDSP, NoItinerary>; +class MFLO_DESC : MFHI_DESC_BASE<"mflo", LORegsDSP, NoItinerary>; +class MTHI_DESC : MTHI_DESC_BASE<"mthi", HIRegsDSP, NoItinerary>; +class MTLO_DESC : MTHI_DESC_BASE<"mtlo", LORegsDSP, NoItinerary>; -class DPSU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbl">; - -class DPSU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbr">; - -class DPAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaq_s.w.ph">; - -class DPSQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsq_s.w.ph">; +// Dot product with accumulate/subtract +class DPAU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbl", MipsDPAU_H_QBL>; -class DPAQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpaq_sa.l.w">; +class DPAU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbr", MipsDPAU_H_QBR>; -class DPSQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpsq_sa.l.w">; +class DPSU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbl", MipsDPSU_H_QBL>; -class MULT_DSP_DESC : MULT_DESC_BASE<"mult">; +class DPSU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbr", MipsDPSU_H_QBR>; -class MULTU_DSP_DESC : MULT_DESC_BASE<"multu">; +class DPAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaq_s.w.ph", MipsDPAQ_S_W_PH>; -class MADD_DSP_DESC : MULT_DESC_BASE<"madd">; +class DPSQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsq_s.w.ph", MipsDPSQ_S_W_PH>; -class MADDU_DSP_DESC : MULT_DESC_BASE<"maddu">; +class DPAQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpaq_sa.l.w", MipsDPAQ_SA_L_W>; -class MSUB_DSP_DESC : MULT_DESC_BASE<"msub">; +class DPSQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpsq_sa.l.w", MipsDPSQ_SA_L_W>; -class MSUBU_DSP_DESC : MULT_DESC_BASE<"msubu">; +class MULT_DSP_DESC : MULT_DESC_BASE<"mult", MipsMult, NoItinerary>; +class MULTU_DSP_DESC : MULT_DESC_BASE<"multu", MipsMultu, NoItinerary>; +class MADD_DSP_DESC : MADD_DESC_BASE<"madd", MipsMAdd, NoItinerary>; +class MADDU_DSP_DESC : MADD_DESC_BASE<"maddu", MipsMAddu, NoItinerary>; +class MSUB_DSP_DESC : MADD_DESC_BASE<"msub", MipsMSub, NoItinerary>; +class MSUBU_DSP_DESC : MADD_DESC_BASE<"msubu", MipsMSubu, NoItinerary>; // Comparison class CMPU_EQ_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.eq.qb", @@ -763,11 +788,11 @@ class CMPU_EQ_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.eq.qb", class CMPU_LT_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.lt.qb", int_mips_cmpu_lt_qb, NoItinerary, - DSPRegs>, IsCommutable; + DSPRegs>; class CMPU_LE_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.le.qb", int_mips_cmpu_le_qb, NoItinerary, - DSPRegs>, IsCommutable; + DSPRegs>; class CMPGU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.eq.qb", int_mips_cmpgu_eq_qb, @@ -776,25 +801,21 @@ class CMPGU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.eq.qb", class CMPGU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.lt.qb", int_mips_cmpgu_lt_qb, - NoItinerary, CPURegs, DSPRegs>, - IsCommutable; + 
NoItinerary, CPURegs, DSPRegs>; class CMPGU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.le.qb", int_mips_cmpgu_le_qb, - NoItinerary, CPURegs, DSPRegs>, - IsCommutable; + NoItinerary, CPURegs, DSPRegs>; class CMP_EQ_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.eq.ph", int_mips_cmp_eq_ph, NoItinerary, DSPRegs>, IsCommutable; class CMP_LT_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.lt.ph", int_mips_cmp_lt_ph, - NoItinerary, DSPRegs>, - IsCommutable; + NoItinerary, DSPRegs>; class CMP_LE_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.le.ph", int_mips_cmp_le_ph, - NoItinerary, DSPRegs>, - IsCommutable; + NoItinerary, DSPRegs>; // Misc class BITREV_DESC : ABSQ_S_PH_R2_DESC_BASE<"bitrev", int_mips_bitrev, @@ -867,11 +888,11 @@ class EXTR_S_H_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr_s.h", MipsEXTR_S_H, class EXTRV_S_H_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv_s.h", MipsEXTR_S_H, NoItinerary>; -class SHILO_DESC : SHILO_R1_DESC_BASE<"shilo">; +class SHILO_DESC : SHILO_R1_DESC_BASE<"shilo", MipsSHILO>; -class SHILOV_DESC : SHILO_R2_DESC_BASE<"shilov">; +class SHILOV_DESC : SHILO_R2_DESC_BASE<"shilov", MipsSHILO>; -class MTHLIP_DESC : MTHLIP_DESC_BASE<"mthlip">; +class MTHLIP_DESC : MTHLIP_DESC_BASE<"mthlip", MipsMTHLIP>; class RDDSP_DESC : RDDSP_DESC_BASE<"rddsp", int_mips_rddsp, NoItinerary>; @@ -945,20 +966,18 @@ class CMPGDU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.eq.qb", class CMPGDU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.lt.qb", int_mips_cmpgdu_lt_qb, - NoItinerary, CPURegs, DSPRegs>, - IsCommutable; + NoItinerary, CPURegs, DSPRegs>; class CMPGDU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.le.qb", int_mips_cmpgdu_le_qb, - NoItinerary, CPURegs, DSPRegs>, - IsCommutable; + NoItinerary, CPURegs, DSPRegs>; // Absolute class ABSQ_S_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.qb", int_mips_absq_s_qb, NoItinerary, DSPRegs>; // Multiplication -class MUL_PH_DESC : ADDUH_QB_DESC_BASE<"mul.ph", int_mips_mul_ph, NoItinerary, +class MUL_PH_DESC : ADDUH_QB_DESC_BASE<"mul.ph", null_frag, NoItinerary, DSPRegs>, IsCommutable; class MUL_S_PH_DESC : ADDUH_QB_DESC_BASE<"mul_s.ph", int_mips_mul_s_ph, @@ -975,23 +994,25 @@ class MULQ_S_PH_DESC : ADDU_QB_DESC_BASE<"mulq_s.ph", int_mips_mulq_s_ph, IsCommutable; // Dot product with accumulate/subtract -class DPA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpa.w.ph">; +class DPA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpa.w.ph", MipsDPA_W_PH>; -class DPS_W_PH_DESC : DPA_W_PH_DESC_BASE<"dps.w.ph">; +class DPS_W_PH_DESC : DPA_W_PH_DESC_BASE<"dps.w.ph", MipsDPS_W_PH>; -class DPAQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_s.w.ph">; +class DPAQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_s.w.ph", MipsDPAQX_S_W_PH>; -class DPAQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_sa.w.ph">; +class DPAQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_sa.w.ph", + MipsDPAQX_SA_W_PH>; -class DPAX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpax.w.ph">; +class DPAX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpax.w.ph", MipsDPAX_W_PH>; -class DPSX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsx.w.ph">; +class DPSX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsx.w.ph", MipsDPSX_W_PH>; -class DPSQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_s.w.ph">; +class DPSQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_s.w.ph", MipsDPSQX_S_W_PH>; -class DPSQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_sa.w.ph">; +class DPSQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_sa.w.ph", + MipsDPSQX_SA_W_PH>; -class MULSA_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsa.w.ph">; +class MULSA_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsa.w.ph", MipsMULSA_W_PH>; // Precision reduce/expand class PRECR_QB_PH_DESC : 
CMP_EQ_QB_R3_DESC_BASE<"precr.qb.ph", @@ -1009,7 +1030,7 @@ class PRECR_SRA_R_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra_r.ph.w", CPURegs>, ClearDefs; // Shift -class SHRA_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra.qb", int_mips_shra_qb, immZExt3, +class SHRA_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra.qb", null_frag, immZExt3, NoItinerary, DSPRegs>, ClearDefs; class SHRAV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav.qb", int_mips_shra_qb, @@ -1022,7 +1043,7 @@ class SHRA_R_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.qb", int_mips_shra_r_qb, class SHRAV_R_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.qb", int_mips_shra_r_qb, NoItinerary, DSPRegs>, ClearDefs; -class SHRL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shrl.ph", int_mips_shrl_ph, immZExt4, +class SHRL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shrl.ph", null_frag, immZExt4, NoItinerary, DSPRegs>, ClearDefs; class SHRLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.ph", int_mips_shrl_ph, @@ -1099,6 +1120,10 @@ def MAQ_S_W_PHL : MAQ_S_W_PHL_ENC, MAQ_S_W_PHL_DESC; def MAQ_S_W_PHR : MAQ_S_W_PHR_ENC, MAQ_S_W_PHR_DESC; def MAQ_SA_W_PHL : MAQ_SA_W_PHL_ENC, MAQ_SA_W_PHL_DESC; def MAQ_SA_W_PHR : MAQ_SA_W_PHR_ENC, MAQ_SA_W_PHR_DESC; +def MFHI_DSP : MFHI_ENC, MFHI_DESC; +def MFLO_DSP : MFLO_ENC, MFLO_DESC; +def MTHI_DSP : MTHI_ENC, MTHI_DESC; +def MTLO_DSP : MTLO_ENC, MTLO_DESC; def DPAU_H_QBL : DPAU_H_QBL_ENC, DPAU_H_QBL_DESC; def DPAU_H_QBR : DPAU_H_QBR_ENC, DPAU_H_QBR_DESC; def DPSU_H_QBL : DPSU_H_QBL_ENC, DPSU_H_QBL_DESC; @@ -1206,70 +1231,31 @@ def PREPEND : PREPEND_ENC, PREPEND_DESC; } // Pseudos. -def MULSAQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMULSAQ_S_W_PH, NoItinerary, - MULSAQ_S_W_PH>; -def MAQ_S_W_PHL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_S_W_PHL, NoItinerary, - MAQ_S_W_PHL>; -def MAQ_S_W_PHR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_S_W_PHR, NoItinerary, - MAQ_S_W_PHR>; -def MAQ_SA_W_PHL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_SA_W_PHL, NoItinerary, - MAQ_SA_W_PHL>; -def MAQ_SA_W_PHR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMAQ_SA_W_PHR, NoItinerary, - MAQ_SA_W_PHR>; -def DPAU_H_QBL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAU_H_QBL, NoItinerary, - DPAU_H_QBL>; -def DPAU_H_QBR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAU_H_QBR, NoItinerary, - DPAU_H_QBR>; -def DPSU_H_QBL_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSU_H_QBL, NoItinerary, - DPSU_H_QBL>; -def DPSU_H_QBR_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSU_H_QBR, NoItinerary, - DPSU_H_QBR>; -def DPAQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQ_S_W_PH, NoItinerary, - DPAQ_S_W_PH>; -def DPSQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQ_S_W_PH, NoItinerary, - DPSQ_S_W_PH>; -def DPAQ_SA_L_W_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQ_SA_L_W, NoItinerary, - DPAQ_SA_L_W>; -def DPSQ_SA_L_W_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQ_SA_L_W, NoItinerary, - DPSQ_SA_L_W>; - -def MULT_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMULT, NoItinerary, MULT_DSP>, - IsCommutable; -def MULTU_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMULTU, NoItinerary, MULTU_DSP>, - IsCommutable; -def MADD_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMADD_DSP, NoItinerary, MADD_DSP>, - IsCommutable, UseAC; -def MADDU_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMADDU_DSP, NoItinerary, MADDU_DSP>, - IsCommutable, UseAC; -def MSUB_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMSUB_DSP, NoItinerary, MSUB_DSP>, - UseAC; -def MSUBU_DSP_PSEUDO : MULT_PSEUDO_BASE<MipsMSUBU_DSP, NoItinerary, MSUBU_DSP>, - UseAC; - -def SHILO_PSEUDO : SHILO_R1_PSEUDO_BASE<MipsSHILO, NoItinerary, SHILO>; -def SHILOV_PSEUDO : SHILO_R2_PSEUDO_BASE<MipsSHILO, NoItinerary, SHILOV>; -def MTHLIP_PSEUDO : SHILO_R2_PSEUDO_BASE<MipsMTHLIP, NoItinerary, MTHLIP>; +/// Pseudo instructions for 
loading and storing accumulator registers. +let isPseudo = 1 in { + defm LOAD_AC_DSP : LoadM<"load_ac_dsp", ACRegsDSP>; + defm STORE_AC_DSP : StoreM<"store_ac_dsp", ACRegsDSP>; +} -let Predicates = [HasDSPR2] in { +// Pseudo CMP and PICK instructions. +class PseudoCMP<Instruction RealInst> : + PseudoDSP<(outs DSPCC:$cmp), (ins DSPRegs:$rs, DSPRegs:$rt), []>, + PseudoInstExpansion<(RealInst DSPRegs:$rs, DSPRegs:$rt)>, NeverHasSideEffects; -def DPA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPA_W_PH, NoItinerary, DPA_W_PH>; -def DPS_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPS_W_PH, NoItinerary, DPS_W_PH>; -def DPAQX_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQX_S_W_PH, NoItinerary, - DPAQX_S_W_PH>; -def DPAQX_SA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAQX_SA_W_PH, NoItinerary, - DPAQX_SA_W_PH>; -def DPAX_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPAX_W_PH, NoItinerary, - DPAX_W_PH>; -def DPSX_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSX_W_PH, NoItinerary, - DPSX_W_PH>; -def DPSQX_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQX_S_W_PH, NoItinerary, - DPSQX_S_W_PH>; -def DPSQX_SA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsDPSQX_SA_W_PH, NoItinerary, - DPSQX_SA_W_PH>; -def MULSA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE<MipsMULSA_W_PH, NoItinerary, - MULSA_W_PH>; +class PseudoPICK<Instruction RealInst> : + PseudoDSP<(outs DSPRegs:$rd), (ins DSPCC:$cmp, DSPRegs:$rs, DSPRegs:$rt), []>, + PseudoInstExpansion<(RealInst DSPRegs:$rd, DSPRegs:$rs, DSPRegs:$rt)>, + NeverHasSideEffects; -} +def PseudoCMP_EQ_PH : PseudoCMP<CMP_EQ_PH>; +def PseudoCMP_LT_PH : PseudoCMP<CMP_LT_PH>; +def PseudoCMP_LE_PH : PseudoCMP<CMP_LE_PH>; +def PseudoCMPU_EQ_QB : PseudoCMP<CMPU_EQ_QB>; +def PseudoCMPU_LT_QB : PseudoCMP<CMPU_LT_QB>; +def PseudoCMPU_LE_QB : PseudoCMP<CMPU_LE_QB>; + +def PseudoPICK_PH : PseudoPICK<PICK_PH>; +def PseudoPICK_QB : PseudoPICK<PICK_QB>; // Patterns. class DSPPat<dag pattern, dag result, Predicate pred = HasDSP> : @@ -1294,12 +1280,103 @@ def : DSPPat<(store (v2i16 DSPRegs:$val), addr:$a), def : DSPPat<(store (v4i8 DSPRegs:$val), addr:$a), (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>; +// Binary operations. +class DSPBinPat<Instruction Inst, ValueType ValTy, SDPatternOperator Node, + Predicate Pred = HasDSP> : + DSPPat<(Node ValTy:$a, ValTy:$b), (Inst ValTy:$a, ValTy:$b), Pred>; + +def : DSPBinPat<ADDQ_PH, v2i16, int_mips_addq_ph>; +def : DSPBinPat<ADDQ_PH, v2i16, add>; +def : DSPBinPat<SUBQ_PH, v2i16, int_mips_subq_ph>; +def : DSPBinPat<SUBQ_PH, v2i16, sub>; +def : DSPBinPat<MUL_PH, v2i16, int_mips_mul_ph, HasDSPR2>; +def : DSPBinPat<MUL_PH, v2i16, mul, HasDSPR2>; +def : DSPBinPat<ADDU_QB, v4i8, int_mips_addu_qb>; +def : DSPBinPat<ADDU_QB, v4i8, add>; +def : DSPBinPat<SUBU_QB, v4i8, int_mips_subu_qb>; +def : DSPBinPat<SUBU_QB, v4i8, sub>; +def : DSPBinPat<ADDSC, i32, int_mips_addsc>; +def : DSPBinPat<ADDSC, i32, addc>; +def : DSPBinPat<ADDWC, i32, int_mips_addwc>; +def : DSPBinPat<ADDWC, i32, adde>; + +// Shift immediate patterns. 
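Editor's note: the shift patterns that follow map the new MipsSHLL_DSP/SHRA_DSP/SHRL_DSP nodes onto the per-halfword and per-byte shift instructions. As a plain reference for the non-saturating v2i16 case that the generic node covers (ignoring the DSPControl overflow flag, which is why the instruction description class sets hasSideEffects above), a rough model:

#include <cstdint>
// Per-lane behaviour modelled by shll.ph on a packed pair of halfwords:
// each 16-bit lane is shifted left and truncated, independently of the other.
static uint32_t shll_ph_model(uint32_t A, unsigned Shamt) {
  uint16_t Lo = uint16_t(uint16_t(A) << Shamt);
  uint16_t Hi = uint16_t(uint16_t(A >> 16) << Shamt);
  return (uint32_t(Hi) << 16) | Lo;
}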
+class DSPShiftPat<Instruction Inst, ValueType ValTy, SDPatternOperator Node, + SDPatternOperator Imm, Predicate Pred = HasDSP> : + DSPPat<(Node ValTy:$a, Imm:$shamt), (Inst ValTy:$a, Imm:$shamt), Pred>; + +def : DSPShiftPat<SHLL_PH, v2i16, MipsSHLL_DSP, imm>; +def : DSPShiftPat<SHRA_PH, v2i16, MipsSHRA_DSP, imm>; +def : DSPShiftPat<SHRL_PH, v2i16, MipsSHRL_DSP, imm, HasDSPR2>; +def : DSPShiftPat<SHLL_PH, v2i16, int_mips_shll_ph, immZExt4>; +def : DSPShiftPat<SHRA_PH, v2i16, int_mips_shra_ph, immZExt4>; +def : DSPShiftPat<SHRL_PH, v2i16, int_mips_shrl_ph, immZExt4, HasDSPR2>; +def : DSPShiftPat<SHLL_QB, v4i8, MipsSHLL_DSP, imm>; +def : DSPShiftPat<SHRA_QB, v4i8, MipsSHRA_DSP, imm, HasDSPR2>; +def : DSPShiftPat<SHRL_QB, v4i8, MipsSHRL_DSP, imm>; +def : DSPShiftPat<SHLL_QB, v4i8, int_mips_shll_qb, immZExt3>; +def : DSPShiftPat<SHRA_QB, v4i8, int_mips_shra_qb, immZExt3, HasDSPR2>; +def : DSPShiftPat<SHRL_QB, v4i8, int_mips_shrl_qb, immZExt3>; + +// SETCC/SELECT_CC patterns. +class DSPSetCCPat<Instruction Cmp, Instruction Pick, ValueType ValTy, + CondCode CC> : + DSPPat<(ValTy (MipsSETCC_DSP ValTy:$a, ValTy:$b, CC)), + (ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)), + (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPRegs)), + (ValTy ZERO)))>; + +class DSPSetCCPatInv<Instruction Cmp, Instruction Pick, ValueType ValTy, + CondCode CC> : + DSPPat<(ValTy (MipsSETCC_DSP ValTy:$a, ValTy:$b, CC)), + (ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)), + (ValTy ZERO), + (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPRegs))))>; + +class DSPSelectCCPat<Instruction Cmp, Instruction Pick, ValueType ValTy, + CondCode CC> : + DSPPat<(ValTy (MipsSELECT_CC_DSP ValTy:$a, ValTy:$b, ValTy:$c, ValTy:$d, CC)), + (ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)), $c, $d))>; + +class DSPSelectCCPatInv<Instruction Cmp, Instruction Pick, ValueType ValTy, + CondCode CC> : + DSPPat<(ValTy (MipsSELECT_CC_DSP ValTy:$a, ValTy:$b, ValTy:$c, ValTy:$d, CC)), + (ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)), $d, $c))>; + +def : DSPSetCCPat<PseudoCMP_EQ_PH, PseudoPICK_PH, v2i16, SETEQ>; +def : DSPSetCCPat<PseudoCMP_LT_PH, PseudoPICK_PH, v2i16, SETLT>; +def : DSPSetCCPat<PseudoCMP_LE_PH, PseudoPICK_PH, v2i16, SETLE>; +def : DSPSetCCPatInv<PseudoCMP_EQ_PH, PseudoPICK_PH, v2i16, SETNE>; +def : DSPSetCCPatInv<PseudoCMP_LT_PH, PseudoPICK_PH, v2i16, SETGE>; +def : DSPSetCCPatInv<PseudoCMP_LE_PH, PseudoPICK_PH, v2i16, SETGT>; +def : DSPSetCCPat<PseudoCMPU_EQ_QB, PseudoPICK_QB, v4i8, SETEQ>; +def : DSPSetCCPat<PseudoCMPU_LT_QB, PseudoPICK_QB, v4i8, SETULT>; +def : DSPSetCCPat<PseudoCMPU_LE_QB, PseudoPICK_QB, v4i8, SETULE>; +def : DSPSetCCPatInv<PseudoCMPU_EQ_QB, PseudoPICK_QB, v4i8, SETNE>; +def : DSPSetCCPatInv<PseudoCMPU_LT_QB, PseudoPICK_QB, v4i8, SETUGE>; +def : DSPSetCCPatInv<PseudoCMPU_LE_QB, PseudoPICK_QB, v4i8, SETUGT>; + +def : DSPSelectCCPat<PseudoCMP_EQ_PH, PseudoPICK_PH, v2i16, SETEQ>; +def : DSPSelectCCPat<PseudoCMP_LT_PH, PseudoPICK_PH, v2i16, SETLT>; +def : DSPSelectCCPat<PseudoCMP_LE_PH, PseudoPICK_PH, v2i16, SETLE>; +def : DSPSelectCCPatInv<PseudoCMP_EQ_PH, PseudoPICK_PH, v2i16, SETNE>; +def : DSPSelectCCPatInv<PseudoCMP_LT_PH, PseudoPICK_PH, v2i16, SETGE>; +def : DSPSelectCCPatInv<PseudoCMP_LE_PH, PseudoPICK_PH, v2i16, SETGT>; +def : DSPSelectCCPat<PseudoCMPU_EQ_QB, PseudoPICK_QB, v4i8, SETEQ>; +def : DSPSelectCCPat<PseudoCMPU_LT_QB, PseudoPICK_QB, v4i8, SETULT>; +def : DSPSelectCCPat<PseudoCMPU_LE_QB, PseudoPICK_QB, v4i8, SETULE>; +def : DSPSelectCCPatInv<PseudoCMPU_EQ_QB, PseudoPICK_QB, v4i8, SETNE>; +def : 
DSPSelectCCPatInv<PseudoCMPU_LT_QB, PseudoPICK_QB, v4i8, SETUGE>; +def : DSPSelectCCPatInv<PseudoCMPU_LE_QB, PseudoPICK_QB, v4i8, SETUGT>; + // Extr patterns. class EXTR_W_TY1_R2_Pat<SDPatternOperator OpNode, Instruction Instr> : - DSPPat<(i32 (OpNode CPURegs:$rs)), (Instr AC0, CPURegs:$rs)>; + DSPPat<(i32 (OpNode CPURegs:$rs, ACRegsDSP:$ac)), + (Instr ACRegsDSP:$ac, CPURegs:$rs)>; class EXTR_W_TY1_R1_Pat<SDPatternOperator OpNode, Instruction Instr> : - DSPPat<(i32 (OpNode immZExt5:$shift)), (Instr AC0, immZExt5:$shift)>; + DSPPat<(i32 (OpNode immZExt5:$shift, ACRegsDSP:$ac)), + (Instr ACRegsDSP:$ac, immZExt5:$shift)>; def : EXTR_W_TY1_R1_Pat<MipsEXTP, EXTP>; def : EXTR_W_TY1_R2_Pat<MipsEXTP, EXTPV>; @@ -1313,3 +1390,19 @@ def : EXTR_W_TY1_R1_Pat<MipsEXTR_RS_W, EXTR_RS_W>; def : EXTR_W_TY1_R2_Pat<MipsEXTR_RS_W, EXTRV_RS_W>; def : EXTR_W_TY1_R1_Pat<MipsEXTR_S_H, EXTR_S_H>; def : EXTR_W_TY1_R2_Pat<MipsEXTR_S_H, EXTRV_S_H>; + +// mflo/hi patterns. +let AddedComplexity = 20 in +def : DSPPat<(i32 (ExtractLOHI ACRegsDSP:$ac, imm:$lohi_idx)), + (EXTRACT_SUBREG ACRegsDSP:$ac, imm:$lohi_idx)>; + +// Indexed load patterns. +class IndexedLoadPat<SDPatternOperator LoadNode, Instruction Instr> : + DSPPat<(i32 (LoadNode (add i32:$base, i32:$index))), + (Instr i32:$base, i32:$index)>; + +let AddedComplexity = 20 in { + def : IndexedLoadPat<zextloadi8, LBUX>; + def : IndexedLoadPat<sextloadi16, LHX>; + def : IndexedLoadPat<load, LWX>; +} diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp index e265590..d07a595 100644 --- a/lib/Target/Mips/MipsDelaySlotFiller.cpp +++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -220,9 +220,9 @@ namespace { /// that can be moved to the delay slot. Returns true on success. bool searchForward(MachineBasicBlock &MBB, Iter Slot) const; - /// This function searches MBB's successor blocks for an instruction that - /// can be moved to the delay slot and inserts clones of the instruction - /// into the successor blocks. + /// This function searches one of MBB's successor blocks for an instruction + /// that can be moved to the delay slot and inserts clones of the + /// instruction into the successor's predecessor blocks. bool searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const; /// Pick a successor block of MBB. Return NULL if MBB doesn't have a diff --git a/lib/Target/Mips/MipsFrameLowering.h b/lib/Target/Mips/MipsFrameLowering.h index 14268d2..6a5f79d 100644 --- a/lib/Target/Mips/MipsFrameLowering.h +++ b/lib/Target/Mips/MipsFrameLowering.h @@ -26,9 +26,8 @@ protected: const MipsSubtarget &STI; public: - explicit MipsFrameLowering(const MipsSubtarget &sti) - : TargetFrameLowering(StackGrowsDown, sti.hasMips64() ? 16 : 8, 0, - sti.hasMips64() ? 
16 : 8), STI(sti) {} + explicit MipsFrameLowering(const MipsSubtarget &sti, unsigned Alignment) + : TargetFrameLowering(StackGrowsDown, Alignment, 0, Alignment), STI(sti) {} static const MipsFrameLowering *create(MipsTargetMachine &TM, const MipsSubtarget &ST); diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index 77b08cb..968e536 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -17,7 +17,6 @@ #include "MipsSEISelDAGToDAG.h" #include "Mips.h" #include "MCTargetDesc/MipsBaseInfo.h" -#include "MipsAnalyzeImmediate.h" #include "MipsMachineFunction.h" #include "MipsRegisterInfo.h" #include "llvm/CodeGen/MachineConstantPool.h" diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 4bf43f4..4d76181 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -30,7 +30,6 @@ #include "llvm/IR/CallingConv.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalVariable.h" -#include "llvm/IR/Intrinsics.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -158,12 +157,18 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T"; case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F"; case MipsISD::FPRound: return "MipsISD::FPRound"; + case MipsISD::ExtractLOHI: return "MipsISD::ExtractLOHI"; + case MipsISD::InsertLOHI: return "MipsISD::InsertLOHI"; + case MipsISD::Mult: return "MipsISD::Mult"; + case MipsISD::Multu: return "MipsISD::Multu"; case MipsISD::MAdd: return "MipsISD::MAdd"; case MipsISD::MAddu: return "MipsISD::MAddu"; case MipsISD::MSub: return "MipsISD::MSub"; case MipsISD::MSubu: return "MipsISD::MSubu"; case MipsISD::DivRem: return "MipsISD::DivRem"; case MipsISD::DivRemU: return "MipsISD::DivRemU"; + case MipsISD::DivRem16: return "MipsISD::DivRem16"; + case MipsISD::DivRemU16: return "MipsISD::DivRemU16"; case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64"; case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64"; case MipsISD::Wrapper: return "MipsISD::Wrapper"; @@ -192,6 +197,11 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP"; case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP"; case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP"; + case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP"; + case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP"; + case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP"; + case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP"; + case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP"; default: return NULL; } } @@ -205,7 +215,7 @@ MipsTargetLowering(MipsTargetMachine &TM) // Mips does not have i1 type, so use i32 for // setcc operations results (slt, sgt, ...). setBooleanContents(ZeroOrOneBooleanContent); - setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? 
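Editor's note: at this point the boolean-vector contents switch from 0/1 to 0/-1. The DSP compare-and-pick sequence sets each true lane to all ones (the SETCC_DSP patterns above materialize the mask with ADDiu ZERO, -1 and a PICK), so ZeroOrNegativeOneBooleanContent is the accurate description. With that convention a per-lane select reduces to bitwise masking, e.g.:

#include <cstdint>
// With 0x0000/0xffff per 16-bit lane, a vector select is (t & m) | (f & ~m).
static uint32_t select_ph_model(uint32_t Mask, uint32_t TrueV, uint32_t FalseV) {
  return (TrueV & Mask) | (FalseV & ~Mask);
}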
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // Load extented operations for i1 types must be promoted setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); @@ -340,9 +350,6 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::VACOPY, MVT::Other, Expand); setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); - // Use the default for now setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); @@ -376,8 +383,6 @@ MipsTargetLowering(MipsTargetMachine &TM) setTruncStoreAction(MVT::i64, MVT::i32, Custom); } - setTargetDAGCombine(ISD::ADDE); - setTargetDAGCombine(ISD::SUBE); setTargetDAGCombine(ISD::SDIVREM); setTargetDAGCombine(ISD::UDIVREM); setTargetDAGCombine(ISD::SELECT); @@ -408,178 +413,6 @@ EVT MipsTargetLowering::getSetCCResultType(EVT VT) const { return VT.changeVectorElementTypeToInteger(); } -// selectMADD - -// Transforms a subgraph in CurDAG if the following pattern is found: -// (addc multLo, Lo0), (adde multHi, Hi0), -// where, -// multHi/Lo: product of multiplication -// Lo0: initial value of Lo register -// Hi0: initial value of Hi register -// Return true if pattern matching was successful. -static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) { - // ADDENode's second operand must be a flag output of an ADDC node in order - // for the matching to be successful. - SDNode *ADDCNode = ADDENode->getOperand(2).getNode(); - - if (ADDCNode->getOpcode() != ISD::ADDC) - return false; - - SDValue MultHi = ADDENode->getOperand(0); - SDValue MultLo = ADDCNode->getOperand(0); - SDNode *MultNode = MultHi.getNode(); - unsigned MultOpc = MultHi.getOpcode(); - - // MultHi and MultLo must be generated by the same node, - if (MultLo.getNode() != MultNode) - return false; - - // and it must be a multiplication. - if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI) - return false; - - // MultLo amd MultHi must be the first and second output of MultNode - // respectively. - if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0) - return false; - - // Transform this to a MADD only if ADDENode and ADDCNode are the only users - // of the values of MultNode, in which case MultNode will be removed in later - // phases. - // If there exist users other than ADDENode or ADDCNode, this function returns - // here, which will result in MultNode being mapped to a single MULT - // instruction node rather than a pair of MULT and MADD instructions being - // produced. - if (!MultHi.hasOneUse() || !MultLo.hasOneUse()) - return false; - - SDValue Chain = CurDAG->getEntryNode(); - DebugLoc DL = ADDENode->getDebugLoc(); - - // create MipsMAdd(u) node - MultOpc = MultOpc == ISD::UMUL_LOHI ? 
MipsISD::MAddu : MipsISD::MAdd; - - SDValue MAdd = CurDAG->getNode(MultOpc, DL, MVT::Glue, - MultNode->getOperand(0),// Factor 0 - MultNode->getOperand(1),// Factor 1 - ADDCNode->getOperand(1),// Lo0 - ADDENode->getOperand(1));// Hi0 - - // create CopyFromReg nodes - SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, DL, Mips::LO, MVT::i32, - MAdd); - SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), DL, - Mips::HI, MVT::i32, - CopyFromLo.getValue(2)); - - // replace uses of adde and addc here - if (!SDValue(ADDCNode, 0).use_empty()) - CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), CopyFromLo); - - if (!SDValue(ADDENode, 0).use_empty()) - CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), CopyFromHi); - - return true; -} - -// selectMSUB - -// Transforms a subgraph in CurDAG if the following pattern is found: -// (addc Lo0, multLo), (sube Hi0, multHi), -// where, -// multHi/Lo: product of multiplication -// Lo0: initial value of Lo register -// Hi0: initial value of Hi register -// Return true if pattern matching was successful. -static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) { - // SUBENode's second operand must be a flag output of an SUBC node in order - // for the matching to be successful. - SDNode *SUBCNode = SUBENode->getOperand(2).getNode(); - - if (SUBCNode->getOpcode() != ISD::SUBC) - return false; - - SDValue MultHi = SUBENode->getOperand(1); - SDValue MultLo = SUBCNode->getOperand(1); - SDNode *MultNode = MultHi.getNode(); - unsigned MultOpc = MultHi.getOpcode(); - - // MultHi and MultLo must be generated by the same node, - if (MultLo.getNode() != MultNode) - return false; - - // and it must be a multiplication. - if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI) - return false; - - // MultLo amd MultHi must be the first and second output of MultNode - // respectively. - if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0) - return false; - - // Transform this to a MSUB only if SUBENode and SUBCNode are the only users - // of the values of MultNode, in which case MultNode will be removed in later - // phases. - // If there exist users other than SUBENode or SUBCNode, this function returns - // here, which will result in MultNode being mapped to a single MULT - // instruction node rather than a pair of MULT and MSUB instructions being - // produced. - if (!MultHi.hasOneUse() || !MultLo.hasOneUse()) - return false; - - SDValue Chain = CurDAG->getEntryNode(); - DebugLoc DL = SUBENode->getDebugLoc(); - - // create MipsSub(u) node - MultOpc = MultOpc == ISD::UMUL_LOHI ? 
MipsISD::MSubu : MipsISD::MSub; - - SDValue MSub = CurDAG->getNode(MultOpc, DL, MVT::Glue, - MultNode->getOperand(0),// Factor 0 - MultNode->getOperand(1),// Factor 1 - SUBCNode->getOperand(0),// Lo0 - SUBENode->getOperand(0));// Hi0 - - // create CopyFromReg nodes - SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, DL, Mips::LO, MVT::i32, - MSub); - SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), DL, - Mips::HI, MVT::i32, - CopyFromLo.getValue(2)); - - // replace uses of sube and subc here - if (!SDValue(SUBCNode, 0).use_empty()) - CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), CopyFromLo); - - if (!SDValue(SUBENode, 0).use_empty()) - CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), CopyFromHi); - - return true; -} - -static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG, - TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget *Subtarget) { - if (DCI.isBeforeLegalize()) - return SDValue(); - - if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 && - selectMADD(N, &DAG)) - return SDValue(N, 0); - - return SDValue(); -} - -static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG, - TargetLowering::DAGCombinerInfo &DCI, - const MipsSubtarget *Subtarget) { - if (DCI.isBeforeLegalize()) - return SDValue(); - - if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 && - selectMSUB(N, &DAG)) - return SDValue(N, 0); - - return SDValue(); -} - static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget *Subtarget) { @@ -589,8 +422,8 @@ static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, EVT Ty = N->getValueType(0); unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64; unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64; - unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem : - MipsISD::DivRemU; + unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 : + MipsISD::DivRemU16; DebugLoc DL = N->getDebugLoc(); SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue, @@ -617,7 +450,7 @@ static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } -static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) { +static Mips::CondCode condCodeToFCC(ISD::CondCode CC) { switch (CC) { default: llvm_unreachable("Unknown fp condition code!"); case ISD::SETEQ: @@ -644,8 +477,9 @@ static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) { } -// Returns true if condition code has to be inverted. -static bool invertFPCondCode(Mips::CondCode CC) { +/// This function returns true if the floating point conditional branches and +/// conditional moves which use condition code CC should be inverted. +static bool invertFPCondCodeUser(Mips::CondCode CC) { if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT) return false; @@ -675,15 +509,14 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) { ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS, - DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32)); + DAG.getConstant(condCodeToFCC(CC), MVT::i32)); } // Creates and returns a CMovFPT/F node. 
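Editor's note: the createCMovFP helper defined next picks between CMovFP_T and CMovFP_F depending on whether the FP condition code falls in the "inverted" half of the enum, as invertFPCondCodeUser above describes. Behaviourally the two flavours are movt/movf on the same condition bit; a tiny, purely illustrative model of the selection:

// movt-style (CMovFP_T) takes the True value when the FP condition bit is set;
// movf-style (CMovFP_F) takes it when the bit is clear.
static int cmovFPModel(bool CondBit, bool UseInvertedForm,
                       int TrueV, int FalseV) {
  bool TakeTrue = UseInvertedForm ? !CondBit : CondBit;
  return TakeTrue ? TrueV : FalseV;
}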
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, DebugLoc DL) { - bool invert = invertFPCondCode((Mips::CondCode) - cast<ConstantSDNode>(Cond.getOperand(2)) - ->getSExtValue()); + ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2)); + bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue()); return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL, True.getValueType(), True, False, Cond); @@ -850,10 +683,6 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) switch (Opc) { default: break; - case ISD::ADDE: - return performADDECombine(N, DAG, DCI, Subtarget); - case ISD::SUBE: - return performSUBECombine(N, DAG, DCI, Subtarget); case ISD::SDIVREM: case ISD::UDIVREM: return performDivRemCombine(N, DAG, DCI, Subtarget); @@ -884,10 +713,7 @@ void MipsTargetLowering::ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { - SDValue Res = LowerOperation(SDValue(N, 0), DAG); - - for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I) - Results.push_back(Res.getValue(I)); + return LowerOperationWrapper(N, Results, DAG); } SDValue MipsTargetLowering:: @@ -895,32 +721,29 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { - case ISD::BR_JT: return lowerBR_JT(Op, DAG); - case ISD::BRCOND: return lowerBRCOND(Op, DAG); - case ISD::ConstantPool: return lowerConstantPool(Op, DAG); - case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG); - case ISD::BlockAddress: return lowerBlockAddress(Op, DAG); - case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG); - case ISD::JumpTable: return lowerJumpTable(Op, DAG); - case ISD::SELECT: return lowerSELECT(Op, DAG); - case ISD::SELECT_CC: return lowerSELECT_CC(Op, DAG); - case ISD::SETCC: return lowerSETCC(Op, DAG); - case ISD::VASTART: return lowerVASTART(Op, DAG); - case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG); - case ISD::FABS: return lowerFABS(Op, DAG); - case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG); - case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG); - case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG); - case ISD::MEMBARRIER: return lowerMEMBARRIER(Op, DAG); - case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG); - case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG); - case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true); - case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false); - case ISD::LOAD: return lowerLOAD(Op, DAG); - case ISD::STORE: return lowerSTORE(Op, DAG); - case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG); - case ISD::INTRINSIC_W_CHAIN: return lowerINTRINSIC_W_CHAIN(Op, DAG); - case ISD::ADD: return lowerADD(Op, DAG); + case ISD::BR_JT: return lowerBR_JT(Op, DAG); + case ISD::BRCOND: return lowerBRCOND(Op, DAG); + case ISD::ConstantPool: return lowerConstantPool(Op, DAG); + case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG); + case ISD::BlockAddress: return lowerBlockAddress(Op, DAG); + case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG); + case ISD::JumpTable: return lowerJumpTable(Op, DAG); + case ISD::SELECT: return lowerSELECT(Op, DAG); + case ISD::SELECT_CC: return lowerSELECT_CC(Op, DAG); + case ISD::SETCC: return lowerSETCC(Op, DAG); + case ISD::VASTART: return lowerVASTART(Op, DAG); + case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG); + case ISD::FABS: return lowerFABS(Op, DAG); + case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG); + case 
ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG); + case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG); + case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG); + case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG); + case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true); + case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false); + case ISD::LOAD: return lowerLOAD(Op, DAG); + case ISD::STORE: return lowerSTORE(Op, DAG); + case ISD::ADD: return lowerADD(Op, DAG); } return SDValue(); } @@ -940,17 +763,6 @@ addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC) return VReg; } -// Get fp branch code (not opcode) from condition code. -static Mips::FPBranchCode getFPBranchCodeFromCond(Mips::CondCode CC) { - if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT) - return Mips::BRANCH_T; - - assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) && - "Invalid CondCode."); - - return Mips::BRANCH_F; -} - MachineBasicBlock * MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *BB) const { @@ -1581,8 +1393,8 @@ lowerBRCOND(SDValue Op, SelectionDAG &DAG) const SDValue CCNode = CondRes.getOperand(2); Mips::CondCode CC = (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue(); - SDValue BrCode = DAG.getConstant(getFPBranchCodeFromCond(CC), MVT::i32); - + unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T; + SDValue BrCode = DAG.getConstant(Opc, MVT::i32); return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode, Dest, CondRes); } @@ -2010,15 +1822,6 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG) Chain.getValue(1)); } -// TODO: set SType according to the desired memory barrier behavior. -SDValue -MipsTargetLowering::lowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const { - unsigned SType = 0; - DebugLoc DL = Op.getDebugLoc(); - return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0), - DAG.getConstant(SType, MVT::i32)); -} - SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const { // FIXME: Need pseudo-fence for 'singlethread' fences @@ -2101,7 +1904,7 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, return DAG.getMergeValues(Ops, 2, DL); } -static SDValue CreateLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, +static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, SDValue Chain, SDValue Src, unsigned Offset) { SDValue Ptr = LD->getBasePtr(); EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT(); @@ -2141,15 +1944,15 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const { // (set tmp, (ldl (add baseptr, 7), undef)) // (set dst, (ldr baseptr, tmp)) if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) { - SDValue LDL = CreateLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef, + SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef, IsLittle ? 7 : 0); - return CreateLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL, + return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL, IsLittle ? 0 : 7); } - SDValue LWL = CreateLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef, + SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef, IsLittle ? 3 : 0); - SDValue LWR = CreateLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL, + SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL, IsLittle ? 
0 : 3); // Expand @@ -2180,7 +1983,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const { return DAG.getMergeValues(Ops, 2, DL); } -static SDValue CreateStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, +static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, SDValue Chain, unsigned Offset) { SDValue Ptr = SD->getBasePtr(), Value = SD->getValue(); EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType(); @@ -2217,9 +2020,9 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const { // (swl val, (add baseptr, 3)) // (swr val, baseptr) if ((VT == MVT::i32) || SD->isTruncatingStore()) { - SDValue SWL = CreateStoreLR(MipsISD::SWL, DAG, SD, Chain, + SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain, IsLittle ? 3 : 0); - return CreateStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3); + return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3); } assert(VT == MVT::i64); @@ -2229,153 +2032,8 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const { // to // (sdl val, (add baseptr, 7)) // (sdr val, baseptr) - SDValue SDL = CreateStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0); - return CreateStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7); -} - -// This function expands mips intrinsic nodes which have 64-bit input operands -// or output values. -// -// out64 = intrinsic-node in64 -// => -// lo = copy (extract-element (in64, 0)) -// hi = copy (extract-element (in64, 1)) -// mips-specific-node -// v0 = copy lo -// v1 = copy hi -// out64 = merge-values (v0, v1) -// -static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, - unsigned Opc, bool HasI64In, bool HasI64Out) { - DebugLoc DL = Op.getDebugLoc(); - bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other; - SDValue Chain = HasChainIn ? 
Op->getOperand(0) : DAG.getEntryNode(); - SmallVector<SDValue, 3> Ops; - - if (HasI64In) { - SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, - Op->getOperand(1 + HasChainIn), - DAG.getConstant(0, MVT::i32)); - SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, - Op->getOperand(1 + HasChainIn), - DAG.getConstant(1, MVT::i32)); - - Chain = DAG.getCopyToReg(Chain, DL, Mips::LO, InLo, SDValue()); - Chain = DAG.getCopyToReg(Chain, DL, Mips::HI, InHi, Chain.getValue(1)); - - Ops.push_back(Chain); - Ops.append(Op->op_begin() + HasChainIn + 2, Op->op_end()); - Ops.push_back(Chain.getValue(1)); - } else { - Ops.push_back(Chain); - Ops.append(Op->op_begin() + HasChainIn + 1, Op->op_end()); - } - - if (!HasI64Out) - return DAG.getNode(Opc, DL, Op->value_begin(), Op->getNumValues(), - Ops.begin(), Ops.size()); - - SDValue Intr = DAG.getNode(Opc, DL, DAG.getVTList(MVT::Other, MVT::Glue), - Ops.begin(), Ops.size()); - SDValue OutLo = DAG.getCopyFromReg(Intr.getValue(0), DL, Mips::LO, MVT::i32, - Intr.getValue(1)); - SDValue OutHi = DAG.getCopyFromReg(OutLo.getValue(1), DL, Mips::HI, MVT::i32, - OutLo.getValue(2)); - SDValue Out = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, OutLo, OutHi); - - if (!HasChainIn) - return Out; - - SDValue Vals[] = { Out, OutHi.getValue(1) }; - return DAG.getMergeValues(Vals, 2, DL); -} - -SDValue MipsTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, - SelectionDAG &DAG) const { - switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) { - default: - return SDValue(); - case Intrinsic::mips_shilo: - return lowerDSPIntr(Op, DAG, MipsISD::SHILO, true, true); - case Intrinsic::mips_dpau_h_qbl: - return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL, true, true); - case Intrinsic::mips_dpau_h_qbr: - return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR, true, true); - case Intrinsic::mips_dpsu_h_qbl: - return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL, true, true); - case Intrinsic::mips_dpsu_h_qbr: - return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR, true, true); - case Intrinsic::mips_dpa_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH, true, true); - case Intrinsic::mips_dps_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH, true, true); - case Intrinsic::mips_dpax_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH, true, true); - case Intrinsic::mips_dpsx_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH, true, true); - case Intrinsic::mips_mulsa_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH, true, true); - case Intrinsic::mips_mult: - return lowerDSPIntr(Op, DAG, MipsISD::MULT, false, true); - case Intrinsic::mips_multu: - return lowerDSPIntr(Op, DAG, MipsISD::MULTU, false, true); - case Intrinsic::mips_madd: - return lowerDSPIntr(Op, DAG, MipsISD::MADD_DSP, true, true); - case Intrinsic::mips_maddu: - return lowerDSPIntr(Op, DAG, MipsISD::MADDU_DSP, true, true); - case Intrinsic::mips_msub: - return lowerDSPIntr(Op, DAG, MipsISD::MSUB_DSP, true, true); - case Intrinsic::mips_msubu: - return lowerDSPIntr(Op, DAG, MipsISD::MSUBU_DSP, true, true); - } -} - -SDValue MipsTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, - SelectionDAG &DAG) const { - switch (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue()) { - default: - return SDValue(); - case Intrinsic::mips_extp: - return lowerDSPIntr(Op, DAG, MipsISD::EXTP, true, false); - case Intrinsic::mips_extpdp: - return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP, true, false); - case Intrinsic::mips_extr_w: - return lowerDSPIntr(Op, DAG, MipsISD::EXTR_W, true, false); - 
case Intrinsic::mips_extr_r_w: - return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W, true, false); - case Intrinsic::mips_extr_rs_w: - return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W, true, false); - case Intrinsic::mips_extr_s_h: - return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H, true, false); - case Intrinsic::mips_mthlip: - return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP, true, true); - case Intrinsic::mips_mulsaq_s_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH, true, true); - case Intrinsic::mips_maq_s_w_phl: - return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL, true, true); - case Intrinsic::mips_maq_s_w_phr: - return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR, true, true); - case Intrinsic::mips_maq_sa_w_phl: - return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL, true, true); - case Intrinsic::mips_maq_sa_w_phr: - return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR, true, true); - case Intrinsic::mips_dpaq_s_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH, true, true); - case Intrinsic::mips_dpsq_s_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH, true, true); - case Intrinsic::mips_dpaq_sa_l_w: - return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W, true, true); - case Intrinsic::mips_dpsq_sa_l_w: - return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W, true, true); - case Intrinsic::mips_dpaqx_s_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH, true, true); - case Intrinsic::mips_dpaqx_sa_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH, true, true); - case Intrinsic::mips_dpsqx_s_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH, true, true); - case Intrinsic::mips_dpsqx_sa_w_ph: - return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH, true, true); - } + SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0); + return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7); } SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const { @@ -3173,8 +2831,8 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const return std::make_pair((unsigned)Mips::T9_64, &Mips::CPU64RegsRegClass); case 'l': // register suitable for indirect jump if (VT == MVT::i32) - return std::make_pair((unsigned)Mips::LO, &Mips::HILORegClass); - return std::make_pair((unsigned)Mips::LO64, &Mips::HILO64RegClass); + return std::make_pair((unsigned)Mips::LO, &Mips::LORegsRegClass); + return std::make_pair((unsigned)Mips::LO64, &Mips::LORegs64RegClass); case 'x': // register suitable for indirect jump // Fixme: Not triggering the use of both hi and low // This will generate an error message diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index 71977d7..5587e8f 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -68,6 +68,16 @@ namespace llvm { EH_RETURN, + // Node used to extract integer from accumulator. + ExtractLOHI, + + // Node used to insert integers to accumulator. + InsertLOHI, + + // Mult nodes. + Mult, + Multu, + // MAdd/Sub nodes MAdd, MAddu, @@ -77,6 +87,8 @@ namespace llvm { // DivRem(u) DivRem, DivRemU, + DivRem16, + DivRemU16, BuildPairF64, ExtractElementF64, @@ -131,6 +143,15 @@ namespace llvm { MSUB_DSP, MSUBU_DSP, + // DSP shift nodes. + SHLL_DSP, + SHRA_DSP, + SHRL_DSP, + + // DSP setcc and select_cc nodes. + SETCC_DSP, + SELECT_CC_DSP, + // Load/Store Left/Right nodes. 
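Aside (not part of the patch): the new ExtractLOHI/InsertLOHI/Mult/MAdd/MSub nodes listed above treat the HI/LO pair as one 64-bit accumulator value. The arithmetic they stand for is the usual MIPS multiply-accumulate semantics; the sketch below is an assumed, simplified model of that behavior written as plain C++, with mflo/mfhi shown as reads of the low and high halves.

  #include <cstdint>
  #include <cstdio>

  // madd adds the signed 64-bit product of two 32-bit operands into the
  // HI:LO accumulator; msub would subtract it.
  static int64_t madd(int64_t acc, int32_t a, int32_t b) {
    return acc + (int64_t)a * (int64_t)b;
  }

  // mflo/mfhi read the low and high 32 bits of the accumulator.
  static int32_t mflo(int64_t acc) { return (int32_t)(acc & 0xffffffffLL); }
  static int32_t mfhi(int64_t acc) { return (int32_t)(acc >> 32); }

  int main() {
    int64_t acc = madd(0, 100000, 100000); // 10^10 does not fit in 32 bits
    std::printf("hi=%d lo=%d\n", (int)mfhi(acc), (int)mflo(acc));
  }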
LWL = ISD::FIRST_TARGET_MEMORY_OPCODE, LWR, @@ -326,15 +347,12 @@ namespace llvm { SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue lowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const; - SDValue lowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const; SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const; SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG& DAG) const; SDValue lowerShiftRightParts(SDValue Op, SelectionDAG& DAG, bool IsSRA) const; SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const; SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const; - SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; - SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const; SDValue lowerADD(SDValue Op, SelectionDAG &DAG) const; /// isEligibleForTailCallOptimization - Check whether the call is eligible diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td index 891bdc1..6b23057 100644 --- a/lib/Target/Mips/MipsInstrFPU.td +++ b/lib/Target/Mips/MipsInstrFPU.td @@ -503,32 +503,27 @@ let Predicates = [IsFP64bit, HasStdEnc] in { def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>; } -// Load/Store patterns. +// Patterns for loads/stores with a reg+imm operand. let AddedComplexity = 40 in { let Predicates = [IsN64, HasStdEnc] in { - def : MipsPat<(f32 (load addrRegImm:$a)), (LWC1_P8 addrRegImm:$a)>; - def : MipsPat<(store FGR32:$v, addrRegImm:$a), - (SWC1_P8 FGR32:$v, addrRegImm:$a)>; - def : MipsPat<(f64 (load addrRegImm:$a)), (LDC164_P8 addrRegImm:$a)>; - def : MipsPat<(store FGR64:$v, addrRegImm:$a), - (SDC164_P8 FGR64:$v, addrRegImm:$a)>; + def : LoadRegImmPat<LWC1_P8, f32, load>; + def : StoreRegImmPat<SWC1_P8, f32>; + def : LoadRegImmPat<LDC164_P8, f64, load>; + def : StoreRegImmPat<SDC164_P8, f64>; } let Predicates = [NotN64, HasStdEnc] in { - def : MipsPat<(f32 (load addrRegImm:$a)), (LWC1 addrRegImm:$a)>; - def : MipsPat<(store FGR32:$v, addrRegImm:$a), - (SWC1 FGR32:$v, addrRegImm:$a)>; + def : LoadRegImmPat<LWC1, f32, load>; + def : StoreRegImmPat<SWC1, f32>; } let Predicates = [NotN64, HasMips64, HasStdEnc] in { - def : MipsPat<(f64 (load addrRegImm:$a)), (LDC164 addrRegImm:$a)>; - def : MipsPat<(store FGR64:$v, addrRegImm:$a), - (SDC164 FGR64:$v, addrRegImm:$a)>; + def : LoadRegImmPat<LDC164, f64, load>; + def : StoreRegImmPat<SDC164, f64>; } let Predicates = [NotN64, NotMips64, HasStdEnc] in { - def : MipsPat<(f64 (load addrRegImm:$a)), (LDC1 addrRegImm:$a)>; - def : MipsPat<(store AFGR64:$v, addrRegImm:$a), - (SDC1 AFGR64:$v, addrRegImm:$a)>; + def : LoadRegImmPat<LDC1, f64, load>; + def : StoreRegImmPat<SDC1, f64>; } } diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td index ee432c8..ea07372 100644 --- a/lib/Target/Mips/MipsInstrFormats.td +++ b/lib/Target/Mips/MipsInstrFormats.td @@ -36,6 +36,24 @@ def FrmFR : Format<4>; def FrmFI : Format<5>; def FrmOther : Format<6>; // Instruction w/ a custom format +class MMRel; + +def Std2MicroMips : InstrMapping { + let FilterClass = "MMRel"; + // Instructions with the same BaseOpcode and isNVStore values form a row. + let RowFields = ["BaseOpcode"]; + // Instructions with the same predicate sense form a column. + let ColFields = ["Arch"]; + // The key column is the unpredicated instructions. 
+ let KeyCol = ["se"]; + // Value columns are PredSense=true and PredSense=false + let ValueCols = [["se"], ["micromips"]]; +} + +class StdArch { + string Arch = "se"; +} + // Generic Mips Format class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern, InstrItinClass itin, Format f>: Instruction @@ -74,9 +92,11 @@ class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern, // Mips32/64 Instruction Format class InstSE<dag outs, dag ins, string asmstr, list<dag> pattern, - InstrItinClass itin, Format f>: + InstrItinClass itin, Format f, string opstr = ""> : MipsInst<outs, ins, asmstr, pattern, itin, f> { let Predicates = [HasStdEnc]; + string BaseOpcode = opstr; + string Arch; } // Mips Pseudo Instructions Format @@ -192,7 +212,7 @@ class MFC3OP_FM<bits<6> op, bits<5> mfmt> let Inst{2-0} = sel; } -class ADD_FM<bits<6> op, bits<6> funct> { +class ADD_FM<bits<6> op, bits<6> funct> : StdArch { bits<5> rd; bits<5> rs; bits<5> rt; @@ -207,7 +227,7 @@ class ADD_FM<bits<6> op, bits<6> funct> { let Inst{5-0} = funct; } -class ADDI_FM<bits<6> op> { +class ADDI_FM<bits<6> op> : StdArch { bits<5> rs; bits<5> rt; bits<16> imm16; @@ -220,7 +240,7 @@ class ADDI_FM<bits<6> op> { let Inst{15-0} = imm16; } -class SRA_FM<bits<6> funct, bit rotate> { +class SRA_FM<bits<6> funct, bit rotate> : StdArch { bits<5> rd; bits<5> rt; bits<5> shamt; @@ -236,7 +256,7 @@ class SRA_FM<bits<6> funct, bit rotate> { let Inst{5-0} = funct; } -class SRLV_FM<bits<6> funct, bit rotate> { +class SRLV_FM<bits<6> funct, bit rotate> : StdArch { bits<5> rd; bits<5> rt; bits<5> rs; @@ -288,7 +308,7 @@ class B_FM { let Inst{15-0} = offset; } -class SLTI_FM<bits<6> op> { +class SLTI_FM<bits<6> op> : StdArch { bits<5> rt; bits<5> rs; bits<16> imm16; @@ -413,7 +433,7 @@ class SYNC_FM { let Inst{5-0} = 0xf; } -class MULT_FM<bits<6> op, bits<6> funct> { +class MULT_FM<bits<6> op, bits<6> funct> : StdArch { bits<5> rs; bits<5> rt; @@ -529,7 +549,7 @@ class MFC1_FM<bits<5> funct> { let Inst{10-0} = 0; } -class LW_FM<bits<6> op> { +class LW_FM<bits<6> op> : StdArch { bits<5> rt; bits<21> addr; diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h index 3cd9088..8c05d97 100644 --- a/lib/Target/Mips/MipsInstrInfo.h +++ b/lib/Target/Mips/MipsInstrInfo.h @@ -86,6 +86,36 @@ public: /// Return the number of bytes of code the specified instruction may be. 
unsigned GetInstSizeInBytes(const MachineInstr *MI) const; + virtual void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + storeRegToStack(MBB, MBBI, SrcReg, isKill, FrameIndex, RC, TRI, 0); + } + + virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + loadRegFromStack(MBB, MBBI, DestReg, FrameIndex, RC, TRI, 0); + } + + virtual void storeRegToStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + int64_t Offset) const = 0; + + virtual void loadRegFromStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + int64_t Offset) const = 0; + protected: bool isZeroImm(const MachineOperand &op) const; diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td index 25b5d24..86ec729 100644 --- a/lib/Target/Mips/MipsInstrInfo.td +++ b/lib/Target/Mips/MipsInstrInfo.td @@ -23,13 +23,16 @@ def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, SDTCisInt<4>]>; def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>; def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; -def SDT_MipsMAddMSub : SDTypeProfile<0, 4, - [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>, - SDTCisSameAs<1, 2>, - SDTCisSameAs<2, 3>]>; -def SDT_MipsDivRem : SDTypeProfile<0, 2, - [SDTCisInt<0>, - SDTCisSameAs<0, 1>]>; +def SDT_ExtractLOHI : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVT<1, untyped>, + SDTCisVT<2, i32>]>; +def SDT_InsertLOHI : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>, + SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>; +def SDT_MipsMultDiv : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>, SDTCisInt<1>, + SDTCisSameAs<1, 2>]>; +def SDT_MipsMAddMSub : SDTypeProfile<1, 3, + [SDTCisVT<0, untyped>, SDTCisSameAs<0, 3>, + SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>; +def SDT_MipsDivRem16 : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>]>; def SDT_MipsThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>; @@ -82,20 +85,27 @@ def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd, [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue, SDNPOutGlue]>; +// Node used to extract integer from LO/HI register. +def ExtractLOHI : SDNode<"MipsISD::ExtractLOHI", SDT_ExtractLOHI>; + +// Node used to insert 32-bit integers to LOHI register pair. +def InsertLOHI : SDNode<"MipsISD::InsertLOHI", SDT_InsertLOHI>; + +// Mult nodes. 
+def MipsMult : SDNode<"MipsISD::Mult", SDT_MipsMultDiv>; +def MipsMultu : SDNode<"MipsISD::Multu", SDT_MipsMultDiv>; + // MAdd*/MSub* nodes -def MipsMAdd : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub, - [SDNPOptInGlue, SDNPOutGlue]>; -def MipsMAddu : SDNode<"MipsISD::MAddu", SDT_MipsMAddMSub, - [SDNPOptInGlue, SDNPOutGlue]>; -def MipsMSub : SDNode<"MipsISD::MSub", SDT_MipsMAddMSub, - [SDNPOptInGlue, SDNPOutGlue]>; -def MipsMSubu : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub, - [SDNPOptInGlue, SDNPOutGlue]>; +def MipsMAdd : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub>; +def MipsMAddu : SDNode<"MipsISD::MAddu", SDT_MipsMAddMSub>; +def MipsMSub : SDNode<"MipsISD::MSub", SDT_MipsMAddMSub>; +def MipsMSubu : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub>; // DivRem(u) nodes -def MipsDivRem : SDNode<"MipsISD::DivRem", SDT_MipsDivRem, - [SDNPOutGlue]>; -def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsDivRem, +def MipsDivRem : SDNode<"MipsISD::DivRem", SDT_MipsMultDiv>; +def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsMultDiv>; +def MipsDivRem16 : SDNode<"MipsISD::DivRem16", SDT_MipsDivRem16, [SDNPOutGlue]>; +def MipsDivRemU16 : SDNode<"MipsISD::DivRemU16", SDT_MipsDivRem16, [SDNPOutGlue]>; // Target constant nodes that are not part of any isel patterns and remain @@ -169,6 +179,7 @@ def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">, AssemblerPredicate<"FeatureMips32">; def HasStdEnc : Predicate<"Subtarget.hasStandardEncoding()">, AssemblerPredicate<"!FeatureMips16">; +def NotDSP : Predicate<"!Subtarget.hasDSP()">; class MipsPat<dag pattern, dag result> : Pat<pattern, result> { let Predicates = [HasStdEnc]; @@ -256,6 +267,7 @@ def mem : Operand<i32> { let MIOperandInfo = (ops CPURegs, simm16); let EncoderMethod = "getMemEncoding"; let ParserMatchClass = MipsMemAsmOperand; + let OperandType = "OPERAND_MEMORY"; } def mem64 : Operand<i64> { @@ -263,18 +275,21 @@ def mem64 : Operand<i64> { let MIOperandInfo = (ops CPU64Regs, simm16_64); let EncoderMethod = "getMemEncoding"; let ParserMatchClass = MipsMemAsmOperand; + let OperandType = "OPERAND_MEMORY"; } def mem_ea : Operand<i32> { let PrintMethod = "printMemOperandEA"; let MIOperandInfo = (ops CPURegs, simm16); let EncoderMethod = "getMemEncoding"; + let OperandType = "OPERAND_MEMORY"; } def mem_ea_64 : Operand<i64> { let PrintMethod = "printMemOperandEA"; let MIOperandInfo = (ops CPU64Regs, simm16_64); let EncoderMethod = "getMemEncoding"; + let OperandType = "OPERAND_MEMORY"; } // size operand of ext instruction @@ -360,11 +375,9 @@ class ArithLogicR<string opstr, RegisterOperand RO, bit isComm = 0, SDPatternOperator OpNode = null_frag>: InstSE<(outs RO:$rd), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$rd, $rs, $rt"), - [(set RO:$rd, (OpNode RO:$rs, RO:$rt))], Itin, FrmR> { + [(set RO:$rd, (OpNode RO:$rs, RO:$rt))], Itin, FrmR, opstr> { let isCommutable = isComm; let isReMaterializable = 1; - string BaseOpcode; - string Arch; } // Arithmetic and logical instructions with 2 register operands. 
@@ -373,15 +386,15 @@ class ArithLogicI<string opstr, Operand Od, RegisterOperand RO, SDPatternOperator OpNode = null_frag> : InstSE<(outs RO:$rt), (ins RO:$rs, Od:$imm16), !strconcat(opstr, "\t$rt, $rs, $imm16"), - [(set RO:$rt, (OpNode RO:$rs, imm_type:$imm16))], IIAlu, FrmI> { + [(set RO:$rt, (OpNode RO:$rs, imm_type:$imm16))], + IIAlu, FrmI, opstr> { let isReMaterializable = 1; } // Arithmetic Multiply ADD/SUB -class MArithR<string opstr, SDPatternOperator op = null_frag, bit isComm = 0> : +class MArithR<string opstr, bit isComm = 0> : InstSE<(outs), (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt), - !strconcat(opstr, "\t$rs, $rt"), - [(op CPURegsOpnd:$rs, CPURegsOpnd:$rt, LO, HI)], IIImul, FrmR> { + !strconcat(opstr, "\t$rs, $rt"), [], IIImul, FrmR> { let Defs = [HI, LO]; let Uses = [HI, LO]; let isCommutable = isComm; @@ -391,7 +404,7 @@ class MArithR<string opstr, SDPatternOperator op = null_frag, bit isComm = 0> : class LogicNOR<string opstr, RegisterOperand RC>: InstSE<(outs RC:$rd), (ins RC:$rs, RC:$rt), !strconcat(opstr, "\t$rd, $rs, $rt"), - [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu, FrmR> { + [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu, FrmR, opstr> { let isCommutable = 1; } @@ -401,13 +414,13 @@ class shift_rotate_imm<string opstr, Operand ImmOpnd, SDPatternOperator PF = null_frag> : InstSE<(outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt), !strconcat(opstr, "\t$rd, $rt, $shamt"), - [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu, FrmR>; + [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu, FrmR, opstr>; class shift_rotate_reg<string opstr, RegisterOperand RC, SDPatternOperator OpNode = null_frag>: InstSE<(outs RC:$rd), (ins CPURegsOpnd:$rs, RC:$rt), !strconcat(opstr, "\t$rd, $rt, $rs"), - [(set RC:$rd, (OpNode RC:$rt, CPURegsOpnd:$rs))], IIAlu, FrmR>; + [(set RC:$rd, (OpNode RC:$rt, CPURegsOpnd:$rs))], IIAlu, FrmR, opstr>; // Load Upper Imediate class LoadUpper<string opstr, RegisterClass RC, Operand Imm>: @@ -427,33 +440,43 @@ class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern, // Memory Load/Store class Load<string opstr, SDPatternOperator OpNode, RegisterClass RC, - Operand MemOpnd> : + Operand MemOpnd, ComplexPattern Addr, string ofsuffix> : InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), - [(set RC:$rt, (OpNode addr:$addr))], NoItinerary, FrmI> { + [(set RC:$rt, (OpNode Addr:$addr))], NoItinerary, FrmI, + !strconcat(opstr, ofsuffix)> { let DecoderMethod = "DecodeMem"; let canFoldAsLoad = 1; + let mayLoad = 1; } class Store<string opstr, SDPatternOperator OpNode, RegisterClass RC, - Operand MemOpnd> : + Operand MemOpnd, ComplexPattern Addr, string ofsuffix> : InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"), - [(OpNode RC:$rt, addr:$addr)], NoItinerary, FrmI> { + [(OpNode RC:$rt, Addr:$addr)], NoItinerary, FrmI, + !strconcat(opstr, ofsuffix)> { let DecoderMethod = "DecodeMem"; + let mayStore = 1; } multiclass LoadM<string opstr, RegisterClass RC, - SDPatternOperator OpNode = null_frag> { - def NAME : Load<opstr, OpNode, RC, mem>, Requires<[NotN64, HasStdEnc]>; - def _P8 : Load<opstr, OpNode, RC, mem64>, Requires<[IsN64, HasStdEnc]> { + SDPatternOperator OpNode = null_frag, + ComplexPattern Addr = addr> { + def NAME : Load<opstr, OpNode, RC, mem, Addr, "">, + Requires<[NotN64, HasStdEnc]>; + def _P8 : Load<opstr, OpNode, RC, mem64, Addr, "_p8">, + Requires<[IsN64, HasStdEnc]> { let DecoderNamespace = "Mips64"; let isCodeGenOnly = 1; } } multiclass StoreM<string opstr, RegisterClass RC, - 
SDPatternOperator OpNode = null_frag> { - def NAME : Store<opstr, OpNode, RC, mem>, Requires<[NotN64, HasStdEnc]>; - def _P8 : Store<opstr, OpNode, RC, mem64>, Requires<[IsN64, HasStdEnc]> { + SDPatternOperator OpNode = null_frag, + ComplexPattern Addr = addr> { + def NAME : Store<opstr, OpNode, RC, mem, Addr, "">, + Requires<[NotN64, HasStdEnc]>; + def _P8 : Store<opstr, OpNode, RC, mem64, Addr, "_p8">, + Requires<[IsN64, HasStdEnc]> { let DecoderNamespace = "Mips64"; let isCodeGenOnly = 1; } @@ -523,14 +546,15 @@ class CBranchZero<string opstr, PatFrag cond_op, RegisterClass RC> : class SetCC_R<string opstr, PatFrag cond_op, RegisterClass RC> : InstSE<(outs CPURegsOpnd:$rd), (ins RC:$rs, RC:$rt), !strconcat(opstr, "\t$rd, $rs, $rt"), - [(set CPURegsOpnd:$rd, (cond_op RC:$rs, RC:$rt))], IIAlu, FrmR>; + [(set CPURegsOpnd:$rd, (cond_op RC:$rs, RC:$rt))], + IIAlu, FrmR, opstr>; class SetCC_I<string opstr, PatFrag cond_op, Operand Od, PatLeaf imm_type, RegisterClass RC>: InstSE<(outs CPURegsOpnd:$rt), (ins RC:$rs, Od:$imm16), !strconcat(opstr, "\t$rt, $rs, $imm16"), [(set CPURegsOpnd:$rt, (cond_op RC:$rs, imm_type:$imm16))], - IIAlu, FrmI>; + IIAlu, FrmI, opstr>; // Jump class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator, @@ -617,17 +641,40 @@ class SYNC_FT : class Mult<string opstr, InstrItinClass itin, RegisterOperand RO, list<Register> DefRegs> : InstSE<(outs), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$rs, $rt"), [], - itin, FrmR> { + itin, FrmR, opstr> { let isCommutable = 1; let Defs = DefRegs; let neverHasSideEffects = 1; } -class Div<SDNode op, string opstr, InstrItinClass itin, RegisterOperand RO, +// Pseudo multiply/divide instruction with explicit accumulator register +// operands. +class MultDivPseudo<Instruction RealInst, RegisterClass R0, RegisterOperand R1, + SDPatternOperator OpNode, InstrItinClass Itin, + bit IsComm = 1, bit HasSideEffects = 0> : + PseudoSE<(outs R0:$ac), (ins R1:$rs, R1:$rt), + [(set R0:$ac, (OpNode R1:$rs, R1:$rt))], Itin>, + PseudoInstExpansion<(RealInst R1:$rs, R1:$rt)> { + let isCommutable = IsComm; + let hasSideEffects = HasSideEffects; +} + +// Pseudo multiply add/sub instruction with explicit accumulator register +// operands. +class MAddSubPseudo<Instruction RealInst, SDPatternOperator OpNode> + : PseudoSE<(outs ACRegs:$ac), + (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin), + [(set ACRegs:$ac, + (OpNode CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin))], + IIImul>, + PseudoInstExpansion<(RealInst CPURegsOpnd:$rs, CPURegsOpnd:$rt)> { + string Constraints = "$acin = $ac"; +} + +class Div<string opstr, InstrItinClass itin, RegisterOperand RO, list<Register> DefRegs> : - InstSE<(outs), (ins RO:$rs, RO:$rt), - !strconcat(opstr, "\t$$zero, $rs, $rt"), [(op RO:$rs, RO:$rt)], itin, - FrmR> { + InstSE<(outs), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$$zero, $rs, $rt"), + [], itin, FrmR> { let Defs = DefRegs; } @@ -790,6 +837,12 @@ let usesCustomInserter = 1 in { defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32>; } +/// Pseudo instructions for loading and storing accumulator registers. 
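Aside (not part of the patch): the LOAD_AC64/STORE_AC64 pseudos defined just below are expanded later in this patch (ExpandPseudo in MipsSEFrameLowering.cpp) into two ordinary word loads or stores plus copies to LO and HI, since the accumulator itself cannot be a memory operand. A standalone sketch of that splitting, assuming the layout used by expandLoadACC/expandStoreACC (LO word at offset 0, HI word at offset 4):

  #include <cstdint>
  #include <cstring>
  #include <cstdio>

  // Spill: write the 64-bit accumulator as two 32-bit words, LO half first.
  static void storeACC(uint8_t *slot, uint64_t acc) {
    uint32_t lo = (uint32_t)acc, hi = (uint32_t)(acc >> 32);
    std::memcpy(slot, &lo, 4);
    std::memcpy(slot + 4, &hi, 4);
  }

  // Reload: read the two words back and reassemble the accumulator.
  static uint64_t loadACC(const uint8_t *slot) {
    uint32_t lo, hi;
    std::memcpy(&lo, slot, 4);
    std::memcpy(&hi, slot + 4, 4);
    return ((uint64_t)hi << 32) | lo;
  }

  int main() {
    uint8_t slot[8];
    storeACC(slot, 0x0000000280000000ULL);
    std::printf("%llx\n", (unsigned long long)loadACC(slot)); // 280000000
  }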
+let isPseudo = 1 in { + defm LOAD_AC64 : LoadM<"load_ac64", ACRegs>; + defm STORE_AC64 : StoreM<"store_ac64", ACRegs>; +} + //===----------------------------------------------------------------------===// // Instruction definition //===----------------------------------------------------------------------===// @@ -798,60 +851,70 @@ let usesCustomInserter = 1 in { //===----------------------------------------------------------------------===// /// Arithmetic Instructions (ALU Immediate) -def ADDiu : ArithLogicI<"addiu", simm16, CPURegsOpnd, immSExt16, add>, +def ADDiu : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd, immSExt16, add>, ADDI_FM<0x9>, IsAsCheapAsAMove; -def ADDi : ArithLogicI<"addi", simm16, CPURegsOpnd>, ADDI_FM<0x8>; -def SLTi : SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>, SLTI_FM<0xa>; -def SLTiu : SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>, SLTI_FM<0xb>; -def ANDi : ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>, +def ADDi : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>, ADDI_FM<0x8>; +def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>, + SLTI_FM<0xa>; +def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>, + SLTI_FM<0xb>; +def ANDi : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>, ADDI_FM<0xc>; -def ORi : ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>, +def ORi : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>, ADDI_FM<0xd>; -def XORi : ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>, +def XORi : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>, ADDI_FM<0xe>; -def LUi : LoadUpper<"lui", CPURegs, uimm16>, LUI_FM; +def LUi : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM; /// Arithmetic Instructions (3-Operand, R-Type) -def ADDu : ArithLogicR<"addu", CPURegsOpnd, 1, IIAlu, add>, ADD_FM<0, 0x21>; -def SUBu : ArithLogicR<"subu", CPURegsOpnd, 0, IIAlu, sub>, ADD_FM<0, 0x23>; -def MUL : ArithLogicR<"mul", CPURegsOpnd, 1, IIImul, mul>, ADD_FM<0x1c, 2>; -def ADD : ArithLogicR<"add", CPURegsOpnd>, ADD_FM<0, 0x20>; -def SUB : ArithLogicR<"sub", CPURegsOpnd>, ADD_FM<0, 0x22>; -def SLT : SetCC_R<"slt", setlt, CPURegs>, ADD_FM<0, 0x2a>; -def SLTu : SetCC_R<"sltu", setult, CPURegs>, ADD_FM<0, 0x2b>; -def AND : ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>, ADD_FM<0, 0x24>; -def OR : ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>, ADD_FM<0, 0x25>; -def XOR : ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>, ADD_FM<0, 0x26>; -def NOR : LogicNOR<"nor", CPURegsOpnd>, ADD_FM<0, 0x27>; +def ADDu : MMRel, ArithLogicR<"addu", CPURegsOpnd, 1, IIAlu, add>, + ADD_FM<0, 0x21>; +def SUBu : MMRel, ArithLogicR<"subu", CPURegsOpnd, 0, IIAlu, sub>, + ADD_FM<0, 0x23>; +def MUL : MMRel, ArithLogicR<"mul", CPURegsOpnd, 1, IIImul, mul>, + ADD_FM<0x1c, 2>; +def ADD : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM<0, 0x20>; +def SUB : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM<0, 0x22>; +def SLT : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM<0, 0x2a>; +def SLTu : MMRel, SetCC_R<"sltu", setult, CPURegs>, ADD_FM<0, 0x2b>; +def AND : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>, + ADD_FM<0, 0x24>; +def OR : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>, + ADD_FM<0, 0x25>; +def XOR : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>, + ADD_FM<0, 0x26>; +def NOR : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM<0, 0x27>; /// Shift Instructions -def SLL : shift_rotate_imm<"sll", shamt, CPURegsOpnd, shl, immZExt5>, +def SLL : MMRel, shift_rotate_imm<"sll", shamt, 
CPURegsOpnd, shl, immZExt5>, SRA_FM<0, 0>; -def SRL : shift_rotate_imm<"srl", shamt, CPURegsOpnd, srl, immZExt5>, +def SRL : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd, srl, immZExt5>, SRA_FM<2, 0>; -def SRA : shift_rotate_imm<"sra", shamt, CPURegsOpnd, sra, immZExt5>, +def SRA : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd, sra, immZExt5>, SRA_FM<3, 0>; -def SLLV : shift_rotate_reg<"sllv", CPURegsOpnd, shl>, SRLV_FM<4, 0>; -def SRLV : shift_rotate_reg<"srlv", CPURegsOpnd, srl>, SRLV_FM<6, 0>; -def SRAV : shift_rotate_reg<"srav", CPURegsOpnd, sra>, SRLV_FM<7, 0>; +def SLLV : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd, shl>, SRLV_FM<4, 0>; +def SRLV : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd, srl>, SRLV_FM<6, 0>; +def SRAV : MMRel, shift_rotate_reg<"srav", CPURegsOpnd, sra>, SRLV_FM<7, 0>; // Rotate Instructions let Predicates = [HasMips32r2, HasStdEnc] in { - def ROTR : shift_rotate_imm<"rotr", shamt, CPURegsOpnd, rotr, immZExt5>, + def ROTR : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd, rotr, + immZExt5>, SRA_FM<2, 1>; - def ROTRV : shift_rotate_reg<"rotrv", CPURegsOpnd, rotr>, SRLV_FM<6, 1>; + def ROTRV : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd, rotr>, + SRLV_FM<6, 1>; } /// Load and Store Instructions /// aligned -defm LB : LoadM<"lb", CPURegs, sextloadi8>, LW_FM<0x20>; -defm LBu : LoadM<"lbu", CPURegs, zextloadi8>, LW_FM<0x24>; -defm LH : LoadM<"lh", CPURegs, sextloadi16>, LW_FM<0x21>; -defm LHu : LoadM<"lhu", CPURegs, zextloadi16>, LW_FM<0x25>; -defm LW : LoadM<"lw", CPURegs, load>, LW_FM<0x23>; -defm SB : StoreM<"sb", CPURegs, truncstorei8>, LW_FM<0x28>; -defm SH : StoreM<"sh", CPURegs, truncstorei16>, LW_FM<0x29>; -defm SW : StoreM<"sw", CPURegs, store>, LW_FM<0x2b>; +defm LB : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM<0x20>; +defm LBu : LoadM<"lbu", CPURegs, zextloadi8, addrDefault>, MMRel, LW_FM<0x24>; +defm LH : LoadM<"lh", CPURegs, sextloadi16, addrDefault>, MMRel, LW_FM<0x21>; +defm LHu : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM<0x25>; +defm LW : LoadM<"lw", CPURegs, load, addrDefault>, MMRel, LW_FM<0x23>; +defm SB : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM<0x28>; +defm SH : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM<0x29>; +defm SW : StoreM<"sw", CPURegs, store>, MMRel, LW_FM<0x2b>; /// load/store left/right defm LWL : LoadLeftRightM<"lwl", MipsLWL, CPURegs>, LW_FM<0x22>; @@ -918,12 +981,17 @@ let Uses = [V0, V1], isTerminator = 1, isReturn = 1, isBarrier = 1 in { } /// Multiply and Divide Instructions. 
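Aside (not part of the patch): on MIPS, div and divu deposit the quotient in LO and the remainder in HI, which is why the DivRem nodes and the PseudoSDIV/PseudoUDIV definitions below produce a single accumulator value from which both results are later extracted. A minimal sketch of that pairing, using the same LO/HI naming as the patch:

  #include <cstdint>
  #include <cstdio>

  // Model of MIPS div: LO receives the quotient, HI the remainder.
  struct DivRem { int32_t lo, hi; };

  static DivRem sdiv(int32_t num, int32_t den) {
    DivRem r;
    r.lo = num / den;  // quotient  -> LO
    r.hi = num % den;  // remainder -> HI
    return r;
  }

  int main() {
    DivRem r = sdiv(17, 5);
    std::printf("lo(quotient)=%d hi(remainder)=%d\n", r.lo, r.hi); // 3 and 2
  }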
-def MULT : Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x18>; -def MULTu : Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x19>; -def SDIV : Div<MipsDivRem, "div", IIIdiv, CPURegsOpnd, [HI, LO]>, - MULT_FM<0, 0x1a>; -def UDIV : Div<MipsDivRemU, "divu", IIIdiv, CPURegsOpnd, [HI, LO]>, - MULT_FM<0, 0x1b>; +def MULT : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>, + MULT_FM<0, 0x18>; +def MULTu : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>, + MULT_FM<0, 0x19>; +def PseudoMULT : MultDivPseudo<MULT, ACRegs, CPURegsOpnd, MipsMult, IIImul>; +def PseudoMULTu : MultDivPseudo<MULTu, ACRegs, CPURegsOpnd, MipsMultu, IIImul>; +def SDIV : Div<"div", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1a>; +def UDIV : Div<"divu", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1b>; +def PseudoSDIV : MultDivPseudo<SDIV, ACRegs, CPURegsOpnd, MipsDivRem, IIIdiv, 0>; +def PseudoUDIV : MultDivPseudo<UDIV, ACRegs, CPURegsOpnd, MipsDivRemU, IIIdiv, + 0>; def MTHI : MoveToLOHI<"mthi", CPURegs, [HI]>, MTLO_FM<0x11>; def MTLO : MoveToLOHI<"mtlo", CPURegs, [LO]>, MTLO_FM<0x13>; @@ -951,10 +1019,14 @@ def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>; def LEA_ADDiu : EffectiveAddress<"addiu", CPURegs, mem_ea>, LW_FM<9>; // MADD*/MSUB* -def MADD : MArithR<"madd", MipsMAdd, 1>, MULT_FM<0x1c, 0>; -def MADDU : MArithR<"maddu", MipsMAddu, 1>, MULT_FM<0x1c, 1>; -def MSUB : MArithR<"msub", MipsMSub>, MULT_FM<0x1c, 4>; -def MSUBU : MArithR<"msubu", MipsMSubu>, MULT_FM<0x1c, 5>; +def MADD : MArithR<"madd", 1>, MULT_FM<0x1c, 0>; +def MADDU : MArithR<"maddu", 1>, MULT_FM<0x1c, 1>; +def MSUB : MArithR<"msub">, MULT_FM<0x1c, 4>; +def MSUBU : MArithR<"msubu">, MULT_FM<0x1c, 5>; +def PseudoMADD : MAddSubPseudo<MADD, MipsMAdd>; +def PseudoMADDU : MAddSubPseudo<MADDU, MipsMAddu>; +def PseudoMSUB : MAddSubPseudo<MSUB, MipsMSub>; +def PseudoMSUBU : MAddSubPseudo<MSUBU, MipsMSubu>; def RDHWR : ReadHardware<CPURegs, HWRegsOpnd>, RDHWR_FM; @@ -997,6 +1069,9 @@ def : InstAlias<"and $rs, $rt, $imm", def : InstAlias<"j $rs", (JR CPURegs:$rs), 0>, Requires<[NotMips64]>; def : InstAlias<"jalr $rs", (JALR RA, CPURegs:$rs)>, Requires<[NotMips64]>; +def : InstAlias<"jal $rs", (JALR RA, CPURegs:$rs), 0>, Requires<[NotMips64]>; +def : InstAlias<"jal $rd,$rs", (JALR CPURegs:$rd, CPURegs:$rs), 0>, + Requires<[NotMips64]>; def : InstAlias<"not $rt, $rs", (NOR CPURegsOpnd:$rt, CPURegsOpnd:$rs, ZERO), 1>; def : InstAlias<"neg $rt, $rs", @@ -1006,8 +1081,11 @@ def : InstAlias<"negu $rt, $rs", def : InstAlias<"slt $rs, $rt, $imm", (SLTi CPURegsOpnd:$rs, CPURegs:$rt, simm16:$imm), 0>; def : InstAlias<"xor $rs, $rt, $imm", - (XORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>, + (XORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>, Requires<[NotMips64]>; +def : InstAlias<"or $rs, $rt, $imm", + (ORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>, + Requires<[NotMips64]>; def : InstAlias<"nop", (SLL ZERO, ZERO, 0), 1>; def : InstAlias<"mfc0 $rt, $rd", (MFC0_3OP CPURegsOpnd:$rt, CPURegsOpnd:$rd, 0), 0>; @@ -1043,6 +1121,13 @@ def LoadAddr32Imm : LoadAddressImm<"la", shamt,CPURegsOpnd>; // Arbitrary patterns that map to one or more instructions //===----------------------------------------------------------------------===// +// Load/store pattern templates. 
+class LoadRegImmPat<Instruction LoadInst, ValueType ValTy, PatFrag Node> : + MipsPat<(ValTy (Node addrRegImm:$a)), (LoadInst addrRegImm:$a)>; + +class StoreRegImmPat<Instruction StoreInst, ValueType ValTy> : + MipsPat<(store ValTy:$v, addrRegImm:$a), (StoreInst ValTy:$v, addrRegImm:$a)>; + // Small immediates def : MipsPat<(i32 immSExt16:$in), (ADDiu ZERO, imm:$in)>; @@ -1058,10 +1143,12 @@ def : MipsPat<(i32 imm:$imm), // Carry MipsPatterns def : MipsPat<(subc CPURegs:$lhs, CPURegs:$rhs), (SUBu CPURegs:$lhs, CPURegs:$rhs)>; -def : MipsPat<(addc CPURegs:$lhs, CPURegs:$rhs), - (ADDu CPURegs:$lhs, CPURegs:$rhs)>; -def : MipsPat<(addc CPURegs:$src, immSExt16:$imm), - (ADDiu CPURegs:$src, imm:$imm)>; +let Predicates = [HasStdEnc, NotDSP] in { + def : MipsPat<(addc CPURegs:$lhs, CPURegs:$rhs), + (ADDu CPURegs:$lhs, CPURegs:$rhs)>; + def : MipsPat<(addc CPURegs:$src, immSExt16:$imm), + (ADDiu CPURegs:$src, imm:$imm)>; +} // Call def : MipsPat<(MipsJmpLink (i32 tglobaladdr:$dst)), @@ -1220,6 +1307,24 @@ defm : SetgeImmPats<CPURegs, SLTi, SLTiu>; // bswap pattern def : MipsPat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>; +// mflo/hi patterns. +def : MipsPat<(i32 (ExtractLOHI ACRegs:$ac, imm:$lohi_idx)), + (EXTRACT_SUBREG ACRegs:$ac, imm:$lohi_idx)>; + +// Load halfword/word patterns. +let AddedComplexity = 40 in { + let Predicates = [NotN64, HasStdEnc] in { + def : LoadRegImmPat<LBu, i32, zextloadi8>; + def : LoadRegImmPat<LH, i32, sextloadi16>; + def : LoadRegImmPat<LW, i32, load>; + } + let Predicates = [IsN64, HasStdEnc] in { + def : LoadRegImmPat<LBu_P8, i32, zextloadi8>; + def : LoadRegImmPat<LH_P8, i32, sextloadi16>; + def : LoadRegImmPat<LW_P8, i32, load>; + } +} + //===----------------------------------------------------------------------===// // Floating Point Support //===----------------------------------------------------------------------===// @@ -1238,3 +1343,6 @@ include "Mips16InstrInfo.td" include "MipsDSPInstrFormats.td" include "MipsDSPInstrInfo.td" +// Micromips +include "MicroMipsInstrFormats.td" +include "MicroMipsInstrInfo.td" diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp index 2efe534..bf5ad37 100644 --- a/lib/Target/Mips/MipsLongBranch.cpp +++ b/lib/Target/Mips/MipsLongBranch.cpp @@ -399,6 +399,8 @@ static void emitGPDisp(MachineFunction &F, const MipsInstrInfo *TII) { } bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) { + if (TM.getSubtarget<MipsSubtarget>().inMips16Mode()) + return false; if ((TM.getRelocationModel() == Reloc::PIC_) && TM.getSubtarget<MipsSubtarget>().isABI_O32() && F.getInfo<MipsFunctionInfo>()->globalBaseRegSet()) diff --git a/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp b/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp new file mode 100644 index 0000000..c6abf17 --- /dev/null +++ b/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp @@ -0,0 +1,34 @@ +//===----------------------------------------------------------------------===// +// Instruction Selector Subtarget Control +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// This file defines a pass used to change the subtarget for the +// Mips Instruction selector. 
+// +//===----------------------------------------------------------------------===// + +#include "MipsISelDAGToDAG.h" +#include "MipsModuleISelDAGToDAG.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +namespace llvm { + +bool MipsModuleDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { + DEBUG(errs() << "In MipsModuleDAGToDAGISel::runMachineFunction\n"); + const_cast<MipsSubtarget&>(Subtarget).resetSubtarget(&MF); + return false; +} + +char MipsModuleDAGToDAGISel::ID = 0; + +} + + +llvm::FunctionPass *llvm::createMipsModuleISelDag(MipsTargetMachine &TM) { + return new MipsModuleDAGToDAGISel(TM); +} + + diff --git a/lib/Target/Mips/MipsModuleISelDAGToDAG.h b/lib/Target/Mips/MipsModuleISelDAGToDAG.h new file mode 100644 index 0000000..fda35ae --- /dev/null +++ b/lib/Target/Mips/MipsModuleISelDAGToDAG.h @@ -0,0 +1,66 @@ +//===---- MipsModuleISelDAGToDAG.h - Change Subtarget --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a pass used to change the subtarget for the +// Mips Instruction selector. +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSMODULEISELDAGTODAG_H +#define MIPSMODULEISELDAGTODAG_H + +#include "Mips.h" +#include "MipsSubtarget.h" +#include "MipsTargetMachine.h" +#include "llvm/CodeGen/SelectionDAGISel.h" + + +//===----------------------------------------------------------------------===// +// Instruction Selector Implementation +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// MipsModuleDAGToDAGISel - MIPS specific code to select MIPS machine +// instructions for SelectionDAG operations. +//===----------------------------------------------------------------------===// +namespace llvm { + +class MipsModuleDAGToDAGISel : public MachineFunctionPass { +public: + + static char ID; + + explicit MipsModuleDAGToDAGISel(MipsTargetMachine &TM_) + : MachineFunctionPass(ID), + TM(TM_), Subtarget(TM.getSubtarget<MipsSubtarget>()) {} + + // Pass Name + virtual const char *getPassName() const { + return "MIPS DAG->DAG Pattern Instruction Selection"; + } + + virtual bool runOnMachineFunction(MachineFunction &MF); + + virtual SDNode *Select(SDNode *N) { + llvm_unreachable("unexpected"); + } + +protected: + /// Keep a pointer to the MipsSubtarget around so that we can make the right + /// decision when generating code for different targets. + const TargetMachine &TM; + const MipsSubtarget &Subtarget; +}; + +/// createMipsISelDag - This pass converts a legalized DAG into a +/// MIPS-specific DAG, ready for instruction scheduling. +FunctionPass *createMipsModuleISelDag(MipsTargetMachine &TM); +} + +#endif diff --git a/lib/Target/Mips/MipsOs16.cpp b/lib/Target/Mips/MipsOs16.cpp new file mode 100644 index 0000000..1919077 --- /dev/null +++ b/lib/Target/Mips/MipsOs16.cpp @@ -0,0 +1,113 @@ +//===---- MipsOs16.cpp for Mips Option -Os16 --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines an optimization phase for the MIPS target. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mips-os16" +#include "MipsOs16.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +namespace { + + // Figure out if we need float point based on the function signature. + // We need to move variables in and/or out of floating point + // registers because of the ABI + // + bool needsFPFromSig(Function &F) { + Type* RetType = F.getReturnType(); + switch (RetType->getTypeID()) { + case Type::FloatTyID: + case Type::DoubleTyID: + return true; + default: + ; + } + if (F.arg_size() >=1) { + Argument &Arg = F.getArgumentList().front(); + switch (Arg.getType()->getTypeID()) { + case Type::FloatTyID: + case Type::DoubleTyID: + return true; + default: + ; + } + } + return false; + } + + // Figure out if the function will need floating point operations + // + bool needsFP(Function &F) { + if (needsFPFromSig(F)) + return true; + for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) + for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); + I != E; ++I) { + const Instruction &Inst = *I; + switch (Inst.getOpcode()) { + case Instruction::FAdd: + case Instruction::FSub: + case Instruction::FMul: + case Instruction::FDiv: + case Instruction::FRem: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::UIToFP: + case Instruction::SIToFP: + case Instruction::FPTrunc: + case Instruction::FPExt: + case Instruction::FCmp: + return true; + default: + ; + } + if (const CallInst *CI = dyn_cast<CallInst>(I)) { + DEBUG(dbgs() << "Working on call" << "\n"); + Function &F_ = *CI->getCalledFunction(); + if (needsFPFromSig(F_)) + return true; + } + } + return false; + } +} +namespace llvm { + + +bool MipsOs16::runOnModule(Module &M) { + DEBUG(errs() << "Run on Module MipsOs16\n"); + bool modified = false; + for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { + if (F->isDeclaration()) continue; + DEBUG(dbgs() << "Working on " << F->getName() << "\n"); + if (needsFP(*F)) { + DEBUG(dbgs() << " need to compile as nomips16 \n"); + F->addFnAttr("nomips16"); + } + else { + F->addFnAttr("mips16"); + DEBUG(dbgs() << " no need to compile as nomips16 \n"); + } + } + return modified; +} + +char MipsOs16::ID = 0; + +} + +ModulePass *llvm::createMipsOs16(MipsTargetMachine &TM) { + return new MipsOs16; +} + + diff --git a/lib/Target/Mips/MipsOs16.h b/lib/Target/Mips/MipsOs16.h new file mode 100644 index 0000000..21beef8 --- /dev/null +++ b/lib/Target/Mips/MipsOs16.h @@ -0,0 +1,49 @@ +//===---- MipsOs16.h for Mips Option -Os16 --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an optimization phase for the MIPS target. 
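Aside (not part of the patch): the -Os16 pass above decides per function whether to add the "mips16" or "nomips16" attribute by checking the signature and scanning the body for floating-point operations. The sketch below is a deliberately simplified, assumed model of that classification using toy data structures (ToyFunc, opcode strings), not the LLVM IR walk the real needsFP performs.

  #include <string>
  #include <vector>
  #include <iostream>

  struct ToyFunc {
    bool fpInSignature;            // return type or first argument is float/double
    std::vector<std::string> ops;  // opcodes appearing in the body
  };

  // A function "needs FP" if its signature or any instruction touches
  // floating point; such functions are compiled as nomips16.
  static bool needsFP(const ToyFunc &F) {
    if (F.fpInSignature)
      return true;
    for (const std::string &Op : F.ops)
      if (Op == "fadd" || Op == "fmul" || Op == "fdiv" || Op == "fcmp")
        return true;
    return false;
  }

  int main() {
    ToyFunc F{false, {"add", "fmul"}};
    std::cout << (needsFP(F) ? "nomips16" : "mips16") << "\n"; // nomips16
  }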
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/MipsMCTargetDesc.h" +#include "MipsTargetMachine.h" +#include "llvm/Pass.h" +#include "llvm/Target/TargetMachine.h" + + + +#ifndef MIPSOS16_H +#define MIPSOS16_H + +using namespace llvm; + +namespace llvm { + +class MipsOs16 : public ModulePass { + +public: + static char ID; + + MipsOs16() : ModulePass(ID) { + + } + + virtual const char *getPassName() const { + return "MIPS Os16 Optimization"; + } + + virtual bool runOnModule(Module &M); + +}; + +ModulePass *createMipsOs16(MipsTargetMachine &TM); + +} + +#endif diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h index 3c210e7..5ed5124 100644 --- a/lib/Target/Mips/MipsRegisterInfo.h +++ b/lib/Target/Mips/MipsRegisterInfo.h @@ -68,6 +68,9 @@ public: unsigned getEHExceptionRegister() const; unsigned getEHHandlerRegister() const; + /// \brief Return GPR register class. + virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0; + private: virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, uint64_t StackSize, diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td index 6d76e8a..865d4d7 100644 --- a/lib/Target/Mips/MipsRegisterInfo.td +++ b/lib/Target/Mips/MipsRegisterInfo.td @@ -58,6 +58,13 @@ class AFPR64<bits<16> Enc, string n, list<Register> subregs> let SubRegIndices = [sub_32]; } +// Accumulator Registers +class ACC<bits<16> Enc, string n, list<Register> subregs> + : MipsRegWithSubRegs<Enc, n, subregs> { + let SubRegIndices = [sub_lo, sub_hi]; + let CoveredBySubRegs = 1; +} + // Mips Hardware Registers class HWR<bits<16> Enc, string n> : MipsReg<Enc, n>; @@ -222,8 +229,14 @@ let Namespace = "Mips" in { def D31_64 : AFPR64<31, "f31", [F31]>, DwarfRegNum<[63]>; // Hi/Lo registers - def HI : Register<"hi">, DwarfRegNum<[64]>; - def LO : Register<"lo">, DwarfRegNum<[65]>; + def HI : Register<"ac0">, DwarfRegNum<[64]>; + def HI1 : Register<"ac1">, DwarfRegNum<[176]>; + def HI2 : Register<"ac2">, DwarfRegNum<[178]>; + def HI3 : Register<"ac3">, DwarfRegNum<[180]>; + def LO : Register<"ac0">, DwarfRegNum<[65]>; + def LO1 : Register<"ac1">, DwarfRegNum<[177]>; + def LO2 : Register<"ac2">, DwarfRegNum<[179]>; + def LO3 : Register<"ac3">, DwarfRegNum<[181]>; let SubRegIndices = [sub_32] in { def HI64 : RegisterWithSubRegs<"hi", [HI]>; @@ -244,13 +257,15 @@ let Namespace = "Mips" in { def HWR29_64 : MipsReg<29, "29">; // Accum registers - let SubRegIndices = [sub_lo, sub_hi] in - def AC0 : MipsRegWithSubRegs<0, "ac0", [LO, HI]>; - def AC1 : MipsReg<1, "ac1">; - def AC2 : MipsReg<2, "ac2">; - def AC3 : MipsReg<3, "ac3">; + def AC0 : ACC<0, "ac0", [LO, HI]>; + def AC1 : ACC<1, "ac1", [LO1, HI1]>; + def AC2 : ACC<2, "ac2", [LO2, HI2]>; + def AC3 : ACC<3, "ac3", [LO3, HI3]>; + + def AC0_64 : ACC<0, "ac0", [LO64, HI64]>; def DSPCtrl : Register<"dspctrl">; + def DSPCCond : Register<"">; } //===----------------------------------------------------------------------===// @@ -326,17 +341,33 @@ def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)>; def CCR : RegisterClass<"Mips", [i32], 32, (add FCR31,FCC0)>, Unallocatable; // Hi/Lo Registers -def HILO : RegisterClass<"Mips", [i32], 32, (add HI, LO)>, Unallocatable; -def HILO64 : RegisterClass<"Mips", [i64], 64, (add HI64, LO64)>, Unallocatable; +def LORegs : RegisterClass<"Mips", [i32], 32, (add LO)>; +def HIRegs : RegisterClass<"Mips", [i32], 32, (add HI)>; +def 
LORegsDSP : RegisterClass<"Mips", [i32], 32, (add LO, LO1, LO2, LO3)>; +def HIRegsDSP : RegisterClass<"Mips", [i32], 32, (add HI, HI1, HI2, HI3)>; +def LORegs64 : RegisterClass<"Mips", [i64], 64, (add LO64)>; +def HIRegs64 : RegisterClass<"Mips", [i64], 64, (add HI64)>; // Hardware registers def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>, Unallocatable; def HWRegs64 : RegisterClass<"Mips", [i64], 64, (add HWR29_64)>, Unallocatable; // Accumulator Registers -def ACRegs : RegisterClass<"Mips", [i64], 64, (sequence "AC%u", 0, 3)>, - Unallocatable; +def ACRegs : RegisterClass<"Mips", [untyped], 64, (add AC0)> { + let Size = 64; +} + +def ACRegs128 : RegisterClass<"Mips", [untyped], 128, (add AC0_64)> { + let Size = 128; +} + +def ACRegsDSP : RegisterClass<"Mips", [untyped], 64, (sequence "AC%u", 0, 3)> { + let Size = 64; +} + +def DSPCC : RegisterClass<"Mips", [v4i8, v2i16], 32, (add DSPCCond)>; +// Register Operands. def CPURegsAsmOperand : AsmOperandClass { let Name = "CPURegsAsm"; let ParserMethod = "parseCPURegs"; diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp index 0dd6713..2b76704 100644 --- a/lib/Target/Mips/MipsSEFrameLowering.cpp +++ b/lib/Target/Mips/MipsSEFrameLowering.cpp @@ -29,6 +29,166 @@ using namespace llvm; +namespace { +typedef MachineBasicBlock::iterator Iter; + +/// Helper class to expand pseudos. +class ExpandPseudo { +public: + ExpandPseudo(MachineFunction &MF); + bool expand(); + +private: + bool expandInstr(MachineBasicBlock &MBB, Iter I); + void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize); + void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize); + bool expandCopy(MachineBasicBlock &MBB, Iter I); + bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned Dst, + unsigned Src, unsigned RegSize); + + MachineFunction &MF; + const MipsSEInstrInfo &TII; + const MipsRegisterInfo &RegInfo; + MachineRegisterInfo &MRI; +}; +} + +ExpandPseudo::ExpandPseudo(MachineFunction &MF_) + : MF(MF_), + TII(*static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo())), + RegInfo(TII.getRegisterInfo()), MRI(MF.getRegInfo()) {} + +bool ExpandPseudo::expand() { + bool Expanded = false; + + for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end(); + BB != BBEnd; ++BB) + for (Iter I = BB->begin(), End = BB->end(); I != End;) + Expanded |= expandInstr(*BB, I++); + + return Expanded; +} + +bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) { + switch(I->getOpcode()) { + case Mips::LOAD_AC64: + case Mips::LOAD_AC64_P8: + case Mips::LOAD_AC_DSP: + case Mips::LOAD_AC_DSP_P8: + expandLoadACC(MBB, I, 4); + break; + case Mips::LOAD_AC128: + case Mips::LOAD_AC128_P8: + expandLoadACC(MBB, I, 8); + break; + case Mips::STORE_AC64: + case Mips::STORE_AC64_P8: + case Mips::STORE_AC_DSP: + case Mips::STORE_AC_DSP_P8: + expandStoreACC(MBB, I, 4); + break; + case Mips::STORE_AC128: + case Mips::STORE_AC128_P8: + expandStoreACC(MBB, I, 8); + break; + case TargetOpcode::COPY: + if (!expandCopy(MBB, I)) + return false; + break; + default: + return false; + } + + MBB.erase(I); + return true; +} + +void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I, + unsigned RegSize) { + // load $vr0, FI + // copy lo, $vr0 + // load $vr1, FI + 4 + // copy hi, $vr1 + + assert(I->getOperand(0).isReg() && I->getOperand(1).isFI()); + + const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize); + unsigned VR0 = MRI.createVirtualRegister(RC); + unsigned VR1 = MRI.createVirtualRegister(RC); + 
unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); + unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo); + unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi); + DebugLoc DL = I->getDebugLoc(); + const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY); + + TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0); + BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill); + TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize); + BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill); +} + +void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I, + unsigned RegSize) { + // copy $vr0, lo + // store $vr0, FI + // copy $vr1, hi + // store $vr1, FI + 4 + + assert(I->getOperand(0).isReg() && I->getOperand(1).isFI()); + + const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize); + unsigned VR0 = MRI.createVirtualRegister(RC); + unsigned VR1 = MRI.createVirtualRegister(RC); + unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); + unsigned SrcKill = getKillRegState(I->getOperand(0).isKill()); + unsigned Lo = RegInfo.getSubReg(Src, Mips::sub_lo); + unsigned Hi = RegInfo.getSubReg(Src, Mips::sub_hi); + DebugLoc DL = I->getDebugLoc(); + + BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR0).addReg(Lo, SrcKill); + TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0); + BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR1).addReg(Hi, SrcKill); + TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize); +} + +bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) { + unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg(); + + if (Mips::ACRegsDSPRegClass.contains(Dst, Src)) + return expandCopyACC(MBB, I, Dst, Src, 4); + + if (Mips::ACRegs128RegClass.contains(Dst, Src)) + return expandCopyACC(MBB, I, Dst, Src, 8); + + return false; +} + +bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned Dst, + unsigned Src, unsigned RegSize) { + // copy $vr0, src_lo + // copy dst_lo, $vr0 + // copy $vr1, src_hi + // copy dst_hi, $vr1 + + const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize); + unsigned VR0 = MRI.createVirtualRegister(RC); + unsigned VR1 = MRI.createVirtualRegister(RC); + unsigned SrcKill = getKillRegState(I->getOperand(1).isKill()); + unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo); + unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi); + unsigned SrcLo = RegInfo.getSubReg(Src, Mips::sub_lo); + unsigned SrcHi = RegInfo.getSubReg(Src, Mips::sub_hi); + DebugLoc DL = I->getDebugLoc(); + + BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR0).addReg(SrcLo, SrcKill); + BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo) + .addReg(VR0, RegState::Kill); + BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR1).addReg(SrcHi, SrcKill); + BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi) + .addReg(VR1, RegState::Kill); + return true; +} + unsigned MipsSEFrameLowering::ehDataReg(unsigned I) const { static const unsigned EhDataReg[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 @@ -246,7 +406,10 @@ MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { // Reserve call frame if the size of the maximum call frame fits into 16-bit // immediate field and there are no variable sized objects on the stack. - return isInt<16>(MFI->getMaxCallFrameSize()) && !MFI->hasVarSizedObjects(); + // Make sure the second register scavenger spill slot can be accessed with one + // instruction. 
+ return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) && + !MFI->hasVarSizedObjects(); } // Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions @@ -284,6 +447,18 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF, if (MipsFI->callsEhReturn()) MipsFI->createEhDataRegsFI(); + // Expand pseudo instructions which load, store or copy accumulators. + // Add an emergency spill slot if a pseudo was expanded. + if (ExpandPseudo(MF).expand()) { + // The spill slot should be half the size of the accumulator. If the target is + // mips64, it should be 64-bit, otherwise it should be 32-bit. + const TargetRegisterClass *RC = STI.hasMips64() ? + &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(), + RC->getAlignment(), false); + RS->addScavengingFrameIndex(FI); + } + // Set scavenging frame index if necessary. uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() + estimateStackSize(MF); @@ -295,7 +470,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF, &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(), RC->getAlignment(), false); - RS->setScavengingFrameIndex(FI); + RS->addScavengingFrameIndex(FI); } const MipsFrameLowering * diff --git a/lib/Target/Mips/MipsSEFrameLowering.h b/lib/Target/Mips/MipsSEFrameLowering.h index 7becd25..193a66c 100644 --- a/lib/Target/Mips/MipsSEFrameLowering.h +++ b/lib/Target/Mips/MipsSEFrameLowering.h @@ -21,7 +21,7 @@ namespace llvm { class MipsSEFrameLowering : public MipsFrameLowering { public: explicit MipsSEFrameLowering(const MipsSubtarget &STI) - : MipsFrameLowering(STI) {} + : MipsFrameLowering(STI, STI.hasMips64() ? 16 : 8) {} /// emitProlog/emitEpilog - These methods insert prolog and epilog code into /// the function. diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index e22c3c8..b54f1f4 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -35,6 +35,11 @@ #include "llvm/Target/TargetMachine.h" using namespace llvm; +bool MipsSEDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { + if (Subtarget.inMips16Mode()) + return false; + return MipsDAGToDAGISel::runOnMachineFunction(MF); +} bool MipsSEDAGToDAGISel::replaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr& MI) { @@ -177,27 +182,6 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) { replaceUsesWithZeroReg(MRI, *I); } -/// Select multiply instructions. -std::pair<SDNode*, SDNode*> -MipsSEDAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, DebugLoc DL, EVT Ty, - bool HasLo, bool HasHi) { - SDNode *Lo = 0, *Hi = 0; - SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0), - N->getOperand(1)); - SDValue InFlag = SDValue(Mul, 0); - - if (HasLo) { - unsigned Opcode = (Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64); - Lo = CurDAG->getMachineNode(Opcode, DL, Ty, MVT::Glue, InFlag); - InFlag = SDValue(Lo, 1); - } - if (HasHi) { - unsigned Opcode = (Ty == MVT::i32 ?
Mips::MFHI : Mips::MFHI64); - Hi = CurDAG->getMachineNode(Opcode, DL, Ty, InFlag); - } - return std::make_pair(Lo, Hi); -} - SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag, SDValue CmpLHS, DebugLoc DL, SDNode *Node) const { @@ -211,7 +195,7 @@ SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag, SDValue LHS = Node->getOperand(0), RHS = Node->getOperand(1); EVT VT = LHS.getValueType(); - SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, DL, VT, Ops, 2); + SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, DL, VT, Ops); SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, DL, VT, SDValue(Carry, 0), RHS); return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, LHS, @@ -307,9 +291,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) { // Instruction Selection not handled by the auto-generated // tablegen selection should be handled here. /// - EVT NodeTy = Node->getValueType(0); SDNode *Result; - unsigned MultOpc; switch(Opcode) { default: break; @@ -321,51 +303,13 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) { } case ISD::ADDE: { + if (Subtarget.hasDSP()) // Select DSP instructions, ADDSC and ADDWC. + break; SDValue InFlag = Node->getOperand(2); Result = selectAddESubE(Mips::ADDu, InFlag, InFlag.getValue(0), DL, Node); return std::make_pair(true, Result); } - /// Mul with two results - case ISD::SMUL_LOHI: - case ISD::UMUL_LOHI: { - if (NodeTy == MVT::i32) - MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT); - else - MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::DMULTu : Mips::DMULT); - - std::pair<SDNode*, SDNode*> LoHi = selectMULT(Node, MultOpc, DL, NodeTy, - true, true); - - if (!SDValue(Node, 0).use_empty()) - ReplaceUses(SDValue(Node, 0), SDValue(LoHi.first, 0)); - - if (!SDValue(Node, 1).use_empty()) - ReplaceUses(SDValue(Node, 1), SDValue(LoHi.second, 0)); - - return std::make_pair(true, (SDNode*)NULL); - } - - /// Special Muls - case ISD::MUL: { - // Mips32 has a 32-bit three operand mul instruction. - if (Subtarget.hasMips32() && NodeTy == MVT::i32) - break; - MultOpc = NodeTy == MVT::i32 ? Mips::MULT : Mips::DMULT; - Result = selectMULT(Node, MultOpc, DL, NodeTy, true, false).first; - return std::make_pair(true, Result); - } - case ISD::MULHS: - case ISD::MULHU: { - if (NodeTy == MVT::i32) - MultOpc = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT); - else - MultOpc = (Opcode == ISD::MULHU ? Mips::DMULTu : Mips::DMULT); - - Result = selectMULT(Node, MultOpc, DL, NodeTy, false, true).second; - return std::make_pair(true, Result); - } - case ISD::ConstantFP: { ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Node); if (Node->getValueType(0) == MVT::f64 && CN->isExactlyValue(+0.0)) { @@ -450,6 +394,19 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) { ReplaceUses(SDValue(Node, 0), ResNode); return std::make_pair(true, ResNode.getNode()); } + + case MipsISD::InsertLOHI: { + unsigned RCID = Subtarget.hasDSP() ? 
Mips::ACRegsDSPRegClassID : + Mips::ACRegsRegClassID; + SDValue RegClass = CurDAG->getTargetConstant(RCID, MVT::i32); + SDValue LoIdx = CurDAG->getTargetConstant(Mips::sub_lo, MVT::i32); + SDValue HiIdx = CurDAG->getTargetConstant(Mips::sub_hi, MVT::i32); + const SDValue Ops[] = { RegClass, Node->getOperand(0), LoIdx, + Node->getOperand(1), HiIdx }; + SDNode *Res = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, + MVT::Untyped, Ops); + return std::make_pair(true, Res); + } } return std::make_pair(false, (SDNode*)NULL); diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h index 6137ab0..0dae73d 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.h +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h @@ -24,6 +24,9 @@ public: explicit MipsSEDAGToDAGISel(MipsTargetMachine &TM) : MipsDAGToDAGISel(TM) {} private: + + virtual bool runOnMachineFunction(MachineFunction &MF); + bool replaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&); std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, DebugLoc dl, diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index 287e2ed..8544bb8 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -15,6 +15,7 @@ #include "MipsTargetMachine.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Intrinsics.h" #include "llvm/Support/CommandLine.h" #include "llvm/Target/TargetInstrInfo.h" @@ -27,6 +28,9 @@ EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden, MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM) : MipsTargetLowering(TM) { // Set up the register classes + + clearRegisterClasses(); + addRegisterClass(MVT::i32, &Mips::CPURegsRegClass); if (HasMips64) @@ -42,12 +46,23 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM) for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) setOperationAction(Opc, VecTys[i], Expand); + setOperationAction(ISD::ADD, VecTys[i], Legal); + setOperationAction(ISD::SUB, VecTys[i], Legal); setOperationAction(ISD::LOAD, VecTys[i], Legal); setOperationAction(ISD::STORE, VecTys[i], Legal); setOperationAction(ISD::BITCAST, VecTys[i], Legal); } + + setTargetDAGCombine(ISD::SHL); + setTargetDAGCombine(ISD::SRA); + setTargetDAGCombine(ISD::SRL); + setTargetDAGCombine(ISD::SETCC); + setTargetDAGCombine(ISD::VSELECT); } + if (Subtarget->hasDSPR2()) + setOperationAction(ISD::MUL, MVT::v2i16, Legal); + if (!TM.Options.UseSoftFloat) { addRegisterClass(MVT::f32, &Mips::FGR32RegClass); @@ -60,11 +75,31 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM) } } - setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom); + setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom); + setOperationAction(ISD::MULHS, MVT::i32, Custom); + setOperationAction(ISD::MULHU, MVT::i32, Custom); + + if (HasMips64) { + setOperationAction(ISD::MULHS, MVT::i64, Custom); + setOperationAction(ISD::MULHU, MVT::i64, Custom); + setOperationAction(ISD::MUL, MVT::i64, Custom); + } + + setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); + setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); + + setOperationAction(ISD::SDIVREM, MVT::i32, Custom); + setOperationAction(ISD::UDIVREM, MVT::i32, Custom); + setOperationAction(ISD::SDIVREM, MVT::i64, Custom); + setOperationAction(ISD::UDIVREM, MVT::i64, Custom); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, 
Custom); setOperationAction(ISD::LOAD, MVT::i32, Custom); setOperationAction(ISD::STORE, MVT::i32, Custom); + setTargetDAGCombine(ISD::ADDE); + setTargetDAGCombine(ISD::SUBE); + computeRegisterProperties(); } @@ -89,6 +124,334 @@ MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const { } } +SDValue MipsSETargetLowering::LowerOperation(SDValue Op, + SelectionDAG &DAG) const { + switch(Op.getOpcode()) { + case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG); + case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG); + case ISD::MULHS: return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG); + case ISD::MULHU: return lowerMulDiv(Op, MipsISD::Multu, false, true, DAG); + case ISD::MUL: return lowerMulDiv(Op, MipsISD::Mult, true, false, DAG); + case ISD::SDIVREM: return lowerMulDiv(Op, MipsISD::DivRem, true, true, DAG); + case ISD::UDIVREM: return lowerMulDiv(Op, MipsISD::DivRemU, true, true, + DAG); + case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG); + case ISD::INTRINSIC_W_CHAIN: return lowerINTRINSIC_W_CHAIN(Op, DAG); + } + + return MipsTargetLowering::LowerOperation(Op, DAG); +} + +// selectMADD - +// Transforms a subgraph in CurDAG if the following pattern is found: +// (addc multLo, Lo0), (adde multHi, Hi0), +// where, +// multHi/Lo: product of multiplication +// Lo0: initial value of Lo register +// Hi0: initial value of Hi register +// Return true if pattern matching was successful. +static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) { + // ADDENode's second operand must be a flag output of an ADDC node in order + // for the matching to be successful. + SDNode *ADDCNode = ADDENode->getOperand(2).getNode(); + + if (ADDCNode->getOpcode() != ISD::ADDC) + return false; + + SDValue MultHi = ADDENode->getOperand(0); + SDValue MultLo = ADDCNode->getOperand(0); + SDNode *MultNode = MultHi.getNode(); + unsigned MultOpc = MultHi.getOpcode(); + + // MultHi and MultLo must be generated by the same node, + if (MultLo.getNode() != MultNode) + return false; + + // and it must be a multiplication. + if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI) + return false; + + // MultLo and MultHi must be the first and second output of MultNode + // respectively. + if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0) + return false; + + // Transform this to a MADD only if ADDENode and ADDCNode are the only users + // of the values of MultNode, in which case MultNode will be removed in later + // phases. + // If there exist users other than ADDENode or ADDCNode, this function returns + // here, which will result in MultNode being mapped to a single MULT + // instruction node rather than a pair of MULT and MADD instructions being + // produced. + if (!MultHi.hasOneUse() || !MultLo.hasOneUse()) + return false; + + DebugLoc DL = ADDENode->getDebugLoc(); + + // Initialize accumulator. + SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, + ADDCNode->getOperand(1), + ADDENode->getOperand(1)); + + // create MipsMAdd(u) node + MultOpc = MultOpc == ISD::UMUL_LOHI ?
MipsISD::MAddu : MipsISD::MAdd; + + SDValue MAdd = CurDAG->getNode(MultOpc, DL, MVT::Untyped, + MultNode->getOperand(0),// Factor 0 + MultNode->getOperand(1),// Factor 1 + ACCIn); + + // replace uses of adde and addc here + if (!SDValue(ADDCNode, 0).use_empty()) { + SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32); + SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd, + LoIdx); + CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut); + } + if (!SDValue(ADDENode, 0).use_empty()) { + SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32); + SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd, + HiIdx); + CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut); + } + + return true; +} + +// selectMSUB - +// Transforms a subgraph in CurDAG if the following pattern is found: +// (subc Lo0, multLo), (sube Hi0, multHi), +// where, +// multHi/Lo: product of multiplication +// Lo0: initial value of Lo register +// Hi0: initial value of Hi register +// Return true if pattern matching was successful. +static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) { + // SUBENode's second operand must be a flag output of a SUBC node in order + // for the matching to be successful. + SDNode *SUBCNode = SUBENode->getOperand(2).getNode(); + + if (SUBCNode->getOpcode() != ISD::SUBC) + return false; + + SDValue MultHi = SUBENode->getOperand(1); + SDValue MultLo = SUBCNode->getOperand(1); + SDNode *MultNode = MultHi.getNode(); + unsigned MultOpc = MultHi.getOpcode(); + + // MultHi and MultLo must be generated by the same node, + if (MultLo.getNode() != MultNode) + return false; + + // and it must be a multiplication. + if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI) + return false; + + // MultLo and MultHi must be the first and second output of MultNode + // respectively. + if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0) + return false; + + // Transform this to a MSUB only if SUBENode and SUBCNode are the only users + // of the values of MultNode, in which case MultNode will be removed in later + // phases. + // If there exist users other than SUBENode or SUBCNode, this function returns + // here, which will result in MultNode being mapped to a single MULT + // instruction node rather than a pair of MULT and MSUB instructions being + // produced. + if (!MultHi.hasOneUse() || !MultLo.hasOneUse()) + return false; + + DebugLoc DL = SUBENode->getDebugLoc(); + + // Initialize accumulator. + SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, + SUBCNode->getOperand(0), + SUBENode->getOperand(0)); + + // create MipsMSub(u) node + MultOpc = MultOpc == ISD::UMUL_LOHI ?
MipsISD::MSubu : MipsISD::MSub; + + SDValue MSub = CurDAG->getNode(MultOpc, DL, MVT::Glue, + MultNode->getOperand(0),// Factor 0 + MultNode->getOperand(1),// Factor 1 + ACCIn); + + // replace uses of sube and subc here + if (!SDValue(SUBCNode, 0).use_empty()) { + SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32); + SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub, + LoIdx); + CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut); + } + if (!SDValue(SUBENode, 0).use_empty()) { + SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32); + SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub, + HiIdx); + CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut); + } + + return true; +} + +static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget *Subtarget) { + if (DCI.isBeforeLegalize()) + return SDValue(); + + if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 && + selectMADD(N, &DAG)) + return SDValue(N, 0); + + return SDValue(); +} + +static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget *Subtarget) { + if (DCI.isBeforeLegalize()) + return SDValue(); + + if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 && + selectMSUB(N, &DAG)) + return SDValue(N, 0); + + return SDValue(); +} + +static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty, + SelectionDAG &DAG, + const MipsSubtarget *Subtarget) { + // See if this is a vector splat immediate node. + APInt SplatValue, SplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + unsigned EltSize = Ty.getVectorElementType().getSizeInBits(); + BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); + + if (!BV || + !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs, + EltSize, !Subtarget->isLittle()) || + (SplatBitSize != EltSize) || + (SplatValue.getZExtValue() >= EltSize)) + return SDValue(); + + return DAG.getNode(Opc, N->getDebugLoc(), Ty, N->getOperand(0), + DAG.getConstant(SplatValue.getZExtValue(), MVT::i32)); +} + +static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget *Subtarget) { + EVT Ty = N->getValueType(0); + + if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8)) + return SDValue(); + + return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget); +} + +static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget *Subtarget) { + EVT Ty = N->getValueType(0); + + if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2())) + return SDValue(); + + return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget); +} + + +static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget *Subtarget) { + EVT Ty = N->getValueType(0); + + if (((Ty != MVT::v2i16) || !Subtarget->hasDSPR2()) && (Ty != MVT::v4i8)) + return SDValue(); + + return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget); +} + +static bool isLegalDSPCondCode(EVT Ty, ISD::CondCode CC) { + bool IsV216 = (Ty == MVT::v2i16); + + switch (CC) { + case ISD::SETEQ: + case ISD::SETNE: return true; + case ISD::SETLT: + case ISD::SETLE: + case ISD::SETGT: + case ISD::SETGE: return IsV216; + case ISD::SETULT: + case ISD::SETULE: + case ISD::SETUGT: + case ISD::SETUGE: return !IsV216; + 
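// The split mirrors the DSP ASE compare instructions: signed ordered compares
// exist for paired halfwords (e.g. cmp.lt.ph, used for v2i16) and unsigned
// ordered compares for quad bytes (e.g. cmpu.lt.qb, used for v4i8), while
// eq/ne are available for both element types.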
default: return false; + } +} + +static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) { + EVT Ty = N->getValueType(0); + + if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8)) + return SDValue(); + + if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get())) + return SDValue(); + + return DAG.getNode(MipsISD::SETCC_DSP, N->getDebugLoc(), Ty, N->getOperand(0), + N->getOperand(1), N->getOperand(2)); +} + +static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) { + EVT Ty = N->getValueType(0); + + if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8)) + return SDValue(); + + SDValue SetCC = N->getOperand(0); + + if (SetCC.getOpcode() != MipsISD::SETCC_DSP) + return SDValue(); + + return DAG.getNode(MipsISD::SELECT_CC_DSP, N->getDebugLoc(), Ty, + SetCC.getOperand(0), SetCC.getOperand(1), N->getOperand(1), + N->getOperand(2), SetCC.getOperand(2)); +} + +SDValue +MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { + SelectionDAG &DAG = DCI.DAG; + SDValue Val; + + switch (N->getOpcode()) { + case ISD::ADDE: + return performADDECombine(N, DAG, DCI, Subtarget); + case ISD::SUBE: + return performSUBECombine(N, DAG, DCI, Subtarget); + case ISD::SHL: + return performSHLCombine(N, DAG, DCI, Subtarget); + case ISD::SRA: + return performSRACombine(N, DAG, DCI, Subtarget); + case ISD::SRL: + return performSRLCombine(N, DAG, DCI, Subtarget); + case ISD::VSELECT: + return performVSELECTCombine(N, DAG); + case ISD::SETCC: { + Val = performSETCCCombine(N, DAG); + break; + } + } + + if (Val.getNode()) + return Val; + + return MipsTargetLowering::PerformDAGCombine(N, DCI); +} + MachineBasicBlock * MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *BB) const { @@ -133,6 +496,194 @@ getOpndList(SmallVectorImpl<SDValue> &Ops, InternalLinkage, CLI, Callee, Chain); } +SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc, + bool HasLo, bool HasHi, + SelectionDAG &DAG) const { + EVT Ty = Op.getOperand(0).getValueType(); + DebugLoc DL = Op.getDebugLoc(); + SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped, + Op.getOperand(0), Op.getOperand(1)); + SDValue Lo, Hi; + + if (HasLo) + Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult, + DAG.getConstant(Mips::sub_lo, MVT::i32)); + if (HasHi) + Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult, + DAG.getConstant(Mips::sub_hi, MVT::i32)); + + if (!HasLo || !HasHi) + return HasLo ? Lo : Hi; + + SDValue Vals[] = { Lo, Hi }; + return DAG.getMergeValues(Vals, 2, DL); +} + + +static SDValue initAccumulator(SDValue In, DebugLoc DL, SelectionDAG &DAG) { + SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, + DAG.getConstant(0, MVT::i32)); + SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, + DAG.getConstant(1, MVT::i32)); + return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi); +} + +static SDValue extractLOHI(SDValue Op, DebugLoc DL, SelectionDAG &DAG) { + SDValue Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op, + DAG.getConstant(Mips::sub_lo, MVT::i32)); + SDValue Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op, + DAG.getConstant(Mips::sub_hi, MVT::i32)); + return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi); +} + +// This function expands mips intrinsic nodes which have 64-bit input operands +// or output values. 
+// +// out64 = intrinsic-node in64 +// => +// lo = copy (extract-element (in64, 0)) +// hi = copy (extract-element (in64, 1)) +// mips-specific-node +// v0 = copy lo +// v1 = copy hi +// out64 = merge-values (v0, v1) +// +static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) { + DebugLoc DL = Op.getDebugLoc(); + bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other; + SmallVector<SDValue, 3> Ops; + unsigned OpNo = 0; + + // See if Op has a chain input. + if (HasChainIn) + Ops.push_back(Op->getOperand(OpNo++)); + + // The next operand is the intrinsic opcode. + assert(Op->getOperand(OpNo).getOpcode() == ISD::TargetConstant); + + // See if the next operand has type i64. + SDValue Opnd = Op->getOperand(++OpNo), In64; + + if (Opnd.getValueType() == MVT::i64) + In64 = initAccumulator(Opnd, DL, DAG); + else + Ops.push_back(Opnd); + + // Push the remaining operands. + for (++OpNo ; OpNo < Op->getNumOperands(); ++OpNo) + Ops.push_back(Op->getOperand(OpNo)); + + // Add In64 to the end of the list. + if (In64.getNode()) + Ops.push_back(In64); + + // Scan output. + SmallVector<EVT, 2> ResTys; + + for (SDNode::value_iterator I = Op->value_begin(), E = Op->value_end(); + I != E; ++I) + ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I); + + // Create node. + SDValue Val = DAG.getNode(Opc, DL, ResTys, &Ops[0], Ops.size()); + SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val; + + if (!HasChainIn) + return Out; + + assert(Val->getValueType(1) == MVT::Other); + SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) }; + return DAG.getMergeValues(Vals, 2, DL); +} + +SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, + SelectionDAG &DAG) const { + switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) { + default: + return SDValue(); + case Intrinsic::mips_shilo: + return lowerDSPIntr(Op, DAG, MipsISD::SHILO); + case Intrinsic::mips_dpau_h_qbl: + return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL); + case Intrinsic::mips_dpau_h_qbr: + return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR); + case Intrinsic::mips_dpsu_h_qbl: + return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL); + case Intrinsic::mips_dpsu_h_qbr: + return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR); + case Intrinsic::mips_dpa_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH); + case Intrinsic::mips_dps_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH); + case Intrinsic::mips_dpax_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH); + case Intrinsic::mips_dpsx_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH); + case Intrinsic::mips_mulsa_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH); + case Intrinsic::mips_mult: + return lowerDSPIntr(Op, DAG, MipsISD::Mult); + case Intrinsic::mips_multu: + return lowerDSPIntr(Op, DAG, MipsISD::Multu); + case Intrinsic::mips_madd: + return lowerDSPIntr(Op, DAG, MipsISD::MAdd); + case Intrinsic::mips_maddu: + return lowerDSPIntr(Op, DAG, MipsISD::MAddu); + case Intrinsic::mips_msub: + return lowerDSPIntr(Op, DAG, MipsISD::MSub); + case Intrinsic::mips_msubu: + return lowerDSPIntr(Op, DAG, MipsISD::MSubu); + } +} + +SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, + SelectionDAG &DAG) const { + switch (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue()) { + default: + return SDValue(); + case Intrinsic::mips_extp: + return lowerDSPIntr(Op, DAG, MipsISD::EXTP); + case Intrinsic::mips_extpdp: + return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP); + case Intrinsic::mips_extr_w: + return 
lowerDSPIntr(Op, DAG, MipsISD::EXTR_W); + case Intrinsic::mips_extr_r_w: + return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W); + case Intrinsic::mips_extr_rs_w: + return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W); + case Intrinsic::mips_extr_s_h: + return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H); + case Intrinsic::mips_mthlip: + return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP); + case Intrinsic::mips_mulsaq_s_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH); + case Intrinsic::mips_maq_s_w_phl: + return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL); + case Intrinsic::mips_maq_s_w_phr: + return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR); + case Intrinsic::mips_maq_sa_w_phl: + return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL); + case Intrinsic::mips_maq_sa_w_phr: + return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR); + case Intrinsic::mips_dpaq_s_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH); + case Intrinsic::mips_dpsq_s_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH); + case Intrinsic::mips_dpaq_sa_l_w: + return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W); + case Intrinsic::mips_dpsq_sa_l_w: + return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W); + case Intrinsic::mips_dpaqx_s_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH); + case Intrinsic::mips_dpaqx_sa_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH); + case Intrinsic::mips_dpsqx_s_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH); + case Intrinsic::mips_dpsqx_sa_w_ph: + return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH); + } +} + MachineBasicBlock * MipsSETargetLowering:: emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{ // $bb: diff --git a/lib/Target/Mips/MipsSEISelLowering.h b/lib/Target/Mips/MipsSEISelLowering.h index 04a28ce..ec8a5c7 100644 --- a/lib/Target/Mips/MipsSEISelLowering.h +++ b/lib/Target/Mips/MipsSEISelLowering.h @@ -15,6 +15,7 @@ #define MipsSEISELLOWERING_H #include "MipsISelLowering.h" +#include "MipsRegisterInfo.h" namespace llvm { class MipsSETargetLowering : public MipsTargetLowering { @@ -23,9 +24,26 @@ namespace llvm { virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const; + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; + + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const; + virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, + EVT VT) const { + return false; + } + + virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const { + if (VT == MVT::Untyped) + return Subtarget->hasDSP() ? 
&Mips::ACRegsDSPRegClass : + &Mips::ACRegsRegClass; + + return TargetLowering::getRepRegClassFor(VT); + } + private: virtual bool isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo, @@ -38,6 +56,12 @@ namespace llvm { bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage, CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const; + SDValue lowerMulDiv(SDValue Op, unsigned NewOpc, bool HasLo, bool HasHi, + SelectionDAG &DAG) const; + + SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const; + MachineBasicBlock *emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const; }; diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp index a9809ef..2e7048d 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -95,20 +95,28 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, Opc = Mips::CFC1; else if (Mips::FGR32RegClass.contains(SrcReg)) Opc = Mips::MFC1; - else if (SrcReg == Mips::HI) + else if (Mips::HIRegsRegClass.contains(SrcReg)) Opc = Mips::MFHI, SrcReg = 0; - else if (SrcReg == Mips::LO) + else if (Mips::LORegsRegClass.contains(SrcReg)) Opc = Mips::MFLO, SrcReg = 0; + else if (Mips::HIRegsDSPRegClass.contains(SrcReg)) + Opc = Mips::MFHI_DSP; + else if (Mips::LORegsDSPRegClass.contains(SrcReg)) + Opc = Mips::MFLO_DSP; } else if (Mips::CPURegsRegClass.contains(SrcReg)) { // Copy from CPU Reg. if (Mips::CCRRegClass.contains(DestReg)) Opc = Mips::CTC1; else if (Mips::FGR32RegClass.contains(DestReg)) Opc = Mips::MTC1; - else if (DestReg == Mips::HI) + else if (Mips::HIRegsRegClass.contains(DestReg)) Opc = Mips::MTHI, DestReg = 0; - else if (DestReg == Mips::LO) + else if (Mips::LORegsRegClass.contains(DestReg)) Opc = Mips::MTLO, DestReg = 0; + else if (Mips::HIRegsDSPRegClass.contains(DestReg)) + Opc = Mips::MTHI_DSP; + else if (Mips::LORegsDSPRegClass.contains(DestReg)) + Opc = Mips::MTLO_DSP; } else if (Mips::FGR32RegClass.contains(DestReg, SrcReg)) Opc = Mips::FMOV_S; @@ -121,17 +129,17 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg. if (Mips::CPU64RegsRegClass.contains(SrcReg)) Opc = Mips::OR64, ZeroReg = Mips::ZERO_64; - else if (SrcReg == Mips::HI64) + else if (Mips::HIRegs64RegClass.contains(SrcReg)) Opc = Mips::MFHI64, SrcReg = 0; - else if (SrcReg == Mips::LO64) + else if (Mips::LORegs64RegClass.contains(SrcReg)) Opc = Mips::MFLO64, SrcReg = 0; else if (Mips::FGR64RegClass.contains(SrcReg)) Opc = Mips::DMFC1; } else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg. 
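// For instance, when expandCopyACC splits a 128-bit accumulator copy, the
// resulting COPY whose destination is LO64 is matched through LORegs64RegClass
// below and selected to Mips::MTLO64 (HI64 correspondingly to Mips::MTHI64).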
- if (DestReg == Mips::HI64) + if (Mips::HIRegs64RegClass.contains(DestReg)) Opc = Mips::MTHI64, DestReg = 0; - else if (DestReg == Mips::LO64) + else if (Mips::LORegs64RegClass.contains(DestReg)) Opc = Mips::MTLO64, DestReg = 0; else if (Mips::FGR64RegClass.contains(DestReg)) Opc = Mips::DMTC1; @@ -152,10 +160,10 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, } void MipsSEInstrInfo:: -storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - unsigned SrcReg, bool isKill, int FI, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const { +storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned SrcReg, bool isKill, int FI, + const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, + int64_t Offset) const { DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore); @@ -166,6 +174,12 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Opc = IsN64 ? Mips::SW_P8 : Mips::SW; else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::SD_P8 : Mips::SD; + else if (Mips::ACRegsRegClass.hasSubClassEq(RC)) + Opc = IsN64 ? Mips::STORE_AC64_P8 : Mips::STORE_AC64; + else if (Mips::ACRegsDSPRegClass.hasSubClassEq(RC)) + Opc = IsN64 ? Mips::STORE_AC_DSP_P8 : Mips::STORE_AC_DSP; + else if (Mips::ACRegs128RegClass.hasSubClassEq(RC)) + Opc = IsN64 ? Mips::STORE_AC128_P8 : Mips::STORE_AC128; else if (Mips::FGR32RegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1; else if (Mips::AFGR64RegClass.hasSubClassEq(RC)) @@ -175,15 +189,13 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, assert(Opc && "Register class not handled!"); BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)) - .addFrameIndex(FI).addImm(0).addMemOperand(MMO); + .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO); } void MipsSEInstrInfo:: -loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - unsigned DestReg, int FI, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const -{ +loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned DestReg, int FI, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, int64_t Offset) const { DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad); @@ -193,6 +205,12 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Opc = IsN64 ? Mips::LW_P8 : Mips::LW; else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::LD_P8 : Mips::LD; + else if (Mips::ACRegsRegClass.hasSubClassEq(RC)) + Opc = IsN64 ? Mips::LOAD_AC64_P8 : Mips::LOAD_AC64; + else if (Mips::ACRegsDSPRegClass.hasSubClassEq(RC)) + Opc = IsN64 ? Mips::LOAD_AC_DSP_P8 : Mips::LOAD_AC_DSP; + else if (Mips::ACRegs128RegClass.hasSubClassEq(RC)) + Opc = IsN64 ? Mips::LOAD_AC128_P8 : Mips::LOAD_AC128; else if (Mips::FGR32RegClass.hasSubClassEq(RC)) Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1; else if (Mips::AFGR64RegClass.hasSubClassEq(RC)) @@ -201,7 +219,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Opc = IsN64 ? 
Mips::LDC164_P8 : Mips::LDC164; assert(Opc && "Register class not handled!"); - BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0) + BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset) .addMemOperand(MMO); } @@ -371,6 +389,7 @@ void MipsSEInstrInfo::ExpandEhReturn(MachineBasicBlock &MBB, unsigned JR = STI.isABI_N64() ? Mips::JR64 : Mips::JR; unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP; unsigned RA = STI.isABI_N64() ? Mips::RA_64 : Mips::RA; + unsigned T9 = STI.isABI_N64() ? Mips::T9_64 : Mips::T9; unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO; unsigned OffsetReg = I->getOperand(0).getReg(); unsigned TargetReg = I->getOperand(1).getReg(); @@ -378,6 +397,9 @@ void MipsSEInstrInfo::ExpandEhReturn(MachineBasicBlock &MBB, // or $ra, $v0, $zero // addu $sp, $sp, $v1 // jr $ra + if (TM.getRelocationModel() == Reloc::PIC_) + BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), T9) + .addReg(TargetReg).addReg(ZERO); BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), RA) .addReg(TargetReg).addReg(ZERO); BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), SP) diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h index 3e22b33..0bf7876 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.h +++ b/lib/Target/Mips/MipsSEInstrInfo.h @@ -49,17 +49,19 @@ public: unsigned DestReg, unsigned SrcReg, bool KillSrc) const; - virtual void storeRegToStackSlot(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, - unsigned SrcReg, bool isKill, int FrameIndex, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const; - - virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, - unsigned DestReg, int FrameIndex, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const; + virtual void storeRegToStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + int64_t Offset) const; + + virtual void loadRegFromStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + int64_t Offset) const; virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const; diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp index a39b393..9696738 100644 --- a/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -54,6 +54,15 @@ requiresFrameIndexScavenging(const MachineFunction &MF) const { return true; } +const TargetRegisterClass * +MipsSERegisterInfo::intRegClass(unsigned Size) const { + if (Size == 4) + return &Mips::CPURegsRegClass; + + assert(Size == 8); + return &Mips::CPU64RegsRegClass; +} + void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, uint64_t StackSize, diff --git a/lib/Target/Mips/MipsSERegisterInfo.h b/lib/Target/Mips/MipsSERegisterInfo.h index f6827e9..2f7c37b 100644 --- a/lib/Target/Mips/MipsSERegisterInfo.h +++ b/lib/Target/Mips/MipsSERegisterInfo.h @@ -31,6 +31,8 @@ public: bool requiresFrameIndexScavenging(const MachineFunction &MF) const; + virtual const TargetRegisterClass *intRegClass(unsigned Size) const; + private: virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, uint64_t StackSize, diff --git a/lib/Target/Mips/MipsSubtarget.cpp 
b/lib/Target/Mips/MipsSubtarget.cpp index e11e5d1..14a2b27 100644 --- a/lib/Target/Mips/MipsSubtarget.cpp +++ b/lib/Target/Mips/MipsSubtarget.cpp @@ -11,29 +11,56 @@ // //===----------------------------------------------------------------------===// +#define DEBUG_TYPE "mips-subtarget" + +#include "MipsMachineFunction.h" #include "MipsSubtarget.h" +#include "MipsTargetMachine.h" #include "Mips.h" #include "MipsRegisterInfo.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/Function.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" #define GET_SUBTARGETINFO_TARGET_DESC #define GET_SUBTARGETINFO_CTOR #include "MipsGenSubtargetInfo.inc" + using namespace llvm; +// FIXME: Maybe this should be on by default when Mips16 is specified +// +static cl::opt<bool> Mixed16_32( + "mips-mixed-16-32", + cl::init(false), + cl::desc("Allow for a mixture of Mips16 " + "and Mips32 code in a single source file"), + cl::Hidden); + +static cl::opt<bool> Mips_Os16( + "mips-os16", + cl::init(false), + cl::desc("Compile all functions that don't use " + "floating point as Mips 16"), + cl::Hidden); + void MipsSubtarget::anchor() { } MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU, const std::string &FS, bool little, - Reloc::Model _RM) : + Reloc::Model _RM, MipsTargetMachine *_TM) : MipsGenSubtargetInfo(TT, CPU, FS), MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little), IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false), IsLinux(true), HasSEInReg(false), HasCondMov(false), HasSwap(false), HasBitCount(false), HasFPIdx(false), InMips16Mode(false), InMicroMipsMode(false), HasDSP(false), HasDSPR2(false), - RM(_RM) + AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16), + RM(_RM), OverrideMode(NoOverride), TM(_TM) { std::string CPUName = CPU; if (CPUName.empty()) @@ -42,6 +69,8 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU, // Parse features string. ParseSubtargetFeatures(CPUName, FS); + PreviousInMips16Mode = InMips16Mode; + // Initialize scheduling itinerary for the specified CPU. InstrItins = getInstrItineraryForCPU(CPUName); @@ -72,3 +101,48 @@ MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel, &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass); return OptLevel >= CodeGenOpt::Aggressive; } + +//FIXME: This logic for resetting the subtarget along with +// the helper classes can probably be simplified but there are a lot of +// cases so we will defer rewriting this to later.
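// For illustration, the function attributes this machinery keys off look like
// (IR fragment not taken from the patch):
//   define void @f() #0 { ... }
//   attributes #0 = { nounwind "mips16" }
// A "mips16" function swaps in the Mips16 helper classes via
// setHelperClassesMips16(), "nomips16" swaps back to the MipsSE ones, and a
// function with neither attribute restores the command-line default.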
+// +void MipsSubtarget::resetSubtarget(MachineFunction *MF) { + bool ChangeToMips16 = false, ChangeToNoMips16 = false; + DEBUG(dbgs() << "resetSubtargetFeatures" << "\n"); + AttributeSet FnAttrs = MF->getFunction()->getAttributes(); + ChangeToMips16 = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, + "mips16"); + ChangeToNoMips16 = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, + "nomips16"); + assert (!(ChangeToMips16 & ChangeToNoMips16) && + "mips16 and nomips16 specified on the same function"); + if (ChangeToMips16) { + if (PreviousInMips16Mode) + return; + OverrideMode = Mips16Override; + PreviousInMips16Mode = true; + TM->setHelperClassesMips16(); + return; + } else if (ChangeToNoMips16) { + if (!PreviousInMips16Mode) + return; + OverrideMode = NoMips16Override; + PreviousInMips16Mode = false; + TM->setHelperClassesMipsSE(); + return; + } else { + if (OverrideMode == NoOverride) + return; + OverrideMode = NoOverride; + DEBUG(dbgs() << "back to default" << "\n"); + if (inMips16Mode() && !PreviousInMips16Mode) { + TM->setHelperClassesMips16(); + PreviousInMips16Mode = true; + } else if (!inMips16Mode() && PreviousInMips16Mode) { + TM->setHelperClassesMipsSE(); + PreviousInMips16Mode = false; + } + return; + } +} + diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h index 7a2e47c..f2f0e15 100644 --- a/lib/Target/Mips/MipsSubtarget.h +++ b/lib/Target/Mips/MipsSubtarget.h @@ -16,7 +16,9 @@ #include "MCTargetDesc/MipsReginfo.h" #include "llvm/MC/MCInstrItineraries.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Target/TargetSubtargetInfo.h" + #include <string> #define GET_SUBTARGETINFO_HEADER @@ -25,6 +27,8 @@ namespace llvm { class StringRef; +class MipsTargetMachine; + class MipsSubtarget : public MipsGenSubtargetInfo { virtual void anchor(); @@ -89,12 +93,23 @@ protected: // InMips16 -- can process Mips16 instructions bool InMips16Mode; + // PreviousInMips16 -- the function we just processed was in Mips 16 Mode + bool PreviousInMips16Mode; + // InMicroMips -- can process MicroMips instructions bool InMicroMipsMode; // HasDSP, HasDSPR2 -- supports DSP ASE. bool HasDSP, HasDSPR2; + // Allow mixed Mips16 and Mips32 in one source file + bool AllowMixed16_32; + + // Optimize for space by compiling all functions as Mips 16 unless + // it needs floating point. Functions needing floating point are + // compiled as Mips32 + bool Os16; + InstrItineraryData InstrItins; // The instance to the register info section object @@ -103,6 +118,12 @@ protected: // Relocation Model Reloc::Model RM; + // We can override the determination of whether we are in mips16 mode + // as from the command line + enum {NoOverride, Mips16Override, NoMips16Override} OverrideMode; + + MipsTargetMachine *TM; + public: virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel, AntiDepBreakMode& Mode, @@ -118,7 +139,8 @@ public: /// This constructor initializes the data members to match that /// of the specified triple. MipsSubtarget(const std::string &TT, const std::string &CPU, - const std::string &FS, bool little, Reloc::Model RM); + const std::string &FS, bool little, Reloc::Model RM, + MipsTargetMachine *TM); /// ParseSubtargetFeatures - Parses features string setting specified /// subtarget options. Definition of function is auto generated by tblgen. 
@@ -137,7 +159,20 @@ public: bool isSingleFloat() const { return IsSingleFloat; } bool isNotSingleFloat() const { return !IsSingleFloat; } bool hasVFPU() const { return HasVFPU; } - bool inMips16Mode() const { return InMips16Mode; } + bool inMips16Mode() const { + switch (OverrideMode) { + case NoOverride: + return InMips16Mode; + case Mips16Override: + return true; + case NoMips16Override: + return false; + } + llvm_unreachable("Unexpected mode"); + } + bool inMips16ModeDefault() { + return InMips16Mode; + } bool inMicroMipsMode() const { return InMicroMipsMode; } bool hasDSP() const { return HasDSP; } bool hasDSPR2() const { return HasDSPR2; } @@ -153,11 +188,20 @@ public: bool hasBitCount() const { return HasBitCount; } bool hasFPIdx() const { return HasFPIdx; } + bool allowMixed16_32() const { return AllowMixed16_32;}; + + bool os16() const { return Os16;}; + // Grab MipsRegInfo object const MipsReginfo &getMReginfo() const { return MRI; } // Grab relocation model Reloc::Model getRelocationModel() const {return RM;} + + /// \brief Reset the subtarget for the Mips target. + void resetSubtarget(MachineFunction *MF); + + }; } // End llvm namespace diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index 3336358..ee28e2a 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -15,11 +15,26 @@ #include "Mips.h" #include "MipsFrameLowering.h" #include "MipsInstrInfo.h" +#include "MipsModuleISelDAGToDAG.h" +#include "MipsOs16.h" +#include "MipsSEFrameLowering.h" +#include "MipsSEInstrInfo.h" +#include "MipsSEISelLowering.h" +#include "MipsSEISelDAGToDAG.h" +#include "Mips16FrameLowering.h" +#include "Mips16InstrInfo.h" +#include "Mips16ISelDAGToDAG.h" +#include "Mips16ISelLowering.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/PassManager.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" #include "llvm/Support/TargetRegistry.h" using namespace llvm; + + extern "C" void LLVMInitializeMipsTarget() { // Register the target. RegisterTargetMachine<MipsebTargetMachine> X(TheMipsTarget); @@ -42,7 +57,7 @@ MipsTargetMachine(const Target &T, StringRef TT, CodeGenOpt::Level OL, bool isLittle) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), - Subtarget(TT, CPU, FS, isLittle, RM), + Subtarget(TT, CPU, FS, isLittle, RM, this), DL(isLittle ? (Subtarget.isABI_N64() ? 
"e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-" @@ -54,9 +69,46 @@ MipsTargetMachine(const Target &T, StringRef TT, "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32-S64")), InstrInfo(MipsInstrInfo::create(*this)), FrameLowering(MipsFrameLowering::create(*this, Subtarget)), - TLInfo(MipsTargetLowering::create(*this)), TSInfo(*this), JITInfo() { + TLInfo(MipsTargetLowering::create(*this)), + TSInfo(*this), JITInfo() { +} + + +void MipsTargetMachine::setHelperClassesMips16() { + InstrInfoSE.swap(InstrInfo); + FrameLoweringSE.swap(FrameLowering); + TLInfoSE.swap(TLInfo); + if (!InstrInfo16) { + InstrInfo.reset(MipsInstrInfo::create(*this)); + FrameLowering.reset(MipsFrameLowering::create(*this, Subtarget)); + TLInfo.reset(MipsTargetLowering::create(*this)); + } else { + InstrInfo16.swap(InstrInfo); + FrameLowering16.swap(FrameLowering); + TLInfo16.swap(TLInfo); + } + assert(TLInfo && "null target lowering 16"); + assert(InstrInfo && "null instr info 16"); + assert(FrameLowering && "null frame lowering 16"); } +void MipsTargetMachine::setHelperClassesMipsSE() { + InstrInfo16.swap(InstrInfo); + FrameLowering16.swap(FrameLowering); + TLInfo16.swap(TLInfo); + if (!InstrInfoSE) { + InstrInfo.reset(MipsInstrInfo::create(*this)); + FrameLowering.reset(MipsFrameLowering::create(*this, Subtarget)); + TLInfo.reset(MipsTargetLowering::create(*this)); + } else { + InstrInfoSE.swap(InstrInfo); + FrameLoweringSE.swap(FrameLowering); + TLInfoSE.swap(TLInfo); + } + assert(TLInfo && "null target lowering in SE"); + assert(InstrInfo && "null instr info SE"); + assert(FrameLowering && "null frame lowering SE"); +} void MipsebTargetMachine::anchor() { } MipsebTargetMachine:: @@ -90,6 +142,7 @@ public: return *getMipsTargetMachine().getSubtargetImpl(); } + virtual void addIRPasses(); virtual bool addInstSelector(); virtual bool addPreEmitPass(); }; @@ -99,24 +152,50 @@ TargetPassConfig *MipsTargetMachine::createPassConfig(PassManagerBase &PM) { return new MipsPassConfig(this, PM); } +void MipsPassConfig::addIRPasses() { + TargetPassConfig::addIRPasses(); + if (getMipsSubtarget().os16()) + addPass(createMipsOs16(getMipsTargetMachine())); +} // Install an instruction selector pass using // the ISelDag to gen Mips code. bool MipsPassConfig::addInstSelector() { - addPass(createMipsISelDag(getMipsTargetMachine())); + if (getMipsSubtarget().allowMixed16_32()) { + addPass(createMipsModuleISelDag(getMipsTargetMachine())); + addPass(createMips16ISelDag(getMipsTargetMachine())); + addPass(createMipsSEISelDag(getMipsTargetMachine())); + } else { + addPass(createMipsISelDag(getMipsTargetMachine())); + } return false; } +void MipsTargetMachine::addAnalysisPasses(PassManagerBase &PM) { + if (Subtarget.allowMixed16_32()) { + DEBUG(errs() << "No "); + //FIXME: The Basic Target Transform Info + // pass needs to become a function pass instead of + // being an immutable pass and then this method as it exists now + // would be unnecessary. + PM.add(createNoTargetTransformInfoPass()); + } else + LLVMTargetMachine::addAnalysisPasses(PM); + DEBUG(errs() << "Target Transform Info Pass Added\n"); +} + // Implemented by targets that want to run passes immediately before // machine code is emitted. return true if -print-machineinstrs should // print out the code after the passes. 
bool MipsPassConfig::addPreEmitPass() { MipsTargetMachine &TM = getMipsTargetMachine(); + const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>(); addPass(createMipsDelaySlotFillerPass(TM)); - // NOTE: long branch has not been implemented for mips16. - if (TM.getSubtarget<MipsSubtarget>().hasStandardEncoding()) + if (Subtarget.hasStandardEncoding() || + Subtarget.allowMixed16_32()) addPass(createMipsLongBranchPass(TM)); - if (TM.getSubtarget<MipsSubtarget>().inMips16Mode()) + if (Subtarget.inMips16Mode() || + Subtarget.allowMixed16_32()) addPass(createMipsConstantIslandPass(TM)); return true; diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h index 7e5f192..ee55708 100644 --- a/lib/Target/Mips/MipsTargetMachine.h +++ b/lib/Target/Mips/MipsTargetMachine.h @@ -21,6 +21,8 @@ #include "MipsSelectionDAGInfo.h" #include "MipsSubtarget.h" #include "llvm/ADT/OwningPtr.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/IR/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" @@ -35,6 +37,12 @@ class MipsTargetMachine : public LLVMTargetMachine { OwningPtr<const MipsInstrInfo> InstrInfo; OwningPtr<const MipsFrameLowering> FrameLowering; OwningPtr<const MipsTargetLowering> TLInfo; + OwningPtr<const MipsInstrInfo> InstrInfo16; + OwningPtr<const MipsFrameLowering> FrameLowering16; + OwningPtr<const MipsTargetLowering> TLInfo16; + OwningPtr<const MipsInstrInfo> InstrInfoSE; + OwningPtr<const MipsFrameLowering> FrameLoweringSE; + OwningPtr<const MipsTargetLowering> TLInfoSE; MipsSelectionDAGInfo TSInfo; MipsJITInfo JITInfo; @@ -47,6 +55,8 @@ public: virtual ~MipsTargetMachine() {} + virtual void addAnalysisPasses(PassManagerBase &PM); + virtual const MipsInstrInfo *getInstrInfo() const { return InstrInfo.get(); } virtual const TargetFrameLowering *getFrameLowering() const @@ -73,6 +83,13 @@ public: // Pass Pipeline Configuration virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE); + + // Set helper classes + void setHelperClassesMips16(); + + void setHelperClassesMipsSE(); + + }; /// MipsebTargetMachine - Mips32/64 big endian target machine. 
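The Mips changes above move all HI/LO state into untyped accumulator values (MipsISD::Mult/Multu feeding InsertLOHI/ExtractLOHI) and let the ADDE/SUBE combines fold a full 64-bit accumulate into MAdd/MSub nodes. As a rough, self-contained sketch of the arithmetic those combines look for (the code below is illustrative and not part of the patch):

  #include <cstdint>
  #include <cstdio>

  // A 64-bit accumulator updated with a full 32x32 product. On MIPS32 the body
  // of madd() is what the ADDC/ADDE combine is intended to turn into a single
  // madd instruction (msub() likewise into msub); without the combine it
  // selects to mult + mflo/mfhi followed by the expanded 64-bit add or subtract.
  static int64_t madd(int64_t acc, int32_t a, int32_t b) {
    return acc + (int64_t)a * (int64_t)b;
  }

  static int64_t msub(int64_t acc, int32_t a, int32_t b) {
    return acc - (int64_t)a * (int64_t)b;
  }

  int main() {
    // 70000 * 70000 = 4,900,000,000 overflows 32 bits, so the high half of the
    // accumulator is exercised.
    std::printf("%lld\n", (long long)madd(1000, 70000, 70000));  // 4900001000
    std::printf("%lld\n", (long long)msub(1000, 70000, 70000));  // -4899999000
    return 0;
  }

The same accumulator modelling is why the spill/reload pseudos in MipsSEFrameLowering exist: once MAdd/MSub values live in ACRegs/ACRegsDSP (or ACRegs128 on mips64), the register allocator may have to spill a 64- or 128-bit accumulator, which LOAD_AC*/STORE_AC* expand into the two-word sequences shown earlier.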
diff --git a/lib/Target/NVPTX/CMakeLists.txt b/lib/Target/NVPTX/CMakeLists.txt index 47baef6..7da2fed 100644 --- a/lib/Target/NVPTX/CMakeLists.txt +++ b/lib/Target/NVPTX/CMakeLists.txt @@ -22,6 +22,7 @@ set(NVPTXCodeGen_sources NVPTXAllocaHoisting.cpp NVPTXAsmPrinter.cpp NVPTXUtilities.cpp + NVVMReflect.cpp ) add_llvm_target(NVPTXCodeGen ${NVPTXCodeGen_sources}) diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h b/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h index 4545838..b3e8b5d 100644 --- a/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h +++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h @@ -52,25 +52,24 @@ enum PropertyAnnotation { }; const unsigned AnnotationNameLen = 8; // length of each annotation name -const char -PropertyAnnotationNames[PROPERTY_LAST + 1][AnnotationNameLen + 1] = { - "maxntidx", // PROPERTY_MAXNTID_X - "maxntidy", // PROPERTY_MAXNTID_Y - "maxntidz", // PROPERTY_MAXNTID_Z - "reqntidx", // PROPERTY_REQNTID_X - "reqntidy", // PROPERTY_REQNTID_Y - "reqntidz", // PROPERTY_REQNTID_Z - "minctasm", // PROPERTY_MINNCTAPERSM - "texture", // PROPERTY_ISTEXTURE - "surface", // PROPERTY_ISSURFACE - "sampler", // PROPERTY_ISSAMPLER - "rdoimage", // PROPERTY_ISREADONLY_IMAGE_PARAM - "wroimage", // PROPERTY_ISWRITEONLY_IMAGE_PARAM - "kernel", // PROPERTY_ISKERNEL_FUNCTION - "align", // PROPERTY_ALIGN +const char PropertyAnnotationNames[PROPERTY_LAST + 1][AnnotationNameLen + 1] = { + "maxntidx", // PROPERTY_MAXNTID_X + "maxntidy", // PROPERTY_MAXNTID_Y + "maxntidz", // PROPERTY_MAXNTID_Z + "reqntidx", // PROPERTY_REQNTID_X + "reqntidy", // PROPERTY_REQNTID_Y + "reqntidz", // PROPERTY_REQNTID_Z + "minctasm", // PROPERTY_MINNCTAPERSM + "texture", // PROPERTY_ISTEXTURE + "surface", // PROPERTY_ISSURFACE + "sampler", // PROPERTY_ISSAMPLER + "rdoimage", // PROPERTY_ISREADONLY_IMAGE_PARAM + "wroimage", // PROPERTY_ISWRITEONLY_IMAGE_PARAM + "kernel", // PROPERTY_ISKERNEL_FUNCTION + "align", // PROPERTY_ALIGN - // last property - "proplast", // PROPERTY_LAST + // last property + "proplast", // PROPERTY_LAST }; // name of named metadata used for global annotations @@ -80,9 +79,8 @@ PropertyAnnotationNames[PROPERTY_LAST + 1][AnnotationNameLen + 1] = { // compiling those .cpp files, hence __attribute__((unused)). 
__attribute__((unused)) #endif -static const char* NamedMDForAnnotations = "nvvm.annotations"; + static const char *NamedMDForAnnotations = "nvvm.annotations"; } - #endif diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp index 6191819..459cd96 100644 --- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp +++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp @@ -23,10 +23,9 @@ bool CompileForDebugging; // compile for debugging static cl::opt<bool, true> Debug("debug-compile", cl::desc("Compile for debugging"), cl::Hidden, - cl::location(CompileForDebugging), - cl::init(false)); + cl::location(CompileForDebugging), cl::init(false)); -void NVPTXMCAsmInfo::anchor() { } +void NVPTXMCAsmInfo::anchor() {} NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Target &T, const StringRef &TT) { Triple TheTriple(TT); @@ -55,7 +54,7 @@ NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Target &T, const StringRef &TT) { Data32bitsDirective = " .b32 "; Data64bitsDirective = " .b64 "; PrivateGlobalPrefix = ""; - ZeroDirective = " .b8"; + ZeroDirective = " .b8"; AsciiDirective = " .b8"; AscizDirective = " .b8"; diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp index 44aa01c..ccd2970 100644 --- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp +++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp @@ -28,7 +28,6 @@ #define GET_REGINFO_MC_DESC #include "NVPTXGenRegisterInfo.inc" - using namespace llvm; static MCInstrInfo *createNVPTXMCInstrInfo() { @@ -44,22 +43,20 @@ static MCRegisterInfo *createNVPTXMCRegisterInfo(StringRef TT) { return X; } -static MCSubtargetInfo *createNVPTXMCSubtargetInfo(StringRef TT, StringRef CPU, - StringRef FS) { +static MCSubtargetInfo * +createNVPTXMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) { MCSubtargetInfo *X = new MCSubtargetInfo(); InitNVPTXMCSubtargetInfo(X, TT, CPU, FS); return X; } -static MCCodeGenInfo *createNVPTXMCCodeGenInfo(StringRef TT, Reloc::Model RM, - CodeModel::Model CM, - CodeGenOpt::Level OL) { +static MCCodeGenInfo *createNVPTXMCCodeGenInfo( + StringRef TT, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) { MCCodeGenInfo *X = new MCCodeGenInfo(); X->InitMCCodeGenInfo(RM, CM, OL); return X; } - // Force static initialization. extern "C" void LLVMInitializeNVPTXTargetMC() { // Register the MC asm info. 
diff --git a/lib/Target/NVPTX/ManagedStringPool.h b/lib/Target/NVPTX/ManagedStringPool.h index b568488..d6c79b5 100644 --- a/lib/Target/NVPTX/ManagedStringPool.h +++ b/lib/Target/NVPTX/ManagedStringPool.h @@ -12,7 +12,6 @@ // //===----------------------------------------------------------------------===// - #ifndef LLVM_SUPPORT_MANAGED_STRING_H #define LLVM_SUPPORT_MANAGED_STRING_H diff --git a/lib/Target/NVPTX/NVPTX.h b/lib/Target/NVPTX/NVPTX.h index b46ea88..6a53a44 100644 --- a/lib/Target/NVPTX/NVPTX.h +++ b/lib/Target/NVPTX/NVPTX.h @@ -41,18 +41,24 @@ enum CondCodes { inline static const char *NVPTXCondCodeToString(NVPTXCC::CondCodes CC) { switch (CC) { - case NVPTXCC::NE: return "ne"; - case NVPTXCC::EQ: return "eq"; - case NVPTXCC::LT: return "lt"; - case NVPTXCC::LE: return "le"; - case NVPTXCC::GT: return "gt"; - case NVPTXCC::GE: return "ge"; + case NVPTXCC::NE: + return "ne"; + case NVPTXCC::EQ: + return "eq"; + case NVPTXCC::LT: + return "lt"; + case NVPTXCC::LE: + return "le"; + case NVPTXCC::GT: + return "gt"; + case NVPTXCC::GE: + return "ge"; } llvm_unreachable("Unknown condition code"); } -FunctionPass *createNVPTXISelDag(NVPTXTargetMachine &TM, - llvm::CodeGenOpt::Level OptLevel); +FunctionPass * +createNVPTXISelDag(NVPTXTargetMachine &TM, llvm::CodeGenOpt::Level OptLevel); FunctionPass *createLowerStructArgsPass(NVPTXTargetMachine &); FunctionPass *createNVPTXReMatPass(NVPTXTargetMachine &); FunctionPass *createNVPTXReMatBlockPass(NVPTXTargetMachine &); @@ -62,8 +68,7 @@ bool isImageOrSamplerVal(const Value *, const Module *); extern Target TheNVPTXTarget32; extern Target TheNVPTXTarget64; -namespace NVPTX -{ +namespace NVPTX { enum DrvInterface { NVCL, CUDA, @@ -102,7 +107,7 @@ enum LoadStore { }; namespace PTXLdStInstCode { -enum AddressSpace{ +enum AddressSpace { GENERIC = 0, GLOBAL = 1, CONSTANT = 2, diff --git a/lib/Target/NVPTX/NVPTX.td b/lib/Target/NVPTX/NVPTX.td index 7aee359..d78b4e8 100644 --- a/lib/Target/NVPTX/NVPTX.td +++ b/lib/Target/NVPTX/NVPTX.td @@ -26,14 +26,6 @@ include "NVPTXInstrInfo.td" //===----------------------------------------------------------------------===// // SM Versions -def SM10 : SubtargetFeature<"sm_10", "SmVersion", "10", - "Target SM 1.0">; -def SM11 : SubtargetFeature<"sm_11", "SmVersion", "11", - "Target SM 1.1">; -def SM12 : SubtargetFeature<"sm_12", "SmVersion", "12", - "Target SM 1.2">; -def SM13 : SubtargetFeature<"sm_13", "SmVersion", "13", - "Target SM 1.3">; def SM20 : SubtargetFeature<"sm_20", "SmVersion", "20", "Target SM 2.0">; def SM21 : SubtargetFeature<"sm_21", "SmVersion", "21", @@ -56,10 +48,6 @@ def PTX31 : SubtargetFeature<"ptx31", "PTXVersion", "31", class Proc<string Name, list<SubtargetFeature> Features> : Processor<Name, NoItineraries, Features>; -def : Proc<"sm_10", [SM10]>; -def : Proc<"sm_11", [SM11]>; -def : Proc<"sm_12", [SM12]>; -def : Proc<"sm_13", [SM13]>; def : Proc<"sm_20", [SM20]>; def : Proc<"sm_21", [SM21]>; def : Proc<"sm_30", [SM30]>; diff --git a/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp b/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp index 60f52a4..0f792ec 100644 --- a/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp +++ b/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp @@ -19,9 +19,9 @@ namespace llvm { bool NVPTXAllocaHoisting::runOnFunction(Function &function) { - bool functionModified = false; - Function::iterator I = function.begin(); - TerminatorInst *firstTerminatorInst = (I++)->getTerminator(); + bool functionModified = false; + Function::iterator I = function.begin(); + TerminatorInst *firstTerminatorInst = 
(I++)->getTerminator(); for (Function::iterator E = function.end(); I != E; ++I) { for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) { @@ -37,12 +37,10 @@ bool NVPTXAllocaHoisting::runOnFunction(Function &function) { } char NVPTXAllocaHoisting::ID = 1; -RegisterPass<NVPTXAllocaHoisting> X("alloca-hoisting", - "Hoisting alloca instructions in non-entry " - "blocks to the entry block"); +RegisterPass<NVPTXAllocaHoisting> +X("alloca-hoisting", "Hoisting alloca instructions in non-entry " + "blocks to the entry block"); -FunctionPass *createAllocaHoisting() { - return new NVPTXAllocaHoisting(); -} +FunctionPass *createAllocaHoisting() { return new NVPTXAllocaHoisting(); } } // end namespace llvm diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index 0115e1f..ce5d78a 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -47,7 +47,6 @@ #include <sstream> using namespace llvm; - #include "NVPTXGenAsmWriter.inc" bool RegAllocNilUsed = true; @@ -59,21 +58,17 @@ EmitLineNumbers("nvptx-emit-line-numbers", cl::desc("NVPTX Specific: Emit Line numbers even without -G"), cl::init(true)); -namespace llvm { -bool InterleaveSrcInPtx = false; -} - -static cl::opt<bool, true>InterleaveSrc("nvptx-emit-src", - cl::ZeroOrMore, - cl::desc("NVPTX Specific: Emit source line in ptx file"), - cl::location(llvm::InterleaveSrcInPtx)); +namespace llvm { bool InterleaveSrcInPtx = false; } +static cl::opt<bool, true> +InterleaveSrc("nvptx-emit-src", cl::ZeroOrMore, + cl::desc("NVPTX Specific: Emit source line in ptx file"), + cl::location(llvm::InterleaveSrcInPtx)); namespace { /// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V /// depends. -void DiscoverDependentGlobals(Value *V, - DenseSet<GlobalVariable*> &Globals) { +void DiscoverDependentGlobals(Value *V, DenseSet<GlobalVariable *> &Globals) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) Globals.insert(GV); else { @@ -88,12 +83,12 @@ void DiscoverDependentGlobals(Value *V, /// VisitGlobalVariableForEmission - Add \p GV to the list of GlobalVariable /// instances to be emitted, but only after any dependents have been added /// first. -void VisitGlobalVariableForEmission(GlobalVariable *GV, - SmallVectorImpl<GlobalVariable*> &Order, - DenseSet<GlobalVariable*> &Visited, - DenseSet<GlobalVariable*> &Visiting) { +void VisitGlobalVariableForEmission( + GlobalVariable *GV, SmallVectorImpl<GlobalVariable *> &Order, + DenseSet<GlobalVariable *> &Visited, DenseSet<GlobalVariable *> &Visiting) { // Have we already visited this one? - if (Visited.count(GV)) return; + if (Visited.count(GV)) + return; // Do we have a circular dependency? 
if (Visiting.count(GV)) @@ -103,12 +98,13 @@ void VisitGlobalVariableForEmission(GlobalVariable *GV, Visiting.insert(GV); // Make sure we visit all dependents first - DenseSet<GlobalVariable*> Others; + DenseSet<GlobalVariable *> Others; for (unsigned i = 0, e = GV->getNumOperands(); i != e; ++i) DiscoverDependentGlobals(GV->getOperand(i), Others); - - for (DenseSet<GlobalVariable*>::iterator I = Others.begin(), - E = Others.end(); I != E; ++I) + + for (DenseSet<GlobalVariable *>::iterator I = Others.begin(), + E = Others.end(); + I != E; ++I) VisitGlobalVariableForEmission(*I, Order, Visited, Visiting); // Now we can visit ourself @@ -142,25 +138,23 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { if (CE == 0) llvm_unreachable("Unknown constant value to lower!"); - switch (CE->getOpcode()) { default: // If the code isn't optimized, there may be outstanding folding // opportunities. Attempt to fold the expression using DataLayout as a // last resort before giving up. - if (Constant *C = - ConstantFoldConstantExpression(CE, AP.TM.getDataLayout())) + if (Constant *C = ConstantFoldConstantExpression(CE, AP.TM.getDataLayout())) if (C != CE) return LowerConstant(C, AP); // Otherwise report the problem to the user. { - std::string S; - raw_string_ostream OS(S); - OS << "Unsupported expression in static initializer: "; - WriteAsOperand(OS, CE, /*PrintType=*/false, - !AP.MF ? 0 : AP.MF->getFunction()->getParent()); - report_fatal_error(OS.str()); + std::string S; + raw_string_ostream OS(S); + OS << "Unsupported expression in static initializer: "; + WriteAsOperand(OS, CE, /*PrintType=*/ false, + !AP.MF ? 0 : AP.MF->getFunction()->getParent()); + report_fatal_error(OS.str()); } case Instruction::GetElementPtr: { const DataLayout &TD = *AP.TM.getDataLayout(); @@ -182,7 +176,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { // expression properly. This is important for differences between // blockaddress labels. Since the two labels are in the same function, it // is reasonable to treat their delta as a 32-bit value. - // FALL THROUGH. + // FALL THROUGH. case Instruction::BitCast: return LowerConstant(CE->getOperand(0), AP); @@ -192,7 +186,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { // integer type. This promotes constant folding and simplifies this code. Constant *Op = CE->getOperand(0); Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()), - false/*ZExt*/); + false /*ZExt*/); return LowerConstant(Op, AP); } @@ -214,11 +208,12 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { // the high bits so we are sure to get a proper truncation if the input is // a constant expr. unsigned InBits = TD.getTypeAllocSizeInBits(Op->getType()); - const MCExpr *MaskExpr = MCConstantExpr::Create(~0ULL >> (64-InBits), Ctx); + const MCExpr *MaskExpr = + MCConstantExpr::Create(~0ULL >> (64 - InBits), Ctx); return MCBinaryExpr::CreateAnd(OpExpr, MaskExpr, Ctx); } - // The MC library also has a right-shift operator, but it isn't consistently + // The MC library also has a right-shift operator, but it isn't consistently // signed or unsigned between different targets. 
case Instruction::Add: case Instruction::Sub: @@ -232,24 +227,32 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP); const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP); switch (CE->getOpcode()) { - default: llvm_unreachable("Unknown binary operator constant cast expr"); - case Instruction::Add: return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx); - case Instruction::Sub: return MCBinaryExpr::CreateSub(LHS, RHS, Ctx); - case Instruction::Mul: return MCBinaryExpr::CreateMul(LHS, RHS, Ctx); - case Instruction::SDiv: return MCBinaryExpr::CreateDiv(LHS, RHS, Ctx); - case Instruction::SRem: return MCBinaryExpr::CreateMod(LHS, RHS, Ctx); - case Instruction::Shl: return MCBinaryExpr::CreateShl(LHS, RHS, Ctx); - case Instruction::And: return MCBinaryExpr::CreateAnd(LHS, RHS, Ctx); - case Instruction::Or: return MCBinaryExpr::CreateOr (LHS, RHS, Ctx); - case Instruction::Xor: return MCBinaryExpr::CreateXor(LHS, RHS, Ctx); + default: + llvm_unreachable("Unknown binary operator constant cast expr"); + case Instruction::Add: + return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx); + case Instruction::Sub: + return MCBinaryExpr::CreateSub(LHS, RHS, Ctx); + case Instruction::Mul: + return MCBinaryExpr::CreateMul(LHS, RHS, Ctx); + case Instruction::SDiv: + return MCBinaryExpr::CreateDiv(LHS, RHS, Ctx); + case Instruction::SRem: + return MCBinaryExpr::CreateMod(LHS, RHS, Ctx); + case Instruction::Shl: + return MCBinaryExpr::CreateShl(LHS, RHS, Ctx); + case Instruction::And: + return MCBinaryExpr::CreateAnd(LHS, RHS, Ctx); + case Instruction::Or: + return MCBinaryExpr::CreateOr(LHS, RHS, Ctx); + case Instruction::Xor: + return MCBinaryExpr::CreateXor(LHS, RHS, Ctx); } } } } - -void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) -{ +void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) { if (!EmitLineNumbers) return; if (ignoreLoc(MI)) @@ -268,7 +271,6 @@ void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) if (curLoc.isUnknown()) return; - const MachineFunction *MF = MI.getParent()->getParent(); //const TargetMachine &TM = MF->getTarget(); @@ -289,14 +291,13 @@ void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) if (filenameMap.find(fileName.str()) == filenameMap.end()) return; - // Emit the line from the source file. 
if (llvm::InterleaveSrcInPtx) this->emitSrcInText(fileName.str(), curLoc.getLine()); std::stringstream temp; - temp << "\t.loc " << filenameMap[fileName.str()] - << " " << curLoc.getLine() << " " << curLoc.getCol(); + temp << "\t.loc " << filenameMap[fileName.str()] << " " << curLoc.getLine() + << " " << curLoc.getCol(); OutStreamer.EmitRawText(Twine(temp.str().c_str())); } @@ -309,9 +310,7 @@ void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) { OutStreamer.EmitRawText(OS.str()); } -void NVPTXAsmPrinter::printReturnValStr(const Function *F, - raw_ostream &O) -{ +void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) { const DataLayout *TD = TM.getDataLayout(); const TargetLowering *TLI = TM.getTargetLowering(); @@ -329,53 +328,49 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, unsigned size = 0; if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) { size = ITy->getBitWidth(); - if (size < 32) size = 32; + if (size < 32) + size = 32; } else { - assert(Ty->isFloatingPointTy() && - "Floating point type expected here"); + assert(Ty->isFloatingPointTy() && "Floating point type expected here"); size = Ty->getPrimitiveSizeInBits(); } O << ".param .b" << size << " func_retval0"; - } - else if (isa<PointerType>(Ty)) { + } else if (isa<PointerType>(Ty)) { O << ".param .b" << TLI->getPointerTy().getSizeInBits() - << " func_retval0"; + << " func_retval0"; } else { - if ((Ty->getTypeID() == Type::StructTyID) || - isa<VectorType>(Ty)) { + if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) { SmallVector<EVT, 16> vtparts; ComputeValueVTs(*TLI, Ty, vtparts); unsigned totalsz = 0; - for (unsigned i=0,e=vtparts.size(); i!=e; ++i) { + for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { unsigned elems = 1; EVT elemtype = vtparts[i]; if (vtparts[i].isVector()) { elems = vtparts[i].getVectorNumElements(); elemtype = vtparts[i].getVectorElementType(); } - for (unsigned j=0, je=elems; j!=je; ++j) { + for (unsigned j = 0, je = elems; j != je; ++j) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 8)) sz = 8; - totalsz += sz/8; + if (elemtype.isInteger() && (sz < 8)) + sz = 8; + totalsz += sz / 8; } } unsigned retAlignment = 0; if (!llvm::getAlign(*F, 0, retAlignment)) retAlignment = TD->getABITypeAlignment(Ty); - O << ".param .align " - << retAlignment - << " .b8 func_retval0[" - << totalsz << "]"; + O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz + << "]"; } else - assert(false && - "Unknown return type"); + assert(false && "Unknown return type"); } } else { SmallVector<EVT, 16> vtparts; ComputeValueVTs(*TLI, Ty, vtparts); unsigned idx = 0; - for (unsigned i=0,e=vtparts.size(); i!=e; ++i) { + for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { unsigned elems = 1; EVT elemtype = vtparts[i]; if (vtparts[i].isVector()) { @@ -383,14 +378,16 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, elemtype = vtparts[i].getVectorElementType(); } - for (unsigned j=0, je=elems; j!=je; ++j) { + for (unsigned j = 0, je = elems; j != je; ++j) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) sz = 32; + if (elemtype.isInteger() && (sz < 32)) + sz = 32; O << ".reg .b" << sz << " func_retval" << idx; - if (j<je-1) O << ", "; + if (j < je - 1) + O << ", "; ++idx; } - if (i < e-1) + if (i < e - 1) O << ", "; } } @@ -411,7 +408,7 @@ void NVPTXAsmPrinter::EmitFunctionEntryLabel() { // Set up MRI = &MF->getRegInfo(); F = MF->getFunction(); - emitLinkageDirective(F,O); + 
emitLinkageDirective(F, O); if (llvm::isKernelFunction(*F)) O << ".entry "; else { @@ -434,7 +431,7 @@ void NVPTXAsmPrinter::EmitFunctionEntryLabel() { void NVPTXAsmPrinter::EmitFunctionBodyStart() { const TargetRegisterInfo &TRI = *TM.getRegisterInfo(); unsigned numRegClasses = TRI.getNumRegClasses(); - VRidGlobal2LocalMap = new std::map<unsigned, unsigned>[numRegClasses+1]; + VRidGlobal2LocalMap = new std::map<unsigned, unsigned>[numRegClasses + 1]; OutStreamer.EmitRawText(StringRef("{\n")); setAndEmitFunctionVirtualRegisters(*MF); @@ -446,54 +443,63 @@ void NVPTXAsmPrinter::EmitFunctionBodyStart() { void NVPTXAsmPrinter::EmitFunctionBodyEnd() { OutStreamer.EmitRawText(StringRef("}\n")); - delete []VRidGlobal2LocalMap; + delete[] VRidGlobal2LocalMap; } - -void -NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function& F, - raw_ostream &O) const { +void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function &F, + raw_ostream &O) const { // If the NVVM IR has some of reqntid* specified, then output // the reqntid directive, and set the unspecified ones to 1. // If none of reqntid* is specified, don't output reqntid directive. unsigned reqntidx, reqntidy, reqntidz; bool specified = false; - if (llvm::getReqNTIDx(F, reqntidx) == false) reqntidx = 1; - else specified = true; - if (llvm::getReqNTIDy(F, reqntidy) == false) reqntidy = 1; - else specified = true; - if (llvm::getReqNTIDz(F, reqntidz) == false) reqntidz = 1; - else specified = true; + if (llvm::getReqNTIDx(F, reqntidx) == false) + reqntidx = 1; + else + specified = true; + if (llvm::getReqNTIDy(F, reqntidy) == false) + reqntidy = 1; + else + specified = true; + if (llvm::getReqNTIDz(F, reqntidz) == false) + reqntidz = 1; + else + specified = true; if (specified) - O << ".reqntid " << reqntidx << ", " - << reqntidy << ", " << reqntidz << "\n"; + O << ".reqntid " << reqntidx << ", " << reqntidy << ", " << reqntidz + << "\n"; // If the NVVM IR has some of maxntid* specified, then output // the maxntid directive, and set the unspecified ones to 1. // If none of maxntid* is specified, don't output maxntid directive. 
unsigned maxntidx, maxntidy, maxntidz; specified = false; - if (llvm::getMaxNTIDx(F, maxntidx) == false) maxntidx = 1; - else specified = true; - if (llvm::getMaxNTIDy(F, maxntidy) == false) maxntidy = 1; - else specified = true; - if (llvm::getMaxNTIDz(F, maxntidz) == false) maxntidz = 1; - else specified = true; + if (llvm::getMaxNTIDx(F, maxntidx) == false) + maxntidx = 1; + else + specified = true; + if (llvm::getMaxNTIDy(F, maxntidy) == false) + maxntidy = 1; + else + specified = true; + if (llvm::getMaxNTIDz(F, maxntidz) == false) + maxntidz = 1; + else + specified = true; if (specified) - O << ".maxntid " << maxntidx << ", " - << maxntidy << ", " << maxntidz << "\n"; + O << ".maxntid " << maxntidx << ", " << maxntidy << ", " << maxntidz + << "\n"; unsigned mincta; if (llvm::getMinCTASm(F, mincta)) O << ".minnctapersm " << mincta << "\n"; } -void -NVPTXAsmPrinter::getVirtualRegisterName(unsigned vr, bool isVec, - raw_ostream &O) { - const TargetRegisterClass * RC = MRI->getRegClass(vr); +void NVPTXAsmPrinter::getVirtualRegisterName(unsigned vr, bool isVec, + raw_ostream &O) { + const TargetRegisterClass *RC = MRI->getRegClass(vr); unsigned id = RC->getID(); std::map<unsigned, unsigned> ®map = VRidGlobal2LocalMap[id]; @@ -506,44 +512,38 @@ NVPTXAsmPrinter::getVirtualRegisterName(unsigned vr, bool isVec, report_fatal_error("Bad register!"); } -void -NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr, bool isVec, - raw_ostream &O) { +void NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr, bool isVec, + raw_ostream &O) { getVirtualRegisterName(vr, isVec, O); } -void NVPTXAsmPrinter::printVecModifiedImmediate(const MachineOperand &MO, - const char *Modifier, - raw_ostream &O) { - static const char vecelem[] = {'0', '1', '2', '3', '0', '1', '2', '3'}; - int Imm = (int)MO.getImm(); - if(0 == strcmp(Modifier, "vecelem")) +void NVPTXAsmPrinter::printVecModifiedImmediate( + const MachineOperand &MO, const char *Modifier, raw_ostream &O) { + static const char vecelem[] = { '0', '1', '2', '3', '0', '1', '2', '3' }; + int Imm = (int) MO.getImm(); + if (0 == strcmp(Modifier, "vecelem")) O << "_" << vecelem[Imm]; - else if(0 == strcmp(Modifier, "vecv4comm1")) { - if((Imm < 0) || (Imm > 3)) + else if (0 == strcmp(Modifier, "vecv4comm1")) { + if ((Imm < 0) || (Imm > 3)) O << "//"; - } - else if(0 == strcmp(Modifier, "vecv4comm2")) { - if((Imm < 4) || (Imm > 7)) + } else if (0 == strcmp(Modifier, "vecv4comm2")) { + if ((Imm < 4) || (Imm > 7)) O << "//"; - } - else if(0 == strcmp(Modifier, "vecv4pos")) { - if(Imm < 0) Imm = 0; - O << "_" << vecelem[Imm%4]; - } - else if(0 == strcmp(Modifier, "vecv2comm1")) { - if((Imm < 0) || (Imm > 1)) + } else if (0 == strcmp(Modifier, "vecv4pos")) { + if (Imm < 0) + Imm = 0; + O << "_" << vecelem[Imm % 4]; + } else if (0 == strcmp(Modifier, "vecv2comm1")) { + if ((Imm < 0) || (Imm > 1)) O << "//"; - } - else if(0 == strcmp(Modifier, "vecv2comm2")) { - if((Imm < 2) || (Imm > 3)) + } else if (0 == strcmp(Modifier, "vecv2comm2")) { + if ((Imm < 2) || (Imm > 3)) O << "//"; - } - else if(0 == strcmp(Modifier, "vecv2pos")) { - if(Imm < 0) Imm = 0; - O << "_" << vecelem[Imm%2]; - } - else + } else if (0 == strcmp(Modifier, "vecv2pos")) { + if (Imm < 0) + Imm = 0; + O << "_" << vecelem[Imm % 2]; + } else llvm_unreachable("Unknown Modifier on immediate operand"); } @@ -565,7 +565,7 @@ void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum, emitVirtualRegister(MO.getReg(), true, O); else llvm_unreachable( - "Don't know how to handle the modifier on virtual 
register."); + "Don't know how to handle the modifier on virtual register."); } } return; @@ -576,7 +576,8 @@ void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum, else if (strstr(Modifier, "vec") == Modifier) printVecModifiedImmediate(MO, Modifier, O); else - llvm_unreachable("Don't know how to handle modifier on immediate operand"); + llvm_unreachable( + "Don't know how to handle modifier on immediate operand"); return; case MachineOperand::MO_FPImmediate: @@ -588,18 +589,16 @@ void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum, break; case MachineOperand::MO_ExternalSymbol: { - const char * symbname = MO.getSymbolName(); + const char *symbname = MO.getSymbolName(); if (strstr(symbname, ".PARAM") == symbname) { unsigned index; - sscanf(symbname+6, "%u[];", &index); + sscanf(symbname + 6, "%u[];", &index); printParamName(index, O); - } - else if (strstr(symbname, ".HLPPARAM") == symbname) { + } else if (strstr(symbname, ".HLPPARAM") == symbname) { unsigned index; - sscanf(symbname+9, "%u[];", &index); + sscanf(symbname + 9, "%u[];", &index); O << *CurrentFnSym << "_param_" << index << "_offset"; - } - else + } else O << symbname; break; } @@ -613,8 +612,8 @@ void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum, } } -void NVPTXAsmPrinter:: -printImplicitDef(const MachineInstr *MI, raw_ostream &O) const { +void NVPTXAsmPrinter::printImplicitDef(const MachineInstr *MI, + raw_ostream &O) const { #ifndef __OPTIMIZE__ O << "\t// Implicit def :"; //printOperand(MI, 0); @@ -628,32 +627,41 @@ void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum, if (Modifier && !strcmp(Modifier, "add")) { O << ", "; - printOperand(MI, opNum+1, O); + printOperand(MI, opNum + 1, O); } else { - if (MI->getOperand(opNum+1).isImm() && - MI->getOperand(opNum+1).getImm() == 0) + if (MI->getOperand(opNum + 1).isImm() && + MI->getOperand(opNum + 1).getImm() == 0) return; // don't print ',0' or '+0' O << "+"; - printOperand(MI, opNum+1, O); + printOperand(MI, opNum + 1, O); } } void NVPTXAsmPrinter::printLdStCode(const MachineInstr *MI, int opNum, - raw_ostream &O, const char *Modifier) -{ + raw_ostream &O, const char *Modifier) { if (Modifier) { const MachineOperand &MO = MI->getOperand(opNum); - int Imm = (int)MO.getImm(); + int Imm = (int) MO.getImm(); if (!strcmp(Modifier, "volatile")) { if (Imm) O << ".volatile"; } else if (!strcmp(Modifier, "addsp")) { switch (Imm) { - case NVPTX::PTXLdStInstCode::GLOBAL: O << ".global"; break; - case NVPTX::PTXLdStInstCode::SHARED: O << ".shared"; break; - case NVPTX::PTXLdStInstCode::LOCAL: O << ".local"; break; - case NVPTX::PTXLdStInstCode::PARAM: O << ".param"; break; - case NVPTX::PTXLdStInstCode::CONSTANT: O << ".const"; break; + case NVPTX::PTXLdStInstCode::GLOBAL: + O << ".global"; + break; + case NVPTX::PTXLdStInstCode::SHARED: + O << ".shared"; + break; + case NVPTX::PTXLdStInstCode::LOCAL: + O << ".local"; + break; + case NVPTX::PTXLdStInstCode::PARAM: + O << ".param"; + break; + case NVPTX::PTXLdStInstCode::CONSTANT: + O << ".const"; + break; case NVPTX::PTXLdStInstCode::GENERIC: if (!nvptxSubtarget.hasGenericLdSt()) O << ".global"; @@ -661,31 +669,27 @@ void NVPTXAsmPrinter::printLdStCode(const MachineInstr *MI, int opNum, default: llvm_unreachable("Wrong Address Space"); } - } - else if (!strcmp(Modifier, "sign")) { - if (Imm==NVPTX::PTXLdStInstCode::Signed) + } else if (!strcmp(Modifier, "sign")) { + if (Imm == NVPTX::PTXLdStInstCode::Signed) O << "s"; - else if (Imm==NVPTX::PTXLdStInstCode::Unsigned) 
+ else if (Imm == NVPTX::PTXLdStInstCode::Unsigned) O << "u"; else O << "f"; - } - else if (!strcmp(Modifier, "vec")) { - if (Imm==NVPTX::PTXLdStInstCode::V2) + } else if (!strcmp(Modifier, "vec")) { + if (Imm == NVPTX::PTXLdStInstCode::V2) O << ".v2"; - else if (Imm==NVPTX::PTXLdStInstCode::V4) + else if (Imm == NVPTX::PTXLdStInstCode::V4) O << ".v4"; - } - else + } else llvm_unreachable("Unknown Modifier"); - } - else + } else llvm_unreachable("Empty Modifier"); } -void NVPTXAsmPrinter::emitDeclaration (const Function *F, raw_ostream &O) { +void NVPTXAsmPrinter::emitDeclaration(const Function *F, raw_ostream &O) { - emitLinkageDirective(F,O); + emitLinkageDirective(F, O); if (llvm::isKernelFunction(*F)) O << ".entry "; else @@ -696,8 +700,7 @@ void NVPTXAsmPrinter::emitDeclaration (const Function *F, raw_ostream &O) { O << ";\n"; } -static bool usedInGlobalVarDef(const Constant *C) -{ +static bool usedInGlobalVarDef(const Constant *C) { if (!C) return false; @@ -707,8 +710,8 @@ static bool usedInGlobalVarDef(const Constant *C) return true; } - for (Value::const_use_iterator ui=C->use_begin(), ue=C->use_end(); - ui!=ue; ++ui) { + for (Value::const_use_iterator ui = C->use_begin(), ue = C->use_end(); + ui != ue; ++ui) { const Constant *C = dyn_cast<Constant>(*ui); if (usedInGlobalVarDef(C)) return true; @@ -716,8 +719,7 @@ static bool usedInGlobalVarDef(const Constant *C) return false; } -static bool usedInOneFunc(const User *U, Function const *&oneFunc) -{ +static bool usedInOneFunc(const User *U, Function const *&oneFunc) { if (const GlobalVariable *othergv = dyn_cast<GlobalVariable>(U)) { if (othergv->getName().str() == "llvm.used") return true; @@ -730,19 +732,17 @@ static bool usedInOneFunc(const User *U, Function const *&oneFunc) return false; oneFunc = curFunc; return true; - } - else + } else return false; } if (const MDNode *md = dyn_cast<MDNode>(U)) if (md->hasName() && ((md->getName().str() == "llvm.dbg.gv") || - (md->getName().str() == "llvm.dbg.sp"))) + (md->getName().str() == "llvm.dbg.sp"))) return true; - - for (User::const_use_iterator ui=U->use_begin(), ue=U->use_end(); - ui!=ue; ++ui) { + for (User::const_use_iterator ui = U->use_begin(), ue = U->use_end(); + ui != ue; ++ui) { if (usedInOneFunc(*ui, oneFunc) == false) return false; } @@ -776,16 +776,18 @@ static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) { static bool useFuncSeen(const Constant *C, llvm::DenseMap<const Function *, bool> &seenMap) { - for (Value::const_use_iterator ui=C->use_begin(), ue=C->use_end(); - ui!=ue; ++ui) { + for (Value::const_use_iterator ui = C->use_begin(), ue = C->use_end(); + ui != ue; ++ui) { if (const Constant *cu = dyn_cast<Constant>(*ui)) { if (useFuncSeen(cu, seenMap)) return true; } else if (const Instruction *I = dyn_cast<Instruction>(*ui)) { const BasicBlock *bb = I->getParent(); - if (!bb) continue; + if (!bb) + continue; const Function *caller = bb->getParent(); - if (!caller) continue; + if (!caller) + continue; if (seenMap.find(caller) != seenMap.end()) return true; } @@ -793,10 +795,9 @@ static bool useFuncSeen(const Constant *C, return false; } -void NVPTXAsmPrinter::emitDeclarations (Module &M, raw_ostream &O) { +void NVPTXAsmPrinter::emitDeclarations(Module &M, raw_ostream &O) { llvm::DenseMap<const Function *, bool> seenMap; - for (Module::const_iterator FI=M.begin(), FE=M.end(); - FI!=FE; ++FI) { + for (Module::const_iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) { const Function *F = FI; if (F->isDeclaration()) { @@ -808,8 +809,9 @@ 
void NVPTXAsmPrinter::emitDeclarations (Module &M, raw_ostream &O) { emitDeclaration(F, O); continue; } - for (Value::const_use_iterator iter=F->use_begin(), - iterEnd=F->use_end(); iter!=iterEnd; ++iter) { + for (Value::const_use_iterator iter = F->use_begin(), + iterEnd = F->use_end(); + iter != iterEnd; ++iter) { if (const Constant *C = dyn_cast<Constant>(*iter)) { if (usedInGlobalVarDef(C)) { // The use is in the initialization of a global variable @@ -828,12 +830,15 @@ void NVPTXAsmPrinter::emitDeclarations (Module &M, raw_ostream &O) { } } - if (!isa<Instruction>(*iter)) continue; + if (!isa<Instruction>(*iter)) + continue; const Instruction *instr = cast<Instruction>(*iter); const BasicBlock *bb = instr->getParent(); - if (!bb) continue; + if (!bb) + continue; const Function *caller = bb->getParent(); - if (!caller) continue; + if (!caller) + continue; // If a caller has already been seen, then the caller is // appearing in the module before the callee. so print out @@ -852,9 +857,10 @@ void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) { DebugInfoFinder DbgFinder; DbgFinder.processModule(M); - unsigned i=1; + unsigned i = 1; for (DebugInfoFinder::iterator I = DbgFinder.compile_unit_begin(), - E = DbgFinder.compile_unit_end(); I != E; ++I) { + E = DbgFinder.compile_unit_end(); + I != E; ++I) { DICompileUnit DIUnit(*I); StringRef Filename(DIUnit.getFilename()); StringRef Dirname(DIUnit.getDirectory()); @@ -871,7 +877,8 @@ void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) { } for (DebugInfoFinder::iterator I = DbgFinder.subprogram_begin(), - E = DbgFinder.subprogram_end(); I != E; ++I) { + E = DbgFinder.subprogram_end(); + I != E; ++I) { DISubprogram SP(*I); StringRef Filename(SP.getFilename()); StringRef Dirname(SP.getDirectory()); @@ -887,7 +894,7 @@ void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) { } } -bool NVPTXAsmPrinter::doInitialization (Module &M) { +bool NVPTXAsmPrinter::doInitialization(Module &M) { SmallString<128> Str1; raw_svector_ostream OS1(Str1); @@ -899,8 +906,8 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) { //bool Result = AsmPrinter::doInitialization(M); // Initialize TargetLoweringObjectFile. - const_cast<TargetLoweringObjectFile&>(getObjFileLowering()) - .Initialize(OutContext, TM); + const_cast<TargetLoweringObjectFile &>(getObjFileLowering()) + .Initialize(OutContext, TM); Mang = new Mangler(OutContext, *TM.getDataLayout()); @@ -908,11 +915,9 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) { emitHeader(M, OS1); OutStreamer.EmitRawText(OS1.str()); - // Already commented out //bool Result = AsmPrinter::doInitialization(M); - if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) recordAndEmitFilenames(M); @@ -926,16 +931,16 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) { // global variable in order, and ensure that we emit it *after* its dependent // globals. We use a little extra memory maintaining both a set and a list to // have fast searches while maintaining a strict ordering. 
- SmallVector<GlobalVariable*,8> Globals; - DenseSet<GlobalVariable*> GVVisited; - DenseSet<GlobalVariable*> GVVisiting; + SmallVector<GlobalVariable *, 8> Globals; + DenseSet<GlobalVariable *> GVVisited; + DenseSet<GlobalVariable *> GVVisiting; // Visit each global variable, in order - for (Module::global_iterator I = M.global_begin(), E = M.global_end(); - I != E; ++I) + for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; + ++I) VisitGlobalVariableForEmission(I, Globals, GVVisited, GVVisiting); - assert(GVVisited.size() == M.getGlobalList().size() && + assert(GVVisited.size() == M.getGlobalList().size() && "Missed a global variable"); assert(GVVisiting.size() == 0 && "Did not fully process a global variable"); @@ -946,10 +951,10 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) { OS2 << '\n'; OutStreamer.EmitRawText(OS2.str()); - return false; // success + return false; // success } -void NVPTXAsmPrinter::emitHeader (Module &M, raw_ostream &O) { +void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O) { O << "//\n"; O << "// Generated by LLVM NVPTX Back-End\n"; O << "//\n"; @@ -989,12 +994,12 @@ bool NVPTXAsmPrinter::doFinalization(Module &M) { Module::GlobalListType &global_list = M.getGlobalList(); int i, n = global_list.size(); - GlobalVariable **gv_array = new GlobalVariable* [n]; + GlobalVariable **gv_array = new GlobalVariable *[n]; // first, back-up GlobalVariable in gv_array i = 0; for (Module::global_iterator I = global_list.begin(), E = global_list.end(); - I != E; ++I) + I != E; ++I) gv_array[i++] = &*I; // second, empty global_list @@ -1005,13 +1010,12 @@ bool NVPTXAsmPrinter::doFinalization(Module &M) { bool ret = AsmPrinter::doFinalization(M); // now we restore global variables - for (i = 0; i < n; i ++) + for (i = 0; i < n; i++) global_list.insert(global_list.end(), gv_array[i]); delete[] gv_array; return ret; - //bool Result = AsmPrinter::doFinalization(M); // Instead of calling the parents doFinalization, we may // clone parents doFinalization and customize here. @@ -1031,8 +1035,8 @@ bool NVPTXAsmPrinter::doFinalization(Module &M) { // external without init -> .extern // appending -> not allowed, assert. 
-void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue* V, raw_ostream &O) -{ +void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue *V, + raw_ostream &O) { if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) { if (V->hasExternalLinkage()) { if (isa<GlobalVariable>(V)) { @@ -1059,8 +1063,7 @@ void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue* V, raw_ostream &O) } } - -void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O, +void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable *GVar, raw_ostream &O, bool processDemoted) { // Skip meta data @@ -1111,30 +1114,48 @@ void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O, if (Initializer) CI = dyn_cast<ConstantInt>(Initializer); if (CI) { - unsigned sample=CI->getZExtValue(); + unsigned sample = CI->getZExtValue(); O << " = { "; - for (int i =0, addr=((sample & __CLK_ADDRESS_MASK ) >> - __CLK_ADDRESS_BASE) ; i < 3 ; i++) { + for (int i = 0, + addr = ((sample & __CLK_ADDRESS_MASK) >> __CLK_ADDRESS_BASE); + i < 3; i++) { O << "addr_mode_" << i << " = "; switch (addr) { - case 0: O << "wrap"; break; - case 1: O << "clamp_to_border"; break; - case 2: O << "clamp_to_edge"; break; - case 3: O << "wrap"; break; - case 4: O << "mirror"; break; + case 0: + O << "wrap"; + break; + case 1: + O << "clamp_to_border"; + break; + case 2: + O << "clamp_to_edge"; + break; + case 3: + O << "wrap"; + break; + case 4: + O << "mirror"; + break; } - O <<", "; + O << ", "; } O << "filter_mode = "; - switch (( sample & __CLK_FILTER_MASK ) >> __CLK_FILTER_BASE ) { - case 0: O << "nearest"; break; - case 1: O << "linear"; break; - case 2: assert ( 0 && "Anisotropic filtering is not supported"); - default: O << "nearest"; break; + switch ((sample & __CLK_FILTER_MASK) >> __CLK_FILTER_BASE) { + case 0: + O << "nearest"; + break; + case 1: + O << "linear"; + break; + case 2: + assert(0 && "Anisotropic filtering is not supported"); + default: + O << "nearest"; + break; } - if (!(( sample &__CLK_NORMALIZED_MASK ) >> __CLK_NORMALIZED_BASE)) { + if (!((sample & __CLK_NORMALIZED_MASK) >> __CLK_NORMALIZED_BASE)) { O << ", force_unnormalized_coords = 1"; } O << " }"; @@ -1176,7 +1197,6 @@ void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O, else O << " .align " << GVar->getAlignment(); - if (ETy->isPrimitiveType() || ETy->isIntegerTy() || isa<PointerType>(ETy)) { O << " ."; O << getPTXFundamentalTypeStr(ETy, false); @@ -1186,17 +1206,17 @@ void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O, // Ptx allows variable initilization only for constant and global state // spaces. 
if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) || - (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) || - (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) - && GVar->hasInitializer()) { + (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) || + (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) && + GVar->hasInitializer()) { Constant *Initializer = GVar->getInitializer(); if (!Initializer->isNullValue()) { - O << " = " ; + O << " = "; printScalarConstant(Initializer, O); } } } else { - unsigned int ElementSize =0; + unsigned int ElementSize = 0; // Although PTX has direct support for struct type and array type and // LLVM IR is very similar to PTX, the LLVM CodeGen does not support for @@ -1210,54 +1230,49 @@ void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O, // Ptx allows variable initilization only for constant and // global state spaces. if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) || - (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) || - (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) - && GVar->hasInitializer()) { + (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) || + (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) && + GVar->hasInitializer()) { Constant *Initializer = GVar->getInitializer(); - if (!isa<UndefValue>(Initializer) && - !Initializer->isNullValue()) { + if (!isa<UndefValue>(Initializer) && !Initializer->isNullValue()) { AggBuffer aggBuffer(ElementSize, O, *this); bufferAggregateConstant(Initializer, &aggBuffer); if (aggBuffer.numSymbols) { if (nvptxSubtarget.is64Bit()) { - O << " .u64 " << *Mang->getSymbol(GVar) <<"[" ; - O << ElementSize/8; - } - else { - O << " .u32 " << *Mang->getSymbol(GVar) <<"[" ; - O << ElementSize/4; + O << " .u64 " << *Mang->getSymbol(GVar) << "["; + O << ElementSize / 8; + } else { + O << " .u32 " << *Mang->getSymbol(GVar) << "["; + O << ElementSize / 4; } O << "]"; - } - else { - O << " .b8 " << *Mang->getSymbol(GVar) <<"[" ; + } else { + O << " .b8 " << *Mang->getSymbol(GVar) << "["; O << ElementSize; O << "]"; } - O << " = {" ; + O << " = {"; aggBuffer.print(); O << "}"; - } - else { - O << " .b8 " << *Mang->getSymbol(GVar) ; + } else { + O << " .b8 " << *Mang->getSymbol(GVar); if (ElementSize) { - O <<"[" ; + O << "["; O << ElementSize; O << "]"; } } - } - else { + } else { O << " .b8 " << *Mang->getSymbol(GVar); if (ElementSize) { - O <<"[" ; + O << "["; O << ElementSize; O << "]"; } } break; default: - assert( 0 && "type not supported yet"); + assert(0 && "type not supported yet"); } } @@ -1270,7 +1285,7 @@ void NVPTXAsmPrinter::emitDemotedVars(const Function *f, raw_ostream &O) { std::vector<GlobalVariable *> &gvars = localDecls[f]; - for (unsigned i=0, e=gvars.size(); i!=e; ++i) { + for (unsigned i = 0, e = gvars.size(); i != e; ++i) { O << "\t// demoted variable\n\t"; printModuleLevelGV(gvars[i], O, true); } @@ -1280,24 +1295,24 @@ void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace, raw_ostream &O) const { switch (AddressSpace) { case llvm::ADDRESS_SPACE_LOCAL: - O << "local" ; + O << "local"; break; case llvm::ADDRESS_SPACE_GLOBAL: - O << "global" ; + O << "global"; break; case llvm::ADDRESS_SPACE_CONST: // This logic should be consistent with that in // getCodeAddrSpace() (NVPTXISelDATToDAT.cpp) if (nvptxSubtarget.hasGenericLdSt()) - O << "global" ; + O << "global"; else - O << "const" ; + O << "const"; break; case llvm::ADDRESS_SPACE_CONST_NOT_GEN: - O << "const" ; + O << "const"; break; 
case llvm::ADDRESS_SPACE_SHARED: - O << "shared" ; + O << "shared"; break; default: report_fatal_error("Bad address space found while emitting PTX"); @@ -1305,8 +1320,8 @@ void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace, } } -std::string NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, - bool useB4PTR) const { +std::string +NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const { switch (Ty->getTypeID()) { default: llvm_unreachable("unexpected type"); @@ -1330,17 +1345,20 @@ std::string NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, return "f64"; case Type::PointerTyID: if (nvptxSubtarget.is64Bit()) - if (useB4PTR) return "b64"; - else return "u64"; + if (useB4PTR) + return "b64"; + else + return "u64"; + else if (useB4PTR) + return "b32"; else - if (useB4PTR) return "b32"; - else return "u32"; + return "u32"; } llvm_unreachable("unexpected type"); return NULL; } -void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar, +void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar, raw_ostream &O) { const DataLayout *TD = TM.getDataLayout(); @@ -1364,7 +1382,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar, return; } - int64_t ElementSize =0; + int64_t ElementSize = 0; // Although PTX has direct support for struct type and array type and LLVM IR // is very similar to PTX, the LLVM CodeGen does not support for targets that @@ -1375,22 +1393,19 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar, case Type::ArrayTyID: case Type::VectorTyID: ElementSize = TD->getTypeStoreSize(ETy); - O << " .b8 " << *Mang->getSymbol(GVar) <<"[" ; + O << " .b8 " << *Mang->getSymbol(GVar) << "["; if (ElementSize) { - O << itostr(ElementSize) ; + O << itostr(ElementSize); } O << "]"; break; default: - assert( 0 && "type not supported yet"); + assert(0 && "type not supported yet"); } - return ; + return; } - -static unsigned int -getOpenCLAlignment(const DataLayout *TD, - Type *Ty) { +static unsigned int getOpenCLAlignment(const DataLayout *TD, Type *Ty) { if (Ty->isPrimitiveType() || Ty->isIntegerTy() || isa<PointerType>(Ty)) return TD->getPrefTypeAlignment(Ty); @@ -1404,9 +1419,9 @@ getOpenCLAlignment(const DataLayout *TD, unsigned int numE = VTy->getNumElements(); unsigned int alignE = TD->getPrefTypeAlignment(ETy); if (numE == 3) - return 4*alignE; + return 4 * alignE; else - return numE*alignE; + return numE * alignE; } const StructType *STy = dyn_cast<StructType>(Ty); @@ -1414,7 +1429,7 @@ getOpenCLAlignment(const DataLayout *TD, unsigned int alignStruct = 1; // Go through each element of the struct and find the // largest alignment. 
- for (unsigned i=0, e=STy->getNumElements(); i != e; i++) { + for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) { Type *ETy = STy->getElementType(i); unsigned int align = getOpenCLAlignment(TD, ETy); if (align > alignStruct) @@ -1458,7 +1473,7 @@ void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) { } for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, i++) { - if (i==paramIndex) { + if (i == paramIndex) { printParamName(I, paramIndex, O); return; } @@ -1466,8 +1481,7 @@ void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) { llvm_unreachable("paramIndex out of bound"); } -void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, - raw_ostream &O) { +void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) { const DataLayout *TD = TM.getDataLayout(); const AttributeSet &PAL = F->getAttributes(); const TargetLowering *TLI = TM.getTargetLowering(); @@ -1481,7 +1495,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, O << "(\n"; for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, paramIndex++) { - const Type *Ty = I->getType(); + Type *Ty = I->getType(); if (!first) O << ",\n"; @@ -1496,14 +1510,28 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, O << "\t.param .surfref " << *CurrentFnSym << "_param_" << paramIndex; else // Default image is read_only O << "\t.param .texref " << *CurrentFnSym << "_param_" << paramIndex; - } - else // Should be llvm::isSampler(*I) + } else // Should be llvm::isSampler(*I) O << "\t.param .samplerref " << *CurrentFnSym << "_param_" - << paramIndex; + << paramIndex; continue; } - if (PAL.hasAttribute(paramIndex+1, Attribute::ByVal) == false) { + if (PAL.hasAttribute(paramIndex + 1, Attribute::ByVal) == false) { + if (Ty->isVectorTy()) { + // Just print .param .b8 .align <a> .param[size]; + // <a> = PAL.getparamalignment + // size = typeallocsize of element type + unsigned align = PAL.getParamAlignment(paramIndex + 1); + if (align == 0) + align = TD->getABITypeAlignment(Ty); + + unsigned sz = TD->getTypeAllocSize(Ty); + O << "\t.param .align " << align << " .b8 "; + printParamName(I, paramIndex, O); + O << "[" << sz << "]"; + + continue; + } // Just a scalar const PointerType *PTy = dyn_cast<PointerType>(Ty); if (isKernelFunc) { @@ -1514,7 +1542,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, if (nvptxSubtarget.getDrvInterface() != NVPTX::CUDA) { Type *ETy = PTy->getElementType(); int addrSpace = PTy->getAddressSpace(); - switch(addrSpace) { + switch (addrSpace) { default: O << ".ptr "; break; @@ -1529,15 +1557,14 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, O << ".ptr .global "; break; } - O << ".align " << (int)getOpenCLAlignment(TD, ETy) << " "; + O << ".align " << (int) getOpenCLAlignment(TD, ETy) << " "; } printParamName(I, paramIndex, O); continue; } // non-pointer scalar to kernel func - O << "\t.param ." - << getPTXFundamentalTypeStr(Ty) << " "; + O << "\t.param ." 
<< getPTXFundamentalTypeStr(Ty) << " "; printParamName(I, paramIndex, O); continue; } @@ -1546,9 +1573,9 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, unsigned sz = 0; if (isa<IntegerType>(Ty)) { sz = cast<IntegerType>(Ty)->getBitWidth(); - if (sz < 32) sz = 32; - } - else if (isa<PointerType>(Ty)) + if (sz < 32) + sz = 32; + } else if (isa<PointerType>(Ty)) sz = thePointerTy.getSizeInBits(); else sz = Ty->getPrimitiveSizeInBits(); @@ -1562,21 +1589,19 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, // param has byVal attribute. So should be a pointer const PointerType *PTy = dyn_cast<PointerType>(Ty); - assert(PTy && - "Param with byval attribute should be a pointer type"); + assert(PTy && "Param with byval attribute should be a pointer type"); Type *ETy = PTy->getElementType(); if (isABI || isKernelFunc) { // Just print .param .b8 .align <a> .param[size]; // <a> = PAL.getparamalignment // size = typeallocsize of element type - unsigned align = PAL.getParamAlignment(paramIndex+1); + unsigned align = PAL.getParamAlignment(paramIndex + 1); if (align == 0) align = TD->getABITypeAlignment(ETy); unsigned sz = TD->getTypeAllocSize(ETy); - O << "\t.param .align " << align - << " .b8 "; + O << "\t.param .align " << align << " .b8 "; printParamName(I, paramIndex, O); O << "[" << sz << "]"; continue; @@ -1587,7 +1612,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, // each vector element. SmallVector<EVT, 16> vtparts; ComputeValueVTs(*TLI, ETy, vtparts); - for (unsigned i=0,e=vtparts.size(); i!=e; ++i) { + for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { unsigned elems = 1; EVT elemtype = vtparts[i]; if (vtparts[i].isVector()) { @@ -1595,15 +1620,17 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, elemtype = vtparts[i].getVectorElementType(); } - for (unsigned j=0,je=elems; j!=je; ++j) { + for (unsigned j = 0, je = elems; j != je; ++j) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) sz = 32; + if (elemtype.isInteger() && (sz < 32)) + sz = 32; O << "\t.reg .b" << sz << " "; printParamName(I, paramIndex, O); - if (j<je-1) O << ",\n"; + if (j < je - 1) + O << ",\n"; ++paramIndex; } - if (i<e-1) + if (i < e - 1) O << ",\n"; } --paramIndex; @@ -1620,9 +1647,8 @@ void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF, emitFunctionParamList(F, O); } - -void NVPTXAsmPrinter:: -setAndEmitFunctionVirtualRegisters(const MachineFunction &MF) { +void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters( + const MachineFunction &MF) { SmallString<128> Str; raw_svector_ostream O(Str); @@ -1635,14 +1661,12 @@ setAndEmitFunctionVirtualRegisters(const MachineFunction &MF) { const MachineFrameInfo *MFI = MF.getFrameInfo(); int NumBytes = (int) MFI->getStackSize(); if (NumBytes) { - O << "\t.local .align " << MFI->getMaxAlignment() << " .b8 \t" - << DEPOTNAME - << getFunctionNumber() << "[" << NumBytes << "];\n"; + O << "\t.local .align " << MFI->getMaxAlignment() << " .b8 \t" << DEPOTNAME + << getFunctionNumber() << "[" << NumBytes << "];\n"; if (nvptxSubtarget.is64Bit()) { O << "\t.reg .b64 \t%SP;\n"; O << "\t.reg .b64 \t%SPL;\n"; - } - else { + } else { O << "\t.reg .b32 \t%SP;\n"; O << "\t.reg .b32 \t%SPL;\n"; } @@ -1653,12 +1677,12 @@ setAndEmitFunctionVirtualRegisters(const MachineFunction &MF) { // register number and the per class virtual register number. // We use the per class virtual register number in the ptx output. 
unsigned int numVRs = MRI->getNumVirtRegs(); - for (unsigned i=0; i< numVRs; i++) { + for (unsigned i = 0; i < numVRs; i++) { unsigned int vr = TRI->index2VirtReg(i); const TargetRegisterClass *RC = MRI->getRegClass(vr); std::map<unsigned, unsigned> ®map = VRidGlobal2LocalMap[RC->getID()]; int n = regmap.size(); - regmap.insert(std::make_pair(vr, n+1)); + regmap.insert(std::make_pair(vr, n + 1)); } // Emit register declarations @@ -1702,23 +1726,20 @@ setAndEmitFunctionVirtualRegisters(const MachineFunction &MF) { OutStreamer.EmitRawText(O.str()); } - void NVPTXAsmPrinter::printFPConstant(const ConstantFP *Fp, raw_ostream &O) { - APFloat APF = APFloat(Fp->getValueAPF()); // make a copy + APFloat APF = APFloat(Fp->getValueAPF()); // make a copy bool ignored; unsigned int numHex; const char *lead; - if (Fp->getType()->getTypeID()==Type::FloatTyID) { + if (Fp->getType()->getTypeID() == Type::FloatTyID) { numHex = 8; lead = "0f"; - APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, - &ignored); + APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &ignored); } else if (Fp->getType()->getTypeID() == Type::DoubleTyID) { numHex = 16; lead = "0d"; - APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, - &ignored); + APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored); } else llvm_unreachable("unsupported fp type"); @@ -1760,7 +1781,6 @@ void NVPTXAsmPrinter::printScalarConstant(Constant *CPV, raw_ostream &O) { llvm_unreachable("Not scalar type found in printScalarConstant()"); } - void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, AggBuffer *aggBuffer) { @@ -1768,7 +1788,7 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, if (isa<UndefValue>(CPV) || CPV->isNullValue()) { int s = TD->getTypeAllocSize(CPV->getType()); - if (s<Bytes) + if (s < Bytes) s = Bytes; aggBuffer->addZeros(s); return; @@ -1779,28 +1799,26 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, case Type::IntegerTyID: { const Type *ETy = CPV->getType(); - if ( ETy == Type::getInt8Ty(CPV->getContext()) ){ + if (ETy == Type::getInt8Ty(CPV->getContext())) { unsigned char c = (unsigned char)(dyn_cast<ConstantInt>(CPV))->getZExtValue(); ptr = &c; aggBuffer->addBytes(ptr, 1, Bytes); - } else if ( ETy == Type::getInt16Ty(CPV->getContext()) ) { - short int16 = - (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue(); - ptr = (unsigned char*)&int16; + } else if (ETy == Type::getInt16Ty(CPV->getContext())) { + short int16 = (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue(); + ptr = (unsigned char *)&int16; aggBuffer->addBytes(ptr, 2, Bytes); - } else if ( ETy == Type::getInt32Ty(CPV->getContext()) ) { + } else if (ETy == Type::getInt32Ty(CPV->getContext())) { if (ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) { - int int32 =(int)(constInt->getZExtValue()); - ptr = (unsigned char*)&int32; + int int32 = (int)(constInt->getZExtValue()); + ptr = (unsigned char *)&int32; aggBuffer->addBytes(ptr, 4, Bytes); break; } else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) { - if (ConstantInt *constInt = - dyn_cast<ConstantInt>(ConstantFoldConstantExpression( - Cexpr, TD))) { - int int32 =(int)(constInt->getZExtValue()); - ptr = (unsigned char*)&int32; + if (ConstantInt *constInt = dyn_cast<ConstantInt>( + ConstantFoldConstantExpression(Cexpr, TD))) { + int int32 = (int)(constInt->getZExtValue()); + ptr = (unsigned char *)&int32; aggBuffer->addBytes(ptr, 4, Bytes); break; } @@ -1812,17 +1830,17 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int 
Bytes, } } llvm_unreachable("unsupported integer const type"); - } else if (ETy == Type::getInt64Ty(CPV->getContext()) ) { + } else if (ETy == Type::getInt64Ty(CPV->getContext())) { if (ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) { - long long int64 =(long long)(constInt->getZExtValue()); - ptr = (unsigned char*)&int64; + long long int64 = (long long)(constInt->getZExtValue()); + ptr = (unsigned char *)&int64; aggBuffer->addBytes(ptr, 8, Bytes); break; } else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) { if (ConstantInt *constInt = dyn_cast<ConstantInt>( - ConstantFoldConstantExpression(Cexpr, TD))) { - long long int64 =(long long)(constInt->getZExtValue()); - ptr = (unsigned char*)&int64; + ConstantFoldConstantExpression(Cexpr, TD))) { + long long int64 = (long long)(constInt->getZExtValue()); + ptr = (unsigned char *)&int64; aggBuffer->addBytes(ptr, 8, Bytes); break; } @@ -1841,17 +1859,16 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, case Type::FloatTyID: case Type::DoubleTyID: { ConstantFP *CFP = dyn_cast<ConstantFP>(CPV); - const Type* Ty = CFP->getType(); + const Type *Ty = CFP->getType(); if (Ty == Type::getFloatTy(CPV->getContext())) { - float float32 = (float)CFP->getValueAPF().convertToFloat(); - ptr = (unsigned char*)&float32; + float float32 = (float) CFP->getValueAPF().convertToFloat(); + ptr = (unsigned char *)&float32; aggBuffer->addBytes(ptr, 4, Bytes); } else if (Ty == Type::getDoubleTy(CPV->getContext())) { double float64 = CFP->getValueAPF().convertToDouble(); - ptr = (unsigned char*)&float64; + ptr = (unsigned char *)&float64; aggBuffer->addBytes(ptr, 8, Bytes); - } - else { + } else { llvm_unreachable("unsupported fp const type"); } break; @@ -1859,8 +1876,7 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, case Type::PointerTyID: { if (GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) { aggBuffer->addSymbol(GVar); - } - else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) { + } else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) { Value *v = Cexpr->stripPointerCasts(); aggBuffer->addSymbol(v); } @@ -1876,10 +1892,9 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, isa<ConstantStruct>(CPV)) { int ElementSize = TD->getTypeAllocSize(CPV->getType()); bufferAggregateConstant(CPV, aggBuffer); - if ( Bytes > ElementSize ) - aggBuffer->addZeros(Bytes-ElementSize); - } - else if (isa<ConstantAggregateZero>(CPV)) + if (Bytes > ElementSize) + aggBuffer->addZeros(Bytes - ElementSize); + } else if (isa<ConstantAggregateZero>(CPV)) aggBuffer->addZeros(Bytes); else llvm_unreachable("Unexpected Constant type"); @@ -1905,7 +1920,7 @@ void NVPTXAsmPrinter::bufferAggregateConstant(Constant *CPV, } if (const ConstantDataSequential *CDS = - dyn_cast<ConstantDataSequential>(CPV)) { + dyn_cast<ConstantDataSequential>(CPV)) { if (CDS->getNumElements()) for (unsigned i = 0; i < CDS->getNumElements(); ++i) bufferLEByte(cast<Constant>(CDS->getElementAsConstant(i)), 0, @@ -1913,20 +1928,18 @@ void NVPTXAsmPrinter::bufferAggregateConstant(Constant *CPV, return; } - if (isa<ConstantStruct>(CPV)) { if (CPV->getNumOperands()) { StructType *ST = cast<StructType>(CPV->getType()); for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i) { - if ( i == (e - 1)) + if (i == (e - 1)) Bytes = TD->getStructLayout(ST)->getElementOffset(0) + - TD->getTypeAllocSize(ST) - - TD->getStructLayout(ST)->getElementOffset(i); + TD->getTypeAllocSize(ST) - + TD->getStructLayout(ST)->getElementOffset(i); else - Bytes = 
TD->getStructLayout(ST)->getElementOffset(i+1) - - TD->getStructLayout(ST)->getElementOffset(i); - bufferLEByte(cast<Constant>(CPV->getOperand(i)), Bytes, - aggBuffer); + Bytes = TD->getStructLayout(ST)->getElementOffset(i + 1) - + TD->getStructLayout(ST)->getElementOffset(i); + bufferLEByte(cast<Constant>(CPV->getOperand(i)), Bytes, aggBuffer); } } return; @@ -1937,15 +1950,13 @@ void NVPTXAsmPrinter::bufferAggregateConstant(Constant *CPV, // buildTypeNameMap - Run through symbol table looking for type names. // - bool NVPTXAsmPrinter::isImageType(const Type *Ty) { std::map<const Type *, std::string>::iterator PI = TypeNameMap.find(Ty); - if (PI != TypeNameMap.end() && - (!PI->second.compare("struct._image1d_t") || - !PI->second.compare("struct._image2d_t") || - !PI->second.compare("struct._image3d_t"))) + if (PI != TypeNameMap.end() && (!PI->second.compare("struct._image1d_t") || + !PI->second.compare("struct._image2d_t") || + !PI->second.compare("struct._image3d_t"))) return true; return false; @@ -1955,10 +1966,10 @@ bool NVPTXAsmPrinter::isImageType(const Type *Ty) { /// bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant, - const char *ExtraCode, - raw_ostream &O) { + const char *ExtraCode, raw_ostream &O) { if (ExtraCode && ExtraCode[0]) { - if (ExtraCode[1] != 0) return true; // Unknown modifier. + if (ExtraCode[1] != 0) + return true; // Unknown modifier. switch (ExtraCode[0]) { default: @@ -1974,13 +1985,11 @@ bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, return false; } -bool NVPTXAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, - unsigned OpNo, - unsigned AsmVariant, - const char *ExtraCode, - raw_ostream &O) { +bool NVPTXAsmPrinter::PrintAsmMemoryOperand( + const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant, + const char *ExtraCode, raw_ostream &O) { if (ExtraCode && ExtraCode[0]) - return true; // Unknown modifier + return true; // Unknown modifier O << '['; printMemOperand(MI, OpNo, O); @@ -1989,41 +1998,69 @@ bool NVPTXAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, return false; } -bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) -{ - switch(MI.getOpcode()) { +bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) { + switch (MI.getOpcode()) { default: return false; - case NVPTX::CallArgBeginInst: case NVPTX::CallArgEndInst0: - case NVPTX::CallArgEndInst1: case NVPTX::CallArgF32: - case NVPTX::CallArgF64: case NVPTX::CallArgI16: - case NVPTX::CallArgI32: case NVPTX::CallArgI32imm: - case NVPTX::CallArgI64: case NVPTX::CallArgI8: - case NVPTX::CallArgParam: case NVPTX::CallVoidInst: - case NVPTX::CallVoidInstReg: case NVPTX::Callseq_End: + case NVPTX::CallArgBeginInst: + case NVPTX::CallArgEndInst0: + case NVPTX::CallArgEndInst1: + case NVPTX::CallArgF32: + case NVPTX::CallArgF64: + case NVPTX::CallArgI16: + case NVPTX::CallArgI32: + case NVPTX::CallArgI32imm: + case NVPTX::CallArgI64: + case NVPTX::CallArgI8: + case NVPTX::CallArgParam: + case NVPTX::CallVoidInst: + case NVPTX::CallVoidInstReg: + case NVPTX::Callseq_End: case NVPTX::CallVoidInstReg64: - case NVPTX::DeclareParamInst: case NVPTX::DeclareRetMemInst: - case NVPTX::DeclareRetRegInst: case NVPTX::DeclareRetScalarInst: - case NVPTX::DeclareScalarParamInst: case NVPTX::DeclareScalarRegInst: - case NVPTX::StoreParamF32: case NVPTX::StoreParamF64: - case NVPTX::StoreParamI16: case NVPTX::StoreParamI32: - case NVPTX::StoreParamI64: case NVPTX::StoreParamI8: - case NVPTX::StoreParamS32I8: case 
NVPTX::StoreParamU32I8: - case NVPTX::StoreParamS32I16: case NVPTX::StoreParamU32I16: - case NVPTX::StoreRetvalF32: case NVPTX::StoreRetvalF64: - case NVPTX::StoreRetvalI16: case NVPTX::StoreRetvalI32: - case NVPTX::StoreRetvalI64: case NVPTX::StoreRetvalI8: - case NVPTX::LastCallArgF32: case NVPTX::LastCallArgF64: - case NVPTX::LastCallArgI16: case NVPTX::LastCallArgI32: - case NVPTX::LastCallArgI32imm: case NVPTX::LastCallArgI64: - case NVPTX::LastCallArgI8: case NVPTX::LastCallArgParam: - case NVPTX::LoadParamMemF32: case NVPTX::LoadParamMemF64: - case NVPTX::LoadParamMemI16: case NVPTX::LoadParamMemI32: - case NVPTX::LoadParamMemI64: case NVPTX::LoadParamMemI8: - case NVPTX::LoadParamRegF32: case NVPTX::LoadParamRegF64: - case NVPTX::LoadParamRegI16: case NVPTX::LoadParamRegI32: - case NVPTX::LoadParamRegI64: case NVPTX::LoadParamRegI8: - case NVPTX::PrototypeInst: case NVPTX::DBG_VALUE: + case NVPTX::DeclareParamInst: + case NVPTX::DeclareRetMemInst: + case NVPTX::DeclareRetRegInst: + case NVPTX::DeclareRetScalarInst: + case NVPTX::DeclareScalarParamInst: + case NVPTX::DeclareScalarRegInst: + case NVPTX::StoreParamF32: + case NVPTX::StoreParamF64: + case NVPTX::StoreParamI16: + case NVPTX::StoreParamI32: + case NVPTX::StoreParamI64: + case NVPTX::StoreParamI8: + case NVPTX::StoreParamS32I8: + case NVPTX::StoreParamU32I8: + case NVPTX::StoreParamS32I16: + case NVPTX::StoreParamU32I16: + case NVPTX::StoreRetvalF32: + case NVPTX::StoreRetvalF64: + case NVPTX::StoreRetvalI16: + case NVPTX::StoreRetvalI32: + case NVPTX::StoreRetvalI64: + case NVPTX::StoreRetvalI8: + case NVPTX::LastCallArgF32: + case NVPTX::LastCallArgF64: + case NVPTX::LastCallArgI16: + case NVPTX::LastCallArgI32: + case NVPTX::LastCallArgI32imm: + case NVPTX::LastCallArgI64: + case NVPTX::LastCallArgI8: + case NVPTX::LastCallArgParam: + case NVPTX::LoadParamMemF32: + case NVPTX::LoadParamMemF64: + case NVPTX::LoadParamMemI16: + case NVPTX::LoadParamMemI32: + case NVPTX::LoadParamMemI64: + case NVPTX::LoadParamMemI8: + case NVPTX::LoadParamRegF32: + case NVPTX::LoadParamRegF64: + case NVPTX::LoadParamRegI16: + case NVPTX::LoadParamRegI32: + case NVPTX::LoadParamRegI64: + case NVPTX::LoadParamRegI8: + case NVPTX::PrototypeInst: + case NVPTX::DBG_VALUE: return true; } return false; @@ -2035,10 +2072,9 @@ extern "C" void LLVMInitializeNVPTXBackendAsmPrinter() { RegisterAsmPrinter<NVPTXAsmPrinter> Y(TheNVPTXTarget64); } - void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) { std::stringstream temp; - LineReader * reader = this->getReader(filename.str()); + LineReader *reader = this->getReader(filename.str()); temp << "\n//"; temp << filename.str(); temp << ":"; @@ -2049,29 +2085,26 @@ void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) { this->OutStreamer.EmitRawText(Twine(temp.str())); } - LineReader *NVPTXAsmPrinter::getReader(std::string filename) { - if (reader == NULL) { - reader = new LineReader(filename); + if (reader == NULL) { + reader = new LineReader(filename); } if (reader->fileName() != filename) { delete reader; - reader = new LineReader(filename); + reader = new LineReader(filename); } return reader; } - -std::string -LineReader::readLine(unsigned lineNum) { +std::string LineReader::readLine(unsigned lineNum) { if (lineNum < theCurLine) { theCurLine = 0; - fstr.seekg(0,std::ios::beg); + fstr.seekg(0, std::ios::beg); } while (theCurLine < lineNum) { - fstr.getline(buff,500); + fstr.getline(buff, 500); theCurLine++; } return buff; diff --git 
a/lib/Target/NVPTX/NVPTXAsmPrinter.h b/lib/Target/NVPTX/NVPTXAsmPrinter.h index 42498f0..6dc9fc0 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.h +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.h @@ -43,15 +43,15 @@ // This is defined in AsmPrinter.cpp. // Used to process the constant expressions in initializers. namespace nvptx { -const llvm::MCExpr *LowerConstant(const llvm::Constant *CV, - llvm::AsmPrinter &AP) ; +const llvm::MCExpr * +LowerConstant(const llvm::Constant *CV, llvm::AsmPrinter &AP); } namespace llvm { class LineReader { private: - unsigned theCurLine ; + unsigned theCurLine; std::ifstream fstr; char buff[512]; std::string theFileName; @@ -63,17 +63,12 @@ public: theFileName = filename; } std::string fileName() { return theFileName; } - ~LineReader() { - fstr.close(); - } + ~LineReader() { fstr.close(); } std::string readLine(unsigned line); }; - - class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter { - class AggBuffer { // Used to buffer the emitted string for initializing global // aggregates. @@ -92,7 +87,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter { // Once we have this AggBuffer setup, we can choose how to print // it out. public: - unsigned size; // size of the buffer in bytes + unsigned size; // size of the buffer in bytes unsigned char *buffer; // the buffer unsigned numSymbols; // number of symbol addresses SmallVector<unsigned, 4> symbolPosInBuffer; @@ -105,33 +100,31 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter { public: AggBuffer(unsigned _size, raw_ostream &_O, NVPTXAsmPrinter &_AP) - :O(_O),AP(_AP) { + : O(_O), AP(_AP) { buffer = new unsigned char[_size]; size = _size; curpos = 0; numSymbols = 0; } - ~AggBuffer() { - delete [] buffer; - } + ~AggBuffer() { delete[] buffer; } unsigned addBytes(unsigned char *Ptr, int Num, int Bytes) { - assert((curpos+Num) <= size); - assert((curpos+Bytes) <= size); - for ( int i= 0; i < Num; ++i) { + assert((curpos + Num) <= size); + assert((curpos + Bytes) <= size); + for (int i = 0; i < Num; ++i) { buffer[curpos] = Ptr[i]; - curpos ++; + curpos++; } - for ( int i=Num; i < Bytes ; ++i) { + for (int i = Num; i < Bytes; ++i) { buffer[curpos] = 0; - curpos ++; + curpos++; } return curpos; } unsigned addZeros(int Num) { - assert((curpos+Num) <= size); - for ( int i= 0; i < Num; ++i) { + assert((curpos + Num) <= size); + for (int i = 0; i < Num; ++i) { buffer[curpos] = 0; - curpos ++; + curpos++; } return curpos; } @@ -143,10 +136,10 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter { void print() { if (numSymbols == 0) { // print out in bytes - for (unsigned i=0; i<size; i++) { + for (unsigned i = 0; i < size; i++) { if (i) O << ", "; - O << (unsigned int)buffer[i]; + O << (unsigned int) buffer[i]; } } else { // print out in 4-bytes or 8-bytes @@ -156,7 +149,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter { unsigned int nBytes = 4; if (AP.nvptxSubtarget.is64Bit()) nBytes = 8; - for (pos=0; pos<size; pos+=nBytes) { + for (pos = 0; pos < size; pos += nBytes) { if (pos) O << ", "; if (pos == nextSymbolPos) { @@ -164,22 +157,19 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter { if (GlobalValue *GVar = dyn_cast<GlobalValue>(v)) { MCSymbol *Name = AP.Mang->getSymbol(GVar); O << *Name; - } - else if (ConstantExpr *Cexpr = - dyn_cast<ConstantExpr>(v)) { + } else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(v)) { O << *nvptx::LowerConstant(Cexpr, AP); } else llvm_unreachable("symbol type unknown"); nSym++; if (nSym >= 
numSymbols) - nextSymbolPos = size+1; + nextSymbolPos = size + 1; else nextSymbolPos = symbolPosInBuffer[nSym]; - } else - if (nBytes == 4) - O << *(unsigned int*)(buffer+pos); - else - O << *(unsigned long long*)(buffer+pos); + } else if (nBytes == 4) + O << *(unsigned int *)(buffer + pos); + else + O << *(unsigned long long *)(buffer + pos); } } } @@ -189,10 +179,8 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter { virtual void emitSrcInText(StringRef filename, unsigned line); -private : - virtual const char *getPassName() const { - return "NVPTX Assembly Printer"; - } +private: + virtual const char *getPassName() const { return "NVPTX Assembly Printer"; } const Function *F; std::string CurrentFnName; @@ -207,31 +195,28 @@ private : void printGlobalVariable(const GlobalVariable *GVar); void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O, - const char *Modifier=0); + const char *Modifier = 0); void printLdStCode(const MachineInstr *MI, int opNum, raw_ostream &O, - const char *Modifier=0); - void printVecModifiedImmediate(const MachineOperand &MO, - const char *Modifier, raw_ostream &O); + const char *Modifier = 0); + void printVecModifiedImmediate(const MachineOperand &MO, const char *Modifier, + raw_ostream &O); void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O, - const char *Modifier=0); + const char *Modifier = 0); void printImplicitDef(const MachineInstr *MI, raw_ostream &O) const; // definition autogenerated. void printInstruction(const MachineInstr *MI, raw_ostream &O); - void printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O, - bool=false); + void printModuleLevelGV(GlobalVariable *GVar, raw_ostream &O, bool = false); void printParamName(int paramIndex, raw_ostream &O); void printParamName(Function::const_arg_iterator I, int paramIndex, raw_ostream &O); void emitHeader(Module &M, raw_ostream &O); - void emitKernelFunctionDirectives(const Function& F, - raw_ostream &O) const; + void emitKernelFunctionDirectives(const Function &F, raw_ostream &O) const; void emitVirtualRegister(unsigned int vr, bool isVec, raw_ostream &O); void emitFunctionExternParamList(const MachineFunction &MF); void emitFunctionParamList(const Function *, raw_ostream &O); void emitFunctionParamList(const MachineFunction &MF, raw_ostream &O); void setAndEmitFunctionVirtualRegisters(const MachineFunction &MF); - void emitFunctionTempData(const MachineFunction &MF, - unsigned &FrameSize); + void emitFunctionTempData(const MachineFunction &MF, unsigned &FrameSize); bool isImageType(const Type *Ty); bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant, const char *ExtraCode, @@ -269,17 +254,16 @@ private: void recordAndEmitFilenames(Module &); void emitPTXGlobalVariable(const GlobalVariable *GVar, raw_ostream &O); - void emitPTXAddressSpace(unsigned int AddressSpace, - raw_ostream &O) const; - std::string getPTXFundamentalTypeStr(const Type *Ty, bool=true) const ; - void printScalarConstant(Constant *CPV, raw_ostream &O) ; - void printFPConstant(const ConstantFP *Fp, raw_ostream &O) ; - void bufferLEByte(Constant *CPV, int Bytes, AggBuffer *aggBuffer) ; - void bufferAggregateConstant(Constant *CV, AggBuffer *aggBuffer) ; + void emitPTXAddressSpace(unsigned int AddressSpace, raw_ostream &O) const; + std::string getPTXFundamentalTypeStr(const Type *Ty, bool = true) const; + void printScalarConstant(Constant *CPV, raw_ostream &O); + void printFPConstant(const ConstantFP *Fp, raw_ostream &O); + void bufferLEByte(Constant *CPV, int 
Bytes, AggBuffer *aggBuffer); + void bufferAggregateConstant(Constant *CV, AggBuffer *aggBuffer); void printOperandProper(const MachineOperand &MO); - void emitLinkageDirective(const GlobalValue* V, raw_ostream &O); + void emitLinkageDirective(const GlobalValue *V, raw_ostream &O); void emitDeclarations(Module &, raw_ostream &O); void emitDeclaration(const Function *, raw_ostream &O); @@ -289,10 +273,9 @@ private: LineReader *reader; LineReader *getReader(std::string); public: - NVPTXAsmPrinter(TargetMachine &TM, - MCStreamer &Streamer) - : AsmPrinter(TM, Streamer), - nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) { + NVPTXAsmPrinter(TargetMachine &TM, MCStreamer &Streamer) + : AsmPrinter(TM, Streamer), + nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) { CurrentBankselLabelInBasicBlock = ""; VRidGlobal2LocalMap = NULL; reader = NULL; diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/lib/Target/NVPTX/NVPTXFrameLowering.cpp index bb2c55c..6533da5 100644 --- a/lib/Target/NVPTX/NVPTXFrameLowering.cpp +++ b/lib/Target/NVPTX/NVPTXFrameLowering.cpp @@ -25,9 +25,7 @@ using namespace llvm; -bool NVPTXFrameLowering::hasFP(const MachineFunction &MF) const { - return true; -} +bool NVPTXFrameLowering::hasFP(const MachineFunction &MF) const { return true; } void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const { if (MF.getFrameInfo()->hasStackObjects()) { @@ -42,46 +40,39 @@ void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const { // mov %SPL, %depot; // cvta.local %SP, %SPL; if (is64bit) { - MachineInstr *MI = BuildMI(MBB, MBBI, dl, - tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64), - NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal); - BuildMI(MBB, MI, dl, - tm.getInstrInfo()->get(NVPTX::IMOV64rr), NVPTX::VRFrameLocal) - .addReg(NVPTX::VRDepot); + MachineInstr *MI = BuildMI( + MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64), + NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal); + BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::IMOV64rr), + NVPTX::VRFrameLocal).addReg(NVPTX::VRDepot); } else { - MachineInstr *MI = BuildMI(MBB, MBBI, dl, - tm.getInstrInfo()->get(NVPTX::cvta_local_yes), - NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal); - BuildMI(MBB, MI, dl, - tm.getInstrInfo()->get(NVPTX::IMOV32rr), NVPTX::VRFrameLocal) - .addReg(NVPTX::VRDepot); + MachineInstr *MI = BuildMI( + MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes), + NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal); + BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::IMOV32rr), + NVPTX::VRFrameLocal).addReg(NVPTX::VRDepot); } - } - else { + } else { // mov %SP, %depot; if (is64bit) - BuildMI(MBB, MBBI, dl, - tm.getInstrInfo()->get(NVPTX::IMOV64rr), NVPTX::VRFrame) - .addReg(NVPTX::VRDepot); + BuildMI(MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::IMOV64rr), + NVPTX::VRFrame).addReg(NVPTX::VRDepot); else - BuildMI(MBB, MBBI, dl, - tm.getInstrInfo()->get(NVPTX::IMOV32rr), NVPTX::VRFrame) - .addReg(NVPTX::VRDepot); + BuildMI(MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::IMOV32rr), + NVPTX::VRFrame).addReg(NVPTX::VRDepot); } } } void NVPTXFrameLowering::emitEpilogue(MachineFunction &MF, - MachineBasicBlock &MBB) const { -} + MachineBasicBlock &MBB) const {} // This function eliminates ADJCALLSTACKDOWN, // ADJCALLSTACKUP pseudo instructions -void NVPTXFrameLowering:: -eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, - MachineBasicBlock::iterator I) const { +void NVPTXFrameLowering::eliminateCallFramePseudoInstr( + MachineFunction &MF, MachineBasicBlock &MBB, + 
MachineBasicBlock::iterator I) const { // Simply discard ADJCALLSTACKDOWN, // ADJCALLSTACKUP instructions. MBB.erase(I); } - diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.h b/lib/Target/NVPTX/NVPTXFrameLowering.h index d34e7be..819f1dd 100644 --- a/lib/Target/NVPTX/NVPTXFrameLowering.h +++ b/lib/Target/NVPTX/NVPTXFrameLowering.h @@ -16,7 +16,6 @@ #include "llvm/Target/TargetFrameLowering.h" - namespace llvm { class NVPTXTargetMachine; @@ -26,13 +25,12 @@ class NVPTXFrameLowering : public TargetFrameLowering { public: explicit NVPTXFrameLowering(NVPTXTargetMachine &_tm, bool _is64bit) - : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 8, 0), - tm(_tm), is64bit(_is64bit) {} + : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 8, 0), tm(_tm), + is64bit(_is64bit) {} virtual bool hasFP(const MachineFunction &MF) const; virtual void emitPrologue(MachineFunction &MF) const; - virtual void emitEpilogue(MachineFunction &MF, - MachineBasicBlock &MBB) const; + virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const; void eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index 481f13a..0f4c8db 100644 --- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -11,7 +11,6 @@ // //===----------------------------------------------------------------------===// - #include "NVPTXISelDAGToDAG.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Instructions.h" @@ -26,27 +25,22 @@ using namespace llvm; - -static cl::opt<bool> -UseFMADInstruction("nvptx-mad-enable", - cl::ZeroOrMore, - cl::desc("NVPTX Specific: Enable generating FMAD instructions"), - cl::init(false)); +static cl::opt<bool> UseFMADInstruction( + "nvptx-mad-enable", cl::ZeroOrMore, + cl::desc("NVPTX Specific: Enable generating FMAD instructions"), + cl::init(false)); static cl::opt<int> -FMAContractLevel("nvptx-fma-level", - cl::ZeroOrMore, +FMAContractLevel("nvptx-fma-level", cl::ZeroOrMore, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" - " 1: do it 2: do it aggressively"), - cl::init(2)); - + " 1: do it 2: do it aggressively"), + cl::init(2)); -static cl::opt<int> -UsePrecDivF32("nvptx-prec-divf32", - cl::ZeroOrMore, - cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use" - " IEEE Compliant F32 div.rnd if avaiable."), - cl::init(2)); +static cl::opt<int> UsePrecDivF32( + "nvptx-prec-divf32", cl::ZeroOrMore, + cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use" + " IEEE Compliant F32 div.rnd if avaiable."), + cl::init(2)); /// createNVPTXISelDag - This pass converts a legalized DAG into a /// NVPTX-specific DAG, ready for instruction scheduling. @@ -55,26 +49,22 @@ FunctionPass *llvm::createNVPTXISelDag(NVPTXTargetMachine &TM, return new NVPTXDAGToDAGISel(TM, OptLevel); } - NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm, CodeGenOpt::Level OptLevel) -: SelectionDAGISel(tm, OptLevel), - Subtarget(tm.getSubtarget<NVPTXSubtarget>()) -{ + : SelectionDAGISel(tm, OptLevel), + Subtarget(tm.getSubtarget<NVPTXSubtarget>()) { // Always do fma.f32 fpcontract if the target supports the instruction. // Always do fma.f64 fpcontract if the target supports the instruction. // Do mad.f32 is nvptx-mad-enable is specified and the target does not // support fma.f32. 
doFMADF32 = (OptLevel > 0) && UseFMADInstruction && !Subtarget.hasFMAF32(); - doFMAF32 = (OptLevel > 0) && Subtarget.hasFMAF32() && - (FMAContractLevel>=1); - doFMAF64 = (OptLevel > 0) && Subtarget.hasFMAF64() && - (FMAContractLevel>=1); - doFMAF32AGG = (OptLevel > 0) && Subtarget.hasFMAF32() && - (FMAContractLevel==2); - doFMAF64AGG = (OptLevel > 0) && Subtarget.hasFMAF64() && - (FMAContractLevel==2); + doFMAF32 = (OptLevel > 0) && Subtarget.hasFMAF32() && (FMAContractLevel >= 1); + doFMAF64 = (OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel >= 1); + doFMAF32AGG = + (OptLevel > 0) && Subtarget.hasFMAF32() && (FMAContractLevel == 2); + doFMAF64AGG = + (OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel == 2); allowFMA = (FMAContractLevel >= 1) || UseFMADInstruction; @@ -92,10 +82,10 @@ NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm, /// Select - Select instructions not customized! Used for /// expanded, promoted and normal instructions. -SDNode* NVPTXDAGToDAGISel::Select(SDNode *N) { +SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) { if (N->isMachineOpcode()) - return NULL; // Already selected. + return NULL; // Already selected. SDNode *ResNode = NULL; switch (N->getOpcode()) { @@ -119,30 +109,34 @@ SDNode* NVPTXDAGToDAGISel::Select(SDNode *N) { case NVPTXISD::StoreV4: ResNode = SelectStoreVector(N); break; - default: break; + default: + break; } if (ResNode) return ResNode; return SelectCode(N); } - -static unsigned int -getCodeAddrSpace(MemSDNode *N, const NVPTXSubtarget &Subtarget) -{ +static unsigned int getCodeAddrSpace(MemSDNode *N, + const NVPTXSubtarget &Subtarget) { const Value *Src = N->getSrcValue(); if (!Src) return NVPTX::PTXLdStInstCode::LOCAL; if (const PointerType *PT = dyn_cast<PointerType>(Src->getType())) { switch (PT->getAddressSpace()) { - case llvm::ADDRESS_SPACE_LOCAL: return NVPTX::PTXLdStInstCode::LOCAL; - case llvm::ADDRESS_SPACE_GLOBAL: return NVPTX::PTXLdStInstCode::GLOBAL; - case llvm::ADDRESS_SPACE_SHARED: return NVPTX::PTXLdStInstCode::SHARED; + case llvm::ADDRESS_SPACE_LOCAL: + return NVPTX::PTXLdStInstCode::LOCAL; + case llvm::ADDRESS_SPACE_GLOBAL: + return NVPTX::PTXLdStInstCode::GLOBAL; + case llvm::ADDRESS_SPACE_SHARED: + return NVPTX::PTXLdStInstCode::SHARED; case llvm::ADDRESS_SPACE_CONST_NOT_GEN: return NVPTX::PTXLdStInstCode::CONSTANT; - case llvm::ADDRESS_SPACE_GENERIC: return NVPTX::PTXLdStInstCode::GENERIC; - case llvm::ADDRESS_SPACE_PARAM: return NVPTX::PTXLdStInstCode::PARAM; + case llvm::ADDRESS_SPACE_GENERIC: + return NVPTX::PTXLdStInstCode::GENERIC; + case llvm::ADDRESS_SPACE_PARAM: + return NVPTX::PTXLdStInstCode::PARAM; case llvm::ADDRESS_SPACE_CONST: // If the arch supports generic address space, translate it to GLOBAL // for correctness. 
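getCodeAddrSpace() in the hunk above maps the address space of a memory operation's pointer operand to one of the PTXLdStInstCode values that the SelectLoad/SelectStore routines later encode as an instruction operand. As a minimal, self-contained sketch of that dispatch pattern (the enums and names below are simplified stand-ins for illustration only, not the real llvm::ADDRESS_SPACE_* or NVPTX::PTXLdStInstCode definitions):

// Standalone sketch of the address-space dispatch performed by
// getCodeAddrSpace(): choose a ld/st instruction code from the pointer's
// address space, with a conservative fallback when the space is unknown.
// All enumerators here are illustrative placeholders.
#include <cstdio>

enum class AddrSpace { Generic, Global, Shared, Constant, Local, Param, Other };
enum class LdStCode { Generic, Global, Shared, Constant, Local, Param };

static LdStCode codeForAddrSpace(AddrSpace AS) {
  switch (AS) {
  case AddrSpace::Global:   return LdStCode::Global;
  case AddrSpace::Shared:   return LdStCode::Shared;
  case AddrSpace::Constant: return LdStCode::Constant;
  case AddrSpace::Generic:  return LdStCode::Generic;
  case AddrSpace::Param:    return LdStCode::Param;
  default:                  return LdStCode::Local; // conservative fallback
  }
}

int main() {
  std::printf("global maps to code %d\n",
              static_cast<int>(codeForAddrSpace(AddrSpace::Global)));
  return 0;
}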
@@ -153,18 +147,18 @@ getCodeAddrSpace(MemSDNode *N, const NVPTXSubtarget &Subtarget) return NVPTX::PTXLdStInstCode::GLOBAL; else return NVPTX::PTXLdStInstCode::CONSTANT; - default: break; + default: + break; } } return NVPTX::PTXLdStInstCode::LOCAL; } - -SDNode* NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { +SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { DebugLoc dl = N->getDebugLoc(); LoadSDNode *LD = cast<LoadSDNode>(N); EVT LoadedVT = LD->getMemoryVT(); - SDNode *NVPTXLD= NULL; + SDNode *NVPTXLD = NULL; // do not support pre/post inc/dec if (LD->isIndexed()) @@ -204,7 +198,7 @@ SDNode* NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { // type is integer // Float : ISD::NON_EXTLOAD or ISD::EXTLOAD and the type is float MVT ScalarVT = SimpleVT.getScalarType(); - unsigned fromTypeWidth = ScalarVT.getSizeInBits(); + unsigned fromTypeWidth = ScalarVT.getSizeInBits(); unsigned int fromType; if ((LD->getExtensionType() == ISD::SEXTLOAD)) fromType = NVPTX::PTXLdStInstCode::Signed; @@ -223,105 +217,166 @@ SDNode* NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { if (SelectDirectAddr(N1, Addr)) { switch (TargetVT) { - case MVT::i8: Opcode = NVPTX::LD_i8_avar; break; - case MVT::i16: Opcode = NVPTX::LD_i16_avar; break; - case MVT::i32: Opcode = NVPTX::LD_i32_avar; break; - case MVT::i64: Opcode = NVPTX::LD_i64_avar; break; - case MVT::f32: Opcode = NVPTX::LD_f32_avar; break; - case MVT::f64: Opcode = NVPTX::LD_f64_avar; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::LD_i8_avar; + break; + case MVT::i16: + Opcode = NVPTX::LD_i16_avar; + break; + case MVT::i32: + Opcode = NVPTX::LD_i32_avar; + break; + case MVT::i64: + Opcode = NVPTX::LD_i64_avar; + break; + case MVT::f32: + Opcode = NVPTX::LD_f32_avar; + break; + case MVT::f64: + Opcode = NVPTX::LD_f64_avar; + break; + default: + return NULL; } - SDValue Ops[] = { getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(fromType), - getI32Imm(fromTypeWidth), - Addr, Chain }; - NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, - MVT::Other, Ops, 7); - } else if (Subtarget.is64Bit()? - SelectADDRsi64(N1.getNode(), N1, Base, Offset): - SelectADDRsi(N1.getNode(), N1, Base, Offset)) { + SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(fromType), + getI32Imm(fromTypeWidth), Addr, Chain }; + NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops); + } else if (Subtarget.is64Bit() + ? 
SelectADDRsi64(N1.getNode(), N1, Base, Offset) + : SelectADDRsi(N1.getNode(), N1, Base, Offset)) { switch (TargetVT) { - case MVT::i8: Opcode = NVPTX::LD_i8_asi; break; - case MVT::i16: Opcode = NVPTX::LD_i16_asi; break; - case MVT::i32: Opcode = NVPTX::LD_i32_asi; break; - case MVT::i64: Opcode = NVPTX::LD_i64_asi; break; - case MVT::f32: Opcode = NVPTX::LD_f32_asi; break; - case MVT::f64: Opcode = NVPTX::LD_f64_asi; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::LD_i8_asi; + break; + case MVT::i16: + Opcode = NVPTX::LD_i16_asi; + break; + case MVT::i32: + Opcode = NVPTX::LD_i32_asi; + break; + case MVT::i64: + Opcode = NVPTX::LD_i64_asi; + break; + case MVT::f32: + Opcode = NVPTX::LD_f32_asi; + break; + case MVT::f64: + Opcode = NVPTX::LD_f64_asi; + break; + default: + return NULL; } - SDValue Ops[] = { getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(fromType), - getI32Imm(fromTypeWidth), - Base, Offset, Chain }; - NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, - MVT::Other, Ops, 8); - } else if (Subtarget.is64Bit()? - SelectADDRri64(N1.getNode(), N1, Base, Offset): - SelectADDRri(N1.getNode(), N1, Base, Offset)) { + SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(fromType), + getI32Imm(fromTypeWidth), Base, Offset, Chain }; + NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops); + } else if (Subtarget.is64Bit() + ? SelectADDRri64(N1.getNode(), N1, Base, Offset) + : SelectADDRri(N1.getNode(), N1, Base, Offset)) { if (Subtarget.is64Bit()) { switch (TargetVT) { - case MVT::i8: Opcode = NVPTX::LD_i8_ari_64; break; - case MVT::i16: Opcode = NVPTX::LD_i16_ari_64; break; - case MVT::i32: Opcode = NVPTX::LD_i32_ari_64; break; - case MVT::i64: Opcode = NVPTX::LD_i64_ari_64; break; - case MVT::f32: Opcode = NVPTX::LD_f32_ari_64; break; - case MVT::f64: Opcode = NVPTX::LD_f64_ari_64; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::LD_i8_ari_64; + break; + case MVT::i16: + Opcode = NVPTX::LD_i16_ari_64; + break; + case MVT::i32: + Opcode = NVPTX::LD_i32_ari_64; + break; + case MVT::i64: + Opcode = NVPTX::LD_i64_ari_64; + break; + case MVT::f32: + Opcode = NVPTX::LD_f32_ari_64; + break; + case MVT::f64: + Opcode = NVPTX::LD_f64_ari_64; + break; + default: + return NULL; } } else { switch (TargetVT) { - case MVT::i8: Opcode = NVPTX::LD_i8_ari; break; - case MVT::i16: Opcode = NVPTX::LD_i16_ari; break; - case MVT::i32: Opcode = NVPTX::LD_i32_ari; break; - case MVT::i64: Opcode = NVPTX::LD_i64_ari; break; - case MVT::f32: Opcode = NVPTX::LD_f32_ari; break; - case MVT::f64: Opcode = NVPTX::LD_f64_ari; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::LD_i8_ari; + break; + case MVT::i16: + Opcode = NVPTX::LD_i16_ari; + break; + case MVT::i32: + Opcode = NVPTX::LD_i32_ari; + break; + case MVT::i64: + Opcode = NVPTX::LD_i64_ari; + break; + case MVT::f32: + Opcode = NVPTX::LD_f32_ari; + break; + case MVT::f64: + Opcode = NVPTX::LD_f64_ari; + break; + default: + return NULL; } } - SDValue Ops[] = { getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(fromType), - getI32Imm(fromTypeWidth), - Base, Offset, Chain }; - NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, - MVT::Other, Ops, 8); - } - else { + SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(fromType), + getI32Imm(fromTypeWidth), Base, Offset, Chain }; + NVPTXLD = CurDAG->getMachineNode(Opcode, dl, 
TargetVT, MVT::Other, Ops); + } else { if (Subtarget.is64Bit()) { switch (TargetVT) { - case MVT::i8: Opcode = NVPTX::LD_i8_areg_64; break; - case MVT::i16: Opcode = NVPTX::LD_i16_areg_64; break; - case MVT::i32: Opcode = NVPTX::LD_i32_areg_64; break; - case MVT::i64: Opcode = NVPTX::LD_i64_areg_64; break; - case MVT::f32: Opcode = NVPTX::LD_f32_areg_64; break; - case MVT::f64: Opcode = NVPTX::LD_f64_areg_64; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::LD_i8_areg_64; + break; + case MVT::i16: + Opcode = NVPTX::LD_i16_areg_64; + break; + case MVT::i32: + Opcode = NVPTX::LD_i32_areg_64; + break; + case MVT::i64: + Opcode = NVPTX::LD_i64_areg_64; + break; + case MVT::f32: + Opcode = NVPTX::LD_f32_areg_64; + break; + case MVT::f64: + Opcode = NVPTX::LD_f64_areg_64; + break; + default: + return NULL; } } else { switch (TargetVT) { - case MVT::i8: Opcode = NVPTX::LD_i8_areg; break; - case MVT::i16: Opcode = NVPTX::LD_i16_areg; break; - case MVT::i32: Opcode = NVPTX::LD_i32_areg; break; - case MVT::i64: Opcode = NVPTX::LD_i64_areg; break; - case MVT::f32: Opcode = NVPTX::LD_f32_areg; break; - case MVT::f64: Opcode = NVPTX::LD_f64_areg; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::LD_i8_areg; + break; + case MVT::i16: + Opcode = NVPTX::LD_i16_areg; + break; + case MVT::i32: + Opcode = NVPTX::LD_i32_areg; + break; + case MVT::i64: + Opcode = NVPTX::LD_i64_areg; + break; + case MVT::f32: + Opcode = NVPTX::LD_f32_areg; + break; + case MVT::f64: + Opcode = NVPTX::LD_f64_areg; + break; + default: + return NULL; } } - SDValue Ops[] = { getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(fromType), - getI32Imm(fromTypeWidth), - N1, Chain }; - NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, - MVT::Other, Ops, 7); + SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(fromType), + getI32Imm(fromTypeWidth), N1, Chain }; + NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops); } if (NVPTXLD != NULL) { @@ -344,9 +399,8 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { MemSDNode *MemSD = cast<MemSDNode>(N); EVT LoadedVT = MemSD->getMemoryVT(); - if (!LoadedVT.isSimple()) - return NULL; + return NULL; // Address Space Setting unsigned int CodeAddrSpace = getCodeAddrSpace(MemSD, Subtarget); @@ -369,11 +423,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { // type is integer // Float : ISD::NON_EXTLOAD or ISD::EXTLOAD and the type is float MVT ScalarVT = SimpleVT.getScalarType(); - unsigned FromTypeWidth = ScalarVT.getSizeInBits(); + unsigned FromTypeWidth = ScalarVT.getSizeInBits(); unsigned int FromType; // The last operand holds the original LoadSDNode::getExtensionType() value - unsigned ExtensionType = - cast<ConstantSDNode>(N->getOperand(N->getNumOperands()-1))->getZExtValue(); + unsigned ExtensionType = cast<ConstantSDNode>( + N->getOperand(N->getNumOperands() - 1))->getZExtValue(); if (ExtensionType == ISD::SEXTLOAD) FromType = NVPTX::PTXLdStInstCode::Signed; else if (ScalarVT.isFloatingPoint()) @@ -384,198 +438,329 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { unsigned VecType; switch (N->getOpcode()) { - case NVPTXISD::LoadV2: VecType = NVPTX::PTXLdStInstCode::V2; break; - case NVPTXISD::LoadV4: VecType = NVPTX::PTXLdStInstCode::V4; break; - default: return NULL; + case NVPTXISD::LoadV2: + VecType = NVPTX::PTXLdStInstCode::V2; + break; + case NVPTXISD::LoadV4: + VecType = NVPTX::PTXLdStInstCode::V4; + break; + default: + 
return NULL; } EVT EltVT = N->getValueType(0); if (SelectDirectAddr(Op1, Addr)) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v2_avar; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v2_avar; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v2_avar; break; - case MVT::i64: Opcode = NVPTX::LDV_i64_v2_avar; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v2_avar; break; - case MVT::f64: Opcode = NVPTX::LDV_f64_v2_avar; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v2_avar; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v2_avar; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v2_avar; + break; + case MVT::i64: + Opcode = NVPTX::LDV_i64_v2_avar; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v2_avar; + break; + case MVT::f64: + Opcode = NVPTX::LDV_f64_v2_avar; + break; } break; case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v4_avar; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v4_avar; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v4_avar; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v4_avar; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v4_avar; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v4_avar; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v4_avar; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v4_avar; + break; } break; } - SDValue Ops[] = { getI32Imm(IsVolatile), - getI32Imm(CodeAddrSpace), - getI32Imm(VecType), - getI32Imm(FromType), - getI32Imm(FromTypeWidth), - Addr, Chain }; - LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops, 7); - } else if (Subtarget.is64Bit()? - SelectADDRsi64(Op1.getNode(), Op1, Base, Offset): - SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) { + SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace), + getI32Imm(VecType), getI32Imm(FromType), + getI32Imm(FromTypeWidth), Addr, Chain }; + LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops); + } else if (Subtarget.is64Bit() + ? 
SelectADDRsi64(Op1.getNode(), Op1, Base, Offset) + : SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v2_asi; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v2_asi; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v2_asi; break; - case MVT::i64: Opcode = NVPTX::LDV_i64_v2_asi; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v2_asi; break; - case MVT::f64: Opcode = NVPTX::LDV_f64_v2_asi; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v2_asi; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v2_asi; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v2_asi; + break; + case MVT::i64: + Opcode = NVPTX::LDV_i64_v2_asi; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v2_asi; + break; + case MVT::f64: + Opcode = NVPTX::LDV_f64_v2_asi; + break; } break; case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v4_asi; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v4_asi; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v4_asi; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v4_asi; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v4_asi; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v4_asi; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v4_asi; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v4_asi; + break; } break; } - SDValue Ops[] = { getI32Imm(IsVolatile), - getI32Imm(CodeAddrSpace), - getI32Imm(VecType), - getI32Imm(FromType), - getI32Imm(FromTypeWidth), - Base, Offset, Chain }; - LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops, 8); - } else if (Subtarget.is64Bit()? - SelectADDRri64(Op1.getNode(), Op1, Base, Offset): - SelectADDRri(Op1.getNode(), Op1, Base, Offset)) { + SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace), + getI32Imm(VecType), getI32Imm(FromType), + getI32Imm(FromTypeWidth), Base, Offset, Chain }; + LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops); + } else if (Subtarget.is64Bit() + ? 
SelectADDRri64(Op1.getNode(), Op1, Base, Offset) + : SelectADDRri(Op1.getNode(), Op1, Base, Offset)) { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v2_ari_64; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v2_ari_64; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v2_ari_64; break; - case MVT::i64: Opcode = NVPTX::LDV_i64_v2_ari_64; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v2_ari_64; break; - case MVT::f64: Opcode = NVPTX::LDV_f64_v2_ari_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v2_ari_64; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v2_ari_64; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v2_ari_64; + break; + case MVT::i64: + Opcode = NVPTX::LDV_i64_v2_ari_64; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v2_ari_64; + break; + case MVT::f64: + Opcode = NVPTX::LDV_f64_v2_ari_64; + break; } break; case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v4_ari_64; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v4_ari_64; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v4_ari_64; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v4_ari_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v4_ari_64; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v4_ari_64; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v4_ari_64; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v4_ari_64; + break; } break; } } else { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v2_ari; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v2_ari; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v2_ari; break; - case MVT::i64: Opcode = NVPTX::LDV_i64_v2_ari; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v2_ari; break; - case MVT::f64: Opcode = NVPTX::LDV_f64_v2_ari; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v2_ari; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v2_ari; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v2_ari; + break; + case MVT::i64: + Opcode = NVPTX::LDV_i64_v2_ari; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v2_ari; + break; + case MVT::f64: + Opcode = NVPTX::LDV_f64_v2_ari; + break; } break; case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v4_ari; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v4_ari; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v4_ari; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v4_ari; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v4_ari; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v4_ari; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v4_ari; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v4_ari; + break; } break; } } - SDValue Ops[] = { getI32Imm(IsVolatile), - getI32Imm(CodeAddrSpace), - getI32Imm(VecType), - getI32Imm(FromType), - getI32Imm(FromTypeWidth), - Base, Offset, Chain }; + SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace), + getI32Imm(VecType), getI32Imm(FromType), + getI32Imm(FromTypeWidth), Base, Offset, Chain }; - LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops, 8); + LD = 
CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops); } else { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v2_areg_64; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v2_areg_64; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v2_areg_64; break; - case MVT::i64: Opcode = NVPTX::LDV_i64_v2_areg_64; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v2_areg_64; break; - case MVT::f64: Opcode = NVPTX::LDV_f64_v2_areg_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v2_areg_64; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v2_areg_64; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v2_areg_64; + break; + case MVT::i64: + Opcode = NVPTX::LDV_i64_v2_areg_64; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v2_areg_64; + break; + case MVT::f64: + Opcode = NVPTX::LDV_f64_v2_areg_64; + break; } break; case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v4_areg_64; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v4_areg_64; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v4_areg_64; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v4_areg_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v4_areg_64; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v4_areg_64; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v4_areg_64; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v4_areg_64; + break; } break; } } else { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v2_areg; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v2_areg; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v2_areg; break; - case MVT::i64: Opcode = NVPTX::LDV_i64_v2_areg; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v2_areg; break; - case MVT::f64: Opcode = NVPTX::LDV_f64_v2_areg; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v2_areg; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v2_areg; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v2_areg; + break; + case MVT::i64: + Opcode = NVPTX::LDV_i64_v2_areg; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v2_areg; + break; + case MVT::f64: + Opcode = NVPTX::LDV_f64_v2_areg; + break; } break; case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::LDV_i8_v4_areg; break; - case MVT::i16: Opcode = NVPTX::LDV_i16_v4_areg; break; - case MVT::i32: Opcode = NVPTX::LDV_i32_v4_areg; break; - case MVT::f32: Opcode = NVPTX::LDV_f32_v4_areg; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::LDV_i8_v4_areg; + break; + case MVT::i16: + Opcode = NVPTX::LDV_i16_v4_areg; + break; + case MVT::i32: + Opcode = NVPTX::LDV_i32_v4_areg; + break; + case MVT::f32: + Opcode = NVPTX::LDV_f32_v4_areg; + break; } break; } } - SDValue Ops[] = { getI32Imm(IsVolatile), - getI32Imm(CodeAddrSpace), - getI32Imm(VecType), - getI32Imm(FromType), - getI32Imm(FromTypeWidth), - Op1, Chain }; - LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops, 7); + SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace), + getI32Imm(VecType), getI32Imm(FromType), + getI32Imm(FromTypeWidth), Op1, Chain }; + LD = 
CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops); } MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); @@ -598,96 +783,186 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { // Select opcode if (Subtarget.is64Bit()) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LDGV2: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_64; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_64; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_64; break; - case MVT::i64: Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_64; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_64; break; - case MVT::f64: Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_64; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_64; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_64; + break; } break; case NVPTXISD::LDGV4: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_64; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_64; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_64; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_64; + break; } break; case NVPTXISD::LDUV2: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_64; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_64; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_64; break; - case MVT::i64: Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_64; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_64; break; - case MVT::f64: Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_64; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_64; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_64; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_64; + break; } break; case NVPTXISD::LDUV4: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_64; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_64; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_64; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_64; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_64; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_64; + break; + case MVT::f32: + Opcode = 
NVPTX::INT_PTX_LDU_G_v4f32_ELE_64; + break; } break; } } else { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::LDGV2: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_32; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_32; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_32; break; - case MVT::i64: Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_32; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_32; break; - case MVT::f64: Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_32; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v2i16_ELE_32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v2i32_ELE_32; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDG_G_v2i64_ELE_32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v2f32_ELE_32; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDG_G_v2f64_ELE_32; + break; } break; case NVPTXISD::LDGV4: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_32; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_32; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_32; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_32; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDG_G_v4i16_ELE_32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDG_G_v4i32_ELE_32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDG_G_v4f32_ELE_32; + break; } break; case NVPTXISD::LDUV2: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_32; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_32; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_32; break; - case MVT::i64: Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_32; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_32; break; - case MVT::f64: Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_32; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v2i16_ELE_32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v2i32_ELE_32; + break; + case MVT::i64: + Opcode = NVPTX::INT_PTX_LDU_G_v2i64_ELE_32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v2f32_ELE_32; + break; + case MVT::f64: + Opcode = NVPTX::INT_PTX_LDU_G_v2f64_ELE_32; + break; } break; case NVPTXISD::LDUV4: switch (RetVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_32; break; - case MVT::i16: Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_32; break; - case MVT::i32: Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_32; break; - case MVT::f32: Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_32; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_32; + break; + case MVT::i16: + Opcode = NVPTX::INT_PTX_LDU_G_v4i16_ELE_32; + break; + case MVT::i32: + Opcode = NVPTX::INT_PTX_LDU_G_v4i32_ELE_32; + break; + case MVT::f32: + Opcode = NVPTX::INT_PTX_LDU_G_v4f32_ELE_32; + break; } break; } } SDValue Ops[] = { Op1, Chain }; - LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), &Ops[0], 2); + LD = CurDAG->getMachineNode(Opcode, 
DL, N->getVTList(), Ops); MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand(); @@ -696,8 +971,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { return LD; } - -SDNode* NVPTXDAGToDAGISel::SelectStore(SDNode *N) { +SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { DebugLoc dl = N->getDebugLoc(); StoreSDNode *ST = cast<StoreSDNode>(N); EVT StoreVT = ST->getMemoryVT(); @@ -738,7 +1012,7 @@ SDNode* NVPTXDAGToDAGISel::SelectStore(SDNode *N) { // - for integer type, always use 'u' // MVT ScalarVT = SimpleVT.getScalarType(); - unsigned toTypeWidth = ScalarVT.getSizeInBits(); + unsigned toTypeWidth = ScalarVT.getSizeInBits(); unsigned int toType; if (ScalarVT.isFloatingPoint()) toType = NVPTX::PTXLdStInstCode::Float; @@ -757,108 +1031,166 @@ SDNode* NVPTXDAGToDAGISel::SelectStore(SDNode *N) { if (SelectDirectAddr(N2, Addr)) { switch (SourceVT) { - case MVT::i8: Opcode = NVPTX::ST_i8_avar; break; - case MVT::i16: Opcode = NVPTX::ST_i16_avar; break; - case MVT::i32: Opcode = NVPTX::ST_i32_avar; break; - case MVT::i64: Opcode = NVPTX::ST_i64_avar; break; - case MVT::f32: Opcode = NVPTX::ST_f32_avar; break; - case MVT::f64: Opcode = NVPTX::ST_f64_avar; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::ST_i8_avar; + break; + case MVT::i16: + Opcode = NVPTX::ST_i16_avar; + break; + case MVT::i32: + Opcode = NVPTX::ST_i32_avar; + break; + case MVT::i64: + Opcode = NVPTX::ST_i64_avar; + break; + case MVT::f32: + Opcode = NVPTX::ST_f32_avar; + break; + case MVT::f64: + Opcode = NVPTX::ST_f64_avar; + break; + default: + return NULL; } - SDValue Ops[] = { N1, - getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(toType), - getI32Imm(toTypeWidth), - Addr, Chain }; - NVPTXST = CurDAG->getMachineNode(Opcode, dl, - MVT::Other, Ops, 8); - } else if (Subtarget.is64Bit()? - SelectADDRsi64(N2.getNode(), N2, Base, Offset): - SelectADDRsi(N2.getNode(), N2, Base, Offset)) { + SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(toType), + getI32Imm(toTypeWidth), Addr, Chain }; + NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops); + } else if (Subtarget.is64Bit() + ? SelectADDRsi64(N2.getNode(), N2, Base, Offset) + : SelectADDRsi(N2.getNode(), N2, Base, Offset)) { switch (SourceVT) { - case MVT::i8: Opcode = NVPTX::ST_i8_asi; break; - case MVT::i16: Opcode = NVPTX::ST_i16_asi; break; - case MVT::i32: Opcode = NVPTX::ST_i32_asi; break; - case MVT::i64: Opcode = NVPTX::ST_i64_asi; break; - case MVT::f32: Opcode = NVPTX::ST_f32_asi; break; - case MVT::f64: Opcode = NVPTX::ST_f64_asi; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::ST_i8_asi; + break; + case MVT::i16: + Opcode = NVPTX::ST_i16_asi; + break; + case MVT::i32: + Opcode = NVPTX::ST_i32_asi; + break; + case MVT::i64: + Opcode = NVPTX::ST_i64_asi; + break; + case MVT::f32: + Opcode = NVPTX::ST_f32_asi; + break; + case MVT::f64: + Opcode = NVPTX::ST_f64_asi; + break; + default: + return NULL; } - SDValue Ops[] = { N1, - getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(toType), - getI32Imm(toTypeWidth), - Base, Offset, Chain }; - NVPTXST = CurDAG->getMachineNode(Opcode, dl, - MVT::Other, Ops, 9); - } else if (Subtarget.is64Bit()? 
- SelectADDRri64(N2.getNode(), N2, Base, Offset): - SelectADDRri(N2.getNode(), N2, Base, Offset)) { + SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(toType), + getI32Imm(toTypeWidth), Base, Offset, Chain }; + NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops); + } else if (Subtarget.is64Bit() + ? SelectADDRri64(N2.getNode(), N2, Base, Offset) + : SelectADDRri(N2.getNode(), N2, Base, Offset)) { if (Subtarget.is64Bit()) { switch (SourceVT) { - case MVT::i8: Opcode = NVPTX::ST_i8_ari_64; break; - case MVT::i16: Opcode = NVPTX::ST_i16_ari_64; break; - case MVT::i32: Opcode = NVPTX::ST_i32_ari_64; break; - case MVT::i64: Opcode = NVPTX::ST_i64_ari_64; break; - case MVT::f32: Opcode = NVPTX::ST_f32_ari_64; break; - case MVT::f64: Opcode = NVPTX::ST_f64_ari_64; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::ST_i8_ari_64; + break; + case MVT::i16: + Opcode = NVPTX::ST_i16_ari_64; + break; + case MVT::i32: + Opcode = NVPTX::ST_i32_ari_64; + break; + case MVT::i64: + Opcode = NVPTX::ST_i64_ari_64; + break; + case MVT::f32: + Opcode = NVPTX::ST_f32_ari_64; + break; + case MVT::f64: + Opcode = NVPTX::ST_f64_ari_64; + break; + default: + return NULL; } } else { switch (SourceVT) { - case MVT::i8: Opcode = NVPTX::ST_i8_ari; break; - case MVT::i16: Opcode = NVPTX::ST_i16_ari; break; - case MVT::i32: Opcode = NVPTX::ST_i32_ari; break; - case MVT::i64: Opcode = NVPTX::ST_i64_ari; break; - case MVT::f32: Opcode = NVPTX::ST_f32_ari; break; - case MVT::f64: Opcode = NVPTX::ST_f64_ari; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::ST_i8_ari; + break; + case MVT::i16: + Opcode = NVPTX::ST_i16_ari; + break; + case MVT::i32: + Opcode = NVPTX::ST_i32_ari; + break; + case MVT::i64: + Opcode = NVPTX::ST_i64_ari; + break; + case MVT::f32: + Opcode = NVPTX::ST_f32_ari; + break; + case MVT::f64: + Opcode = NVPTX::ST_f64_ari; + break; + default: + return NULL; } } - SDValue Ops[] = { N1, - getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(toType), - getI32Imm(toTypeWidth), - Base, Offset, Chain }; - NVPTXST = CurDAG->getMachineNode(Opcode, dl, - MVT::Other, Ops, 9); + SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(toType), + getI32Imm(toTypeWidth), Base, Offset, Chain }; + NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops); } else { if (Subtarget.is64Bit()) { switch (SourceVT) { - case MVT::i8: Opcode = NVPTX::ST_i8_areg_64; break; - case MVT::i16: Opcode = NVPTX::ST_i16_areg_64; break; - case MVT::i32: Opcode = NVPTX::ST_i32_areg_64; break; - case MVT::i64: Opcode = NVPTX::ST_i64_areg_64; break; - case MVT::f32: Opcode = NVPTX::ST_f32_areg_64; break; - case MVT::f64: Opcode = NVPTX::ST_f64_areg_64; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::ST_i8_areg_64; + break; + case MVT::i16: + Opcode = NVPTX::ST_i16_areg_64; + break; + case MVT::i32: + Opcode = NVPTX::ST_i32_areg_64; + break; + case MVT::i64: + Opcode = NVPTX::ST_i64_areg_64; + break; + case MVT::f32: + Opcode = NVPTX::ST_f32_areg_64; + break; + case MVT::f64: + Opcode = NVPTX::ST_f64_areg_64; + break; + default: + return NULL; } } else { switch (SourceVT) { - case MVT::i8: Opcode = NVPTX::ST_i8_areg; break; - case MVT::i16: Opcode = NVPTX::ST_i16_areg; break; - case MVT::i32: Opcode = NVPTX::ST_i32_areg; break; - case MVT::i64: Opcode = NVPTX::ST_i64_areg; break; - case MVT::f32: Opcode = NVPTX::ST_f32_areg; break; - case MVT::f64: Opcode 
= NVPTX::ST_f64_areg; break; - default: return NULL; + case MVT::i8: + Opcode = NVPTX::ST_i8_areg; + break; + case MVT::i16: + Opcode = NVPTX::ST_i16_areg; + break; + case MVT::i32: + Opcode = NVPTX::ST_i32_areg; + break; + case MVT::i64: + Opcode = NVPTX::ST_i64_areg; + break; + case MVT::f32: + Opcode = NVPTX::ST_f32_areg; + break; + case MVT::f64: + Opcode = NVPTX::ST_f64_areg; + break; + default: + return NULL; } } - SDValue Ops[] = { N1, - getI32Imm(isVolatile), - getI32Imm(codeAddrSpace), - getI32Imm(vecType), - getI32Imm(toType), - getI32Imm(toTypeWidth), - N2, Chain }; - NVPTXST = CurDAG->getMachineNode(Opcode, dl, - MVT::Other, Ops, 8); + SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), + getI32Imm(vecType), getI32Imm(toType), + getI32Imm(toTypeWidth), N2, Chain }; + NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops); } if (NVPTXST != NULL) { @@ -901,14 +1233,13 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { // - for integer type, always use 'u' assert(StoreVT.isSimple() && "Store value is not simple"); MVT ScalarVT = StoreVT.getSimpleVT().getScalarType(); - unsigned ToTypeWidth = ScalarVT.getSizeInBits(); + unsigned ToTypeWidth = ScalarVT.getSizeInBits(); unsigned ToType; if (ScalarVT.isFloatingPoint()) ToType = NVPTX::PTXLdStInstCode::Float; else ToType = NVPTX::PTXLdStInstCode::Unsigned; - SmallVector<SDValue, 12> StOps; SDValue N2; unsigned VecType; @@ -928,7 +1259,8 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { StOps.push_back(N->getOperand(4)); N2 = N->getOperand(5); break; - default: return NULL; + default: + return NULL; } StOps.push_back(getI32Imm(IsVolatile)); @@ -939,105 +1271,197 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { if (SelectDirectAddr(N2, Addr)) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v2_avar; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v2_avar; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v2_avar; break; - case MVT::i64: Opcode = NVPTX::STV_i64_v2_avar; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v2_avar; break; - case MVT::f64: Opcode = NVPTX::STV_f64_v2_avar; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v2_avar; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v2_avar; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v2_avar; + break; + case MVT::i64: + Opcode = NVPTX::STV_i64_v2_avar; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v2_avar; + break; + case MVT::f64: + Opcode = NVPTX::STV_f64_v2_avar; + break; } break; case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v4_avar; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v4_avar; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v4_avar; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v4_avar; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v4_avar; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v4_avar; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v4_avar; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v4_avar; + break; } break; } StOps.push_back(Addr); - } else if (Subtarget.is64Bit()? - SelectADDRsi64(N2.getNode(), N2, Base, Offset): - SelectADDRsi(N2.getNode(), N2, Base, Offset)) { + } else if (Subtarget.is64Bit() + ? 
SelectADDRsi64(N2.getNode(), N2, Base, Offset) + : SelectADDRsi(N2.getNode(), N2, Base, Offset)) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v2_asi; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v2_asi; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v2_asi; break; - case MVT::i64: Opcode = NVPTX::STV_i64_v2_asi; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v2_asi; break; - case MVT::f64: Opcode = NVPTX::STV_f64_v2_asi; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v2_asi; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v2_asi; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v2_asi; + break; + case MVT::i64: + Opcode = NVPTX::STV_i64_v2_asi; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v2_asi; + break; + case MVT::f64: + Opcode = NVPTX::STV_f64_v2_asi; + break; } break; case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v4_asi; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v4_asi; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v4_asi; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v4_asi; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v4_asi; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v4_asi; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v4_asi; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v4_asi; + break; } break; } StOps.push_back(Base); StOps.push_back(Offset); - } else if (Subtarget.is64Bit()? - SelectADDRri64(N2.getNode(), N2, Base, Offset): - SelectADDRri(N2.getNode(), N2, Base, Offset)) { + } else if (Subtarget.is64Bit() + ? 
SelectADDRri64(N2.getNode(), N2, Base, Offset) + : SelectADDRri(N2.getNode(), N2, Base, Offset)) { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v2_ari_64; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v2_ari_64; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v2_ari_64; break; - case MVT::i64: Opcode = NVPTX::STV_i64_v2_ari_64; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v2_ari_64; break; - case MVT::f64: Opcode = NVPTX::STV_f64_v2_ari_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v2_ari_64; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v2_ari_64; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v2_ari_64; + break; + case MVT::i64: + Opcode = NVPTX::STV_i64_v2_ari_64; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v2_ari_64; + break; + case MVT::f64: + Opcode = NVPTX::STV_f64_v2_ari_64; + break; } break; case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v4_ari_64; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v4_ari_64; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v4_ari_64; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v4_ari_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v4_ari_64; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v4_ari_64; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v4_ari_64; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v4_ari_64; + break; } break; } } else { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v2_ari; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v2_ari; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v2_ari; break; - case MVT::i64: Opcode = NVPTX::STV_i64_v2_ari; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v2_ari; break; - case MVT::f64: Opcode = NVPTX::STV_f64_v2_ari; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v2_ari; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v2_ari; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v2_ari; + break; + case MVT::i64: + Opcode = NVPTX::STV_i64_v2_ari; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v2_ari; + break; + case MVT::f64: + Opcode = NVPTX::STV_f64_v2_ari; + break; } break; case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v4_ari; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v4_ari; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v4_ari; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v4_ari; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v4_ari; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v4_ari; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v4_ari; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v4_ari; + break; } break; } @@ -1047,49 +1471,95 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { } else { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v2_areg_64; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v2_areg_64; break; - case 
MVT::i32: Opcode = NVPTX::STV_i32_v2_areg_64; break; - case MVT::i64: Opcode = NVPTX::STV_i64_v2_areg_64; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v2_areg_64; break; - case MVT::f64: Opcode = NVPTX::STV_f64_v2_areg_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v2_areg_64; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v2_areg_64; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v2_areg_64; + break; + case MVT::i64: + Opcode = NVPTX::STV_i64_v2_areg_64; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v2_areg_64; + break; + case MVT::f64: + Opcode = NVPTX::STV_f64_v2_areg_64; + break; } break; case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v4_areg_64; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v4_areg_64; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v4_areg_64; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v4_areg_64; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v4_areg_64; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v4_areg_64; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v4_areg_64; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v4_areg_64; + break; } break; } } else { switch (N->getOpcode()) { - default: return NULL; + default: + return NULL; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v2_areg; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v2_areg; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v2_areg; break; - case MVT::i64: Opcode = NVPTX::STV_i64_v2_areg; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v2_areg; break; - case MVT::f64: Opcode = NVPTX::STV_f64_v2_areg; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v2_areg; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v2_areg; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v2_areg; + break; + case MVT::i64: + Opcode = NVPTX::STV_i64_v2_areg; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v2_areg; + break; + case MVT::f64: + Opcode = NVPTX::STV_f64_v2_areg; + break; } break; case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::i8: Opcode = NVPTX::STV_i8_v4_areg; break; - case MVT::i16: Opcode = NVPTX::STV_i16_v4_areg; break; - case MVT::i32: Opcode = NVPTX::STV_i32_v4_areg; break; - case MVT::f32: Opcode = NVPTX::STV_f32_v4_areg; break; + default: + return NULL; + case MVT::i8: + Opcode = NVPTX::STV_i8_v4_areg; + break; + case MVT::i16: + Opcode = NVPTX::STV_i16_v4_areg; + break; + case MVT::i32: + Opcode = NVPTX::STV_i32_v4_areg; + break; + case MVT::f32: + Opcode = NVPTX::STV_f32_v4_areg; + break; } break; } @@ -1099,7 +1569,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { StOps.push_back(Chain); - ST = CurDAG->getMachineNode(Opcode, DL, MVT::Other, &StOps[0], StOps.size()); + ST = CurDAG->getMachineNode(Opcode, DL, MVT::Other, StOps); MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand(); @@ -1112,8 +1582,8 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { // A direct address could be a globaladdress or externalsymbol. bool NVPTXDAGToDAGISel::SelectDirectAddr(SDValue N, SDValue &Address) { // Return true if TGA or ES. 
- if (N.getOpcode() == ISD::TargetGlobalAddress - || N.getOpcode() == ISD::TargetExternalSymbol) { + if (N.getOpcode() == ISD::TargetGlobalAddress || + N.getOpcode() == ISD::TargetExternalSymbol) { Address = N; return true; } @@ -1131,12 +1601,11 @@ bool NVPTXDAGToDAGISel::SelectDirectAddr(SDValue N, SDValue &Address) { } // symbol+offset -bool NVPTXDAGToDAGISel::SelectADDRsi_imp(SDNode *OpNode, SDValue Addr, - SDValue &Base, SDValue &Offset, - MVT mvt) { +bool NVPTXDAGToDAGISel::SelectADDRsi_imp( + SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset, MVT mvt) { if (Addr.getOpcode() == ISD::ADD) { if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) { - SDValue base=Addr.getOperand(0); + SDValue base = Addr.getOperand(0); if (SelectDirectAddr(base, Base)) { Offset = CurDAG->getTargetConstant(CN->getZExtValue(), mvt); return true; @@ -1159,9 +1628,8 @@ bool NVPTXDAGToDAGISel::SelectADDRsi64(SDNode *OpNode, SDValue Addr, } // register+offset -bool NVPTXDAGToDAGISel::SelectADDRri_imp(SDNode *OpNode, SDValue Addr, - SDValue &Base, SDValue &Offset, - MVT mvt) { +bool NVPTXDAGToDAGISel::SelectADDRri_imp( + SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset, MVT mvt) { if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), mvt); Offset = CurDAG->getTargetConstant(0, mvt); @@ -1169,7 +1637,7 @@ bool NVPTXDAGToDAGISel::SelectADDRri_imp(SDNode *OpNode, SDValue Addr, } if (Addr.getOpcode() == ISD::TargetExternalSymbol || Addr.getOpcode() == ISD::TargetGlobalAddress) - return false; // direct calls. + return false; // direct calls. if (Addr.getOpcode() == ISD::ADD) { if (SelectDirectAddr(Addr.getOperand(0), Addr)) { @@ -1177,7 +1645,7 @@ bool NVPTXDAGToDAGISel::SelectADDRri_imp(SDNode *OpNode, SDValue Addr, } if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) { if (FrameIndexSDNode *FIN = - dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) + dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) // Constant offset from frame ref. Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), mvt); else @@ -1209,8 +1677,7 @@ bool NVPTXDAGToDAGISel::ChkMemSDNodeAddressSpace(SDNode *N, // (See SelectionDAGNodes.h). So we need to check for both. if (MemSDNode *mN = dyn_cast<MemSDNode>(N)) { Src = mN->getSrcValue(); - } - else if (MemSDNode *mN = dyn_cast<MemIntrinsicSDNode>(N)) { + } else if (MemSDNode *mN = dyn_cast<MemIntrinsicSDNode>(N)) { Src = mN->getSrcValue(); } if (!Src) @@ -1222,13 +1689,13 @@ bool NVPTXDAGToDAGISel::ChkMemSDNodeAddressSpace(SDNode *N, /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for /// inline asm expressions. -bool NVPTXDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op, - char ConstraintCode, - std::vector<SDValue> &OutOps) { +bool NVPTXDAGToDAGISel::SelectInlineAsmMemoryOperand( + const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) { SDValue Op0, Op1; switch (ConstraintCode) { - default: return true; - case 'm': // memory + default: + return true; + case 'm': // memory if (SelectDirectAddr(Op, Op0)) { OutOps.push_back(Op0); OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32)); @@ -1251,10 +1718,8 @@ bool NVPTXDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op, // pattern matcher inserts a bunch of IMOVi8rr to convert // the imm to i8imm, and this causes instruction selection // to fail. 
-bool NVPTXDAGToDAGISel::UndefOrImm(SDValue Op, SDValue N, - SDValue &Retval) { - if (!(N.getOpcode() == ISD::UNDEF) && - !(N.getOpcode() == ISD::Constant)) +bool NVPTXDAGToDAGISel::UndefOrImm(SDValue Op, SDValue N, SDValue &Retval) { + if (!(N.getOpcode() == ISD::UNDEF) && !(N.getOpcode() == ISD::Constant)) return false; if (N.getOpcode() == ISD::UNDEF) diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h index 4ec9241..70e8e46 100644 --- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h +++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h @@ -64,11 +64,10 @@ public: const NVPTXSubtarget &Subtarget; - virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, - char ConstraintCode, - std::vector<SDValue> &OutOps); + virtual bool SelectInlineAsmMemoryOperand( + const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps); private: - // Include the pieces autogenerated from the target description. +// Include the pieces autogenerated from the target description. #include "NVPTXGenDAGISel.inc" SDNode *Select(SDNode *N); @@ -99,7 +98,6 @@ private: bool SelectADDRsi64(SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset); - bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const; bool UndefOrImm(SDValue Op, SDValue N, SDValue &Retval); diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp index e9a9fbf..6e01a5a 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -11,7 +11,6 @@ // //===----------------------------------------------------------------------===// - #include "NVPTXISelLowering.h" #include "NVPTX.h" #include "NVPTXTargetMachine.h" @@ -44,14 +43,14 @@ using namespace llvm; static unsigned int uniqueCallSite = 0; -static cl::opt<bool> -sched4reg("nvptx-sched4reg", - cl::desc("NVPTX Specific: schedule for register pressue"), - cl::init(false)); +static cl::opt<bool> sched4reg( + "nvptx-sched4reg", + cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false)); static bool IsPTXVectorType(MVT VT) { switch (VT.SimpleTy) { - default: return false; + default: + return false; case MVT::v2i8: case MVT::v4i8: case MVT::v2i16: @@ -62,22 +61,21 @@ static bool IsPTXVectorType(MVT VT) { case MVT::v2f32: case MVT::v4f32: case MVT::v2f64: - return true; + return true; } } // NVPTXTargetLowering Constructor. NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) -: TargetLowering(TM, new NVPTXTargetObjectFile()), - nvTM(&TM), - nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) { + : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM), + nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) { // always lower memset, memcpy, and memmove intrinsics to load/store // instructions, rather // then generating calls to memset, mempcy or memmove. - MaxStoresPerMemset = (unsigned)0xFFFFFFFF; - MaxStoresPerMemcpy = (unsigned)0xFFFFFFFF; - MaxStoresPerMemmove = (unsigned)0xFFFFFFFF; + MaxStoresPerMemset = (unsigned) 0xFFFFFFFF; + MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF; + MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF; setBooleanContents(ZeroOrNegativeOneBooleanContent); @@ -100,52 +98,50 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass); // Operations not directly supported by NVPTX. 
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); - setOperationAction(ISD::BR_CC, MVT::f32, Expand); - setOperationAction(ISD::BR_CC, MVT::f64, Expand); - setOperationAction(ISD::BR_CC, MVT::i1, Expand); - setOperationAction(ISD::BR_CC, MVT::i8, Expand); - setOperationAction(ISD::BR_CC, MVT::i16, Expand); - setOperationAction(ISD::BR_CC, MVT::i32, Expand); - setOperationAction(ISD::BR_CC, MVT::i64, Expand); + setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + setOperationAction(ISD::BR_CC, MVT::f32, Expand); + setOperationAction(ISD::BR_CC, MVT::f64, Expand); + setOperationAction(ISD::BR_CC, MVT::i1, Expand); + setOperationAction(ISD::BR_CC, MVT::i8, Expand); + setOperationAction(ISD::BR_CC, MVT::i16, Expand); + setOperationAction(ISD::BR_CC, MVT::i32, Expand); + setOperationAction(ISD::BR_CC, MVT::i64, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); if (nvptxSubtarget.hasROT64()) { - setOperationAction(ISD::ROTL , MVT::i64, Legal); - setOperationAction(ISD::ROTR , MVT::i64, Legal); - } - else { - setOperationAction(ISD::ROTL , MVT::i64, Expand); - setOperationAction(ISD::ROTR , MVT::i64, Expand); + setOperationAction(ISD::ROTL, MVT::i64, Legal); + setOperationAction(ISD::ROTR, MVT::i64, Legal); + } else { + setOperationAction(ISD::ROTL, MVT::i64, Expand); + setOperationAction(ISD::ROTR, MVT::i64, Expand); } if (nvptxSubtarget.hasROT32()) { - setOperationAction(ISD::ROTL , MVT::i32, Legal); - setOperationAction(ISD::ROTR , MVT::i32, Legal); - } - else { - setOperationAction(ISD::ROTL , MVT::i32, Expand); - setOperationAction(ISD::ROTR , MVT::i32, Expand); + setOperationAction(ISD::ROTL, MVT::i32, Legal); + setOperationAction(ISD::ROTR, MVT::i32, Legal); + } else { + setOperationAction(ISD::ROTL, MVT::i32, Expand); + setOperationAction(ISD::ROTR, MVT::i32, Expand); } - setOperationAction(ISD::ROTL , MVT::i16, Expand); - setOperationAction(ISD::ROTR , MVT::i16, Expand); - setOperationAction(ISD::ROTL , MVT::i8, Expand); - setOperationAction(ISD::ROTR , MVT::i8, Expand); - setOperationAction(ISD::BSWAP , MVT::i16, Expand); - setOperationAction(ISD::BSWAP , MVT::i32, Expand); - setOperationAction(ISD::BSWAP , MVT::i64, Expand); + setOperationAction(ISD::ROTL, MVT::i16, Expand); + setOperationAction(ISD::ROTR, MVT::i16, Expand); + setOperationAction(ISD::ROTL, MVT::i8, Expand); + setOperationAction(ISD::ROTR, MVT::i8, Expand); + setOperationAction(ISD::BSWAP, MVT::i16, Expand); + setOperationAction(ISD::BSWAP, MVT::i32, Expand); + setOperationAction(ISD::BSWAP, MVT::i64, Expand); // Indirect branch is not supported. // This also disables Jump Table creation. - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BRIND, MVT::Other, Expand); + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction(ISD::BRIND, MVT::Other, Expand); - setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); - setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); + setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); + setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); // We want to legalize constant related memmove and memcopy // intrinsics. 
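
The hunk above only reflows the constructor's operation-action table, but it also shows the policy it encodes: rotates are Legal only when the subtarget reports hasROT32()/hasROT64(), and BR_CC, BSWAP and indirect branches are always marked Expand. The following is a minimal, self-contained sketch of that feature-gated Legal-vs-Expand choice; it uses hypothetical stand-in enums rather than llvm::ISD or LegalizeAction and is illustrative only, not part of the patch.

#include <cstdio>

// Hypothetical stand-ins for ISD opcodes and legalize actions; the real
// code uses llvm::ISD opcodes and TargetLowering's LegalizeAction.
enum class Op { RotL32, RotL64, BSwap32, BrIndirect };
enum class Action { Legal, Expand };

struct Features {
  bool hasROT32; // subtarget supports 32-bit rotate
  bool hasROT64; // subtarget supports 64-bit rotate
};

// Same shape as the constructor above: rotates are Legal only when the
// subtarget advertises them; the rest is left to the legalizer to expand.
Action classify(Op op, const Features &F) {
  switch (op) {
  case Op::RotL32:
    return F.hasROT32 ? Action::Legal : Action::Expand;
  case Op::RotL64:
    return F.hasROT64 ? Action::Legal : Action::Expand;
  case Op::BSwap32:
  case Op::BrIndirect:
    return Action::Expand; // never selected directly in this sketch
  }
  return Action::Expand;
}

int main() {
  Features sm20{ true, true };
  std::printf("rotl.b32: %s\n",
              classify(Op::RotL32, sm20) == Action::Legal ? "legal" : "expand");
}

For an Expand entry the generic legalizer rewrites the node into simpler operations (a rotate, for instance, becomes two shifts and an OR), which is why no NVPTX pattern is needed for those cases.
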
@@ -168,16 +164,16 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) setTruncStoreAction(MVT::i8, MVT::i1, Expand); // This is legal in NVPTX - setOperationAction(ISD::ConstantFP, MVT::f64, Legal); - setOperationAction(ISD::ConstantFP, MVT::f32, Legal); + setOperationAction(ISD::ConstantFP, MVT::f64, Legal); + setOperationAction(ISD::ConstantFP, MVT::f32, Legal); // TRAP can be lowered to PTX trap - setOperationAction(ISD::TRAP, MVT::Other, Legal); + setOperationAction(ISD::TRAP, MVT::Other, Legal); // Register custom handling for vector loads/stores - for (int i = MVT::FIRST_VECTOR_VALUETYPE; - i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { - MVT VT = (MVT::SimpleValueType)i; + for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE; + ++i) { + MVT VT = (MVT::SimpleValueType) i; if (IsPTXVectorType(VT)) { setOperationAction(ISD::LOAD, VT, Custom); setOperationAction(ISD::STORE, VT, Custom); @@ -190,49 +186,86 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) computeRegisterProperties(); } - const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return 0; - case NVPTXISD::CALL: return "NVPTXISD::CALL"; - case NVPTXISD::RET_FLAG: return "NVPTXISD::RET_FLAG"; - case NVPTXISD::Wrapper: return "NVPTXISD::Wrapper"; - case NVPTXISD::NVBuiltin: return "NVPTXISD::NVBuiltin"; - case NVPTXISD::DeclareParam: return "NVPTXISD::DeclareParam"; + default: + return 0; + case NVPTXISD::CALL: + return "NVPTXISD::CALL"; + case NVPTXISD::RET_FLAG: + return "NVPTXISD::RET_FLAG"; + case NVPTXISD::Wrapper: + return "NVPTXISD::Wrapper"; + case NVPTXISD::NVBuiltin: + return "NVPTXISD::NVBuiltin"; + case NVPTXISD::DeclareParam: + return "NVPTXISD::DeclareParam"; case NVPTXISD::DeclareScalarParam: return "NVPTXISD::DeclareScalarParam"; - case NVPTXISD::DeclareRet: return "NVPTXISD::DeclareRet"; - case NVPTXISD::DeclareRetParam: return "NVPTXISD::DeclareRetParam"; - case NVPTXISD::PrintCall: return "NVPTXISD::PrintCall"; - case NVPTXISD::LoadParam: return "NVPTXISD::LoadParam"; - case NVPTXISD::StoreParam: return "NVPTXISD::StoreParam"; - case NVPTXISD::StoreParamS32: return "NVPTXISD::StoreParamS32"; - case NVPTXISD::StoreParamU32: return "NVPTXISD::StoreParamU32"; - case NVPTXISD::MoveToParam: return "NVPTXISD::MoveToParam"; - case NVPTXISD::CallArgBegin: return "NVPTXISD::CallArgBegin"; - case NVPTXISD::CallArg: return "NVPTXISD::CallArg"; - case NVPTXISD::LastCallArg: return "NVPTXISD::LastCallArg"; - case NVPTXISD::CallArgEnd: return "NVPTXISD::CallArgEnd"; - case NVPTXISD::CallVoid: return "NVPTXISD::CallVoid"; - case NVPTXISD::CallVal: return "NVPTXISD::CallVal"; - case NVPTXISD::CallSymbol: return "NVPTXISD::CallSymbol"; - case NVPTXISD::Prototype: return "NVPTXISD::Prototype"; - case NVPTXISD::MoveParam: return "NVPTXISD::MoveParam"; - case NVPTXISD::MoveRetval: return "NVPTXISD::MoveRetval"; - case NVPTXISD::MoveToRetval: return "NVPTXISD::MoveToRetval"; - case NVPTXISD::StoreRetval: return "NVPTXISD::StoreRetval"; - case NVPTXISD::PseudoUseParam: return "NVPTXISD::PseudoUseParam"; - case NVPTXISD::RETURN: return "NVPTXISD::RETURN"; - case NVPTXISD::CallSeqBegin: return "NVPTXISD::CallSeqBegin"; - case NVPTXISD::CallSeqEnd: return "NVPTXISD::CallSeqEnd"; - case NVPTXISD::LoadV2: return "NVPTXISD::LoadV2"; - case NVPTXISD::LoadV4: return "NVPTXISD::LoadV4"; - case NVPTXISD::LDGV2: return "NVPTXISD::LDGV2"; - case NVPTXISD::LDGV4: return "NVPTXISD::LDGV4"; - case NVPTXISD::LDUV2: return "NVPTXISD::LDUV2"; - case 
NVPTXISD::LDUV4: return "NVPTXISD::LDUV4"; - case NVPTXISD::StoreV2: return "NVPTXISD::StoreV2"; - case NVPTXISD::StoreV4: return "NVPTXISD::StoreV4"; + case NVPTXISD::DeclareRet: + return "NVPTXISD::DeclareRet"; + case NVPTXISD::DeclareRetParam: + return "NVPTXISD::DeclareRetParam"; + case NVPTXISD::PrintCall: + return "NVPTXISD::PrintCall"; + case NVPTXISD::LoadParam: + return "NVPTXISD::LoadParam"; + case NVPTXISD::StoreParam: + return "NVPTXISD::StoreParam"; + case NVPTXISD::StoreParamS32: + return "NVPTXISD::StoreParamS32"; + case NVPTXISD::StoreParamU32: + return "NVPTXISD::StoreParamU32"; + case NVPTXISD::MoveToParam: + return "NVPTXISD::MoveToParam"; + case NVPTXISD::CallArgBegin: + return "NVPTXISD::CallArgBegin"; + case NVPTXISD::CallArg: + return "NVPTXISD::CallArg"; + case NVPTXISD::LastCallArg: + return "NVPTXISD::LastCallArg"; + case NVPTXISD::CallArgEnd: + return "NVPTXISD::CallArgEnd"; + case NVPTXISD::CallVoid: + return "NVPTXISD::CallVoid"; + case NVPTXISD::CallVal: + return "NVPTXISD::CallVal"; + case NVPTXISD::CallSymbol: + return "NVPTXISD::CallSymbol"; + case NVPTXISD::Prototype: + return "NVPTXISD::Prototype"; + case NVPTXISD::MoveParam: + return "NVPTXISD::MoveParam"; + case NVPTXISD::MoveRetval: + return "NVPTXISD::MoveRetval"; + case NVPTXISD::MoveToRetval: + return "NVPTXISD::MoveToRetval"; + case NVPTXISD::StoreRetval: + return "NVPTXISD::StoreRetval"; + case NVPTXISD::PseudoUseParam: + return "NVPTXISD::PseudoUseParam"; + case NVPTXISD::RETURN: + return "NVPTXISD::RETURN"; + case NVPTXISD::CallSeqBegin: + return "NVPTXISD::CallSeqBegin"; + case NVPTXISD::CallSeqEnd: + return "NVPTXISD::CallSeqEnd"; + case NVPTXISD::LoadV2: + return "NVPTXISD::LoadV2"; + case NVPTXISD::LoadV4: + return "NVPTXISD::LoadV4"; + case NVPTXISD::LDGV2: + return "NVPTXISD::LDGV2"; + case NVPTXISD::LDGV4: + return "NVPTXISD::LDGV4"; + case NVPTXISD::LDUV2: + return "NVPTXISD::LDUV2"; + case NVPTXISD::LDUV4: + return "NVPTXISD::LDUV4"; + case NVPTXISD::StoreV2: + return "NVPTXISD::StoreV2"; + case NVPTXISD::StoreV4: + return "NVPTXISD::StoreV4"; } } @@ -248,10 +281,9 @@ NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op); } -std::string NVPTXTargetLowering::getPrototype(Type *retTy, - const ArgListTy &Args, - const SmallVectorImpl<ISD::OutputArg> &Outs, - unsigned retAlignment) const { +std::string NVPTXTargetLowering::getPrototype( + Type *retTy, const ArgListTy &Args, + const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment) const { bool isABI = (nvptxSubtarget.getSmVersion() >= 20); @@ -267,54 +299,47 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy, unsigned size = 0; if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) { size = ITy->getBitWidth(); - if (size < 32) size = 32; - } - else { + if (size < 32) + size = 32; + } else { assert(retTy->isFloatingPointTy() && "Floating point type expected here"); size = retTy->getPrimitiveSizeInBits(); } O << ".param .b" << size << " _"; - } - else if (isa<PointerType>(retTy)) - O << ".param .b" << getPointerTy().getSizeInBits() - << " _"; + } else if (isa<PointerType>(retTy)) + O << ".param .b" << getPointerTy().getSizeInBits() << " _"; else { if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) { SmallVector<EVT, 16> vtparts; ComputeValueVTs(*this, retTy, vtparts); unsigned totalsz = 0; - for (unsigned i=0,e=vtparts.size(); i!=e; ++i) { + for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { unsigned elems 
= 1; EVT elemtype = vtparts[i]; if (vtparts[i].isVector()) { elems = vtparts[i].getVectorNumElements(); elemtype = vtparts[i].getVectorElementType(); } - for (unsigned j=0, je=elems; j!=je; ++j) { + for (unsigned j = 0, je = elems; j != je; ++j) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 8)) sz = 8; - totalsz += sz/8; + if (elemtype.isInteger() && (sz < 8)) + sz = 8; + totalsz += sz / 8; } } - O << ".param .align " - << retAlignment - << " .b8 _[" - << totalsz << "]"; - } - else { - assert(false && - "Unknown return type"); + O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]"; + } else { + assert(false && "Unknown return type"); } } - } - else { + } else { SmallVector<EVT, 16> vtparts; ComputeValueVTs(*this, retTy, vtparts); unsigned idx = 0; - for (unsigned i=0,e=vtparts.size(); i!=e; ++i) { + for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { unsigned elems = 1; EVT elemtype = vtparts[i]; if (vtparts[i].isVector()) { @@ -322,14 +347,16 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy, elemtype = vtparts[i].getVectorElementType(); } - for (unsigned j=0, je=elems; j!=je; ++j) { + for (unsigned j = 0, je = elems; j != je; ++j) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) sz = 32; + if (elemtype.isInteger() && (sz < 32)) + sz = 32; O << ".reg .b" << sz << " _"; - if (j<je-1) O << ", "; + if (j < je - 1) + O << ", "; ++idx; } - if (i < e-1) + if (i < e - 1) O << ", "; } } @@ -340,7 +367,7 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy, bool first = true; MVT thePointerTy = getPointerTy(); - for (unsigned i=0,e=Args.size(); i!=e; ++i) { + for (unsigned i = 0, e = Args.size(); i != e; ++i) { const Type *Ty = Args[i].Ty; if (!first) { O << ", "; @@ -351,9 +378,9 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy, unsigned sz = 0; if (isa<IntegerType>(Ty)) { sz = cast<IntegerType>(Ty)->getBitWidth(); - if (sz < 32) sz = 32; - } - else if (isa<PointerType>(Ty)) + if (sz < 32) + sz = 32; + } else if (isa<PointerType>(Ty)) sz = thePointerTy.getSizeInBits(); else sz = Ty->getPrimitiveSizeInBits(); @@ -365,23 +392,20 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy, continue; } const PointerType *PTy = dyn_cast<PointerType>(Ty); - assert(PTy && - "Param with byval attribute should be a pointer type"); + assert(PTy && "Param with byval attribute should be a pointer type"); Type *ETy = PTy->getElementType(); if (isABI) { unsigned align = Outs[i].Flags.getByValAlign(); unsigned sz = getDataLayout()->getTypeAllocSize(ETy); - O << ".param .align " << align - << " .b8 "; + O << ".param .align " << align << " .b8 "; O << "_"; O << "[" << sz << "]"; continue; - } - else { + } else { SmallVector<EVT, 16> vtparts; ComputeValueVTs(*this, ETy, vtparts); - for (unsigned i=0,e=vtparts.size(); i!=e; ++i) { + for (unsigned i = 0, e = vtparts.size(); i != e; ++i) { unsigned elems = 1; EVT elemtype = vtparts[i]; if (vtparts[i].isVector()) { @@ -389,14 +413,16 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy, elemtype = vtparts[i].getVectorElementType(); } - for (unsigned j=0,je=elems; j!=je; ++j) { + for (unsigned j = 0, je = elems; j != je; ++j) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) sz = 32; + if (elemtype.isInteger() && (sz < 32)) + sz = 32; O << ".reg .b" << sz << " "; O << "_"; - if (j<je-1) O << ", "; + if (j < je - 1) + O << ", "; } - if (i<e-1) + if (i < e - 1) O << ", "; } continue; @@ -406,27 +432,25 @@ 
std::string NVPTXTargetLowering::getPrototype(Type *retTy, return O.str(); } - -SDValue -NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, - SmallVectorImpl<SDValue> &InVals) const { - SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; +SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl<SDValue> &InVals) const { + SelectionDAG &DAG = CLI.DAG; + DebugLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; - SmallVector<SDValue, 32> &OutVals = CLI.OutVals; - SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; - SDValue Chain = CLI.Chain; - SDValue Callee = CLI.Callee; - bool &isTailCall = CLI.IsTailCall; - ArgListTy &Args = CLI.Args; - Type *retTy = CLI.RetTy; - ImmutableCallSite *CS = CLI.CS; + SmallVector<SDValue, 32> &OutVals = CLI.OutVals; + SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + bool &isTailCall = CLI.IsTailCall; + ArgListTy &Args = CLI.Args; + Type *retTy = CLI.RetTy; + ImmutableCallSite *CS = CLI.CS; bool isABI = (nvptxSubtarget.getSmVersion() >= 20); SDValue tempChain = Chain; - Chain = DAG.getCALLSEQ_START(Chain, - DAG.getIntPtrConstant(uniqueCallSite, true)); + Chain = + DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true)); SDValue InFlag = Chain.getValue(1); assert((Outs.size() == Args.size()) && @@ -434,7 +458,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, unsigned paramCount = 0; // Declare the .params or .reg need to pass values // to the function - for (unsigned i=0, e=Outs.size(); i!=e; ++i) { + for (unsigned i = 0, e = Outs.size(); i != e; ++i) { EVT VT = Outs[i].VT; if (Outs[i].Flags.isByVal() == false) { @@ -445,19 +469,20 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (isABI) isReg = 0; unsigned sz = VT.getSizeInBits(); - if (VT.isInteger() && (sz < 32)) sz = 32; + if (VT.isInteger() && (sz < 32)) + sz = 32; SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue DeclareParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32), - DAG.getConstant(isReg, MVT::i32), - InFlag }; + DAG.getConstant(isReg, MVT::i32), InFlag }; Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs, DeclareParamOps, 5); InFlag = Chain.getValue(1); SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32), - DAG.getConstant(0, MVT::i32), OutVals[i], InFlag }; + DAG.getConstant(0, MVT::i32), OutVals[i], + InFlag }; unsigned opcode = NVPTXISD::StoreParam; if (isReg) @@ -477,8 +502,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // struct or vector SmallVector<EVT, 16> vtparts; const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty); - assert(PTy && - "Type of a byval parameter should be pointer"); + assert(PTy && "Type of a byval parameter should be pointer"); ComputeValueVTs(*this, PTy->getElementType(), vtparts); if (isABI) { @@ -488,40 +512,41 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // The ByValAlign in the Outs[i].Flags is alway set at this point, so we // don't need to // worry about natural alignment or not. 
See TargetLowering::LowerCallTo() - SDValue DeclareParamOps[] = { Chain, - DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32), - DAG.getConstant(paramCount, MVT::i32), - DAG.getConstant(sz, MVT::i32), - InFlag }; + SDValue DeclareParamOps[] = { + Chain, DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32), + DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32), + InFlag + }; Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs, DeclareParamOps, 5); InFlag = Chain.getValue(1); unsigned curOffset = 0; - for (unsigned j=0,je=vtparts.size(); j!=je; ++j) { + for (unsigned j = 0, je = vtparts.size(); j != je; ++j) { unsigned elems = 1; EVT elemtype = vtparts[j]; if (vtparts[j].isVector()) { elems = vtparts[j].getVectorNumElements(); elemtype = vtparts[j].getVectorElementType(); } - for (unsigned k=0,ke=elems; k!=ke; ++k) { + for (unsigned k = 0, ke = elems; k != ke; ++k) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 8)) sz = 8; - SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), - OutVals[i], - DAG.getConstant(curOffset, - getPointerTy())); - SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr, - MachinePointerInfo(), false, false, false, 0); + if (elemtype.isInteger() && (sz < 8)) + sz = 8; + SDValue srcAddr = + DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i], + DAG.getConstant(curOffset, getPointerTy())); + SDValue theVal = + DAG.getLoad(elemtype, dl, tempChain, srcAddr, + MachinePointerInfo(), false, false, false, 0); SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, - MVT::i32), - DAG.getConstant(curOffset, MVT::i32), - theVal, InFlag }; + SDValue CopyParamOps[] = { Chain, + DAG.getConstant(paramCount, MVT::i32), + DAG.getConstant(curOffset, MVT::i32), + theVal, InFlag }; Chain = DAG.getNode(NVPTXISD::StoreParam, dl, CopyParamVTs, CopyParamOps, 5); InFlag = Chain.getValue(1); - curOffset += sz/8; + curOffset += sz / 8; } } ++paramCount; @@ -530,30 +555,31 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Non-abi, struct or vector // Declare a bunch or .reg .b<size> .param<n> unsigned curOffset = 0; - for (unsigned j=0,je=vtparts.size(); j!=je; ++j) { + for (unsigned j = 0, je = vtparts.size(); j != je; ++j) { unsigned elems = 1; EVT elemtype = vtparts[j]; if (vtparts[j].isVector()) { elems = vtparts[j].getVectorNumElements(); elemtype = vtparts[j].getVectorElementType(); } - for (unsigned k=0,ke=elems; k!=ke; ++k) { + for (unsigned k = 0, ke = elems; k != ke; ++k) { unsigned sz = elemtype.getSizeInBits(); - if (elemtype.isInteger() && (sz < 32)) sz = 32; + if (elemtype.isInteger() && (sz < 32)) + sz = 32; SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue DeclareParamOps[] = { Chain, DAG.getConstant(paramCount, - MVT::i32), - DAG.getConstant(sz, MVT::i32), - DAG.getConstant(1, MVT::i32), - InFlag }; + SDValue DeclareParamOps[] = { Chain, + DAG.getConstant(paramCount, MVT::i32), + DAG.getConstant(sz, MVT::i32), + DAG.getConstant(1, MVT::i32), InFlag }; Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs, DeclareParamOps, 5); InFlag = Chain.getValue(1); - SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i], - DAG.getConstant(curOffset, - getPointerTy())); - SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr, - MachinePointerInfo(), false, false, false, 0); + SDValue srcAddr = + DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i], 
+ DAG.getConstant(curOffset, getPointerTy())); + SDValue theVal = + DAG.getLoad(elemtype, dl, tempChain, srcAddr, MachinePointerInfo(), + false, false, false, 0); SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(0, MVT::i32), theVal, @@ -578,20 +604,21 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Declare one .param .align 16 .b8 func_retval0[<size>] for ABI or // individual .reg .b<size> func_retval<0..> for non ABI unsigned resultsz = 0; - for (unsigned i=0,e=resvtparts.size(); i!=e; ++i) { + for (unsigned i = 0, e = resvtparts.size(); i != e; ++i) { unsigned elems = 1; EVT elemtype = resvtparts[i]; if (resvtparts[i].isVector()) { elems = resvtparts[i].getVectorNumElements(); elemtype = resvtparts[i].getVectorElementType(); } - for (unsigned j=0,je=elems; j!=je; ++j) { + for (unsigned j = 0, je = elems; j != je; ++j) { unsigned sz = elemtype.getSizeInBits(); if (isABI == false) { - if (elemtype.isInteger() && (sz < 32)) sz = 32; - } - else { - if (elemtype.isInteger() && (sz < 8)) sz = 8; + if (elemtype.isInteger() && (sz < 32)) + sz = 32; + } else { + if (elemtype.isInteger() && (sz < 8)) + sz = 8; } if (isABI == false) { SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); @@ -609,7 +636,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } if (isABI) { if (retTy->isPrimitiveType() || retTy->isIntegerTy() || - retTy->isPointerTy() ) { + retTy->isPointerTy()) { // Scalar needs to be at least 32bit wide if (resultsz < 32) resultsz = 32; @@ -620,8 +647,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs, DeclareRetOps, 5); InFlag = Chain.getValue(1); - } - else { + } else { if (Func) { // direct call if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment)) retAlignment = getDataLayout()->getABITypeAlignment(retTy); @@ -631,10 +657,10 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, retAlignment = getDataLayout()->getABITypeAlignment(retTy); } SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue DeclareRetOps[] = { Chain, DAG.getConstant(retAlignment, - MVT::i32), - DAG.getConstant(resultsz/8, MVT::i32), - DAG.getConstant(0, MVT::i32), InFlag }; + SDValue DeclareRetOps[] = { Chain, + DAG.getConstant(retAlignment, MVT::i32), + DAG.getConstant(resultsz / 8, MVT::i32), + DAG.getConstant(0, MVT::i32), InFlag }; Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs, DeclareRetOps, 5); InFlag = Chain.getValue(1); @@ -652,24 +678,24 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // INLINEASM SDNode. 
SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue); std::string proto_string = getPrototype(retTy, Args, Outs, retAlignment); - const char *asmstr = nvTM->getManagedStrPool()-> - getManagedString(proto_string.c_str())->c_str(); - SDValue InlineAsmOps[] = { Chain, - DAG.getTargetExternalSymbol(asmstr, - getPointerTy()), - DAG.getMDNode(0), - DAG.getTargetConstant(0, MVT::i32), InFlag }; + const char *asmstr = nvTM->getManagedStrPool() + ->getManagedString(proto_string.c_str())->c_str(); + SDValue InlineAsmOps[] = { + Chain, DAG.getTargetExternalSymbol(asmstr, getPointerTy()), + DAG.getMDNode(0), DAG.getTargetConstant(0, MVT::i32), InFlag + }; Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5); InFlag = Chain.getValue(1); } // Op to just print "call" SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue PrintCallOps[] = { Chain, - DAG.getConstant(isABI ? ((Ins.size()==0) ? 0 : 1) - : retCount, MVT::i32), - InFlag }; - Chain = DAG.getNode(Func?(NVPTXISD::PrintCallUni):(NVPTXISD::PrintCall), dl, - PrintCallVTs, PrintCallOps, 3); + SDValue PrintCallOps[] = { + Chain, + DAG.getConstant(isABI ? ((Ins.size() == 0) ? 0 : 1) : retCount, MVT::i32), + InFlag + }; + Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall), + dl, PrintCallVTs, PrintCallOps, 3); InFlag = Chain.getValue(1); // Ops to print out the function name @@ -685,31 +711,28 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, CallArgBeginOps, 2); InFlag = Chain.getValue(1); - for (unsigned i=0, e=paramCount; i!=e; ++i) { + for (unsigned i = 0, e = paramCount; i != e; ++i) { unsigned opcode; - if (i==(e-1)) + if (i == (e - 1)) opcode = NVPTXISD::LastCallArg; else opcode = NVPTXISD::CallArg; SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32), - DAG.getConstant(i, MVT::i32), - InFlag }; + DAG.getConstant(i, MVT::i32), InFlag }; Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4); InFlag = Chain.getValue(1); } SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue CallArgEndOps[] = { Chain, - DAG.getConstant(Func ? 1 : 0, MVT::i32), + SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 
1 : 0, MVT::i32), InFlag }; - Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps, - 3); + Chain = + DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps, 3); InFlag = Chain.getValue(1); if (!Func) { SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue); - SDValue PrototypeOps[] = { Chain, - DAG.getConstant(uniqueCallSite, MVT::i32), + SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32), InFlag }; Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3); InFlag = Chain.getValue(1); @@ -719,32 +742,28 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (Ins.size() > 0) { if (isABI) { unsigned resoffset = 0; - for (unsigned i=0,e=Ins.size(); i!=e; ++i) { + for (unsigned i = 0, e = Ins.size(); i != e; ++i) { unsigned sz = Ins[i].VT.getSizeInBits(); - if (Ins[i].VT.isInteger() && (sz < 8)) sz = 8; + if (Ins[i].VT.isInteger() && (sz < 8)) + sz = 8; EVT LoadRetVTs[] = { Ins[i].VT, MVT::Other, MVT::Glue }; - SDValue LoadRetOps[] = { - Chain, - DAG.getConstant(1, MVT::i32), - DAG.getConstant(resoffset, MVT::i32), - InFlag - }; + SDValue LoadRetOps[] = { Chain, DAG.getConstant(1, MVT::i32), + DAG.getConstant(resoffset, MVT::i32), InFlag }; SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, LoadRetVTs, LoadRetOps, array_lengthof(LoadRetOps)); Chain = retval.getValue(1); InFlag = retval.getValue(2); InVals.push_back(retval); - resoffset += sz/8; + resoffset += sz / 8; } - } - else { + } else { SmallVector<EVT, 16> resvtparts; ComputeValueVTs(*this, retTy, resvtparts); assert(Ins.size() == resvtparts.size() && "Unexpected number of return values in non-ABI case"); unsigned paramNum = 0; - for (unsigned i=0,e=Ins.size(); i!=e; ++i) { + for (unsigned i = 0, e = Ins.size(); i != e; ++i) { assert(EVT(Ins[i].VT) == resvtparts[i] && "Unexpected EVT type in non-ABI case"); unsigned numelems = 1; @@ -754,14 +773,11 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, elemtype = Ins[i].VT.getVectorElementType(); } std::vector<SDValue> tempRetVals; - for (unsigned j=0; j<numelems; ++j) { + for (unsigned j = 0; j < numelems; ++j) { EVT MoveRetVTs[] = { elemtype, MVT::Other, MVT::Glue }; - SDValue MoveRetOps[] = { - Chain, - DAG.getConstant(0, MVT::i32), - DAG.getConstant(paramNum, MVT::i32), - InFlag - }; + SDValue MoveRetOps[] = { Chain, DAG.getConstant(0, MVT::i32), + DAG.getConstant(paramNum, MVT::i32), + InFlag }; SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, MoveRetVTs, MoveRetOps, array_lengthof(MoveRetOps)); Chain = retval.getValue(1); @@ -777,9 +793,8 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } } } - Chain = DAG.getCALLSEQ_END(Chain, - DAG.getIntPtrConstant(uniqueCallSite, true), - DAG.getIntPtrConstant(uniqueCallSite+1, true), + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true), + DAG.getIntPtrConstant(uniqueCallSite + 1, true), InFlag); uniqueCallSite++; @@ -792,45 +807,51 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack() // (see LegalizeDAG.cpp). This is slow and uses local memory. 
// We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5 -SDValue NVPTXTargetLowering:: -LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { +SDValue +NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { SDNode *Node = Op.getNode(); DebugLoc dl = Node->getDebugLoc(); SmallVector<SDValue, 8> Ops; unsigned NumOperands = Node->getNumOperands(); - for (unsigned i=0; i < NumOperands; ++i) { + for (unsigned i = 0; i < NumOperands; ++i) { SDValue SubOp = Node->getOperand(i); EVT VVT = SubOp.getNode()->getValueType(0); EVT EltVT = VVT.getVectorElementType(); unsigned NumSubElem = VVT.getVectorNumElements(); - for (unsigned j=0; j < NumSubElem; ++j) { + for (unsigned j = 0; j < NumSubElem; ++j) { Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp, DAG.getIntPtrConstant(j))); } } - return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), - &Ops[0], Ops.size()); + return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), &Ops[0], + Ops.size()); } -SDValue NVPTXTargetLowering:: -LowerOperation(SDValue Op, SelectionDAG &DAG) const { +SDValue +NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { - case ISD::RETURNADDR: return SDValue(); - case ISD::FRAMEADDR: return SDValue(); - case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); - case ISD::INTRINSIC_W_CHAIN: return Op; + case ISD::RETURNADDR: + return SDValue(); + case ISD::FRAMEADDR: + return SDValue(); + case ISD::GlobalAddress: + return LowerGlobalAddress(Op, DAG); + case ISD::INTRINSIC_W_CHAIN: + return Op; case ISD::BUILD_VECTOR: case ISD::EXTRACT_SUBVECTOR: return Op; - case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); - case ISD::STORE: return LowerSTORE(Op, DAG); - case ISD::LOAD: return LowerLOAD(Op, DAG); + case ISD::CONCAT_VECTORS: + return LowerCONCAT_VECTORS(Op, DAG); + case ISD::STORE: + return LowerSTORE(Op, DAG); + case ISD::LOAD: + return LowerLOAD(Op, DAG); default: llvm_unreachable("Custom lowering not defined for operation"); } } - SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { if (Op.getValueType() == MVT::i1) return LowerLOADi1(Op, DAG); @@ -842,24 +863,22 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { // => // v1 = ld i8* addr // v = trunc v1 to i1 -SDValue NVPTXTargetLowering:: -LowerLOADi1(SDValue Op, SelectionDAG &DAG) const { +SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const { SDNode *Node = Op.getNode(); LoadSDNode *LD = cast<LoadSDNode>(Node); DebugLoc dl = Node->getDebugLoc(); - assert(LD->getExtensionType() == ISD::NON_EXTLOAD) ; + assert(LD->getExtensionType() == ISD::NON_EXTLOAD); assert(Node->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only"); - SDValue newLD = DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(), - LD->getPointerInfo(), - LD->isVolatile(), LD->isNonTemporal(), - LD->isInvariant(), - LD->getAlignment()); + SDValue newLD = + DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(), + LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(), + LD->isInvariant(), LD->getAlignment()); SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD); // The legalizer (the caller) is expecting two values from the legalized // load, so we build a MergeValues node for it. See ExpandUnalignedLoad() // in LegalizeDAG.cpp which also uses MergeValues. 
- SDValue Ops[] = {result, LD->getChain()}; + SDValue Ops[] = { result, LD->getChain() }; return DAG.getMergeValues(Ops, 2, dl); } @@ -887,7 +906,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { if (!ValVT.isSimple()) return SDValue(); switch (ValVT.getSimpleVT().SimpleTy) { - default: return SDValue(); + default: + return SDValue(); case MVT::v2i8: case MVT::v2i16: case MVT::v2i32: @@ -914,7 +934,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { NeedExt = true; switch (NumElts) { - default: return SDValue(); + default: + return SDValue(); case 2: Opcode = NVPTXISD::StoreV2; break; @@ -947,11 +968,9 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { MemSDNode *MemSD = cast<MemSDNode>(N); - SDValue NewSt = DAG.getMemIntrinsicNode(Opcode, DL, - DAG.getVTList(MVT::Other), &Ops[0], - Ops.size(), MemSD->getMemoryVT(), - MemSD->getMemOperand()); - + SDValue NewSt = DAG.getMemIntrinsicNode( + Opcode, DL, DAG.getVTList(MVT::Other), &Ops[0], Ops.size(), + MemSD->getMemoryVT(), MemSD->getMemOperand()); //return DCI.CombineTo(N, NewSt, true); return NewSt; @@ -964,8 +983,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { // => // v1 = zxt v to i8 // st i8, addr -SDValue NVPTXTargetLowering:: -LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const { +SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const { SDNode *Node = Op.getNode(); DebugLoc dl = Node->getDebugLoc(); StoreSDNode *ST = cast<StoreSDNode>(Node); @@ -976,18 +994,14 @@ LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const { unsigned Alignment = ST->getAlignment(); bool isVolatile = ST->isVolatile(); bool isNonTemporal = ST->isNonTemporal(); - Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, - MVT::i8, Tmp3); - SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, - ST->getPointerInfo(), isVolatile, - isNonTemporal, Alignment); + Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Tmp3); + SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), + isVolatile, isNonTemporal, Alignment); return Result; } - -SDValue -NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, int idx, - EVT v) const { +SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, + int idx, EVT v) const { std::string *name = nvTM->getManagedStrPool()->getManagedString(inname); std::stringstream suffix; suffix << idx; @@ -1000,19 +1014,16 @@ NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const { return getExtSymb(DAG, ".PARAM", idx, v); } -SDValue -NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) { +SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) { return getExtSymb(DAG, ".HLPPARAM", idx); } // Check to see if the kernel argument is image*_t or sampler_t bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) { - static const char *const specialTypes[] = { - "struct._image2d_t", - "struct._image3d_t", - "struct._sampler_t" - }; + static const char *const specialTypes[] = { "struct._image2d_t", + "struct._image3d_t", + "struct._sampler_t" }; const Type *Ty = arg->getType(); const PointerType *PTy = dyn_cast<PointerType>(Ty); @@ -1033,12 +1044,10 @@ bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) { return false; } -SDValue -NVPTXTargetLowering::LowerFormalArguments(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, 
SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const { +SDValue NVPTXTargetLowering::LowerFormalArguments( + SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); const DataLayout *TD = getDataLayout(); @@ -1054,34 +1063,43 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain, std::vector<Type *> argTypes; std::vector<const Argument *> theArgs; for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); - I != E; ++I) { + I != E; ++I) { theArgs.push_back(I); argTypes.push_back(I->getType()); } - assert(argTypes.size() == Ins.size() && - "Ins types and function types did not match"); + //assert(argTypes.size() == Ins.size() && + // "Ins types and function types did not match"); int idx = 0; - for (unsigned i=0, e=Ins.size(); i!=e; ++i, ++idx) { + for (unsigned i = 0, e = argTypes.size(); i != e; ++i, ++idx) { Type *Ty = argTypes[i]; EVT ObjectVT = getValueType(Ty); - assert(ObjectVT == Ins[i].VT && - "Ins type did not match function type"); + //assert(ObjectVT == Ins[i].VT && + // "Ins type did not match function type"); // If the kernel argument is image*_t or sampler_t, convert it to // a i32 constant holding the parameter position. This can later // matched in the AsmPrinter to output the correct mangled name. - if (isImageOrSamplerVal(theArgs[i], - (theArgs[i]->getParent() ? - theArgs[i]->getParent()->getParent() : 0))) { + if (isImageOrSamplerVal( + theArgs[i], + (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent() + : 0))) { assert(isKernel && "Only kernels can have image/sampler params"); - InVals.push_back(DAG.getConstant(i+1, MVT::i32)); + InVals.push_back(DAG.getConstant(i + 1, MVT::i32)); continue; } if (theArgs[i]->use_empty()) { // argument is dead - InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT)); + if (ObjectVT.isVector()) { + EVT EltVT = ObjectVT.getVectorElementType(); + unsigned NumElts = ObjectVT.getVectorNumElements(); + for (unsigned vi = 0; vi < NumElts; ++vi) { + InVals.push_back(DAG.getNode(ISD::UNDEF, dl, EltVT)); + } + } else { + InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT)); + } continue; } @@ -1089,31 +1107,52 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain, // to newly created nodes. The SDNOdes for params have to // appear in the same order as their order of appearance // in the original function. "idx+1" holds that order. - if (PAL.hasAttribute(i+1, Attribute::ByVal) == false) { + if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) { + if (ObjectVT.isVector()) { + unsigned NumElts = ObjectVT.getVectorNumElements(); + EVT EltVT = ObjectVT.getVectorElementType(); + unsigned Offset = 0; + for (unsigned vi = 0; vi < NumElts; ++vi) { + SDValue A = getParamSymbol(DAG, idx, getPointerTy()); + SDValue B = DAG.getIntPtrConstant(Offset); + SDValue Addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), + //getParamSymbol(DAG, idx, EltVT), + //DAG.getConstant(Offset, getPointerTy())); + A, B); + Value *SrcValue = Constant::getNullValue(PointerType::get( + EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM)); + SDValue Ld = DAG.getLoad( + EltVT, dl, Root, Addr, MachinePointerInfo(SrcValue), false, false, + false, + TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext()))); + Offset += EltVT.getStoreSizeInBits() / 8; + InVals.push_back(Ld); + } + continue; + } + // A plain scalar. 
if (isABI || isKernel) { // If ABI, load from the param symbol SDValue Arg = getParamSymbol(DAG, idx); // Conjure up a value that we can get the address space from. // FIXME: Using a constant here is a hack. - Value *srcValue = Constant::getNullValue(PointerType::get( - ObjectVT.getTypeForEVT(F->getContext()), - llvm::ADDRESS_SPACE_PARAM)); - SDValue p = DAG.getLoad(ObjectVT, dl, Root, Arg, - MachinePointerInfo(srcValue), false, false, - false, - TD->getABITypeAlignment(ObjectVT.getTypeForEVT( - F->getContext()))); + Value *srcValue = Constant::getNullValue( + PointerType::get(ObjectVT.getTypeForEVT(F->getContext()), + llvm::ADDRESS_SPACE_PARAM)); + SDValue p = DAG.getLoad( + ObjectVT, dl, Root, Arg, MachinePointerInfo(srcValue), false, false, + false, + TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext()))); if (p.getNode()) - DAG.AssignOrdering(p.getNode(), idx+1); + DAG.AssignOrdering(p.getNode(), idx + 1); InVals.push_back(p); - } - else { + } else { // If no ABI, just move the param symbol SDValue Arg = getParamSymbol(DAG, idx, ObjectVT); SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg); if (p.getNode()) - DAG.AssignOrdering(p.getNode(), idx+1); + DAG.AssignOrdering(p.getNode(), idx + 1); InVals.push_back(p); } continue; @@ -1130,47 +1169,49 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain, SDValue Arg = getParamSymbol(DAG, idx, getPointerTy()); SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg); if (p.getNode()) - DAG.AssignOrdering(p.getNode(), idx+1); + DAG.AssignOrdering(p.getNode(), idx + 1); if (isKernel) InVals.push_back(p); else { - SDValue p2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT, - DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), - p); + SDValue p2 = DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT, + DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p); InVals.push_back(p2); } } else { // Have to move a set of param symbols to registers and // store them locally and return the local pointer in InVals const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]); - assert(elemPtrType && - "Byval parameter should be a pointer type"); + assert(elemPtrType && "Byval parameter should be a pointer type"); Type *elemType = elemPtrType->getElementType(); // Compute the constituent parts SmallVector<EVT, 16> vtparts; SmallVector<uint64_t, 16> offsets; ComputeValueVTs(*this, elemType, vtparts, &offsets, 0); unsigned totalsize = 0; - for (unsigned j=0, je=vtparts.size(); j!=je; ++j) + for (unsigned j = 0, je = vtparts.size(); j != je; ++j) totalsize += vtparts[j].getStoreSizeInBits(); - SDValue localcopy = DAG.getFrameIndex(MF.getFrameInfo()-> - CreateStackObject(totalsize/8, 16, false), - getPointerTy()); + SDValue localcopy = DAG.getFrameIndex( + MF.getFrameInfo()->CreateStackObject(totalsize / 8, 16, false), + getPointerTy()); unsigned sizesofar = 0; std::vector<SDValue> theChains; - for (unsigned j=0, je=vtparts.size(); j!=je; ++j) { + for (unsigned j = 0, je = vtparts.size(); j != je; ++j) { unsigned numElems = 1; - if (vtparts[j].isVector()) numElems = vtparts[j].getVectorNumElements(); - for (unsigned k=0, ke=numElems; k!=ke; ++k) { + if (vtparts[j].isVector()) + numElems = vtparts[j].getVectorNumElements(); + for (unsigned k = 0, ke = numElems; k != ke; ++k) { EVT tmpvt = vtparts[j]; - if (tmpvt.isVector()) tmpvt = tmpvt.getVectorElementType(); + if (tmpvt.isVector()) + tmpvt = tmpvt.getVectorElementType(); SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt, 
getParamSymbol(DAG, idx, tmpvt)); - SDValue addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy, - DAG.getConstant(sizesofar, getPointerTy())); - theChains.push_back(DAG.getStore(Chain, dl, arg, addr, - MachinePointerInfo(), false, false, 0)); - sizesofar += tmpvt.getStoreSizeInBits()/8; + SDValue addr = + DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy, + DAG.getConstant(sizesofar, getPointerTy())); + theChains.push_back(DAG.getStore( + Chain, dl, arg, addr, MachinePointerInfo(), false, false, 0)); + sizesofar += tmpvt.getStoreSizeInBits() / 8; ++idx; } } @@ -1190,43 +1231,42 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain, //} if (!OutChains.empty()) - DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &OutChains[0], OutChains.size())); + DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &OutChains[0], + OutChains.size())); return Chain; } -SDValue -NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, - bool isVarArg, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { +SDValue NVPTXTargetLowering::LowerReturn( + SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, + SelectionDAG &DAG) const { bool isABI = (nvptxSubtarget.getSmVersion() >= 20); unsigned sizesofar = 0; unsigned idx = 0; - for (unsigned i=0, e=Outs.size(); i!=e; ++i) { + for (unsigned i = 0, e = Outs.size(); i != e; ++i) { SDValue theVal = OutVals[i]; EVT theValType = theVal.getValueType(); unsigned numElems = 1; - if (theValType.isVector()) numElems = theValType.getVectorNumElements(); - for (unsigned j=0,je=numElems; j!=je; ++j) { + if (theValType.isVector()) + numElems = theValType.getVectorNumElements(); + for (unsigned j = 0, je = numElems; j != je; ++j) { SDValue tmpval = theVal; if (theValType.isVector()) tmpval = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, - theValType.getVectorElementType(), - tmpval, DAG.getIntPtrConstant(j)); - Chain = DAG.getNode(isABI ? NVPTXISD::StoreRetval :NVPTXISD::MoveToRetval, - dl, MVT::Other, - Chain, - DAG.getConstant(isABI ? sizesofar : idx, MVT::i32), + theValType.getVectorElementType(), tmpval, + DAG.getIntPtrConstant(j)); + Chain = DAG.getNode( + isABI ? NVPTXISD::StoreRetval : NVPTXISD::MoveToRetval, dl, + MVT::Other, Chain, DAG.getConstant(isABI ? sizesofar : idx, MVT::i32), tmpval); if (theValType.isVector()) - sizesofar += theValType.getVectorElementType().getStoreSizeInBits()/8; + sizesofar += theValType.getVectorElementType().getStoreSizeInBits() / 8; else - sizesofar += theValType.getStoreSizeInBits()/8; + sizesofar += theValType.getStoreSizeInBits() / 8; ++idx; } } @@ -1234,12 +1274,9 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain); } -void -NVPTXTargetLowering::LowerAsmOperandForConstraint(SDValue Op, - std::string &Constraint, - std::vector<SDValue> &Ops, - SelectionDAG &DAG) const -{ +void NVPTXTargetLowering::LowerAsmOperandForConstraint( + SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, + SelectionDAG &DAG) const { if (Constraint.length() > 1) return; else @@ -1249,8 +1286,7 @@ NVPTXTargetLowering::LowerAsmOperandForConstraint(SDValue Op, // NVPTX suuport vector of legal types of any length in Intrinsics because the // NVPTX specific type legalizer // will legalize them to the PTX supported length. 
-bool -NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const { +bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const { if (isTypeLegal(VT)) return true; if (VT.isVector()) { @@ -1261,15 +1297,13 @@ NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const { return false; } - // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as // TgtMemIntrinsic // because we need the information that is only available in the "Value" type // of destination // pointer. In particular, the address space information. -bool -NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I, - unsigned Intrinsic) const { +bool NVPTXTargetLowering::getTgtMemIntrinsic( + IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const { switch (Intrinsic) { default: return false; @@ -1325,9 +1359,8 @@ NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I, /// Used to guide target specific optimizations, like loop strength reduction /// (LoopStrengthReduce.cpp) and memory optimization for address mode /// (CodeGenPrepare.cpp) -bool -NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM, - Type *Ty) const { +bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM, + Type *Ty) const { // AddrMode - This represents an addressing mode of: // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg @@ -1345,10 +1378,10 @@ NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM, } switch (AM.Scale) { - case 0: // "r", "r+i" or "i" is allowed + case 0: // "r", "r+i" or "i" is allowed break; case 1: - if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed. + if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed. return false; // Otherwise we have r+i. break; @@ -1385,8 +1418,7 @@ NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const { return TargetLowering::getConstraintType(Constraint); } - -std::pair<unsigned, const TargetRegisterClass*> +std::pair<unsigned, const TargetRegisterClass *> NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const { if (Constraint.size() == 1) { @@ -1409,8 +1441,6 @@ NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } - - /// getFunctionAlignment - Return the Log2 alignment of this function. unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const { return 4; @@ -1418,7 +1448,7 @@ unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const { /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads. static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, - SmallVectorImpl<SDValue>& Results) { + SmallVectorImpl<SDValue> &Results) { EVT ResVT = N->getValueType(0); DebugLoc DL = N->getDebugLoc(); @@ -1429,7 +1459,8 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, // but I'm leaving that as a TODO for now. 
assert(ResVT.isSimple() && "Can only handle simple types"); switch (ResVT.getSimpleVT().SimpleTy) { - default: return; + default: + return; case MVT::v2i8: case MVT::v2i16: case MVT::v2i32: @@ -1460,7 +1491,8 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, SDVTList LdResVTs; switch (NumElts) { - default: return; + default: + return; case 2: Opcode = NVPTXISD::LoadV2; LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other); @@ -1500,14 +1532,14 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, SDValue LoadChain = NewLD.getValue(NumElts); - SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts); + SDValue BuildVec = + DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts); Results.push_back(BuildVec); Results.push_back(LoadChain); } -static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, - SelectionDAG &DAG, +static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl<SDValue> &Results) { SDValue Chain = N->getOperand(0); SDValue Intrin = N->getOperand(1); @@ -1515,8 +1547,9 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, // Get the intrinsic ID unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue(); - switch(IntrinNo) { - default: return; + switch (IntrinNo) { + default: + return; case Intrinsic::nvvm_ldg_global_i: case Intrinsic::nvvm_ldg_global_f: case Intrinsic::nvvm_ldg_global_p: @@ -1544,10 +1577,12 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SDVTList LdResVTs; switch (NumElts) { - default: return; + default: + return; case 2: - switch(IntrinNo) { - default: return; + switch (IntrinNo) { + default: + return; case Intrinsic::nvvm_ldg_global_i: case Intrinsic::nvvm_ldg_global_f: case Intrinsic::nvvm_ldg_global_p: @@ -1562,8 +1597,9 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other); break; case 4: { - switch(IntrinNo) { - default: return; + switch (IntrinNo) { + default: + return; case Intrinsic::nvvm_ldg_global_i: case Intrinsic::nvvm_ldg_global_f: case Intrinsic::nvvm_ldg_global_p: @@ -1586,29 +1622,31 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, // Copy regular operands OtherOps.push_back(Chain); // Chain - // Skip operand 1 (intrinsic ID) - // Others + // Skip operand 1 (intrinsic ID) + // Others for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) OtherOps.push_back(N->getOperand(i)); MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N); - SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0], - OtherOps.size(), MemSD->getMemoryVT(), - MemSD->getMemOperand()); + SDValue NewLD = DAG.getMemIntrinsicNode( + Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(), + MemSD->getMemoryVT(), MemSD->getMemOperand()); SmallVector<SDValue, 4> ScalarRes; for (unsigned i = 0; i < NumElts; ++i) { SDValue Res = NewLD.getValue(i); if (NeedTrunc) - Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res); + Res = + DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res); ScalarRes.push_back(Res); } SDValue LoadChain = NewLD.getValue(NumElts); - SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts); + SDValue BuildVec = + DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts); Results.push_back(BuildVec); Results.push_back(LoadChain); @@ -1629,10 +1667,9 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, // We make sure the memory type is i8, which will be used during isel // to select the proper instruction. 
- SDValue NewLD = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, - LdResVTs, &Ops[0], - Ops.size(), MVT::i8, - MemSD->getMemOperand()); + SDValue NewLD = + DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0], + Ops.size(), MVT::i8, MemSD->getMemOperand()); Results.push_back(NewLD.getValue(0)); Results.push_back(NewLD.getValue(1)); @@ -1641,11 +1678,11 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, } } -void NVPTXTargetLowering::ReplaceNodeResults(SDNode *N, - SmallVectorImpl<SDValue> &Results, - SelectionDAG &DAG) const { +void NVPTXTargetLowering::ReplaceNodeResults( + SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { switch (N->getOpcode()) { - default: report_fatal_error("Unhandled custom legalization"); + default: + report_fatal_error("Unhandled custom legalization"); case ISD::LOAD: ReplaceLoadVector(N, DAG, Results); return; diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h index 14afc14..3cd49d3 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/lib/Target/NVPTX/NVPTXISelLowering.h @@ -87,7 +87,7 @@ public: bool isTypeSupportedInIntrinsic(MVT VT) const; - bool getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I, + bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const; /// isLegalAddressingMode - Return true if the addressing mode represented @@ -107,14 +107,13 @@ public: } ConstraintType getConstraintType(const std::string &Constraint) const; - std::pair<unsigned, const TargetRegisterClass*> + std::pair<unsigned, const TargetRegisterClass *> getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const; - virtual SDValue - LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, - SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const; + virtual SDValue LowerFormalArguments( + SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const; virtual SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const; @@ -136,17 +135,15 @@ public: NVPTXTargetMachine *nvTM; // PTX always uses 32-bit shift amounts - virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { - return MVT::i32; - } + virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; } virtual bool shouldSplitVectorElementType(EVT VT) const; private: - const NVPTXSubtarget &nvptxSubtarget; // cache the subtarget here + const NVPTXSubtarget &nvptxSubtarget; // cache the subtarget here - SDValue getExtSymb(SelectionDAG &DAG, const char *name, int idx, EVT = - MVT::i32) const; + SDValue getExtSymb(SelectionDAG &DAG, const char *name, int idx, + EVT = MVT::i32) const; SDValue getParamSymbol(SelectionDAG &DAG, int idx, EVT = MVT::i32) const; SDValue getParamHelpSymbol(SelectionDAG &DAG, int idx); @@ -159,8 +156,7 @@ private: SDValue LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const; - virtual void ReplaceNodeResults(SDNode *N, - SmallVectorImpl<SDValue> &Results, + virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const; }; } // namespace llvm diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp index 9e73d80..33a63c2 100644 --- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp +++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp @@ -23,61 
+23,55 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include <cstdio> - using namespace llvm; // FIXME: Add the subtarget support on this constructor. NVPTXInstrInfo::NVPTXInstrInfo(NVPTXTargetMachine &tm) -: NVPTXGenInstrInfo(), - TM(tm), - RegInfo(*this, *TM.getSubtargetImpl()) {} - + : NVPTXGenInstrInfo(), TM(tm), RegInfo(*this, *TM.getSubtargetImpl()) {} -void NVPTXInstrInfo::copyPhysReg (MachineBasicBlock &MBB, - MachineBasicBlock::iterator I, DebugLoc DL, - unsigned DestReg, unsigned SrcReg, - bool KillSrc) const { +void NVPTXInstrInfo::copyPhysReg( + MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, + unsigned DestReg, unsigned SrcReg, bool KillSrc) const { if (NVPTX::Int32RegsRegClass.contains(DestReg) && NVPTX::Int32RegsRegClass.contains(SrcReg)) BuildMI(MBB, I, DL, get(NVPTX::IMOV32rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + .addReg(SrcReg, getKillRegState(KillSrc)); else if (NVPTX::Int8RegsRegClass.contains(DestReg) && - NVPTX::Int8RegsRegClass.contains(SrcReg)) + NVPTX::Int8RegsRegClass.contains(SrcReg)) BuildMI(MBB, I, DL, get(NVPTX::IMOV8rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + .addReg(SrcReg, getKillRegState(KillSrc)); else if (NVPTX::Int1RegsRegClass.contains(DestReg) && - NVPTX::Int1RegsRegClass.contains(SrcReg)) + NVPTX::Int1RegsRegClass.contains(SrcReg)) BuildMI(MBB, I, DL, get(NVPTX::IMOV1rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + .addReg(SrcReg, getKillRegState(KillSrc)); else if (NVPTX::Float32RegsRegClass.contains(DestReg) && - NVPTX::Float32RegsRegClass.contains(SrcReg)) + NVPTX::Float32RegsRegClass.contains(SrcReg)) BuildMI(MBB, I, DL, get(NVPTX::FMOV32rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + .addReg(SrcReg, getKillRegState(KillSrc)); else if (NVPTX::Int16RegsRegClass.contains(DestReg) && - NVPTX::Int16RegsRegClass.contains(SrcReg)) + NVPTX::Int16RegsRegClass.contains(SrcReg)) BuildMI(MBB, I, DL, get(NVPTX::IMOV16rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + .addReg(SrcReg, getKillRegState(KillSrc)); else if (NVPTX::Int64RegsRegClass.contains(DestReg) && - NVPTX::Int64RegsRegClass.contains(SrcReg)) + NVPTX::Int64RegsRegClass.contains(SrcReg)) BuildMI(MBB, I, DL, get(NVPTX::IMOV64rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + .addReg(SrcReg, getKillRegState(KillSrc)); else if (NVPTX::Float64RegsRegClass.contains(DestReg) && - NVPTX::Float64RegsRegClass.contains(SrcReg)) + NVPTX::Float64RegsRegClass.contains(SrcReg)) BuildMI(MBB, I, DL, get(NVPTX::FMOV64rr), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + .addReg(SrcReg, getKillRegState(KillSrc)); else { llvm_unreachable("Don't know how to copy a register"); } } -bool NVPTXInstrInfo::isMoveInstr(const MachineInstr &MI, - unsigned &SrcReg, +bool NVPTXInstrInfo::isMoveInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DestReg) const { // Look for the appropriate part of TSFlags bool isMove = false; - unsigned TSFlags = (MI.getDesc().TSFlags & NVPTX::SimpleMoveMask) >> - NVPTX::SimpleMoveShift; + unsigned TSFlags = + (MI.getDesc().TSFlags & NVPTX::SimpleMoveMask) >> NVPTX::SimpleMoveShift; isMove = (TSFlags == 1); if (isMove) { @@ -94,10 +88,10 @@ bool NVPTXInstrInfo::isMoveInstr(const MachineInstr &MI, return false; } -bool NVPTXInstrInfo::isReadSpecialReg(MachineInstr &MI) const -{ +bool NVPTXInstrInfo::isReadSpecialReg(MachineInstr &MI) const { switch (MI.getOpcode()) { - default: return false; + default: + return false; case NVPTX::INT_PTX_SREG_NTID_X: case 
NVPTX::INT_PTX_SREG_NTID_Y: case NVPTX::INT_PTX_SREG_NTID_Z: @@ -115,12 +109,11 @@ bool NVPTXInstrInfo::isReadSpecialReg(MachineInstr &MI) const } } - bool NVPTXInstrInfo::isLoadInstr(const MachineInstr &MI, unsigned &AddrSpace) const { bool isLoad = false; - unsigned TSFlags = (MI.getDesc().TSFlags & NVPTX::isLoadMask) >> - NVPTX::isLoadShift; + unsigned TSFlags = + (MI.getDesc().TSFlags & NVPTX::isLoadMask) >> NVPTX::isLoadShift; isLoad = (TSFlags == 1); if (isLoad) AddrSpace = getLdStCodeAddrSpace(MI); @@ -130,15 +123,14 @@ bool NVPTXInstrInfo::isLoadInstr(const MachineInstr &MI, bool NVPTXInstrInfo::isStoreInstr(const MachineInstr &MI, unsigned &AddrSpace) const { bool isStore = false; - unsigned TSFlags = (MI.getDesc().TSFlags & NVPTX::isStoreMask) >> - NVPTX::isStoreShift; + unsigned TSFlags = + (MI.getDesc().TSFlags & NVPTX::isStoreMask) >> NVPTX::isStoreShift; isStore = (TSFlags == 1); if (isStore) AddrSpace = getLdStCodeAddrSpace(MI); return isStore; } - bool NVPTXInstrInfo::CanTailMerge(const MachineInstr *MI) const { unsigned addrspace = 0; if (MI->getOpcode() == NVPTX::INT_CUDA_SYNCTHREADS) @@ -152,7 +144,6 @@ bool NVPTXInstrInfo::CanTailMerge(const MachineInstr *MI) const { return true; } - /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning /// true if it cannot be understood (e.g. it's a switch dispatch or isn't /// implemented for a target). Upon success, this returns false and returns @@ -176,11 +167,9 @@ bool NVPTXInstrInfo::CanTailMerge(const MachineInstr *MI) const { /// Note that RemoveBranch and InsertBranch must be implemented to support /// cases where this method returns success. /// -bool NVPTXInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, - MachineBasicBlock *&TBB, - MachineBasicBlock *&FBB, - SmallVectorImpl<MachineOperand> &Cond, - bool AllowModify) const { +bool NVPTXInstrInfo::AnalyzeBranch( + MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, + SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const { // If the block has no terminators, it just falls into the block after it. MachineBasicBlock::iterator I = MBB.end(); if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) @@ -208,14 +197,13 @@ bool NVPTXInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineInstr *SecondLastInst = I; // If there are three terminators, we don't know what sort of block this is. - if (SecondLastInst && I != MBB.begin() && - isUnpredicatedTerminator(--I)) + if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I)) return true; // If the block ends with NVPTX::GOTO and NVPTX:CBranch, handle it. 
if (SecondLastInst->getOpcode() == NVPTX::CBranch && LastInst->getOpcode() == NVPTX::GOTO) { - TBB = SecondLastInst->getOperand(1).getMBB(); + TBB = SecondLastInst->getOperand(1).getMBB(); Cond.push_back(SecondLastInst->getOperand(0)); FBB = LastInst->getOperand(0).getMBB(); return false; @@ -238,7 +226,8 @@ bool NVPTXInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, unsigned NVPTXInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { MachineBasicBlock::iterator I = MBB.end(); - if (I == MBB.begin()) return 0; + if (I == MBB.begin()) + return 0; --I; if (I->getOpcode() != NVPTX::GOTO && I->getOpcode() != NVPTX::CBranch) return 0; @@ -248,7 +237,8 @@ unsigned NVPTXInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { I = MBB.end(); - if (I == MBB.begin()) return 1; + if (I == MBB.begin()) + return 1; --I; if (I->getOpcode() != NVPTX::CBranch) return 1; @@ -258,11 +248,9 @@ unsigned NVPTXInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { return 2; } -unsigned -NVPTXInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, - MachineBasicBlock *FBB, - const SmallVectorImpl<MachineOperand> &Cond, - DebugLoc DL) const { +unsigned NVPTXInstrInfo::InsertBranch( + MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, + const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const { // Shouldn't be a fall through. assert(TBB && "InsertBranch must not be told to insert a fallthrough"); assert((Cond.size() == 1 || Cond.size() == 0) && @@ -270,17 +258,16 @@ NVPTXInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, // One-way branch. if (FBB == 0) { - if (Cond.empty()) // Unconditional branch + if (Cond.empty()) // Unconditional branch BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(TBB); - else // Conditional branch - BuildMI(&MBB, DL, get(NVPTX::CBranch)) - .addReg(Cond[0].getReg()).addMBB(TBB); + else // Conditional branch + BuildMI(&MBB, DL, get(NVPTX::CBranch)).addReg(Cond[0].getReg()) + .addMBB(TBB); return 1; } // Two-way Conditional Branch. - BuildMI(&MBB, DL, get(NVPTX::CBranch)) - .addReg(Cond[0].getReg()).addMBB(TBB); + BuildMI(&MBB, DL, get(NVPTX::CBranch)).addReg(Cond[0].getReg()).addMBB(TBB); BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(FBB); return 2; } diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.h b/lib/Target/NVPTX/NVPTXInstrInfo.h index 7b8e218..b1972e9 100644 --- a/lib/Target/NVPTX/NVPTXInstrInfo.h +++ b/lib/Target/NVPTX/NVPTXInstrInfo.h @@ -23,8 +23,7 @@ namespace llvm { -class NVPTXInstrInfo : public NVPTXGenInstrInfo -{ +class NVPTXInstrInfo : public NVPTXGenInstrInfo { NVPTXTargetMachine &TM; const NVPTXRegisterInfo RegInfo; public: @@ -50,30 +49,26 @@ public: * const TargetRegisterClass *RC) const; */ - virtual void copyPhysReg(MachineBasicBlock &MBB, - MachineBasicBlock::iterator I, DebugLoc DL, - unsigned DestReg, unsigned SrcReg, - bool KillSrc) const ; - virtual bool isMoveInstr(const MachineInstr &MI, - unsigned &SrcReg, + virtual void copyPhysReg( + MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, + unsigned DestReg, unsigned SrcReg, bool KillSrc) const; + virtual bool isMoveInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DestReg) const; bool isLoadInstr(const MachineInstr &MI, unsigned &AddrSpace) const; bool isStoreInstr(const MachineInstr &MI, unsigned &AddrSpace) const; bool isReadSpecialReg(MachineInstr &MI) const; - virtual bool CanTailMerge(const MachineInstr *MI) const ; + virtual bool CanTailMerge(const MachineInstr *MI) const; // Branch analysis. 
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, - MachineBasicBlock *&FBB, - SmallVectorImpl<MachineOperand> &Cond, - bool AllowModify) const; + virtual bool AnalyzeBranch( + MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, + SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const; virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const; - virtual unsigned InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, - MachineBasicBlock *FBB, - const SmallVectorImpl<MachineOperand> &Cond, - DebugLoc DL) const; + virtual unsigned InsertBranch( + MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, + const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const; unsigned getLdStCodeAddrSpace(const MachineInstr &MI) const { - return MI.getOperand(2).getImm(); + return MI.getOperand(2).getImm(); } }; diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp index f7fa7aa..7c257b4 100644 --- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp +++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp @@ -25,18 +25,15 @@ using namespace llvm; -namespace llvm { -FunctionPass *createLowerAggrCopies(); -} +namespace llvm { FunctionPass *createLowerAggrCopies(); } char NVPTXLowerAggrCopies::ID = 0; // Lower MemTransferInst or load-store pair to loop -static void convertTransferToLoop(Instruction *splitAt, Value *srcAddr, - Value *dstAddr, Value *len, - //unsigned numLoads, - bool srcVolatile, bool dstVolatile, - LLVMContext &Context, Function &F) { +static void convertTransferToLoop( + Instruction *splitAt, Value *srcAddr, Value *dstAddr, Value *len, + //unsigned numLoads, + bool srcVolatile, bool dstVolatile, LLVMContext &Context, Function &F) { Type *indType = len->getType(); BasicBlock *origBB = splitAt->getParent(); @@ -48,10 +45,8 @@ static void convertTransferToLoop(Instruction *splitAt, Value *srcAddr, // srcAddr and dstAddr are expected to be pointer types, // so no check is made here. 
- unsigned srcAS = - dyn_cast<PointerType>(srcAddr->getType())->getAddressSpace(); - unsigned dstAS = - dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace(); + unsigned srcAS = dyn_cast<PointerType>(srcAddr->getType())->getAddressSpace(); + unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace(); // Cast pointers to (char *) srcAddr = builder.CreateBitCast(srcAddr, Type::getInt8PtrTy(Context, srcAS)); @@ -86,12 +81,11 @@ static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr, origBB->getTerminator()->setSuccessor(0, loopBB); IRBuilder<> builder(origBB, origBB->getTerminator()); - unsigned dstAS = - dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace(); + unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace(); // Cast pointer to the type of value getting stored - dstAddr = builder.CreateBitCast(dstAddr, - PointerType::get(val->getType(), dstAS)); + dstAddr = + builder.CreateBitCast(dstAddr, PointerType::get(val->getType(), dstAS)); IRBuilder<> loop(loopBB); PHINode *ind = loop.CreatePHI(len->getType(), 0); @@ -120,24 +114,26 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) { for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) { //BasicBlock *bb = BI; for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE; - ++II) { - if (LoadInst * load = dyn_cast<LoadInst>(II)) { + ++II) { + if (LoadInst *load = dyn_cast<LoadInst>(II)) { - if (load->hasOneUse() == false) continue; + if (load->hasOneUse() == false) + continue; - if (TD->getTypeStoreSize(load->getType()) < MaxAggrCopySize) continue; + if (TD->getTypeStoreSize(load->getType()) < MaxAggrCopySize) + continue; User *use = *(load->use_begin()); - if (StoreInst * store = dyn_cast<StoreInst>(use)) { + if (StoreInst *store = dyn_cast<StoreInst>(use)) { if (store->getOperand(0) != load) //getValueOperand - continue; + continue; aggrLoads.push_back(load); } - } else if (MemTransferInst * intr = dyn_cast<MemTransferInst>(II)) { + } else if (MemTransferInst *intr = dyn_cast<MemTransferInst>(II)) { Value *len = intr->getLength(); // If the number of elements being copied is greater // than MaxAggrCopySize, lower it to a loop - if (ConstantInt * len_int = dyn_cast < ConstantInt > (len)) { + if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) { if (len_int->getZExtValue() >= MaxAggrCopySize) { aggrMemcpys.push_back(intr); } @@ -145,9 +141,9 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) { // turn variable length memcpy/memmov into loop aggrMemcpys.push_back(intr); } - } else if (MemSetInst * memsetintr = dyn_cast<MemSetInst>(II)) { + } else if (MemSetInst *memsetintr = dyn_cast<MemSetInst>(II)) { Value *len = memsetintr->getLength(); - if (ConstantInt * len_int = dyn_cast<ConstantInt>(len)) { + if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) { if (len_int->getZExtValue() >= MaxAggrCopySize) { aggrMemsets.push_back(memsetintr); } @@ -158,8 +154,9 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) { } } } - if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0) - && (aggrMemsets.size() == 0)) return false; + if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0) && + (aggrMemsets.size() == 0)) + return false; // // Do the transformation of an aggr load/copy/set to a loop diff --git a/lib/Target/NVPTX/NVPTXNumRegisters.h b/lib/Target/NVPTX/NVPTXNumRegisters.h index b4a4dbc..a95c16b 100644 --- a/lib/Target/NVPTX/NVPTXNumRegisters.h +++ b/lib/Target/NVPTX/NVPTXNumRegisters.h @@ -11,10 +11,6 @@ #ifndef 
NVPTX_NUM_REGISTERS_H #define NVPTX_NUM_REGISTERS_H -namespace llvm { - -const unsigned NVPTXNumRegisters = 396; - -} +namespace llvm { const unsigned NVPTXNumRegisters = 396; } #endif diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp index 350a2c5..2824653 100644 --- a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp +++ b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp @@ -23,69 +23,54 @@ #include "llvm/MC/MachineLocation.h" #include "llvm/Target/TargetInstrInfo.h" - using namespace llvm; -namespace llvm -{ -std::string getNVPTXRegClassName (TargetRegisterClass const *RC) { +namespace llvm { +std::string getNVPTXRegClassName(TargetRegisterClass const *RC) { if (RC == &NVPTX::Float32RegsRegClass) { return ".f32"; } if (RC == &NVPTX::Float64RegsRegClass) { return ".f64"; - } - else if (RC == &NVPTX::Int64RegsRegClass) { + } else if (RC == &NVPTX::Int64RegsRegClass) { return ".s64"; - } - else if (RC == &NVPTX::Int32RegsRegClass) { + } else if (RC == &NVPTX::Int32RegsRegClass) { return ".s32"; - } - else if (RC == &NVPTX::Int16RegsRegClass) { + } else if (RC == &NVPTX::Int16RegsRegClass) { return ".s16"; } - // Int8Regs become 16-bit registers in PTX - else if (RC == &NVPTX::Int8RegsRegClass) { + // Int8Regs become 16-bit registers in PTX + else if (RC == &NVPTX::Int8RegsRegClass) { return ".s16"; - } - else if (RC == &NVPTX::Int1RegsRegClass) { + } else if (RC == &NVPTX::Int1RegsRegClass) { return ".pred"; - } - else if (RC == &NVPTX::SpecialRegsRegClass) { + } else if (RC == &NVPTX::SpecialRegsRegClass) { return "!Special!"; - } - else { + } else { return "INTERNAL"; } return ""; } -std::string getNVPTXRegClassStr (TargetRegisterClass const *RC) { +std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) { if (RC == &NVPTX::Float32RegsRegClass) { return "%f"; } if (RC == &NVPTX::Float64RegsRegClass) { return "%fd"; - } - else if (RC == &NVPTX::Int64RegsRegClass) { + } else if (RC == &NVPTX::Int64RegsRegClass) { return "%rd"; - } - else if (RC == &NVPTX::Int32RegsRegClass) { + } else if (RC == &NVPTX::Int32RegsRegClass) { return "%r"; - } - else if (RC == &NVPTX::Int16RegsRegClass) { + } else if (RC == &NVPTX::Int16RegsRegClass) { return "%rs"; - } - else if (RC == &NVPTX::Int8RegsRegClass) { + } else if (RC == &NVPTX::Int8RegsRegClass) { return "%rc"; - } - else if (RC == &NVPTX::Int1RegsRegClass) { + } else if (RC == &NVPTX::Int1RegsRegClass) { return "%p"; - } - else if (RC == &NVPTX::SpecialRegsRegClass) { + } else if (RC == &NVPTX::SpecialRegsRegClass) { return "!Special!"; - } - else { + } else { return "INTERNAL"; } return ""; @@ -94,23 +79,22 @@ std::string getNVPTXRegClassStr (TargetRegisterClass const *RC) { NVPTXRegisterInfo::NVPTXRegisterInfo(const TargetInstrInfo &tii, const NVPTXSubtarget &st) - : NVPTXGenRegisterInfo(0), - Is64Bit(st.is64Bit()) {} + : NVPTXGenRegisterInfo(0), Is64Bit(st.is64Bit()) {} #define GET_REGINFO_TARGET_DESC #include "NVPTXGenRegisterInfo.inc" /// NVPTX Callee Saved Registers -const uint16_t* NVPTXRegisterInfo:: -getCalleeSavedRegs(const MachineFunction *MF) const { +const uint16_t * +NVPTXRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { static const uint16_t CalleeSavedRegs[] = { 0 }; return CalleeSavedRegs; } // NVPTX Callee Saved Reg Classes -const TargetRegisterClass* const* +const TargetRegisterClass *const * NVPTXRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { - static const TargetRegisterClass * const CalleeSavedRegClasses[] = { 0 }; + static const TargetRegisterClass 
*const CalleeSavedRegClasses[] = { 0 }; return CalleeSavedRegClasses; } @@ -119,10 +103,9 @@ BitVector NVPTXRegisterInfo::getReservedRegs(const MachineFunction &MF) const { return Reserved; } -void NVPTXRegisterInfo:: -eliminateFrameIndex(MachineBasicBlock::iterator II, - int SPAdj, unsigned FIOperandNum, - RegScavenger *RS) const { +void NVPTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, unsigned FIOperandNum, + RegScavenger *RS) const { assert(SPAdj == 0 && "Unexpected"); MachineInstr &MI = *II; @@ -130,15 +113,14 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, MachineFunction &MF = *MI.getParent()->getParent(); int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) + - MI.getOperand(FIOperandNum+1).getImm(); + MI.getOperand(FIOperandNum + 1).getImm(); // Using I0 as the frame pointer MI.getOperand(FIOperandNum).ChangeToRegister(NVPTX::VRFrame, false); - MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset); + MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); } -int NVPTXRegisterInfo:: -getDwarfRegNum(unsigned RegNum, bool isEH) const { +int NVPTXRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const { return 0; } @@ -146,7 +128,4 @@ unsigned NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const { return NVPTX::VRFrame; } -unsigned NVPTXRegisterInfo::getRARegister() const { - return 0; -} - +unsigned NVPTXRegisterInfo::getRARegister() const { return 0; } diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.h b/lib/Target/NVPTX/NVPTXRegisterInfo.h index 69f73f2..d406820 100644 --- a/lib/Target/NVPTX/NVPTXRegisterInfo.h +++ b/lib/Target/NVPTX/NVPTXRegisterInfo.h @@ -17,7 +17,6 @@ #include "ManagedStringPool.h" #include "llvm/Target/TargetRegisterInfo.h" - #define GET_REGINFO_HEADER #include "NVPTXGenRegisterInfo.inc" #include "llvm/Target/TargetRegisterInfo.h" @@ -33,30 +32,28 @@ class NVPTXRegisterInfo : public NVPTXGenRegisterInfo { private: bool Is64Bit; // Hold Strings that can be free'd all together with NVPTXRegisterInfo - ManagedStringPool ManagedStrPool; + ManagedStringPool ManagedStrPool; public: - NVPTXRegisterInfo(const TargetInstrInfo &tii, - const NVPTXSubtarget &st); - + NVPTXRegisterInfo(const TargetInstrInfo &tii, const NVPTXSubtarget &st); //------------------------------------------------------ // Pure virtual functions from TargetRegisterInfo //------------------------------------------------------ // NVPTX callee saved registers - virtual const uint16_t* + virtual const uint16_t * getCalleeSavedRegs(const MachineFunction *MF = 0) const; // NVPTX callee saved register classes - virtual const TargetRegisterClass* const * + virtual const TargetRegisterClass *const * getCalleeSavedRegClasses(const MachineFunction *MF) const; virtual BitVector getReservedRegs(const MachineFunction &MF) const; - virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, - int SPAdj, unsigned FIOperandNum, - RegScavenger *RS=NULL) const; + virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, + unsigned FIOperandNum, + RegScavenger *RS = NULL) const; virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const; virtual unsigned getFrameRegister(const MachineFunction &MF) const; @@ -74,11 +71,9 @@ public: }; - -std::string getNVPTXRegClassName (const TargetRegisterClass *RC); -std::string getNVPTXRegClassStr (const TargetRegisterClass *RC); +std::string getNVPTXRegClassName(const TargetRegisterClass *RC); +std::string getNVPTXRegClassStr(const TargetRegisterClass *RC); } // end namespace llvm - 
#endif diff --git a/lib/Target/NVPTX/NVPTXSection.h b/lib/Target/NVPTX/NVPTXSection.h index e166be5..e57ace9 100644 --- a/lib/Target/NVPTX/NVPTXSection.h +++ b/lib/Target/NVPTX/NVPTXSection.h @@ -32,7 +32,8 @@ public: /// Override this as NVPTX has its own way of printing switching /// to a section. virtual void PrintSwitchToSection(const MCAsmInfo &MAI, - raw_ostream &OS) const {} + raw_ostream &OS, + const MCExpr *Subsection) const {} /// Base address of PTX sections is zero. virtual bool isBaseAddressKnownZero() const { return true; } diff --git a/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp b/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp index babe295..83dfe12 100644 --- a/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp +++ b/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp @@ -21,9 +21,7 @@ using namespace llvm; -namespace llvm { -FunctionPass *createSplitBBatBarPass(); -} +namespace llvm { FunctionPass *createSplitBBatBarPass(); } char NVPTXSplitBBatBar::ID = 0; @@ -72,6 +70,4 @@ bool NVPTXSplitBBatBar::runOnFunction(Function &F) { // This interface will most likely not be necessary, because this pass will // not be invoked by the driver, but will be used as a prerequisite to // another pass. -FunctionPass *llvm::createSplitBBatBarPass() { - return new NVPTXSplitBBatBar(); -} +FunctionPass *llvm::createSplitBBatBarPass() { return new NVPTXSplitBBatBar(); } diff --git a/lib/Target/NVPTX/NVPTXSubtarget.cpp b/lib/Target/NVPTX/NVPTXSubtarget.cpp index 7b62cce..2dcd73d 100644 --- a/lib/Target/NVPTX/NVPTXSubtarget.cpp +++ b/lib/Target/NVPTX/NVPTXSubtarget.cpp @@ -22,27 +22,23 @@ using namespace llvm; // Select Driver Interface #include "llvm/Support/CommandLine.h" namespace { -cl::opt<NVPTX::DrvInterface> -DriverInterface(cl::desc("Choose driver interface:"), - cl::values( - clEnumValN(NVPTX::NVCL, "drvnvcl", "Nvidia OpenCL driver"), - clEnumValN(NVPTX::CUDA, "drvcuda", "Nvidia CUDA driver"), - clEnumValN(NVPTX::TEST, "drvtest", "Plain Test"), - clEnumValEnd), - cl::init(NVPTX::NVCL)); +cl::opt<NVPTX::DrvInterface> DriverInterface( + cl::desc("Choose driver interface:"), + cl::values(clEnumValN(NVPTX::NVCL, "drvnvcl", "Nvidia OpenCL driver"), + clEnumValN(NVPTX::CUDA, "drvcuda", "Nvidia CUDA driver"), + clEnumValN(NVPTX::TEST, "drvtest", "Plain Test"), clEnumValEnd), + cl::init(NVPTX::NVCL)); } NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU, const std::string &FS, bool is64Bit) -: NVPTXGenSubtargetInfo(TT, CPU, FS), - Is64Bit(is64Bit), - PTXVersion(0), - SmVersion(10) { + : NVPTXGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit), PTXVersion(0), + SmVersion(20) { drvInterface = DriverInterface; // Provide the default CPU if none - std::string defCPU = "sm_10"; + std::string defCPU = "sm_20"; ParseSubtargetFeatures((CPU.empty() ? 
defCPU : CPU), FS); diff --git a/lib/Target/NVPTX/NVPTXSubtarget.h b/lib/Target/NVPTX/NVPTXSubtarget.h index beea77e..670077d 100644 --- a/lib/Target/NVPTX/NVPTXSubtarget.h +++ b/lib/Target/NVPTX/NVPTXSubtarget.h @@ -25,7 +25,7 @@ namespace llvm { class NVPTXSubtarget : public NVPTXGenSubtargetInfo { - + std::string TargetName; NVPTX::DrvInterface drvInterface; bool Is64Bit; @@ -61,13 +61,10 @@ public: bool hasLDU() const { return SmVersion >= 20; } bool hasGenericLdSt() const { return SmVersion >= 20; } inline bool hasHWROT32() const { return false; } - inline bool hasSWROT32() const { - return true; - } - inline bool hasROT32() const { return hasHWROT32() || hasSWROT32() ; } + inline bool hasSWROT32() const { return true; } + inline bool hasROT32() const { return hasHWROT32() || hasSWROT32(); } inline bool hasROT64() const { return SmVersion >= 20; } - bool is64Bit() const { return Is64Bit; } unsigned int getSmVersion() const { return SmVersion; } @@ -96,4 +93,4 @@ public: } // End llvm namespace -#endif // NVPTXSUBTARGET_H +#endif // NVPTXSUBTARGET_H diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp index cd765fa..67ca6b5 100644 --- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp +++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp @@ -45,9 +45,11 @@ #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Transforms/Scalar.h" - using namespace llvm; +namespace llvm { +void initializeNVVMReflectPass(PassRegistry&); +} extern "C" void LLVMInitializeNVPTXTarget() { // Register the target. @@ -57,52 +59,42 @@ extern "C" void LLVMInitializeNVPTXTarget() { RegisterMCAsmInfo<NVPTXMCAsmInfo> A(TheNVPTXTarget32); RegisterMCAsmInfo<NVPTXMCAsmInfo> B(TheNVPTXTarget64); + // FIXME: This pass is really intended to be invoked during IR optimization, + // but it's very NVPTX-specific. 
+ initializeNVVMReflectPass(*PassRegistry::getPassRegistry()); } -NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, - StringRef TT, - StringRef CPU, - StringRef FS, - const TargetOptions& Options, - Reloc::Model RM, - CodeModel::Model CM, - CodeGenOpt::Level OL, - bool is64bit) -: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), - Subtarget(TT, CPU, FS, is64bit), - DL(Subtarget.getDataLayout()), - InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit) -/*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ { -} - - +NVPTXTargetMachine::NVPTXTargetMachine( + const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL, bool is64bit) + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), + Subtarget(TT, CPU, FS, is64bit), DL(Subtarget.getDataLayout()), + InstrInfo(*this), TLInfo(*this), TSInfo(*this), + FrameLowering( + *this, is64bit) /*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {} void NVPTXTargetMachine32::anchor() {} -NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, - const TargetOptions &Options, - Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL) -: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) { -} +NVPTXTargetMachine32::NVPTXTargetMachine32( + const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL) + : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {} void NVPTXTargetMachine64::anchor() {} -NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, StringRef TT, - StringRef CPU, StringRef FS, - const TargetOptions &Options, - Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL) -: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) { -} - +NVPTXTargetMachine64::NVPTXTargetMachine64( + const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL) + : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} namespace llvm { class NVPTXPassConfig : public TargetPassConfig { public: NVPTXPassConfig(NVPTXTargetMachine *TM, PassManagerBase &PM) - : TargetPassConfig(TM, PM) {} + : TargetPassConfig(TM, PM) {} NVPTXTargetMachine &getNVPTXTargetMachine() const { return getTM<NVPTXTargetMachine>(); @@ -126,6 +118,4 @@ bool NVPTXPassConfig::addInstSelector() { return false; } -bool NVPTXPassConfig::addPreRegAlloc() { - return false; -} +bool NVPTXPassConfig::addPreRegAlloc() { return false; } diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.h b/lib/Target/NVPTX/NVPTXTargetMachine.h index 1a732be..5fbcf73 100644 --- a/lib/Target/NVPTX/NVPTXTargetMachine.h +++ b/lib/Target/NVPTX/NVPTXTargetMachine.h @@ -11,7 +11,6 @@ // //===----------------------------------------------------------------------===// - #ifndef NVPTX_TARGETMACHINE_H #define NVPTX_TARGETMACHINE_H @@ -31,42 +30,40 @@ namespace llvm { /// NVPTXTargetMachine /// class NVPTXTargetMachine : public LLVMTargetMachine { - NVPTXSubtarget Subtarget; - const DataLayout DL; // Calculates type size & alignment - NVPTXInstrInfo InstrInfo; - NVPTXTargetLowering TLInfo; - TargetSelectionDAGInfo TSInfo; + NVPTXSubtarget Subtarget; + const DataLayout DL; // Calculates type size & alignment + NVPTXInstrInfo InstrInfo; + NVPTXTargetLowering TLInfo; + TargetSelectionDAGInfo TSInfo; // NVPTX does not have any 
call stack frame, but need a NVPTX specific // FrameLowering class because TargetFrameLowering is abstract. - NVPTXFrameLowering FrameLowering; + NVPTXFrameLowering FrameLowering; // Hold Strings that can be free'd all together with NVPTXTargetMachine - ManagedStringPool ManagedStrPool; + ManagedStringPool ManagedStrPool; //bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level, // bool DisableVerify, MCContext *&OutCtx); public: - NVPTXTargetMachine(const Target &T, StringRef TT, StringRef CPU, - StringRef FS, const TargetOptions &Options, - Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OP, - bool is64bit); + NVPTXTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, + const TargetOptions &Options, Reloc::Model RM, + CodeModel::Model CM, CodeGenOpt::Level OP, bool is64bit); virtual const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; } - virtual const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; } - virtual const DataLayout *getDataLayout() const { return &DL;} - virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget;} + virtual const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; } + virtual const DataLayout *getDataLayout() const { return &DL; } + virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget; } virtual const NVPTXRegisterInfo *getRegisterInfo() const { return &(InstrInfo.getRegisterInfo()); } virtual NVPTXTargetLowering *getTargetLowering() const { - return const_cast<NVPTXTargetLowering*>(&TLInfo); + return const_cast<NVPTXTargetLowering *>(&TLInfo); } virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const { @@ -79,22 +76,19 @@ public: //virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level); ManagedStringPool *getManagedStrPool() const { - return const_cast<ManagedStringPool*>(&ManagedStrPool); + return const_cast<ManagedStringPool *>(&ManagedStrPool); } virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); // Emission of machine code through JITCodeEmitter is not supported. - virtual bool addPassesToEmitMachineCode(PassManagerBase &, - JITCodeEmitter &, + virtual bool addPassesToEmitMachineCode(PassManagerBase &, JITCodeEmitter &, bool = true) { return true; } // Emission of machine code through MCJIT is not supported. 
- virtual bool addPassesToEmitMC(PassManagerBase &, - MCContext *&, - raw_ostream &, + virtual bool addPassesToEmitMC(PassManagerBase &, MCContext *&, raw_ostream &, bool = true) { return true; } @@ -119,7 +113,6 @@ public: CodeGenOpt::Level OL); }; - } // end namespace llvm #endif diff --git a/lib/Target/NVPTX/NVPTXTargetObjectFile.h b/lib/Target/NVPTX/NVPTXTargetObjectFile.h index b5698a2..6ab0e08 100644 --- a/lib/Target/NVPTX/NVPTXTargetObjectFile.h +++ b/lib/Target/NVPTX/NVPTXTargetObjectFile.h @@ -46,45 +46,43 @@ public: } virtual void Initialize(MCContext &ctx, const TargetMachine &TM) { - TextSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getText()); - DataSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getDataRel()); - BSSSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getBSS()); - ReadOnlySection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getReadOnly()); + TextSection = new NVPTXSection(MCSection::SV_ELF, SectionKind::getText()); + DataSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getDataRel()); + BSSSection = new NVPTXSection(MCSection::SV_ELF, SectionKind::getBSS()); + ReadOnlySection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getReadOnly()); - StaticCtorSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - StaticDtorSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - LSDASection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - EHFrameSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfAbbrevSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfInfoSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfLineSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfFrameSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfPubTypesSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfDebugInlineSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfStrSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfLocSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfARangesSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfRangesSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); - DwarfMacroInfoSection = new NVPTXSection(MCSection::SV_ELF, - SectionKind::getMetadata()); + StaticCtorSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + StaticDtorSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + LSDASection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + EHFrameSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfAbbrevSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfInfoSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfLineSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfFrameSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfPubTypesSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfDebugInlineSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfStrSection = + new NVPTXSection(MCSection::SV_ELF, 
SectionKind::getMetadata()); + DwarfLocSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfARangesSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfRangesSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); + DwarfMacroInfoSection = + new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata()); } virtual const MCSection *getSectionForConstant(SectionKind Kind) const { @@ -93,8 +91,7 @@ public: virtual const MCSection * getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind, - Mangler *Mang, - const TargetMachine &TM) const { + Mangler *Mang, const TargetMachine &TM) const { return DataSection; } diff --git a/lib/Target/NVPTX/NVPTXUtilities.cpp b/lib/Target/NVPTX/NVPTXUtilities.cpp index 1ccc9f7..6786eb0 100644 --- a/lib/Target/NVPTX/NVPTXUtilities.cpp +++ b/lib/Target/NVPTX/NVPTXUtilities.cpp @@ -34,7 +34,6 @@ typedef std::map<const Module *, global_val_annot_t> per_module_annot_t; ManagedStatic<per_module_annot_t> annotationCache; - static void cacheAnnotationFromMD(const MDNode *md, key_val_pair_t &retval) { assert(md && "Invalid mdnode for annotation"); assert((md->getNumOperands() % 2) == 1 && "Invalid number of operands"); @@ -46,7 +45,7 @@ static void cacheAnnotationFromMD(const MDNode *md, key_val_pair_t &retval) { assert(prop && "Annotation property not a string"); // value - ConstantInt *Val = dyn_cast<ConstantInt>(md->getOperand(i+1)); + ConstantInt *Val = dyn_cast<ConstantInt>(md->getOperand(i + 1)); assert(Val && "Value operand not a constant int"); std::string keyname = prop->getString().str(); @@ -120,9 +119,9 @@ bool llvm::findAllNVVMAnnotation(const GlobalValue *gv, std::string prop, bool llvm::isTexture(const llvm::Value &val) { if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) { unsigned annot; - if (llvm::findOneNVVMAnnotation(gv, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ISTEXTURE], - annot)) { + if (llvm::findOneNVVMAnnotation( + gv, llvm::PropertyAnnotationNames[llvm::PROPERTY_ISTEXTURE], + annot)) { assert((annot == 1) && "Unexpected annotation on a texture symbol"); return true; } @@ -133,9 +132,9 @@ bool llvm::isTexture(const llvm::Value &val) { bool llvm::isSurface(const llvm::Value &val) { if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) { unsigned annot; - if (llvm::findOneNVVMAnnotation(gv, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSURFACE], - annot)) { + if (llvm::findOneNVVMAnnotation( + gv, llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSURFACE], + annot)) { assert((annot == 1) && "Unexpected annotation on a surface symbol"); return true; } @@ -146,9 +145,9 @@ bool llvm::isSurface(const llvm::Value &val) { bool llvm::isSampler(const llvm::Value &val) { if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) { unsigned annot; - if (llvm::findOneNVVMAnnotation(gv, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSAMPLER], - annot)) { + if (llvm::findOneNVVMAnnotation( + gv, llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSAMPLER], + annot)) { assert((annot == 1) && "Unexpected annotation on a sampler symbol"); return true; } @@ -156,9 +155,9 @@ bool llvm::isSampler(const llvm::Value &val) { if (const Argument *arg = dyn_cast<Argument>(&val)) { const Function *func = arg->getParent(); std::vector<unsigned> annot; - if (llvm::findAllNVVMAnnotation(func, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSAMPLER], - annot)) { + if (llvm::findAllNVVMAnnotation( + func, llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSAMPLER], + annot)) { 
if (std::find(annot.begin(), annot.end(), arg->getArgNo()) != annot.end()) return true; } @@ -171,8 +170,9 @@ bool llvm::isImageReadOnly(const llvm::Value &val) { const Function *func = arg->getParent(); std::vector<unsigned> annot; if (llvm::findAllNVVMAnnotation(func, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ISREADONLY_IMAGE_PARAM], - annot)) { + llvm::PropertyAnnotationNames[ + llvm::PROPERTY_ISREADONLY_IMAGE_PARAM], + annot)) { if (std::find(annot.begin(), annot.end(), arg->getArgNo()) != annot.end()) return true; } @@ -185,8 +185,9 @@ bool llvm::isImageWriteOnly(const llvm::Value &val) { const Function *func = arg->getParent(); std::vector<unsigned> annot; if (llvm::findAllNVVMAnnotation(func, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ISWRITEONLY_IMAGE_PARAM], - annot)) { + llvm::PropertyAnnotationNames[ + llvm::PROPERTY_ISWRITEONLY_IMAGE_PARAM], + annot)) { if (std::find(annot.begin(), annot.end(), arg->getArgNo()) != annot.end()) return true; } @@ -214,52 +215,44 @@ std::string llvm::getSamplerName(const llvm::Value &val) { } bool llvm::getMaxNTIDx(const Function &F, unsigned &x) { - return (llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_X], - x)); + return (llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_X], x)); } bool llvm::getMaxNTIDy(const Function &F, unsigned &y) { - return (llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_Y], - y)); + return (llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_Y], y)); } bool llvm::getMaxNTIDz(const Function &F, unsigned &z) { - return (llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_Z], - z)); + return (llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_Z], z)); } bool llvm::getReqNTIDx(const Function &F, unsigned &x) { - return (llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_X], - x)); + return (llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_X], x)); } bool llvm::getReqNTIDy(const Function &F, unsigned &y) { - return (llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_Y], - y)); + return (llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_Y], y)); } bool llvm::getReqNTIDz(const Function &F, unsigned &z) { - return (llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_Z], - z)); + return (llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_Z], z)); } bool llvm::getMinCTASm(const Function &F, unsigned &x) { - return (llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_MINNCTAPERSM], - x)); + return (llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_MINNCTAPERSM], x)); } bool llvm::isKernelFunction(const Function &F) { unsigned x = 0; - bool retval = llvm::findOneNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ISKERNEL_FUNCTION], - x); + bool retval = llvm::findOneNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_ISKERNEL_FUNCTION], x); if (retval == false) { // There is no NVVM metadata, check the calling convention if (F.getCallingConv() == llvm::CallingConv::PTX_Kernel) @@ -267,20 +260,19 @@ bool llvm::isKernelFunction(const Function &F) { else return false; } - return (x==1); + return (x == 
1); } bool llvm::getAlign(const Function &F, unsigned index, unsigned &align) { std::vector<unsigned> Vs; - bool retval = llvm::findAllNVVMAnnotation(&F, - llvm::PropertyAnnotationNames[llvm::PROPERTY_ALIGN], - Vs); + bool retval = llvm::findAllNVVMAnnotation( + &F, llvm::PropertyAnnotationNames[llvm::PROPERTY_ALIGN], Vs); if (retval == false) return false; - for (int i=0, e=Vs.size(); i<e; i++) { + for (int i = 0, e = Vs.size(); i < e; i++) { unsigned v = Vs[i]; - if ( (v >> 16) == index ) { - align = v & 0xFFFF; + if ((v >> 16) == index) { + align = v & 0xFFFF; return true; } } @@ -289,16 +281,15 @@ bool llvm::getAlign(const Function &F, unsigned index, unsigned &align) { bool llvm::getAlign(const CallInst &I, unsigned index, unsigned &align) { if (MDNode *alignNode = I.getMetadata("callalign")) { - for (int i=0, n = alignNode->getNumOperands(); - i<n; i++) { + for (int i = 0, n = alignNode->getNumOperands(); i < n; i++) { if (const ConstantInt *CI = - dyn_cast<ConstantInt>(alignNode->getOperand(i))) { + dyn_cast<ConstantInt>(alignNode->getOperand(i))) { unsigned v = CI->getZExtValue(); - if ( (v>>16) == index ) { + if ((v >> 16) == index) { align = v & 0xFFFF; return true; } - if ( (v>>16) > index ) { + if ((v >> 16) > index) { return false; } } @@ -337,8 +328,8 @@ bool llvm::isMemorySpaceTransferIntrinsic(Intrinsic::ID id) { // consider several special intrinsics in striping pointer casts, and // provide an option to ignore GEP indicies for find out the base address only // which could be used in simple alias disambigurate. -const Value *llvm::skipPointerTransfer(const Value *V, - bool ignore_GEP_indices) { +const Value * +llvm::skipPointerTransfer(const Value *V, bool ignore_GEP_indices) { V = V->stripPointerCasts(); while (true) { if (const IntrinsicInst *IS = dyn_cast<IntrinsicInst>(V)) { @@ -360,8 +351,8 @@ const Value *llvm::skipPointerTransfer(const Value *V, // - ignore GEP indicies for find out the base address only, and // - tracking PHINode // which could be used in simple alias disambigurate. -const Value *llvm::skipPointerTransfer(const Value *V, - std::set<const Value *> &processed) { +const Value * +llvm::skipPointerTransfer(const Value *V, std::set<const Value *> &processed) { if (processed.find(V) != processed.end()) return NULL; processed.insert(V); @@ -406,7 +397,6 @@ const Value *llvm::skipPointerTransfer(const Value *V, return V; } - // The following are some useful utilities for debuggung BasicBlock *llvm::getParentBlock(Value *v) { diff --git a/lib/Target/NVPTX/NVPTXUtilities.h b/lib/Target/NVPTX/NVPTXUtilities.h index 247e09b..a208004 100644 --- a/lib/Target/NVPTX/NVPTXUtilities.h +++ b/lib/Target/NVPTX/NVPTXUtilities.h @@ -23,8 +23,7 @@ #include <string> #include <vector> -namespace llvm -{ +namespace llvm { #define NVCL_IMAGE2D_READONLY_FUNCNAME "__is_image2D_readonly" #define NVCL_IMAGE3D_READONLY_FUNCNAME "__is_image3D_readonly" @@ -64,8 +63,7 @@ bool isBarrierIntrinsic(llvm::Intrinsic::ID); /// to pass into type construction of CallInst ctors. This turns a null /// terminated list of pointers (or other value types) into a real live vector. /// -template<typename T> -inline std::vector<T> make_vector(T A, ...) { +template <typename T> inline std::vector<T> make_vector(T A, ...) { va_list Args; va_start(Args, A); std::vector<T> Result; @@ -78,8 +76,8 @@ inline std::vector<T> make_vector(T A, ...) 
{ bool isMemorySpaceTransferIntrinsic(Intrinsic::ID id); const Value *skipPointerTransfer(const Value *V, bool ignore_GEP_indices); -const Value *skipPointerTransfer(const Value *V, - std::set<const Value *> &processed); +const Value * +skipPointerTransfer(const Value *V, std::set<const Value *> &processed); BasicBlock *getParentBlock(Value *v); Function *getParentFunction(Value *v); void dumpBlock(Value *v, char *blockName); diff --git a/lib/Target/NVPTX/NVPTXutil.cpp b/lib/Target/NVPTX/NVPTXutil.cpp index 6a0e532..5f074b3 100644 --- a/lib/Target/NVPTX/NVPTXutil.cpp +++ b/lib/Target/NVPTX/NVPTXutil.cpp @@ -18,8 +18,7 @@ using namespace llvm; namespace llvm { -bool isParamLoad(const MachineInstr *MI) -{ +bool isParamLoad(const MachineInstr *MI) { if ((MI->getOpcode() != NVPTX::LD_i32_avar) && (MI->getOpcode() != NVPTX::LD_i64_avar)) return false; @@ -30,13 +29,11 @@ bool isParamLoad(const MachineInstr *MI) return true; } -#define DATA_MASK 0x7f -#define DIGIT_WIDTH 7 -#define MORE_BYTES 0x80 +#define DATA_MASK 0x7f +#define DIGIT_WIDTH 7 +#define MORE_BYTES 0x80 -static int encode_leb128(uint64_t val, int *nbytes, - char *space, int splen) -{ +static int encode_leb128(uint64_t val, int *nbytes, char *space, int splen) { char *a; char *end = space + splen; @@ -61,29 +58,30 @@ static int encode_leb128(uint64_t val, int *nbytes, #undef DIGIT_WIDTH #undef MORE_BYTES -uint64_t encode_leb128(const char *str) -{ - union { uint64_t x; char a[8]; } temp64; +uint64_t encode_leb128(const char *str) { + union { + uint64_t x; + char a[8]; + } temp64; temp64.x = 0; - for (unsigned i=0,e=strlen(str); i!=e; ++i) - temp64.a[i] = str[e-1-i]; + for (unsigned i = 0, e = strlen(str); i != e; ++i) + temp64.a[i] = str[e - 1 - i]; char encoded[16]; int nbytes; int retval = encode_leb128(temp64.x, &nbytes, encoded, 16); - (void)retval; - assert(retval == 0 && - "Encoding to leb128 failed"); + (void) retval; + assert(retval == 0 && "Encoding to leb128 failed"); assert(nbytes <= 8 && "Cannot support register names with leb128 encoding > 8 bytes"); temp64.x = 0; - for (int i=0; i<nbytes; ++i) + for (int i = 0; i < nbytes; ++i) temp64.a[i] = encoded[i]; return temp64.x; diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp new file mode 100644 index 0000000..0ad62ce --- /dev/null +++ b/lib/Target/NVPTX/NVVMReflect.cpp @@ -0,0 +1,177 @@ +//===- NVVMReflect.cpp - NVVM Emulate conditional compilation -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass replaces occurences of __nvvm_reflect("string") with an +// integer based on -nvvm-reflect-list string=<int> option given to this pass. +// If an undefined string value is seen in a call to __nvvm_reflect("string"), +// a default value of 0 will be used. 
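A rough sketch of the pattern the new pass is aimed at (the key name "USE_FAST_PATH" and the helper functions below are invented for illustration; only __nvvm_reflect, the -nvvm-reflect-list option, and the default value of 0 come from the code in this commit):

    // Device-library code declares the reflect hook and branches on it.
    extern "C" int __nvvm_reflect(const char *name);

    float fast_rsqrt(float x);     // hypothetical fast variant
    float precise_rsqrt(float x);  // hypothetical precise variant

    float my_rsqrt(float x) {
      // After this pass runs (e.g. opt -nvvm-reflect
      // -nvvm-reflect-list USE_FAST_PATH=1), the call below becomes the
      // constant 1; for a key that was never assigned it becomes 0, and
      // ordinary constant folding then removes the untaken branch.
      if (__nvvm_reflect("USE_FAST_PATH"))
        return fast_rsqrt(x);
      return precise_rsqrt(x);
    }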
+// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/Pass.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Constants.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_os_ostream.h" +#include "llvm/Transforms/Scalar.h" +#include <map> +#include <sstream> +#include <string> +#include <vector> + +#define NVVM_REFLECT_FUNCTION "__nvvm_reflect" + +using namespace llvm; + +namespace llvm { void initializeNVVMReflectPass(PassRegistry &); } + +namespace { +class LLVM_LIBRARY_VISIBILITY NVVMReflect : public ModulePass { +private: + StringMap<int> VarMap; + typedef DenseMap<std::string, int>::iterator VarMapIter; + Function *ReflectFunction; + +public: + static char ID; + NVVMReflect() : ModulePass(ID) { + VarMap.clear(); + ReflectFunction = 0; + } + + void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); } + virtual bool runOnModule(Module &); + + void setVarMap(); +}; +} + +static cl::opt<bool> +NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true), + cl::desc("NVVM reflection, enabled by default")); + +char NVVMReflect::ID = 0; +INITIALIZE_PASS(NVVMReflect, "nvvm-reflect", + "Replace occurences of __nvvm_reflect() calls with 0/1", false, + false) + +static cl::list<std::string> +ReflectList("nvvm-reflect-list", cl::value_desc("name=<int>"), + cl::desc("A list of string=num assignments"), + cl::ValueRequired); + +/// The command line can look as follows : +/// -nvvm-reflect-list a=1,b=2 -nvvm-reflect-list c=3,d=0 -R e=2 +/// The strings "a=1,b=2", "c=3,d=0", "e=2" are available in the +/// ReflectList vector. First, each of ReflectList[i] is 'split' +/// using "," as the delimiter. Then each of this part is split +/// using "=" as the delimiter. +void NVVMReflect::setVarMap() { + for (unsigned i = 0, e = ReflectList.size(); i != e; ++i) { + DEBUG(dbgs() << "Option : " << ReflectList[i] << "\n"); + SmallVector<StringRef, 4> NameValList; + StringRef(ReflectList[i]).split(NameValList, ","); + for (unsigned j = 0, ej = NameValList.size(); j != ej; ++j) { + SmallVector<StringRef, 2> NameValPair; + NameValList[j].split(NameValPair, "="); + assert(NameValPair.size() == 2 && "name=val expected"); + std::stringstream ValStream(NameValPair[1]); + int Val; + ValStream >> Val; + assert((!(ValStream.fail())) && "integer value expected"); + VarMap[NameValPair[0]] = Val; + } + } +} + +bool NVVMReflect::runOnModule(Module &M) { + if (!NVVMReflectEnabled) + return false; + + setVarMap(); + + ReflectFunction = M.getFunction(NVVM_REFLECT_FUNCTION); + + // If reflect function is not used, then there will be + // no entry in the module. + if (ReflectFunction == 0) + return false; + + // Validate _reflect function + assert(ReflectFunction->isDeclaration() && + "_reflect function should not have a body"); + assert(ReflectFunction->getReturnType()->isIntegerTy() && + "_reflect's return type should be integer"); + + std::vector<Instruction *> ToRemove; + + // Go through the uses of ReflectFunction in this Function. + // Each of them should a CallInst with a ConstantArray argument. + // First validate that. If the c-string corresponding to the + // ConstantArray can be found successfully, see if it can be + // found in VarMap. 
If so, replace the uses of CallInst with the + // value found in VarMap. If not, replace the use with value 0. + for (Value::use_iterator I = ReflectFunction->use_begin(), + E = ReflectFunction->use_end(); + I != E; ++I) { + assert(isa<CallInst>(*I) && "Only a call instruction can use _reflect"); + CallInst *Reflect = cast<CallInst>(*I); + + assert((Reflect->getNumOperands() == 2) && + "Only one operand expect for _reflect function"); + // In cuda, we will have an extra constant-to-generic conversion of + // the string. + const Value *conv = Reflect->getArgOperand(0); + assert(isa<CallInst>(conv) && "Expected a const-to-gen conversion"); + const CallInst *ConvCall = cast<CallInst>(conv); + const Value *str = ConvCall->getArgOperand(0); + assert(isa<ConstantExpr>(str) && + "Format of _reflect function not recognized"); + const ConstantExpr *GEP = cast<ConstantExpr>(str); + + const Value *Sym = GEP->getOperand(0); + assert(isa<Constant>(Sym) && "Format of _reflect function not recognized"); + + const Constant *SymStr = cast<Constant>(Sym); + + assert(isa<ConstantDataSequential>(SymStr->getOperand(0)) && + "Format of _reflect function not recognized"); + + assert(cast<ConstantDataSequential>(SymStr->getOperand(0))->isCString() && + "Format of _reflect function not recognized"); + + std::string ReflectArg = + cast<ConstantDataSequential>(SymStr->getOperand(0))->getAsString(); + + ReflectArg = ReflectArg.substr(0, ReflectArg.size() - 1); + DEBUG(dbgs() << "Arg of _reflect : " << ReflectArg << "\n"); + + int ReflectVal = 0; // The default value is 0 + if (VarMap.find(ReflectArg) != VarMap.end()) { + ReflectVal = VarMap[ReflectArg]; + } + Reflect->replaceAllUsesWith( + ConstantInt::get(Reflect->getType(), ReflectVal)); + ToRemove.push_back(Reflect); + } + if (ToRemove.size() == 0) + return false; + + for (unsigned i = 0, e = ToRemove.size(); i != e; ++i) + ToRemove[i]->eraseFromParent(); + return true; +} diff --git a/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp b/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp index 6c801b8..cc7d4dc 100644 --- a/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp +++ b/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp @@ -17,7 +17,7 @@ Target llvm::TheNVPTXTarget64; extern "C" void LLVMInitializeNVPTXTargetInfo() { RegisterTarget<Triple::nvptx> X(TheNVPTXTarget32, "nvptx", - "NVIDIA PTX 32-bit"); + "NVIDIA PTX 32-bit"); RegisterTarget<Triple::nvptx64> Y(TheNVPTXTarget64, "nvptx64", - "NVIDIA PTX 64-bit"); + "NVIDIA PTX 64-bit"); } diff --git a/lib/Target/NVPTX/cl_common_defines.h b/lib/Target/NVPTX/cl_common_defines.h index a7347ef..45cc0b8 100644 --- a/lib/Target/NVPTX/cl_common_defines.h +++ b/lib/Target/NVPTX/cl_common_defines.h @@ -24,22 +24,21 @@ enum { CLK_LUMINANCE = 0x10B9 #if (__NV_CL_C_VERSION >= __NV_CL_C_VERSION_1_1) - , + , CLK_Rx = 0x10BA, CLK_RGx = 0x10BB, CLK_RGBx = 0x10BC #endif }; - typedef enum clk_channel_type { // valid formats for float return types - CLK_SNORM_INT8 = 0x10D0, // four channel RGBA unorm8 - CLK_SNORM_INT16 = 0x10D1, // four channel RGBA unorm16 - CLK_UNORM_INT8 = 0x10D2, // four channel RGBA unorm8 - CLK_UNORM_INT16 = 0x10D3, // four channel RGBA unorm16 - CLK_HALF_FLOAT = 0x10DD, // four channel RGBA half - CLK_FLOAT = 0x10DE, // four channel RGBA float + CLK_SNORM_INT8 = 0x10D0, // four channel RGBA unorm8 + CLK_SNORM_INT16 = 0x10D1, // four channel RGBA unorm16 + CLK_UNORM_INT8 = 0x10D2, // four channel RGBA unorm8 + CLK_UNORM_INT16 = 0x10D3, // four channel RGBA unorm16 + CLK_HALF_FLOAT = 0x10DD, // four channel RGBA half + 
CLK_FLOAT = 0x10DE, // four channel RGBA float #if (__NV_CL_C_VERSION >= __NV_CL_C_VERSION_1_1) CLK_UNORM_SHORT_565 = 0x10D4, @@ -48,7 +47,7 @@ typedef enum clk_channel_type { #endif // valid only for integer return types - CLK_SIGNED_INT8 = 0x10D7, + CLK_SIGNED_INT8 = 0x10D7, CLK_SIGNED_INT16 = 0x10D8, CLK_SIGNED_INT32 = 0x10D9, CLK_UNSIGNED_INT8 = 0x10DA, @@ -56,70 +55,68 @@ typedef enum clk_channel_type { CLK_UNSIGNED_INT32 = 0x10DC, // CI SPI for CPU - __CLK_UNORM_INT8888 , // four channel ARGB unorm8 - __CLK_UNORM_INT8888R, // four channel BGRA unorm8 + __CLK_UNORM_INT8888, // four channel ARGB unorm8 + __CLK_UNORM_INT8888R, // four channel BGRA unorm8 __CLK_VALID_IMAGE_TYPE_COUNT, __CLK_INVALID_IMAGE_TYPE = __CLK_VALID_IMAGE_TYPE_COUNT, - __CLK_VALID_IMAGE_TYPE_MASK_BITS = 4, // number of bits required to - // represent any image type - __CLK_VALID_IMAGE_TYPE_MASK = ( 1 << __CLK_VALID_IMAGE_TYPE_MASK_BITS ) - 1 -}clk_channel_type; + __CLK_VALID_IMAGE_TYPE_MASK_BITS = 4, // number of bits required to + // represent any image type + __CLK_VALID_IMAGE_TYPE_MASK = (1 << __CLK_VALID_IMAGE_TYPE_MASK_BITS) - 1 +} clk_channel_type; typedef enum clk_sampler_type { - __CLK_ADDRESS_BASE = 0, - CLK_ADDRESS_NONE = 0 << __CLK_ADDRESS_BASE, - CLK_ADDRESS_CLAMP = 1 << __CLK_ADDRESS_BASE, - CLK_ADDRESS_CLAMP_TO_EDGE = 2 << __CLK_ADDRESS_BASE, - CLK_ADDRESS_REPEAT = 3 << __CLK_ADDRESS_BASE, - CLK_ADDRESS_MIRROR = 4 << __CLK_ADDRESS_BASE, + __CLK_ADDRESS_BASE = 0, + CLK_ADDRESS_NONE = 0 << __CLK_ADDRESS_BASE, + CLK_ADDRESS_CLAMP = 1 << __CLK_ADDRESS_BASE, + CLK_ADDRESS_CLAMP_TO_EDGE = 2 << __CLK_ADDRESS_BASE, + CLK_ADDRESS_REPEAT = 3 << __CLK_ADDRESS_BASE, + CLK_ADDRESS_MIRROR = 4 << __CLK_ADDRESS_BASE, #if (__NV_CL_C_VERSION >= __NV_CL_C_VERSION_1_1) - CLK_ADDRESS_MIRRORED_REPEAT = CLK_ADDRESS_MIRROR, + CLK_ADDRESS_MIRRORED_REPEAT = CLK_ADDRESS_MIRROR, #endif - __CLK_ADDRESS_MASK = CLK_ADDRESS_NONE | CLK_ADDRESS_CLAMP | - CLK_ADDRESS_CLAMP_TO_EDGE | - CLK_ADDRESS_REPEAT | CLK_ADDRESS_MIRROR, - __CLK_ADDRESS_BITS = 3, // number of bits required to - // represent address info - - __CLK_NORMALIZED_BASE = __CLK_ADDRESS_BITS, - CLK_NORMALIZED_COORDS_FALSE = 0, - CLK_NORMALIZED_COORDS_TRUE = 1 << __CLK_NORMALIZED_BASE, - __CLK_NORMALIZED_MASK = CLK_NORMALIZED_COORDS_FALSE | - CLK_NORMALIZED_COORDS_TRUE, - __CLK_NORMALIZED_BITS = 1, // number of bits required to - // represent normalization - - __CLK_FILTER_BASE = __CLK_NORMALIZED_BASE + - __CLK_NORMALIZED_BITS, - CLK_FILTER_NEAREST = 0 << __CLK_FILTER_BASE, - CLK_FILTER_LINEAR = 1 << __CLK_FILTER_BASE, - CLK_FILTER_ANISOTROPIC = 2 << __CLK_FILTER_BASE, - __CLK_FILTER_MASK = CLK_FILTER_NEAREST | CLK_FILTER_LINEAR | - CLK_FILTER_ANISOTROPIC, - __CLK_FILTER_BITS = 2, // number of bits required to - // represent address info - - __CLK_MIP_BASE = __CLK_FILTER_BASE + __CLK_FILTER_BITS, - CLK_MIP_NEAREST = 0 << __CLK_MIP_BASE, - CLK_MIP_LINEAR = 1 << __CLK_MIP_BASE, - CLK_MIP_ANISOTROPIC = 2 << __CLK_MIP_BASE, - __CLK_MIP_MASK = CLK_MIP_NEAREST | CLK_MIP_LINEAR | - CLK_MIP_ANISOTROPIC, - __CLK_MIP_BITS = 2, - - __CLK_SAMPLER_BITS = __CLK_MIP_BASE + __CLK_MIP_BITS, - __CLK_SAMPLER_MASK = __CLK_MIP_MASK | __CLK_FILTER_MASK | - __CLK_NORMALIZED_MASK | __CLK_ADDRESS_MASK, - - __CLK_ANISOTROPIC_RATIO_BITS = 5, - __CLK_ANISOTROPIC_RATIO_MASK = (int) 0x80000000 >> - (__CLK_ANISOTROPIC_RATIO_BITS-1) + __CLK_ADDRESS_MASK = + CLK_ADDRESS_NONE | CLK_ADDRESS_CLAMP | CLK_ADDRESS_CLAMP_TO_EDGE | + CLK_ADDRESS_REPEAT | CLK_ADDRESS_MIRROR, + __CLK_ADDRESS_BITS = 3, // number of 
bits required to + // represent address info + + __CLK_NORMALIZED_BASE = __CLK_ADDRESS_BITS, + CLK_NORMALIZED_COORDS_FALSE = 0, + CLK_NORMALIZED_COORDS_TRUE = 1 << __CLK_NORMALIZED_BASE, + __CLK_NORMALIZED_MASK = + CLK_NORMALIZED_COORDS_FALSE | CLK_NORMALIZED_COORDS_TRUE, + __CLK_NORMALIZED_BITS = 1, // number of bits required to + // represent normalization + + __CLK_FILTER_BASE = __CLK_NORMALIZED_BASE + __CLK_NORMALIZED_BITS, + CLK_FILTER_NEAREST = 0 << __CLK_FILTER_BASE, + CLK_FILTER_LINEAR = 1 << __CLK_FILTER_BASE, + CLK_FILTER_ANISOTROPIC = 2 << __CLK_FILTER_BASE, + __CLK_FILTER_MASK = + CLK_FILTER_NEAREST | CLK_FILTER_LINEAR | CLK_FILTER_ANISOTROPIC, + __CLK_FILTER_BITS = 2, // number of bits required to + // represent address info + + __CLK_MIP_BASE = __CLK_FILTER_BASE + __CLK_FILTER_BITS, + CLK_MIP_NEAREST = 0 << __CLK_MIP_BASE, + CLK_MIP_LINEAR = 1 << __CLK_MIP_BASE, + CLK_MIP_ANISOTROPIC = 2 << __CLK_MIP_BASE, + __CLK_MIP_MASK = CLK_MIP_NEAREST | CLK_MIP_LINEAR | CLK_MIP_ANISOTROPIC, + __CLK_MIP_BITS = 2, + + __CLK_SAMPLER_BITS = __CLK_MIP_BASE + __CLK_MIP_BITS, + __CLK_SAMPLER_MASK = __CLK_MIP_MASK | __CLK_FILTER_MASK | + __CLK_NORMALIZED_MASK | __CLK_ADDRESS_MASK, + + __CLK_ANISOTROPIC_RATIO_BITS = 5, + __CLK_ANISOTROPIC_RATIO_MASK = + (int) 0x80000000 >> (__CLK_ANISOTROPIC_RATIO_BITS - 1) } clk_sampler_type; // Memory synchronization -#define CLK_LOCAL_MEM_FENCE (1 << 0) -#define CLK_GLOBAL_MEM_FENCE (1 << 1) +#define CLK_LOCAL_MEM_FENCE (1 << 0) +#define CLK_GLOBAL_MEM_FENCE (1 << 1) #endif // __CL_COMMON_DEFINES_H__ diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp index 3d58306..bacc108 100644 --- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp +++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp @@ -13,7 +13,7 @@ #define DEBUG_TYPE "asm-printer" #include "PPCInstPrinter.h" -#include "MCTargetDesc/PPCBaseInfo.h" +#include "MCTargetDesc/PPCMCTargetDesc.h" #include "MCTargetDesc/PPCPredicates.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" @@ -87,35 +87,9 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O, const char *Modifier) { unsigned Code = MI->getOperand(OpNo).getImm(); - if (!Modifier) { - unsigned CCReg = MI->getOperand(OpNo+1).getReg(); - unsigned RegNo; - switch (CCReg) { - default: llvm_unreachable("Unknown CR register"); - case PPC::CR0: RegNo = 0; break; - case PPC::CR1: RegNo = 1; break; - case PPC::CR2: RegNo = 2; break; - case PPC::CR3: RegNo = 3; break; - case PPC::CR4: RegNo = 4; break; - case PPC::CR5: RegNo = 5; break; - case PPC::CR6: RegNo = 6; break; - case PPC::CR7: RegNo = 7; break; - } - - // Print the CR bit number. The Code is ((BI << 5) | BO) for a - // BCC, but we must have the positive form here (BO == 12) - unsigned BI = Code >> 5; - assert((Code & 0xF) == 12 && - "BO in predicate bit must have the positive form"); - - unsigned Value = 4*RegNo + BI; - O << Value; - return; - } if (StringRef(Modifier) == "cc") { switch ((PPC::Predicate)Code) { - case PPC::PRED_ALWAYS: return; // Don't print anything for always. case PPC::PRED_LT: O << "lt"; return; case PPC::PRED_LE: O << "le"; return; case PPC::PRED_EQ: O << "eq"; return; @@ -129,8 +103,6 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo, assert(StringRef(Modifier) == "reg" && "Need to specify 'cc' or 'reg' as predicate op modifier!"); - // Don't print the register for 'always'. 
- if (Code == PPC::PRED_ALWAYS) return; printOperand(MI, OpNo+1, O); } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp index f24edf6..ec26574 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp @@ -30,13 +30,9 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { case FK_Data_2: case FK_Data_4: case FK_Data_8: - case PPC::fixup_ppc_toc: case PPC::fixup_ppc_tlsreg: case PPC::fixup_ppc_nofixup: return Value; - case PPC::fixup_ppc_lo14: - case PPC::fixup_ppc_toc16_ds: - return (Value & 0xffff) << 2; case PPC::fixup_ppc_brcond14: return Value & 0xfffc; case PPC::fixup_ppc_br24: @@ -48,8 +44,9 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { case PPC::fixup_ppc_ha16: return ((Value >> 16) + ((Value & 0x8000) ? 1 : 0)) & 0xffff; case PPC::fixup_ppc_lo16: - case PPC::fixup_ppc_toc16: return Value & 0xffff; + case PPC::fixup_ppc_lo16_ds: + return Value & 0xfffc; } } @@ -82,10 +79,7 @@ public: { "fixup_ppc_brcond14", 16, 14, MCFixupKindInfo::FKF_IsPCRel }, { "fixup_ppc_lo16", 16, 16, 0 }, { "fixup_ppc_ha16", 16, 16, 0 }, - { "fixup_ppc_lo14", 16, 14, 0 }, - { "fixup_ppc_toc", 0, 64, 0 }, - { "fixup_ppc_toc16", 16, 16, 0 }, - { "fixup_ppc_toc16_ds", 16, 14, 0 }, + { "fixup_ppc_lo16_ds", 16, 14, 0 }, { "fixup_ppc_tlsreg", 0, 0, 0 }, { "fixup_ppc_nofixup", 0, 0, 0 } }; diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h b/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h deleted file mode 100644 index 9c975c0..0000000 --- a/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h +++ /dev/null @@ -1,70 +0,0 @@ -//===-- PPCBaseInfo.h - Top level definitions for PPC -----------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains small standalone helper functions and enum definitions for -// the PPC target useful for the compiler back-end and the MC libraries. -// As such, it deliberately does not include references to LLVM core -// code gen types, passes, etc.. -// -//===----------------------------------------------------------------------===// - -#ifndef PPCBASEINFO_H -#define PPCBASEINFO_H - -#include "PPCMCTargetDesc.h" -#include "llvm/Support/ErrorHandling.h" - -namespace llvm { - -/// getPPCRegisterNumbering - Given the enum value for some register, e.g. -/// PPC::F14, return the number that it corresponds to (e.g. 14). 
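The hand-maintained mapping deleted below is what the rest of this commit replaces with the TableGen-generated register encodings; call sites move from the removed helper to MCRegisterInfo::getEncodingValue(), as the PPCMCCodeEmitter and PPCCodeEmitter hunks further down show. Illustrative fragments only, taken from those hunks, not compilable in isolation:

    // before: return 0x80 >> getPPCRegisterNumbering(MO.getReg());
    // after:  return 0x80 >> CTX.getRegisterInfo().getEncodingValue(MO.getReg());

    // before: return getPPCRegisterNumbering(PPC::X13);
    // after:  return CTX.getRegisterInfo().getEncodingValue(PPC::X13);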
-inline static unsigned getPPCRegisterNumbering(unsigned RegEnum) { - using namespace PPC; - switch (RegEnum) { - case 0: return 0; - case R0 : case X0 : case F0 : case V0 : case CR0: case CR0LT: return 0; - case R1 : case X1 : case F1 : case V1 : case CR1: case CR0GT: return 1; - case R2 : case X2 : case F2 : case V2 : case CR2: case CR0EQ: return 2; - case R3 : case X3 : case F3 : case V3 : case CR3: case CR0UN: return 3; - case R4 : case X4 : case F4 : case V4 : case CR4: case CR1LT: return 4; - case R5 : case X5 : case F5 : case V5 : case CR5: case CR1GT: return 5; - case R6 : case X6 : case F6 : case V6 : case CR6: case CR1EQ: return 6; - case R7 : case X7 : case F7 : case V7 : case CR7: case CR1UN: return 7; - case R8 : case X8 : case F8 : case V8 : case CR2LT: return 8; - case R9 : case X9 : case F9 : case V9 : case CR2GT: return 9; - case R10: case X10: case F10: case V10: case CR2EQ: return 10; - case R11: case X11: case F11: case V11: case CR2UN: return 11; - case R12: case X12: case F12: case V12: case CR3LT: return 12; - case R13: case X13: case F13: case V13: case CR3GT: return 13; - case R14: case X14: case F14: case V14: case CR3EQ: return 14; - case R15: case X15: case F15: case V15: case CR3UN: return 15; - case R16: case X16: case F16: case V16: case CR4LT: return 16; - case R17: case X17: case F17: case V17: case CR4GT: return 17; - case R18: case X18: case F18: case V18: case CR4EQ: return 18; - case R19: case X19: case F19: case V19: case CR4UN: return 19; - case R20: case X20: case F20: case V20: case CR5LT: return 20; - case R21: case X21: case F21: case V21: case CR5GT: return 21; - case R22: case X22: case F22: case V22: case CR5EQ: return 22; - case R23: case X23: case F23: case V23: case CR5UN: return 23; - case R24: case X24: case F24: case V24: case CR6LT: return 24; - case R25: case X25: case F25: case V25: case CR6GT: return 25; - case R26: case X26: case F26: case V26: case CR6EQ: return 26; - case R27: case X27: case F27: case V27: case CR6UN: return 27; - case R28: case X28: case F28: case V28: case CR7LT: return 28; - case R29: case X29: case F29: case V29: case CR7GT: return 29; - case R30: case X30: case F30: case V30: case CR7EQ: return 30; - case R31: case X31: case F31: case V31: case CR7UN: return 31; - default: - llvm_unreachable("Unhandled reg in PPCRegisterInfo::getRegisterNumbering!"); - } -} - -} // end namespace llvm; - -#endif diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp index 61868d4..81a86dc 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp @@ -77,6 +77,9 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, case PPC::fixup_ppc_br24: Type = ELF::R_PPC_REL24; break; + case PPC::fixup_ppc_brcond14: + Type = ELF::R_PPC_REL14; + break; case FK_Data_4: case FK_PCRel_4: Type = ELF::R_PPC_REL32; @@ -133,6 +136,9 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, case MCSymbolRefExpr::VK_None: Type = ELF::R_PPC_ADDR16_LO; break; + case MCSymbolRefExpr::VK_PPC_TOC_ENTRY: + Type = ELF::R_PPC64_TOC16; + break; case MCSymbolRefExpr::VK_PPC_TOC16_LO: Type = ELF::R_PPC64_TOC16_LO; break; @@ -144,35 +150,12 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, break; } break; - case PPC::fixup_ppc_lo14: - Type = ELF::R_PPC_ADDR14; - break; - case PPC::fixup_ppc_toc: - Type = ELF::R_PPC64_TOC; - break; - case PPC::fixup_ppc_toc16: + case 
PPC::fixup_ppc_lo16_ds: switch (Modifier) { default: llvm_unreachable("Unsupported Modifier"); - case MCSymbolRefExpr::VK_PPC_TPREL16_LO: - Type = ELF::R_PPC64_TPREL16_LO; - break; - case MCSymbolRefExpr::VK_PPC_DTPREL16_LO: - Type = ELF::R_PPC64_DTPREL16_LO; - break; case MCSymbolRefExpr::VK_None: - Type = ELF::R_PPC64_TOC16; - break; - case MCSymbolRefExpr::VK_PPC_TOC16_LO: - Type = ELF::R_PPC64_TOC16_LO; + Type = ELF::R_PPC64_ADDR16_DS; break; - case MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO: - Type = ELF::R_PPC64_GOT_TLSLD16_LO; - break; - } - break; - case PPC::fixup_ppc_toc16_ds: - switch (Modifier) { - default: llvm_unreachable("Unsupported Modifier"); case MCSymbolRefExpr::VK_PPC_TOC_ENTRY: Type = ELF::R_PPC64_TOC16_DS; break; @@ -253,8 +236,7 @@ adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) { switch ((unsigned)Fixup.getKind()) { case PPC::fixup_ppc_ha16: case PPC::fixup_ppc_lo16: - case PPC::fixup_ppc_toc16: - case PPC::fixup_ppc_toc16_ds: + case PPC::fixup_ppc_lo16_ds: RelocOffset += 2; break; default: diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h index 709daa4..86c44f5 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h @@ -33,19 +33,9 @@ enum Fixups { /// like 'lis'. fixup_ppc_ha16, - /// fixup_ppc_lo14 - A 14-bit fixup corresponding to lo16(_foo) for instrs - /// like 'std'. - fixup_ppc_lo14, - - /// fixup_ppc_toc - Insert value of TOC base (.TOC.). - fixup_ppc_toc, - - /// fixup_ppc_toc16 - A 16-bit signed fixup relative to the TOC base. - fixup_ppc_toc16, - - /// fixup_ppc_toc16_ds - A 14-bit signed fixup relative to the TOC base with - /// implied 2 zero bits - fixup_ppc_toc16_ds, + /// fixup_ppc_lo16_ds - A 14-bit fixup corresponding to lo16(_foo) with + /// implied 2 zero bits for instrs like 'std'. + fixup_ppc_lo16_ds, /// fixup_ppc_tlsreg - Insert thread-pointer register number. 
fixup_ppc_tlsreg, diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp index d048426..2223cd6 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp @@ -13,10 +13,10 @@ #define DEBUG_TYPE "mccodeemitter" #include "MCTargetDesc/PPCMCTargetDesc.h" -#include "MCTargetDesc/PPCBaseInfo.h" #include "MCTargetDesc/PPCFixupKinds.h" #include "llvm/ADT/Statistic.h" #include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstrInfo.h" @@ -33,24 +33,17 @@ class PPCMCCodeEmitter : public MCCodeEmitter { void operator=(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION; const MCSubtargetInfo &STI; + const MCContext &CTX; Triple TT; public: PPCMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, MCContext &ctx) - : STI(sti), TT(STI.getTargetTriple()) { + : STI(sti), CTX(ctx), TT(STI.getTargetTriple()) { } ~PPCMCCodeEmitter() {} - bool is64BitMode() const { - return (STI.getFeatureBits() & PPC::Feature64Bit) != 0; - } - - bool isSVR4ABI() const { - return TT.isMacOSX() == 0; - } - unsigned getDirectBrEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups) const; unsigned getCondBrEncoding(const MCInst &MI, unsigned OpNo, @@ -81,12 +74,11 @@ public: SmallVectorImpl<MCFixup> &Fixups) const { uint64_t Bits = getBinaryCodeForInstr(MI, Fixups); - // BL8_NOP_ELF, BLA8_NOP_ELF, etc., all have a size of 8 because of the - // following 'nop'. + // BL8_NOP etc. all have a size of 8 because of the following 'nop'. unsigned Size = 4; // FIXME: Have Desc.getSize() return the correct value! unsigned Opcode = MI.getOpcode(); - if (Opcode == PPC::BL8_NOP_ELF || Opcode == PPC::BLA8_NOP_ELF || - Opcode == PPC::BL8_NOP_ELF_TLSGD || Opcode == PPC::BL8_NOP_ELF_TLSLD) + if (Opcode == PPC::BL8_NOP || Opcode == PPC::BLA8_NOP || + Opcode == PPC::BL8_NOP_TLSGD || Opcode == PPC::BL8_NOP_TLSLD) Size = 8; // Output the constant in big endian byte order. @@ -121,11 +113,11 @@ getDirectBrEncoding(const MCInst &MI, unsigned OpNo, (MCFixupKind)PPC::fixup_ppc_br24)); // For special TLS calls, add another fixup for the symbol. Apparently - // BL8_NOP_ELF, BL8_NOP_ELF_TLSGD, and BL8_NOP_ELF_TLSLD are sufficiently + // BL8_NOP, BL8_NOP_TLSGD, and BL8_NOP_TLSLD are sufficiently // similar that TblGen will not generate a separate case for the latter // two, so this is the only way to get the extra fixup generated. unsigned Opcode = MI.getOpcode(); - if (Opcode == PPC::BL8_NOP_ELF_TLSGD || Opcode == PPC::BL8_NOP_ELF_TLSLD) { + if (Opcode == PPC::BL8_NOP_TLSGD || Opcode == PPC::BL8_NOP_TLSLD) { const MCOperand &MO2 = MI.getOperand(OpNo+1); Fixups.push_back(MCFixup::Create(0, MO2.getExpr(), (MCFixupKind)PPC::fixup_ppc_nofixup)); @@ -178,12 +170,8 @@ unsigned PPCMCCodeEmitter::getMemRIEncoding(const MCInst &MI, unsigned OpNo, return (getMachineOpValue(MI, MO, Fixups) & 0xFFFF) | RegBits; // Add a fixup for the displacement field. 
- if (isSVR4ABI() && is64BitMode()) - Fixups.push_back(MCFixup::Create(0, MO.getExpr(), - (MCFixupKind)PPC::fixup_ppc_toc16)); - else - Fixups.push_back(MCFixup::Create(0, MO.getExpr(), - (MCFixupKind)PPC::fixup_ppc_lo16)); + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_lo16)); return RegBits; } @@ -199,13 +187,9 @@ unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo, if (MO.isImm()) return (getMachineOpValue(MI, MO, Fixups) & 0x3FFF) | RegBits; - // Add a fixup for the branch target. - if (isSVR4ABI() && is64BitMode()) - Fixups.push_back(MCFixup::Create(0, MO.getExpr(), - (MCFixupKind)PPC::fixup_ppc_toc16_ds)); - else - Fixups.push_back(MCFixup::Create(0, MO.getExpr(), - (MCFixupKind)PPC::fixup_ppc_lo14)); + // Add a fixup for the displacement field. + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_lo16_ds)); return RegBits; } @@ -220,7 +204,7 @@ unsigned PPCMCCodeEmitter::getTLSRegEncoding(const MCInst &MI, unsigned OpNo, // Return the thread-pointer register's encoding. Fixups.push_back(MCFixup::Create(0, MO.getExpr(), (MCFixupKind)PPC::fixup_ppc_tlsreg)); - return getPPCRegisterNumbering(PPC::X13); + return CTX.getRegisterInfo().getEncodingValue(PPC::X13); } unsigned PPCMCCodeEmitter:: @@ -231,7 +215,7 @@ get_crbitm_encoding(const MCInst &MI, unsigned OpNo, MI.getOpcode() == PPC::MFOCRF || MI.getOpcode() == PPC::MTCRF8) && (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7)); - return 0x80 >> getPPCRegisterNumbering(MO.getReg()); + return 0x80 >> CTX.getRegisterInfo().getEncodingValue(MO.getReg()); } @@ -243,7 +227,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO, // The GPR operand should come through here though. assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MFOCRF) || MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7); - return getPPCRegisterNumbering(MO.getReg()); + return CTX.getRegisterInfo().getEncodingValue(MO.getReg()); } assert(MO.isImm() && diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp index 12bb0a1..853e505 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp @@ -18,7 +18,6 @@ using namespace llvm; PPC::Predicate PPC::InvertPredicate(PPC::Predicate Opcode) { switch (Opcode) { - default: llvm_unreachable("Unknown PPC branch opcode!"); case PPC::PRED_EQ: return PPC::PRED_NE; case PPC::PRED_NE: return PPC::PRED_EQ; case PPC::PRED_LT: return PPC::PRED_GE; @@ -28,4 +27,20 @@ PPC::Predicate PPC::InvertPredicate(PPC::Predicate Opcode) { case PPC::PRED_NU: return PPC::PRED_UN; case PPC::PRED_UN: return PPC::PRED_NU; } + llvm_unreachable("Unknown PPC branch opcode!"); } + +PPC::Predicate PPC::getSwappedPredicate(PPC::Predicate Opcode) { + switch (Opcode) { + case PPC::PRED_EQ: return PPC::PRED_EQ; + case PPC::PRED_NE: return PPC::PRED_NE; + case PPC::PRED_LT: return PPC::PRED_GT; + case PPC::PRED_GE: return PPC::PRED_LE; + case PPC::PRED_GT: return PPC::PRED_LT; + case PPC::PRED_LE: return PPC::PRED_GE; + case PPC::PRED_NU: return PPC::PRED_NU; + case PPC::PRED_UN: return PPC::PRED_UN; + } + llvm_unreachable("Unknown PPC branch opcode!"); +} + diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h index b0680fb..444758c 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h @@ -25,7 +25,6 @@ namespace llvm { 
namespace PPC { /// Predicate - These are "(BI << 5) | BO" for various predicates. enum Predicate { - PRED_ALWAYS = (0 << 5) | 20, PRED_LT = (0 << 5) | 12, PRED_LE = (1 << 5) | 4, PRED_EQ = (2 << 5) | 12, @@ -38,6 +37,10 @@ namespace PPC { /// Invert the specified predicate. != -> ==, < -> >=. Predicate InvertPredicate(Predicate Opcode); + + /// Assume the condition register is set by MI(a,b), return the predicate if + /// we modify the instructions such that condition register is set by MI(b,a). + Predicate getSwappedPredicate(Predicate Opcode); } } diff --git a/lib/Target/PowerPC/PPC.h b/lib/Target/PowerPC/PPC.h index f71979f..b4be51a 100644 --- a/lib/Target/PowerPC/PPC.h +++ b/lib/Target/PowerPC/PPC.h @@ -15,7 +15,6 @@ #ifndef LLVM_TARGET_POWERPC_H #define LLVM_TARGET_POWERPC_H -#include "MCTargetDesc/PPCBaseInfo.h" #include "MCTargetDesc/PPCMCTargetDesc.h" #include <string> @@ -32,6 +31,7 @@ namespace llvm { class MCInst; FunctionPass *createPPCCTRLoops(); + FunctionPass *createPPCEarlyReturnPass(); FunctionPass *createPPCBranchSelectionPass(); FunctionPass *createPPCISelDag(PPCTargetMachine &TM); FunctionPass *createPPCJITCodeEmitterPass(PPCTargetMachine &TM, @@ -41,7 +41,7 @@ namespace llvm { /// \brief Creates an PPC-specific Target Transformation Info pass. ImmutablePass *createPPCTargetTransformInfoPass(const PPCTargetMachine *TM); - + namespace PPCII { /// Target Operand Flag enum. diff --git a/lib/Target/PowerPC/PPC.td b/lib/Target/PowerPC/PPC.td index 9929136..649ffc1 100644 --- a/lib/Target/PowerPC/PPC.td +++ b/lib/Target/PowerPC/PPC.td @@ -57,10 +57,30 @@ def FeatureMFOCRF : SubtargetFeature<"mfocrf","HasMFOCRF", "true", "Enable the MFOCRF instruction">; def FeatureFSqrt : SubtargetFeature<"fsqrt","HasFSQRT", "true", "Enable the fsqrt instruction">; +def FeatureFRE : SubtargetFeature<"fre", "HasFRE", "true", + "Enable the fre instruction">; +def FeatureFRES : SubtargetFeature<"fres", "HasFRES", "true", + "Enable the fres instruction">; +def FeatureFRSQRTE : SubtargetFeature<"frsqrte", "HasFRSQRTE", "true", + "Enable the frsqrte instruction">; +def FeatureFRSQRTES : SubtargetFeature<"frsqrtes", "HasFRSQRTES", "true", + "Enable the frsqrtes instruction">; +def FeatureRecipPrec : SubtargetFeature<"recipprec", "HasRecipPrec", "true", + "Assume higher precision reciprocal estimates">; def FeatureSTFIWX : SubtargetFeature<"stfiwx","HasSTFIWX", "true", "Enable the stfiwx instruction">; +def FeatureLFIWAX : SubtargetFeature<"lfiwax","HasLFIWAX", "true", + "Enable the lfiwax instruction">; +def FeatureFPRND : SubtargetFeature<"fprnd", "HasFPRND", "true", + "Enable the fri[mnpz] instructions">; +def FeatureFPCVT : SubtargetFeature<"fpcvt", "HasFPCVT", "true", + "Enable fc[ft]* (unsigned and single-precision) and lfiwzx instructions">; def FeatureISEL : SubtargetFeature<"isel","HasISEL", "true", "Enable the isel instruction">; +def FeaturePOPCNTD : SubtargetFeature<"popcntd","HasPOPCNTD", "true", + "Enable the popcnt[dw] instructions">; +def FeatureLDBRX : SubtargetFeature<"ldbrx","HasLDBRX", "true", + "Enable the ldbrx instruction">; def FeatureBookE : SubtargetFeature<"booke", "IsBookE", "true", "Enable Book E instructions">; def FeatureQPX : SubtargetFeature<"qpx","HasQPX", "true", @@ -71,19 +91,47 @@ def FeatureQPX : SubtargetFeature<"qpx","HasQPX", "true", // // CMPB p6, p6x, p7 cmpb // DFP p6, p6x, p7 decimal floating-point instructions -// FLT_CVT p7 fcfids, fcfidu, fcfidus, fcfiduz, fctiwuz -// FPRND p5x, p6, p6x, p7 frim, frin, frip, friz -// FRE p5 through p7 fre (vs. 
fres, available since p3) -// FRSQRTES p5 through p7 frsqrtes (vs. frsqrte, available since p3) -// LDBRX p7 load with byte reversal -// LFIWAX p6, p6x, p7 lfiwax -// LFIWZX p7 lfiwzx // POPCNTB p5 through p7 popcntb and related instructions -// POPCNTD p7 popcntd and related instructions -// RECIP_PREC p6, p6x, p7 higher precision reciprocal estimates // VSX p7 vector-scalar instruction set //===----------------------------------------------------------------------===// +// Classes used for relation maps. +//===----------------------------------------------------------------------===// +// RecFormRel - Filter class used to relate non-record-form instructions with +// their record-form variants. +class RecFormRel; + +//===----------------------------------------------------------------------===// +// Relation Map Definitions. +//===----------------------------------------------------------------------===// + +def getRecordFormOpcode : InstrMapping { + let FilterClass = "RecFormRel"; + // Instructions with the same BaseName and Interpretation64Bit values + // form a row. + let RowFields = ["BaseName", "Interpretation64Bit"]; + // Instructions with the same RC value form a column. + let ColFields = ["RC"]; + // The key column are the non-record-form instructions. + let KeyCol = ["0"]; + // Value columns RC=1 + let ValueCols = [["1"]]; +} + +def getNonRecordFormOpcode : InstrMapping { + let FilterClass = "RecFormRel"; + // Instructions with the same BaseName and Interpretation64Bit values + // form a row. + let RowFields = ["BaseName", "Interpretation64Bit"]; + // Instructions with the same RC value form a column. + let ColFields = ["RC"]; + // The key column are the record-form instructions. + let KeyCol = ["1"]; + // Value columns are RC=0 + let ValueCols = [["0"]]; +} + +//===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// @@ -97,30 +145,46 @@ include "PPCInstrInfo.td" def : Processor<"generic", G3Itineraries, [Directive32]>; def : Processor<"440", PPC440Itineraries, [Directive440, FeatureISEL, + FeatureFRES, FeatureFRSQRTE, FeatureBookE]>; def : Processor<"450", PPC440Itineraries, [Directive440, FeatureISEL, + FeatureFRES, FeatureFRSQRTE, FeatureBookE]>; def : Processor<"601", G3Itineraries, [Directive601]>; def : Processor<"602", G3Itineraries, [Directive602]>; -def : Processor<"603", G3Itineraries, [Directive603]>; -def : Processor<"603e", G3Itineraries, [Directive603]>; -def : Processor<"603ev", G3Itineraries, [Directive603]>; -def : Processor<"604", G3Itineraries, [Directive604]>; -def : Processor<"604e", G3Itineraries, [Directive604]>; -def : Processor<"620", G3Itineraries, [Directive620]>; -def : Processor<"750", G4Itineraries, [Directive750]>; -def : Processor<"g3", G3Itineraries, [Directive750]>; -def : Processor<"7400", G4Itineraries, [Directive7400, FeatureAltivec]>; -def : Processor<"g4", G4Itineraries, [Directive7400, FeatureAltivec]>; -def : Processor<"7450", G4PlusItineraries, [Directive7400, FeatureAltivec]>; -def : Processor<"g4+", G4PlusItineraries, [Directive7400, FeatureAltivec]>; -def : Processor<"970", G5Itineraries, +def : Processor<"603", G3Itineraries, [Directive603, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"603e", G3Itineraries, [Directive603, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"603ev", G3Itineraries, [Directive603, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"604", G3Itineraries, [Directive604, + 
FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"604e", G3Itineraries, [Directive604, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"620", G3Itineraries, [Directive620, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"750", G4Itineraries, [Directive750, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"g3", G3Itineraries, [Directive750, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"7400", G4Itineraries, [Directive7400, FeatureAltivec, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"g4", G4Itineraries, [Directive7400, FeatureAltivec, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"7450", G4PlusItineraries, [Directive7400, FeatureAltivec, + FeatureFRES, FeatureFRSQRTE]>; +def : Processor<"g4+", G4PlusItineraries, [Directive7400, FeatureAltivec, + FeatureFRES, FeatureFRSQRTE]>; +def : ProcessorModel<"970", G5Model, [Directive970, FeatureAltivec, - FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX, + FeatureMFOCRF, FeatureFSqrt, + FeatureFRES, FeatureFRSQRTE, FeatureSTFIWX, Feature64Bit /*, Feature64BitRegs */]>; -def : Processor<"g5", G5Itineraries, +def : ProcessorModel<"g5", G5Model, [Directive970, FeatureAltivec, FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX, + FeatureFRES, FeatureFRSQRTE, Feature64Bit /*, Feature64BitRegs */]>; def : ProcessorModel<"e500mc", PPCE500mcModel, [DirectiveE500mc, FeatureMFOCRF, @@ -128,46 +192,67 @@ def : ProcessorModel<"e500mc", PPCE500mcModel, def : ProcessorModel<"e5500", PPCE5500Model, [DirectiveE5500, FeatureMFOCRF, Feature64Bit, FeatureSTFIWX, FeatureBookE, FeatureISEL]>; -def : Processor<"a2", PPCA2Itineraries, [DirectiveA2, FeatureBookE, - FeatureMFOCRF, FeatureFSqrt, - FeatureSTFIWX, FeatureISEL, - Feature64Bit - /*, Feature64BitRegs */]>; -def : Processor<"a2q", PPCA2Itineraries, [DirectiveA2, FeatureBookE, - FeatureMFOCRF, FeatureFSqrt, - FeatureSTFIWX, FeatureISEL, - Feature64Bit /*, Feature64BitRegs */, - FeatureQPX]>; -def : Processor<"pwr3", G5Itineraries, - [DirectivePwr3, FeatureAltivec, FeatureMFOCRF, +def : ProcessorModel<"a2", PPCA2Model, + [DirectiveA2, FeatureBookE, FeatureMFOCRF, + FeatureFSqrt, FeatureFRE, FeatureFRES, + FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec, + FeatureSTFIWX, FeatureLFIWAX, + FeatureFPRND, FeatureFPCVT, FeatureISEL, + FeaturePOPCNTD, FeatureLDBRX, Feature64Bit + /*, Feature64BitRegs */]>; +def : ProcessorModel<"a2q", PPCA2Model, + [DirectiveA2, FeatureBookE, FeatureMFOCRF, + FeatureFSqrt, FeatureFRE, FeatureFRES, + FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec, + FeatureSTFIWX, FeatureLFIWAX, + FeatureFPRND, FeatureFPCVT, FeatureISEL, + FeaturePOPCNTD, FeatureLDBRX, Feature64Bit + /*, Feature64BitRegs */, FeatureQPX]>; +def : ProcessorModel<"pwr3", G5Model, + [DirectivePwr3, FeatureAltivec, + FeatureFRES, FeatureFRSQRTE, FeatureMFOCRF, FeatureSTFIWX, Feature64Bit]>; -def : Processor<"pwr4", G5Itineraries, +def : ProcessorModel<"pwr4", G5Model, [DirectivePwr4, FeatureAltivec, FeatureMFOCRF, - FeatureFSqrt, FeatureSTFIWX, Feature64Bit]>; -def : Processor<"pwr5", G5Itineraries, + FeatureFSqrt, FeatureFRES, FeatureFRSQRTE, + FeatureSTFIWX, Feature64Bit]>; +def : ProcessorModel<"pwr5", G5Model, [DirectivePwr5, FeatureAltivec, FeatureMFOCRF, - FeatureFSqrt, FeatureSTFIWX, Feature64Bit]>; -def : Processor<"pwr5x", G5Itineraries, + FeatureFSqrt, FeatureFRE, FeatureFRES, + FeatureFRSQRTE, FeatureFRSQRTES, + FeatureSTFIWX, Feature64Bit]>; +def : ProcessorModel<"pwr5x", G5Model, [DirectivePwr5x, FeatureAltivec, FeatureMFOCRF, - FeatureFSqrt, FeatureSTFIWX, Feature64Bit]>; -def : Processor<"pwr6", 
G5Itineraries, + FeatureFSqrt, FeatureFRE, FeatureFRES, + FeatureFRSQRTE, FeatureFRSQRTES, + FeatureSTFIWX, FeatureFPRND, Feature64Bit]>; +def : ProcessorModel<"pwr6", G5Model, [DirectivePwr6, FeatureAltivec, - FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX, - Feature64Bit /*, Feature64BitRegs */]>; -def : Processor<"pwr6x", G5Itineraries, + FeatureMFOCRF, FeatureFSqrt, FeatureFRE, + FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES, + FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX, + FeatureFPRND, Feature64Bit /*, Feature64BitRegs */]>; +def : ProcessorModel<"pwr6x", G5Model, [DirectivePwr5x, FeatureAltivec, FeatureMFOCRF, - FeatureFSqrt, FeatureSTFIWX, Feature64Bit]>; -def : Processor<"pwr7", G5Itineraries, + FeatureFSqrt, FeatureFRE, FeatureFRES, + FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec, + FeatureSTFIWX, FeatureLFIWAX, + FeatureFPRND, Feature64Bit]>; +def : ProcessorModel<"pwr7", G5Model, [DirectivePwr7, FeatureAltivec, - FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX, - FeatureISEL, Feature64Bit /*, Feature64BitRegs */]>; + FeatureMFOCRF, FeatureFSqrt, FeatureFRE, + FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES, + FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX, + FeatureFPRND, FeatureFPCVT, FeatureISEL, + FeaturePOPCNTD, FeatureLDBRX, + Feature64Bit /*, Feature64BitRegs */]>; def : Processor<"ppc", G3Itineraries, [Directive32]>; -def : Processor<"ppc64", G5Itineraries, +def : ProcessorModel<"ppc64", G5Model, [Directive64, FeatureAltivec, - FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX, + FeatureMFOCRF, FeatureFSqrt, FeatureFRES, + FeatureFRSQRTE, FeatureSTFIWX, Feature64Bit /*, Feature64BitRegs */]>; - //===----------------------------------------------------------------------===// // Calling Conventions //===----------------------------------------------------------------------===// diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index eae9b7b..3c7cc4e 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -370,7 +370,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { MCSymbol *PICBase = MF->getPICBaseSymbol(); // Emit the 'bl'. - OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL_Darwin) // Darwin vs SVR4 doesn't matter here. + OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL) // FIXME: We would like an efficient form for this, so we don't have to do // a lot of extra uniquing. .addExpr(MCSymbolRefExpr::Create(PICBase, OutContext))); @@ -458,11 +458,10 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { // Transform %Xd = LDtocL <ga:@sym>, %Xs LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); - // Change the opcode to LDrs, which is a form of LD with the offset - // specified by a SymbolLo. If the global address is external, has + // Change the opcode to LD. If the global address is external, has // common linkage, or is a jump table address, then reference the // associated TOC entry. Otherwise reference the symbol directly. - TmpInst.setOpcode(PPC::LDrs); + TmpInst.setOpcode(PPC::LD); const MachineOperand &MO = MI->getOperand(1); assert((MO.isGlobal() || MO.isJTI() || MO.isCPI()) && "Invalid operand for LDtocL!"); @@ -496,10 +495,10 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { // Transform %Xd = ADDItocL %Xs, <ga:@sym> LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); - // Change the opcode to ADDI8L. If the global address is external, then + // Change the opcode to ADDI8. 
If the global address is external, then // generate a TOC entry and reference that. Otherwise reference the // symbol directly. - TmpInst.setOpcode(PPC::ADDI8L); + TmpInst.setOpcode(PPC::ADDI8); const MachineOperand &MO = MI->getOperand(2); assert((MO.isGlobal() || MO.isCPI()) && "Invalid operand for ADDItocL"); MCSymbol *MOSymbol = 0; @@ -548,9 +547,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { // Transform %Xd = LDgotTprelL <ga:@sym>, %Xs LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); - // Change the opcode to LDrs, which is a form of LD with the offset - // specified by a SymbolLo. - TmpInst.setOpcode(PPC::LDrs); + // Change the opcode to LD. + TmpInst.setOpcode(PPC::LD); const MachineOperand &MO = MI->getOperand(1); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = Mang->getSymbol(GValue); @@ -579,7 +577,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } case PPC::ADDItlsgdL: { // Transform: %Xd = ADDItlsgdL %Xs, <ga:@sym> - // Into: %Xd = ADDI8L %Xs, sym@got@tlsgd@l + // Into: %Xd = ADDI8 %Xs, sym@got@tlsgd@l assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); @@ -587,7 +585,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MCExpr *SymGotTlsGD = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD16_LO, OutContext); - OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8L) + OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) .addReg(MI->getOperand(1).getReg()) .addExpr(SymGotTlsGD)); @@ -595,7 +593,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } case PPC::GETtlsADDR: { // Transform: %X3 = GETtlsADDR %X3, <ga:@sym> - // Into: BL8_NOP_ELF_TLSGD __tls_get_addr(sym@tlsgd) + // Into: BL8_NOP_TLSGD __tls_get_addr(sym@tlsgd) assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); StringRef Name = "__tls_get_addr"; @@ -608,7 +606,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MCExpr *SymVar = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSGD, OutContext); - OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_ELF_TLSGD) + OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLSGD) .addExpr(TlsRef) .addExpr(SymVar)); return; @@ -631,7 +629,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } case PPC::ADDItlsldL: { // Transform: %Xd = ADDItlsldL %Xs, <ga:@sym> - // Into: %Xd = ADDI8L %Xs, sym@got@tlsld@l + // Into: %Xd = ADDI8 %Xs, sym@got@tlsld@l assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); @@ -639,7 +637,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MCExpr *SymGotTlsLD = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD16_LO, OutContext); - OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8L) + OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) .addReg(MI->getOperand(1).getReg()) .addExpr(SymGotTlsLD)); @@ -647,7 +645,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } case PPC::GETtlsldADDR: { // Transform: %X3 = GETtlsldADDR %X3, <ga:@sym> - // Into: BL8_NOP_ELF_TLSLD __tls_get_addr(sym@tlsld) + // Into: BL8_NOP_TLSLD __tls_get_addr(sym@tlsld) assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); StringRef Name = 
"__tls_get_addr"; @@ -660,7 +658,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MCExpr *SymVar = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSLD, OutContext); - OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_ELF_TLSLD) + OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLSLD) .addExpr(TlsRef) .addExpr(SymVar)); return; @@ -683,7 +681,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { } case PPC::ADDIdtprelL: { // Transform: %Xd = ADDIdtprelL %Xs, <ga:@sym> - // Into: %Xd = ADDI8L %Xs, sym@dtprel@l + // Into: %Xd = ADDI8 %Xs, sym@dtprel@l assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); @@ -691,7 +689,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MCExpr *SymDtprel = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL16_LO, OutContext); - OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8L) + OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) .addReg(MI->getOperand(1).getReg()) .addExpr(SymDtprel)); @@ -723,7 +721,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() { return AsmPrinter::EmitFunctionEntryLabel(); // Emit an official procedure descriptor. - const MCSection *Current = OutStreamer.getCurrentSection(); + MCSectionSubPair Current = OutStreamer.getCurrentSection(); const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".opd", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC, SectionKind::getReadOnly()); @@ -743,7 +741,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() { 8/*size*/); // Emit a null environment pointer. OutStreamer.EmitIntValue(0, 8 /* size */); - OutStreamer.SwitchSection(Current); + OutStreamer.SwitchSection(Current.first, Current.second); MCSymbol *RealFnSym = OutContext.GetOrCreateSymbol( ".L." + Twine(CurrentFnSym->getName())); @@ -911,18 +909,19 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { OutStreamer.EmitLabel(Stub); OutStreamer.EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol); + const MCExpr *Anon = MCSymbolRefExpr::Create(AnonSymbol, OutContext); + // mflr r0 OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR).addReg(PPC::R0)); - // FIXME: MCize this. - OutStreamer.EmitRawText("\tbcl 20, 31, " + Twine(AnonSymbol->getName())); + // bcl 20, 31, AnonSymbol + OutStreamer.EmitInstruction(MCInstBuilder(PPC::BCLalways).addExpr(Anon)); OutStreamer.EmitLabel(AnonSymbol); // mflr r11 OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR).addReg(PPC::R11)); // addis r11, r11, ha16(LazyPtr - AnonSymbol) const MCExpr *Sub = MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(LazyPtr, OutContext), - MCSymbolRefExpr::Create(AnonSymbol, OutContext), - OutContext); + Anon, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS) .addReg(PPC::R11) .addReg(PPC::R11) diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp index ecece8c..81a54d7 100644 --- a/lib/Target/PowerPC/PPCCTRLoops.cpp +++ b/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -396,7 +396,7 @@ CountValue *PPCCTRLoops::getTripCount(MachineLoop *L, // Here we need to look for an immediate load (an li or lis/ori pair). 
if (DefInstr && (DefInstr->getOpcode() == PPC::ORI8 || DefInstr->getOpcode() == PPC::ORI)) { - int64_t start = (short) DefInstr->getOperand(2).getImm(); + int64_t start = DefInstr->getOperand(2).getImm(); MachineInstr *DefInstr2 = MRI->getVRegDef(DefInstr->getOperand(1).getReg()); if (DefInstr2 && (DefInstr2->getOpcode() == PPC::LIS8 || @@ -685,7 +685,7 @@ bool PPCCTRLoops::convertToCTRLoop(MachineLoop *L) { const TargetRegisterClass *SrcRC = MF->getRegInfo().getRegClass(TripCount->getReg()); CountReg = MF->getRegInfo().createVirtualRegister(RC); - unsigned CopyOp = (isPPC64 && SrcRC == GPRC) ? + unsigned CopyOp = (isPPC64 && GPRC->hasSubClassEq(SrcRC)) ? (unsigned) PPC::EXTSW_32_64 : (unsigned) TargetOpcode::COPY; BuildMI(*Preheader, InsertPos, dl, diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td index caeb179..c8a29a3 100644 --- a/lib/Target/PowerPC/PPCCallingConv.td +++ b/lib/Target/PowerPC/PPCCallingConv.td @@ -136,3 +136,9 @@ def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20, VRSAV F27, F28, F29, F30, F31, CR2, CR3, CR4, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V30, V31)>; + +def CSR_NoRegs : CalleeSavedRegs<(add VRSAVE)>; +def CSR_NoRegs_Darwin : CalleeSavedRegs<(add)>; + +def CSR_NoRegs_Altivec : CalleeSavedRegs<(add (sequence "V%u", 0, 31), VRSAVE)>; + diff --git a/lib/Target/PowerPC/PPCCodeEmitter.cpp b/lib/Target/PowerPC/PPCCodeEmitter.cpp index d68bfd1..6478718 100644 --- a/lib/Target/PowerPC/PPCCodeEmitter.cpp +++ b/lib/Target/PowerPC/PPCCodeEmitter.cpp @@ -142,7 +142,7 @@ unsigned PPCCodeEmitter::get_crbitm_encoding(const MachineInstr &MI, assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MTCRF8 || MI.getOpcode() == PPC::MFOCRF) && (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7)); - return 0x80 >> getPPCRegisterNumbering(MO.getReg()); + return 0x80 >> TM.getRegisterInfo()->getEncodingValue(MO.getReg()); } MachineRelocation PPCCodeEmitter::GetRelocation(const MachineOperand &MO, @@ -260,7 +260,7 @@ unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI, assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MTCRF8 && MI.getOpcode() != PPC::MFOCRF) || MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7); - return getPPCRegisterNumbering(MO.getReg()); + return TM.getRegisterInfo()->getEncodingValue(MO.getReg()); } assert(MO.isImm() && diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp index 353560d..9ec10f6 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -103,6 +103,7 @@ static void RemoveVRSaveCode(MachineInstr *MI) { // transform this into the appropriate ORI instruction. static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) { MachineFunction *MF = MI->getParent()->getParent(); + const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo(); DebugLoc dl = MI->getDebugLoc(); unsigned UsedRegMask = 0; @@ -115,7 +116,7 @@ static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) { for (MachineRegisterInfo::livein_iterator I = MF->getRegInfo().livein_begin(), E = MF->getRegInfo().livein_end(); I != E; ++I) { - unsigned RegNo = getPPCRegisterNumbering(I->first); + unsigned RegNo = TRI->getEncodingValue(I->first); if (VRRegNo[RegNo] == I->first) // If this really is a vector reg. UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked. 
} @@ -131,7 +132,7 @@ static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) { const MachineOperand &MO = Ret.getOperand(I); if (!MO.isReg() || !PPC::VRRCRegClass.contains(MO.getReg())) continue; - unsigned RegNo = getPPCRegisterNumbering(MO.getReg()); + unsigned RegNo = TRI->getEncodingValue(MO.getReg()); UsedRegMask &= ~(1 << (31-RegNo)); } } @@ -188,6 +189,11 @@ static bool spillsCR(const MachineFunction &MF) { return FuncInfo->isCRSpilled(); } +static bool spillsVRSAVE(const MachineFunction &MF) { + const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); + return FuncInfo->isVRSAVESpilled(); +} + static bool hasSpills(const MachineFunction &MF) { const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); return FuncInfo->hasSpills(); @@ -217,9 +223,7 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF, // If we are a leaf function, and use up to 224 bytes of stack space, // don't have a frame pointer, calls, or dynamic alloca then we do not need - // to adjust the stack pointer (we fit in the Red Zone). For 64-bit - // SVR4, we also require a stack frame if we need to spill the CR, - // since this spill area is addressed relative to the stack pointer. + // to adjust the stack pointer (we fit in the Red Zone). // The 32-bit SVR4 ABI has no Red Zone. However, it can still generate // stackless code if all local vars are reg-allocated. bool DisableRedZone = MF.getFunction()->getAttributes(). @@ -231,9 +235,6 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF, FrameSize <= 224 && // Fits in red zone. !MFI->hasVarSizedObjects() && // No dynamic alloca. !MFI->adjustsStack() && // No calls. - !(Subtarget.isPPC64() && // No 64-bit SVR4 CRsave. - Subtarget.isSVR4ABI() - && spillsCR(MF)) && (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment. // No need for frame if (UpdateMF) @@ -299,6 +300,31 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const { MF.getInfo<PPCFunctionInfo>()->hasFastCall()); } +void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const { + bool is31 = needsFP(MF); + unsigned FPReg = is31 ? PPC::R31 : PPC::R1; + unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1; + + for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); + BI != BE; ++BI) + for (MachineBasicBlock::iterator MBBI = BI->end(); MBBI != BI->begin(); ) { + --MBBI; + for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) { + MachineOperand &MO = MBBI->getOperand(I); + if (!MO.isReg()) + continue; + + switch (MO.getReg()) { + case PPC::FP: + MO.setReg(FPReg); + break; + case PPC::FP8: + MO.setReg(FP8Reg); + break; + } + } + } +} void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB @@ -332,6 +358,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { unsigned FrameSize = determineFrameLayout(MF); int NegFrameSize = -FrameSize; + if (MFI->isFrameAddressTaken()) + replaceFPWithRealFP(MF); + // Get processor type. bool isPPC64 = Subtarget.isPPC64(); // Get operating system @@ -339,6 +368,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { // Check if the link register (LR) must be saved. PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); bool MustSaveLR = FI->mustSaveLR(); + const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs(); // Do we have a frame pointer for this function? 
bool HasFP = hasFP(MF); @@ -360,6 +390,13 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { if (MustSaveLR) BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR8), PPC::X0); + if (!MustSaveCRs.empty()) { + MachineInstrBuilder MIB = + BuildMI(MBB, MBBI, dl, TII.get(PPC::MFCR8), PPC::X12); + for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i) + MIB.addReg(MustSaveCRs[i], RegState::ImplicitKill); + } + if (HasFP) BuildMI(MBB, MBBI, dl, TII.get(PPC::STD)) .addReg(PPC::X31) @@ -371,6 +408,12 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { .addReg(PPC::X0) .addImm(LROffset / 4) .addReg(PPC::X1); + + if (!MustSaveCRs.empty()) + BuildMI(MBB, MBBI, dl, TII.get(PPC::STW8)) + .addReg(PPC::X12, getKillRegState(true)) + .addImm(8) + .addReg(PPC::X1); } else { if (MustSaveLR) BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR), PPC::R0); @@ -383,6 +426,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { .addImm(FPOffset) .addReg(PPC::R1); + assert(MustSaveCRs.empty() && + "Prologue CR saving supported only in 64-bit mode"); + if (MustSaveLR) BuildMI(MBB, MBBI, dl, TII.get(PPC::STW)) .addReg(PPC::R0) @@ -546,7 +592,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { // spilled CRs. if (Subtarget.isSVR4ABI() && (PPC::CR2 <= Reg && Reg <= PPC::CR4) - && !spillsCR(MF)) + && MustSaveCRs.empty()) continue; // For 64-bit SVR4 when we have spilled CRs, the spill location @@ -602,6 +648,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, // Check if the link register (LR) has been saved. PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); bool MustSaveLR = FI->mustSaveLR(); + const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs(); // Do we have a frame pointer for this function? bool HasFP = hasFP(MF); @@ -702,10 +749,19 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X0) .addImm(LROffset/4).addReg(PPC::X1); + if (!MustSaveCRs.empty()) + BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ8), PPC::X12) + .addImm(8).addReg(PPC::X1); + if (HasFP) BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X31) .addImm(FPOffset/4).addReg(PPC::X1); + if (!MustSaveCRs.empty()) + for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i) + BuildMI(MBB, MBBI, dl, TII.get(PPC::MTCRF8), MustSaveCRs[i]) + .addReg(PPC::X12, getKillRegState(i == e-1)); + if (MustSaveLR) BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR8)).addReg(PPC::X0); } else { @@ -713,6 +769,9 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R0) .addImm(LROffset).addReg(PPC::R1); + assert(MustSaveCRs.empty() && + "Epilogue CR restoring supported only in 64-bit mode"); + if (HasFP) BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R31) .addImm(FPOffset).addReg(PPC::R1); @@ -917,6 +976,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF, } PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>(); + const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo(); int64_t LowerBound = 0; @@ -936,7 +996,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF, FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI)); } - LowerBound -= (31 - getPPCRegisterNumbering(MinFPR) + 1) * 8; + LowerBound -= (31 - TRI->getEncodingValue(MinFPR) + 1) * 8; } // Check whether the frame pointer register is allocated. 
If so, make sure it @@ -970,8 +1030,8 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF, } unsigned MinReg = - std::min<unsigned>(getPPCRegisterNumbering(MinGPR), - getPPCRegisterNumbering(MinG8R)); + std::min<unsigned>(TRI->getEncodingValue(MinGPR), + TRI->getEncodingValue(MinG8R)); if (Subtarget.isPPC64()) { LowerBound -= (31 - MinReg + 1) * 8; @@ -1053,14 +1113,21 @@ PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF, // needed alignment padding. unsigned StackSize = determineFrameLayout(MF, false, true); MachineFrameInfo *MFI = MF.getFrameInfo(); - if (MFI->hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) || - (hasSpills(MF) && !isInt<16>(StackSize))) { + if (MFI->hasVarSizedObjects() || spillsCR(MF) || spillsVRSAVE(MF) || + hasNonRISpills(MF) || (hasSpills(MF) && !isInt<16>(StackSize))) { const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *RC = Subtarget.isPPC64() ? G8RC : GPRC; - RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), + RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false)); + + // These kinds of spills might need two registers. + if (spillsCR(MF) || spillsVRSAVE(MF)) + RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), + RC->getAlignment(), + false)); + } } @@ -1080,44 +1147,41 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB, *static_cast<const PPCInstrInfo*>(MF->getTarget().getInstrInfo()); DebugLoc DL; bool CRSpilled = false; + MachineInstrBuilder CRMIB; for (unsigned i = 0, e = CSI.size(); i != e; ++i) { unsigned Reg = CSI[i].getReg(); // CR2 through CR4 are the nonvolatile CR fields. bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4; - if (CRSpilled && IsCRField) - continue; - // Add the callee-saved register as live-in; it's killed at the spill. MBB.addLiveIn(Reg); + if (CRSpilled && IsCRField) { + CRMIB.addReg(Reg, RegState::ImplicitKill); + continue; + } + // Insert the spill to the stack frame. if (IsCRField) { - CRSpilled = true; - // The first time we see a CR field, store the whole CR into the - // save slot via GPR12 (available in the prolog for 32- and 64-bit). + PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>(); if (Subtarget.isPPC64()) { - // 64-bit: SP+8 - MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::X12)); - MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::STW)) - .addReg(PPC::X12, - getKillRegState(true)) - .addImm(8) - .addReg(PPC::X1)); + // The actual spill will happen at the start of the prologue. + FuncInfo->addMustSaveCR(Reg); } else { + CRSpilled = true; + // 32-bit: FP-relative. Note that we made sure CR2-CR4 all have // the same frame index in PPCRegisterInfo::hasReservedSpillSlot. - MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)); + CRMIB = BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12) + .addReg(Reg, RegState::ImplicitKill); + + MBB.insert(MI, CRMIB); MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW)) .addReg(PPC::R12, getKillRegState(true)), CSI[i].getFrameIdx())); } - - // Record that we spill the CR in this function. 
- PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>(); - FuncInfo->setSpillsCR(); } else { const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); TII.storeRegToStackSlot(MBB, MI, Reg, true, @@ -1128,7 +1192,8 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB, } static void -restoreCRs(bool isPPC64, bool CR2Spilled, bool CR3Spilled, bool CR4Spilled, +restoreCRs(bool isPPC64, bool is31, + bool CR2Spilled, bool CR3Spilled, bool CR4Spilled, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const std::vector<CalleeSavedInfo> &CSI, unsigned CSIIndex) { @@ -1138,14 +1203,10 @@ restoreCRs(bool isPPC64, bool CR2Spilled, bool CR3Spilled, bool CR4Spilled, DebugLoc DL; unsigned RestoreOp, MoveReg; - if (isPPC64) { - // 64-bit: SP+8 - MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::LWZ), PPC::X12) - .addImm(8) - .addReg(PPC::X1)); - RestoreOp = PPC::MTCRF8; - MoveReg = PPC::X12; - } else { + if (isPPC64) + // This is handled during epilogue generation. + return; + else { // 32-bit: FP-relative MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ), PPC::R12), @@ -1156,15 +1217,15 @@ restoreCRs(bool isPPC64, bool CR2Spilled, bool CR3Spilled, bool CR4Spilled, if (CR2Spilled) MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2) - .addReg(MoveReg)); + .addReg(MoveReg, getKillRegState(!CR3Spilled && !CR4Spilled))); if (CR3Spilled) MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR3) - .addReg(MoveReg)); + .addReg(MoveReg, getKillRegState(!CR4Spilled))); if (CR4Spilled) MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR4) - .addReg(MoveReg)); + .addReg(MoveReg, getKillRegState(true))); } void PPCFrameLowering:: @@ -1255,7 +1316,9 @@ PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, // least one CR register, restore all spilled CRs together. if ((CR2Spilled || CR3Spilled || CR4Spilled) && !(PPC::CR2 <= Reg && Reg <= PPC::CR4)) { - restoreCRs(Subtarget.isPPC64(), CR2Spilled, CR3Spilled, CR4Spilled, + bool is31 = needsFP(*MF); + restoreCRs(Subtarget.isPPC64(), is31, + CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI, CSIIndex); CR2Spilled = CR3Spilled = CR4Spilled = false; } @@ -1278,9 +1341,11 @@ PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, } // If we haven't yet spilled the CRs, do so now. - if (CR2Spilled || CR3Spilled || CR4Spilled) - restoreCRs(Subtarget.isPPC64(), CR2Spilled, CR3Spilled, CR4Spilled, + if (CR2Spilled || CR3Spilled || CR4Spilled) { + bool is31 = needsFP(*MF); + restoreCRs(Subtarget.isPPC64(), is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI, CSIIndex); + } return true; } diff --git a/lib/Target/PowerPC/PPCFrameLowering.h b/lib/Target/PowerPC/PPCFrameLowering.h index 53ee326..6f5f936 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.h +++ b/lib/Target/PowerPC/PPCFrameLowering.h @@ -43,6 +43,7 @@ public: bool hasFP(const MachineFunction &MF) const; bool needsFP(const MachineFunction &MF) const; + void replaceFPWithRealFP(MachineFunction &MF) const; void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, RegScavenger *RS = NULL) const; diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp index 6ed1fb9..4bf1e33 100644 --- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp +++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp @@ -179,7 +179,7 @@ getHazardType(SUnit *SU, int Stalls) { } // Do not allow MTCTR and BCTRL to be in the same dispatch group. 
- if (HasCTRSet && (Opcode == PPC::BCTRL_Darwin || Opcode == PPC::BCTRL_SVR4)) + if (HasCTRSet && Opcode == PPC::BCTRL) return NoopHazard; // If this is a load following a store, make sure it's not to the same or diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 17bea8a..aed0fbb 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -120,10 +120,10 @@ namespace { } /// SelectAddrImmOffs - Return true if the operand is valid for a preinc - /// immediate field. Because preinc imms have already been validated, just - /// accept it. + /// immediate field. Note that the operand at this point is already the + /// result of a prior SelectAddressRegImm call. bool SelectAddrImmOffs(SDValue N, SDValue &Out) const { - if (isa<ConstantSDNode>(N) || N.getOpcode() == PPCISD::Lo || + if (N.getOpcode() == ISD::TargetConstant || N.getOpcode() == ISD::TargetGlobalAddress) { Out = N; return true; @@ -132,18 +132,6 @@ namespace { return false; } - /// SelectAddrIdxOffs - Return true if the operand is valid for a preinc - /// index field. Because preinc imms have already been validated, just - /// accept it. - bool SelectAddrIdxOffs(SDValue N, SDValue &Out) const { - if (isa<ConstantSDNode>(N) || N.getOpcode() == PPCISD::Lo || - N.getOpcode() == ISD::TargetGlobalAddress) - return false; - - Out = N; - return true; - } - /// SelectAddrIdx - Given the specified addressed, check to see if it can be /// represented as an indexed [r+r] operation. Returns false if it can /// be represented by [r+imm], which are preferred. @@ -164,6 +152,12 @@ namespace { return PPCLowering.SelectAddressRegImmShift(N, Disp, Base, *CurDAG); } + // Select an address into a single register. + bool SelectAddr(SDValue N, SDValue &Base) { + Base = N; + return true; + } + /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for /// inline asm expressions. It is always correct to compute the value into /// a register. The case of adding a (possibly relocatable) constant to a @@ -463,7 +457,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { SH &= 31; SDValue Ops[] = { Op0, Op1, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) }; - return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5); + return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops); } } return 0; @@ -786,7 +780,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { } case ISD::SETGT: { SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; - Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), + Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op, getI32Imm(1)); @@ -879,7 +873,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { // Get the specified bit. SDValue Tmp = - SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0); + SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); if (Inv) { assert(OtherCondIdx == -1 && "Can't have split plus negation"); return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1)); @@ -891,7 +885,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { // Get the other bit of the comparison. 
Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31); SDValue OtherCond = - SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0); + SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond); } @@ -1050,7 +1044,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { break; SDValue Offset = LD->getOffset(); - if (isa<ConstantSDNode>(Offset) || + if (Offset.getOpcode() == ISD::TargetConstant || Offset.getOpcode() == ISD::TargetGlobalAddress) { unsigned Opcode; @@ -1085,7 +1079,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { SDValue Ops[] = { Offset, Base, Chain }; return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0), PPCLowering.getPointerTy(), - MVT::Other, Ops, 3); + MVT::Other, Ops); } else { unsigned Opcode; bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD; @@ -1117,10 +1111,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { SDValue Chain = LD->getChain(); SDValue Base = LD->getBasePtr(); - SDValue Ops[] = { Offset, Base, Chain }; + SDValue Ops[] = { Base, Offset, Chain }; return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0), PPCLowering.getPointerTy(), - MVT::Other, Ops, 3); + MVT::Other, Ops); } } @@ -1169,7 +1163,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { SDValue Ops[] = { N->getOperand(0).getOperand(0), N->getOperand(0).getOperand(1), getI32Imm(0), getI32Imm(MB),getI32Imm(ME) }; - return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5); + return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops); } } @@ -1483,8 +1477,7 @@ void PPCDAGToDAGISel::PostprocessISelDAG() { default: continue; case PPC::ADDI8: - case PPC::ADDI8L: - case PPC::ADDIL: + case PPC::ADDI: // In some cases (such as TLS) the relocation information // is already in place on the operand, so copying the operand // is sufficient. 
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 741e25e..3fcafdc 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -70,6 +70,8 @@ static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) { PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) { const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>(); + PPCRegInfo = TM.getRegisterInfo(); + PPCII = TM.getInstrInfo(); setPow2DivIsCheap(); @@ -115,6 +117,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand); setOperationAction(ISD::FRINT, MVT::ppcf128, Expand); setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand); + setOperationAction(ISD::FREM, MVT::ppcf128, Expand); // PowerPC has no SREM/UREM instructions setOperationAction(ISD::SREM, MVT::i32, Expand); @@ -149,26 +152,58 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); // If we're enabling GP optimizations, use hardware square root - if (!Subtarget->hasFSQRT()) { + if (!Subtarget->hasFSQRT() && + !(TM.Options.UnsafeFPMath && + Subtarget->hasFRSQRTE() && Subtarget->hasFRE())) setOperationAction(ISD::FSQRT, MVT::f64, Expand); + + if (!Subtarget->hasFSQRT() && + !(TM.Options.UnsafeFPMath && + Subtarget->hasFRSQRTES() && Subtarget->hasFRES())) setOperationAction(ISD::FSQRT, MVT::f32, Expand); - } setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); + if (Subtarget->hasFPRND()) { + setOperationAction(ISD::FFLOOR, MVT::f64, Legal); + setOperationAction(ISD::FCEIL, MVT::f64, Legal); + setOperationAction(ISD::FTRUNC, MVT::f64, Legal); + + setOperationAction(ISD::FFLOOR, MVT::f32, Legal); + setOperationAction(ISD::FCEIL, MVT::f32, Legal); + setOperationAction(ISD::FTRUNC, MVT::f32, Legal); + + // frin does not implement "ties to even." Thus, this is safe only in + // fast-math mode. + if (TM.Options.UnsafeFPMath) { + setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); + setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); + + // These need to set FE_INEXACT, and use a custom inserter. 
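// Standalone illustration, not part of this patch, of why the FNEARBYINT/FRINT
// handling here is gated on fast-math: frin rounds halfway cases away from
// zero, like C's round(), while nearbyint() in the default rounding mode
// rounds ties to even.
#include <cassert>
#include <cmath>

int main() {
  assert(std::round(2.5) == 3.0);       // frin-style: ties away from zero
  assert(std::nearbyint(2.5) == 2.0);   // IEEE default: ties to even
  assert(std::round(-2.5) == -3.0);
  assert(std::nearbyint(-2.5) == -2.0);
  return 0;
}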
+ setOperationAction(ISD::FRINT, MVT::f64, Legal); + setOperationAction(ISD::FRINT, MVT::f32, Legal); + } + } + // PowerPC does not have BSWAP, CTPOP or CTTZ setOperationAction(ISD::BSWAP, MVT::i32 , Expand); - setOperationAction(ISD::CTPOP, MVT::i32 , Expand); setOperationAction(ISD::CTTZ , MVT::i32 , Expand); setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); setOperationAction(ISD::BSWAP, MVT::i64 , Expand); - setOperationAction(ISD::CTPOP, MVT::i64 , Expand); setOperationAction(ISD::CTTZ , MVT::i64 , Expand); setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); + if (Subtarget->hasPOPCNTD()) { + setOperationAction(ISD::CTPOP, MVT::i32 , Legal); + setOperationAction(ISD::CTPOP, MVT::i64 , Legal); + } else { + setOperationAction(ISD::CTPOP, MVT::i32 , Expand); + setOperationAction(ISD::CTPOP, MVT::i64 , Expand); + } + // PowerPC does not have ROTR setOperationAction(ISD::ROTR, MVT::i32 , Expand); setOperationAction(ISD::ROTR, MVT::i64 , Expand); @@ -211,6 +246,14 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); + // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support + // SjLj exception handling but a light-weight setjmp/longjmp replacement to + // support continuation, user-level threading, and etc.. As a result, no + // other SjLj exception interfaces are implemented and please don't build + // your own exception handling based on them. + // LLVM/Clang supports zero-cost DWARF exception handling. + setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); + setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); // We want to legalize GlobalAddress and ConstantPool nodes into the // appropriate instructions to materialize the address. @@ -290,15 +333,28 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) // We cannot do this with Promote because i64 is not a legal type. setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - // FIXME: disable this lowered code. This generates 64-bit register values, - // and we don't model the fact that the top part is clobbered by calls. We - // need to flag these together so that the value isn't live across a call. - //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); + if (PPCSubTarget.hasLFIWAX() || Subtarget->isPPC64()) + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); } else { // PowerPC does not have FP_TO_UINT on 32-bit implementations. setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); } + // With the instructions enabled under FPCVT, we can do everything. 
+ if (PPCSubTarget.hasFPCVT()) { + if (Subtarget->has64BitSupport()) { + setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); + } + + setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); + } + if (Subtarget->use64BitRegs()) { // 64-bit PowerPC implementations can support i64 types directly addRegisterClass(MVT::i64, &PPC::G8RCRegClass); @@ -420,6 +476,12 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::MUL, MVT::v4f32, Legal); setOperationAction(ISD::FMA, MVT::v4f32, Legal); + + if (TM.Options.UnsafeFPMath) { + setOperationAction(ISD::FDIV, MVT::v4f32, Legal); + setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); + } + setOperationAction(ISD::MUL, MVT::v4i32, Custom); setOperationAction(ISD::MUL, MVT::v8i16, Custom); setOperationAction(ISD::MUL, MVT::v16i8, Custom); @@ -452,7 +514,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); setBooleanContents(ZeroOrOneBooleanContent); - setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? + // Altivec instructions set fields to all zeros or all ones. + setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); if (isPPC64) { setStackPointerRegisterToSaveRestore(PPC::X1); @@ -470,6 +533,12 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setTargetDAGCombine(ISD::BR_CC); setTargetDAGCombine(ISD::BSWAP); + // Use reciprocal estimates. + if (TM.Options.UnsafeFPMath) { + setTargetDAGCombine(ISD::FDIV); + setTargetDAGCombine(ISD::FSQRT); + } + // Darwin long double math library functions have $LDBL128 appended. 
if (Subtarget->isDarwin()) { setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128"); @@ -511,7 +580,6 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) MaxStoresPerMemmoveOptSize = 8; setPrefFunctionAlignment(4); - BenefitFromCodePlacementOpt = true; } } @@ -542,6 +610,8 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { case PPCISD::FCFID: return "PPCISD::FCFID"; case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; + case PPCISD::FRE: return "PPCISD::FRE"; + case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE"; case PPCISD::STFIWX: return "PPCISD::STFIWX"; case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; @@ -557,16 +627,13 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { case PPCISD::SRL: return "PPCISD::SRL"; case PPCISD::SRA: return "PPCISD::SRA"; case PPCISD::SHL: return "PPCISD::SHL"; - case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32"; - case PPCISD::STD_32: return "PPCISD::STD_32"; - case PPCISD::CALL_SVR4: return "PPCISD::CALL_SVR4"; - case PPCISD::CALL_NOP_SVR4: return "PPCISD::CALL_NOP_SVR4"; - case PPCISD::CALL_Darwin: return "PPCISD::CALL_Darwin"; - case PPCISD::NOP: return "PPCISD::NOP"; + case PPCISD::CALL: return "PPCISD::CALL"; + case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; case PPCISD::MTCTR: return "PPCISD::MTCTR"; - case PPCISD::BCTRL_Darwin: return "PPCISD::BCTRL_Darwin"; - case PPCISD::BCTRL_SVR4: return "PPCISD::BCTRL_SVR4"; + case PPCISD::BCTRL: return "PPCISD::BCTRL"; case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; + case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; + case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; case PPCISD::MFCR: return "PPCISD::MFCR"; case PPCISD::VCMP: return "PPCISD::VCMP"; case PPCISD::VCMPo: return "PPCISD::VCMPo"; @@ -576,10 +643,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { case PPCISD::STCX: return "PPCISD::STCX"; case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; case PPCISD::MFFS: return "PPCISD::MFFS"; - case PPCISD::MTFSB0: return "PPCISD::MTFSB0"; - case PPCISD::MTFSB1: return "PPCISD::MTFSB1"; case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; - case PPCISD::MTFSF: return "PPCISD::MTFSF"; case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; case PPCISD::CR6SET: return "PPCISD::CR6SET"; case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; @@ -1031,7 +1095,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, short Imm; if (isIntS16Immediate(CN, Imm)) { Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); - Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0, + Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, CN->getValueType(0)); return true; } @@ -1080,7 +1144,7 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, } // Otherwise, do it the hard way, using R0 as the base register. - Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0, + Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, N.getValueType()); Index = N; return true; @@ -1143,7 +1207,7 @@ bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp, short Imm; if (isIntS16Immediate(CN, Imm)) { Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy()); - Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0, + Base = DAG.getRegister(PPCSubTarget.isPPC64() ? 
PPC::ZERO8 : PPC::ZERO, CN->getValueType(0)); return true; } @@ -1181,6 +1245,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, SelectionDAG &DAG) const { if (DisablePPCPreinc) return false; + bool isLoad = true; SDValue Ptr; EVT VT; unsigned Alignment; @@ -1192,6 +1257,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, Ptr = ST->getBasePtr(); VT = ST->getMemoryVT(); Alignment = ST->getAlignment(); + isLoad = false; } else return false; @@ -1199,7 +1265,25 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, if (VT.isVector()) return false; - if (SelectAddressRegReg(Ptr, Offset, Base, DAG)) { + if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { + + // Common code will reject creating a pre-inc form if the base pointer + // is a frame index, or if N is a store and the base pointer is either + // the same as or a predecessor of the value being stored. Check for + // those situations here, and try with swapped Base/Offset instead. + bool Swap = false; + + if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) + Swap = true; + else if (!isLoad) { + SDValue Val = cast<StoreSDNode>(N)->getValue(); + if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) + Swap = true; + } + + if (Swap) + std::swap(Base, Offset); + AM = ISD::PRE_INC; return true; } @@ -3105,7 +3189,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. - unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin; + unsigned CallOpc = PPCISD::CALL; bool needIndirectCall = true; if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { @@ -3238,8 +3322,11 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, NodeTys.push_back(MVT::Other); NodeTys.push_back(MVT::Glue); Ops.push_back(Chain); - CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin; + CallOpc = PPCISD::BCTRL; Callee.setNode(0); + // Add use of X11 (holding environment pointer) + if (isSVR4ABI && isPPC64) + Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); // Add CTR register as callee so a bctr can be emitted later. if (isTailCall) Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); @@ -3378,7 +3465,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool needsTOCRestore = false; if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { - if (CallOpc == PPCISD::BCTRL_SVR4) { + if (CallOpc == PPCISD::BCTRL) { // This is a call through a function pointer. // Restore the caller TOC from the save area into R2. // See PrepareCall() for more information about calls through function @@ -3389,9 +3476,9 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, // from allocating it), resulting in an additional register being // allocated and an unnecessary move instruction being generated. needsTOCRestore = true; - } else if ((CallOpc == PPCISD::CALL_SVR4) && !isLocalCall(Callee)) { + } else if ((CallOpc == PPCISD::CALL) && !isLocalCall(Callee)) { // Otherwise insert NOP for non-local calls. 
- CallOpc = PPCISD::CALL_NOP_SVR4; + CallOpc = PPCISD::CALL_NOP; } } @@ -4564,6 +4651,21 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3); } +SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, + SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, + DAG.getVTList(MVT::i32, MVT::Other), + Op.getOperand(0), Op.getOperand(1)); +} + +SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, + SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, + Op.getOperand(0), Op.getOperand(1)); +} + /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when /// possible. SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { @@ -4572,10 +4674,14 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { !Op.getOperand(2).getValueType().isFloatingPoint()) return Op; - ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); + // We might be able to do better than this under some circumstances, but in + // general, fsel-based lowering of select is a finite-math-only optimization. + // For more information, see section F.3 of the 2.06 ISA specification. + if (!DAG.getTarget().Options.NoInfsFPMath || + !DAG.getTarget().Options.NoNaNsFPMath) + return Op; - // Cannot handle SETEQ/SETNE. - if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op; + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); EVT ResVT = Op.getValueType(); EVT CmpVT = Op.getOperand(0).getValueType(); @@ -4585,9 +4691,20 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { // If the RHS of the comparison is a 0.0, we don't need to do the // subtraction at all. + SDValue Sel1; if (isFloatingPointZero(RHS)) switch (CC) { default: break; // SETUO etc aren't handled by fsel. + case ISD::SETNE: + std::swap(TV, FV); + case ISD::SETEQ: + if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits + LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); + Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); + if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits + Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); + return DAG.getNode(PPCISD::FSEL, dl, ResVT, + DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); case ISD::SETULT: case ISD::SETLT: std::swap(TV, FV); // fsel is natively setge, swap operands for setlt @@ -4610,30 +4727,41 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue Cmp; switch (CC) { default: break; // SETUO etc aren't handled by fsel. 
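// Standalone sketch, not part of this patch, of the fsel-based lowering added
// in this hunk. fsel(a, tv, fv) models the PPC instruction: tv when a >= 0.0,
// fv otherwise (including NaN), which is why the code above bails out unless
// no-NaNs/no-infs math is in effect. Function names are illustrative.
#include <cassert>

static double fsel(double a, double tv, double fv) { return a >= 0.0 ? tv : fv; }

// The SETEQ case below: two fsels over cmp = lhs - rhs and its negation.
static double select_eq(double lhs, double rhs, double tv, double fv) {
  double cmp  = lhs - rhs;
  double sel1 = fsel(cmp, tv, fv);   // tv when cmp >= 0
  return fsel(-cmp, sel1, fv);       // keep sel1 only when cmp <= 0
}

int main() {
  assert(select_eq(4.0, 4.0, 1.0, 0.0) == 1.0);
  assert(select_eq(4.0, 5.0, 1.0, 0.0) == 0.0);
  assert(select_eq(5.0, 4.0, 1.0, 0.0) == 0.0);
  return 0;
}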
+ case ISD::SETNE: + std::swap(TV, FV); + case ISD::SETEQ: + Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); + if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits + Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); + Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); + if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits + Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); + return DAG.getNode(PPCISD::FSEL, dl, ResVT, + DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); case ISD::SETULT: case ISD::SETLT: Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); - return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); + return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); case ISD::SETOGE: case ISD::SETGE: Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); - return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); + return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); case ISD::SETUGT: case ISD::SETGT: Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); - return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); + return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); case ISD::SETOLE: case ISD::SETLE: Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); - return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); + return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); } return Op; } @@ -4651,37 +4779,72 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); case MVT::i32: Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : - PPCISD::FCTIDZ, + (PPCSubTarget.hasFPCVT() ? PPCISD::FCTIWUZ : + PPCISD::FCTIDZ), dl, MVT::f64, Src); break; case MVT::i64: - Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src); + assert((Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()) && + "i64 FP_TO_UINT is supported only with FPCVT"); + Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : + PPCISD::FCTIDUZ, + dl, MVT::f64, Src); break; } // Convert the FP value to an int value through memory. - SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64); + bool i32Stack = Op.getValueType() == MVT::i32 && PPCSubTarget.hasSTFIWX() && + (Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()); + SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); + int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); + MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); // Emit a store to the stack slot. 
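// Standalone sketch, not part of this patch, of why the non-STFIWX path below
// adds a 4-byte bias to the stack slot: the conversion result occupies the low
// word of the 64-bit FPR image, and on a big-endian layout that low word sits
// at byte offset 4 of the 8-byte slot. Byte order is spelled out by hand so
// the demo is host-independent; the value is illustrative.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t FprImage = 0x12345678ABCDEF01ULL;  // 64-bit image left in the FPR
  unsigned char Slot[8];
  for (int i = 0; i < 8; ++i)                 // stfd-style big-endian store
    Slot[i] = (unsigned char)(FprImage >> (8 * (7 - i)));

  uint32_t LowWord = 0;
  for (int i = 0; i < 4; ++i)                 // 32-bit load from Slot + 4
    LowWord = (LowWord << 8) | Slot[4 + i];

  assert(LowWord == (uint32_t)FprImage);      // the bias picks out the result
  return 0;
}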
- SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, - MachinePointerInfo(), false, false, 0); + SDValue Chain; + if (i32Stack) { + MachineFunction &MF = DAG.getMachineFunction(); + MachineMemOperand *MMO = + MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); + SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; + Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, + DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), + MVT::i32, MMO); + } else + Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, + MPI, false, false, 0); // Result is a load from the stack slot. If loading 4 bytes, make sure to // add in a bias. - if (Op.getValueType() == MVT::i32) + if (Op.getValueType() == MVT::i32 && !i32Stack) { FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, DAG.getConstant(4, FIPtr.getValueType())); - return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MachinePointerInfo(), + MPI = MachinePointerInfo(); + } + + return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI, false, false, false, 0); } -SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, +SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { DebugLoc dl = Op.getDebugLoc(); // Don't handle ppc_fp128 here; let it be lowered to a libcall. if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) return SDValue(); + assert((Op.getOpcode() == ISD::SINT_TO_FP || PPCSubTarget.hasFPCVT()) && + "UINT_TO_FP is supported only with FPCVT"); + + // If we have FCFIDS, then use it when converting to single-precision. + // Otherwise, convert to double-precision and then round. + unsigned FCFOp = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? + (Op.getOpcode() == ISD::UINT_TO_FP ? + PPCISD::FCFIDUS : PPCISD::FCFIDS) : + (Op.getOpcode() == ISD::UINT_TO_FP ? + PPCISD::FCFIDU : PPCISD::FCFID); + MVT FCFTy = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? + MVT::f32 : MVT::f64; + if (Op.getOperand(0).getValueType() == MVT::i64) { SDValue SINT = Op.getOperand(0); // When converting to single-precision, we actually need to convert @@ -4695,6 +4858,7 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, // However, if -enable-unsafe-fp-math is in effect, accept double // rounding to avoid the extra overhead. if (Op.getValueType() == MVT::f32 && + !PPCSubTarget.hasFPCVT() && !DAG.getTarget().Options.UnsafeFPMath) { // Twiddle input to make sure the low 11 bits are zero. (If this @@ -4728,44 +4892,69 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); } + SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); - SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits); - if (Op.getValueType() == MVT::f32) + SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); + + if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); return FP; } assert(Op.getOperand(0).getValueType() == MVT::i32 && - "Unhandled SINT_TO_FP type in custom expander!"); + "Unhandled INT_TO_FP type in custom expander!"); // Since we only generate this in 64-bit mode, we can take advantage of // 64-bit registers. In particular, sign extend the input value into the // 64-bit register with extsw, store the WHOLE 64-bit value into the stack // then lfd it and fcfid it. 
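// Standalone sketch, not part of this patch, of the i32 -> FP path described
// above: without lfiwax (which sign-extends a 32-bit word straight into an FPR
// from a 4-byte slot), the value is sign-extended to 64 bits (extsw), stored,
// reloaded with lfd, and converted with fcfid. In C++ terms that is a widening
// cast followed by an i64 -> double conversion.
#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -7;
  int64_t Wide = (int64_t)x;    // extsw: sign-extend into a 64-bit GPR
  double  D    = (double)Wide;  // fcfid: convert the 64-bit integer to f64
  assert(D == -7.0);
  return 0;
}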
MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *FrameInfo = MF.getFrameInfo(); - int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); - SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32, + SDValue Ld; + if (PPCSubTarget.hasLFIWAX() || PPCSubTarget.hasFPCVT()) { + int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); + SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); + + SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, + MachinePointerInfo::getFixedStack(FrameIdx), + false, false, 0); + + assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && + "Expected an i32 store"); + MachineMemOperand *MMO = + MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), + MachineMemOperand::MOLoad, 4, 4); + SDValue Ops[] = { Store, FIdx }; + Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? + PPCISD::LFIWZX : PPCISD::LFIWAX, + dl, DAG.getVTList(MVT::f64, MVT::Other), + Ops, 2, MVT::i32, MMO); + } else { + assert(PPCSubTarget.isPPC64() && + "i32->FP without LFIWAX supported only on PPC64"); + + int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); + SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); + + SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Op.getOperand(0)); - // STD the extended value into the stack slot. - MachineMemOperand *MMO = - MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), - MachineMemOperand::MOStore, 8, 8); - SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx }; - SDValue Store = - DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other), - Ops, 4, MVT::i64, MMO); - // Load the value as a double. - SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, MachinePointerInfo(), - false, false, false, 0); + // STD the extended value into the stack slot. + SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, + MachinePointerInfo::getFixedStack(FrameIdx), + false, false, 0); + + // Load the value as a double. + Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, + MachinePointerInfo::getFixedStack(FrameIdx), + false, false, false, 0); + } // FCFID it and return it. - SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld); - if (Op.getValueType() == MVT::f32) + SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); + if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); return FP; } @@ -5560,11 +5749,15 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); + case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); + case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); + case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::FP_TO_UINT: case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, Op.getDebugLoc()); - case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); + case ISD::UINT_TO_FP: + case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); // Lower 64-bit shifts. @@ -5618,50 +5811,8 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N, MVT::f64, N->getOperand(0), DAG.getIntPtrConstant(1)); - // This sequence changes FPSCR to do round-to-zero, adds the two halves - // of the long double, and puts FPSCR back the way it was. We do not - // actually model FPSCR. 
- std::vector<EVT> NodeTys; - SDValue Ops[4], Result, MFFSreg, InFlag, FPreg; - - NodeTys.push_back(MVT::f64); // Return register - NodeTys.push_back(MVT::Glue); // Returns a flag for later insns - Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); - MFFSreg = Result.getValue(0); - InFlag = Result.getValue(1); - - NodeTys.clear(); - NodeTys.push_back(MVT::Glue); // Returns a flag - Ops[0] = DAG.getConstant(31, MVT::i32); - Ops[1] = InFlag; - Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2); - InFlag = Result.getValue(0); - - NodeTys.clear(); - NodeTys.push_back(MVT::Glue); // Returns a flag - Ops[0] = DAG.getConstant(30, MVT::i32); - Ops[1] = InFlag; - Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2); - InFlag = Result.getValue(0); - - NodeTys.clear(); - NodeTys.push_back(MVT::f64); // result of add - NodeTys.push_back(MVT::Glue); // Returns a flag - Ops[0] = Lo; - Ops[1] = Hi; - Ops[2] = InFlag; - Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3); - FPreg = Result.getValue(0); - InFlag = Result.getValue(1); - - NodeTys.clear(); - NodeTys.push_back(MVT::f64); - Ops[0] = DAG.getConstant(1, MVT::i32); - Ops[1] = MFFSreg; - Ops[2] = FPreg; - Ops[3] = InFlag; - Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4); - FPreg = Result.getValue(0); + // Add the two halves of the long double in round-to-zero mode. + SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); // We know the low half is about to be thrown away, so just use something // convenient. @@ -5753,7 +5904,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, // registers without caring whether they're 32 or 64, but here we're // doing actual arithmetic on the addresses. bool is64bit = PPCSubTarget.isPPC64(); - unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0; + unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; const BasicBlock *LLVM_BB = BB->getBasicBlock(); MachineFunction *F = BB->getParent(); @@ -5857,7 +6008,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, .addReg(TmpReg).addReg(MaskReg); BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) .addReg(Tmp3Reg).addReg(Tmp2Reg); - BuildMI(BB, dl, TII->get(is64bit ? 
PPC::STDCX : PPC::STWCX)) + BuildMI(BB, dl, TII->get(PPC::STWCX)) .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); BuildMI(BB, dl, TII->get(PPC::BCC)) .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); @@ -5872,9 +6023,238 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, return BB; } +llvm::MachineBasicBlock* +PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const { + DebugLoc DL = MI->getDebugLoc(); + const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + + const BasicBlock *BB = MBB->getBasicBlock(); + MachineFunction::iterator I = MBB; + ++I; + + // Memory Reference + MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); + MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); + + unsigned DstReg = MI->getOperand(0).getReg(); + const TargetRegisterClass *RC = MRI.getRegClass(DstReg); + assert(RC->hasType(MVT::i32) && "Invalid destination!"); + unsigned mainDstReg = MRI.createVirtualRegister(RC); + unsigned restoreDstReg = MRI.createVirtualRegister(RC); + + MVT PVT = getPointerTy(); + assert((PVT == MVT::i64 || PVT == MVT::i32) && + "Invalid Pointer Size!"); + // For v = setjmp(buf), we generate + // + // thisMBB: + // SjLjSetup mainMBB + // bl mainMBB + // v_restore = 1 + // b sinkMBB + // + // mainMBB: + // buf[LabelOffset] = LR + // v_main = 0 + // + // sinkMBB: + // v = phi(main, restore) + // + + MachineBasicBlock *thisMBB = MBB; + MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); + MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); + MF->insert(I, mainMBB); + MF->insert(I, sinkMBB); + + MachineInstrBuilder MIB; + + // Transfer the remainder of BB and its successor edges to sinkMBB. + sinkMBB->splice(sinkMBB->begin(), MBB, + llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); + + // Note that the structure of the jmp_buf used here is not compatible + // with that used by libc, and is not designed to be. Specifically, it + // stores only those 'reserved' registers that LLVM does not otherwise + // understand how to spill. Also, by convention, by the time this + // intrinsic is called, Clang has already stored the frame address in the + // first slot of the buffer and stack address in the third. Following the + // X86 target code, we'll store the jump address in the second slot. We also + // need to save the TOC pointer (R2) to handle jumps between shared + // libraries, and that will be stored in the fourth slot. The thread + // identifier (R13) is not affected. + + // thisMBB: + const int64_t LabelOffset = 1 * PVT.getStoreSize(); + const int64_t TOCOffset = 3 * PVT.getStoreSize(); + + // Prepare IP either in reg. 
+ const TargetRegisterClass *PtrRC = getRegClassFor(PVT); + unsigned LabelReg = MRI.createVirtualRegister(PtrRC); + unsigned BufReg = MI->getOperand(1).getReg(); + + if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) { + MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) + .addReg(PPC::X2) + .addImm(TOCOffset / 4) + .addReg(BufReg); + + MIB.setMemRefs(MMOBegin, MMOEnd); + } + + // Setup + MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); + MIB.addRegMask(PPCRegInfo->getNoPreservedMask()); + + BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); + + MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) + .addMBB(mainMBB); + MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); + + thisMBB->addSuccessor(mainMBB, /* weight */ 0); + thisMBB->addSuccessor(sinkMBB, /* weight */ 1); + + // mainMBB: + // mainDstReg = 0 + MIB = BuildMI(mainMBB, DL, + TII->get(PPCSubTarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); + + // Store IP + if (PPCSubTarget.isPPC64()) { + MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) + .addReg(LabelReg) + .addImm(LabelOffset / 4) + .addReg(BufReg); + } else { + MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) + .addReg(LabelReg) + .addImm(LabelOffset) + .addReg(BufReg); + } + + MIB.setMemRefs(MMOBegin, MMOEnd); + + BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); + mainMBB->addSuccessor(sinkMBB); + + // sinkMBB: + BuildMI(*sinkMBB, sinkMBB->begin(), DL, + TII->get(PPC::PHI), DstReg) + .addReg(mainDstReg).addMBB(mainMBB) + .addReg(restoreDstReg).addMBB(thisMBB); + + MI->eraseFromParent(); + return sinkMBB; +} + +MachineBasicBlock * +PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const { + DebugLoc DL = MI->getDebugLoc(); + const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + + // Memory Reference + MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); + MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); + + MVT PVT = getPointerTy(); + assert((PVT == MVT::i64 || PVT == MVT::i32) && + "Invalid Pointer Size!"); + + const TargetRegisterClass *RC = + (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; + unsigned Tmp = MRI.createVirtualRegister(RC); + // Since FP is only updated here but NOT referenced, it's treated as GPR. + unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; + unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; + + MachineInstrBuilder MIB; + + const int64_t LabelOffset = 1 * PVT.getStoreSize(); + const int64_t SPOffset = 2 * PVT.getStoreSize(); + const int64_t TOCOffset = 3 * PVT.getStoreSize(); + + unsigned BufReg = MI->getOperand(0).getReg(); + + // Reload FP (the jumped-to function may not have had a + // frame pointer, and if so, then its r31 will be restored + // as necessary). 
+ if (PVT == MVT::i64) { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) + .addImm(0) + .addReg(BufReg); + } else { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) + .addImm(0) + .addReg(BufReg); + } + MIB.setMemRefs(MMOBegin, MMOEnd); + + // Reload IP + if (PVT == MVT::i64) { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) + .addImm(LabelOffset / 4) + .addReg(BufReg); + } else { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) + .addImm(LabelOffset) + .addReg(BufReg); + } + MIB.setMemRefs(MMOBegin, MMOEnd); + + // Reload SP + if (PVT == MVT::i64) { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) + .addImm(SPOffset / 4) + .addReg(BufReg); + } else { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) + .addImm(SPOffset) + .addReg(BufReg); + } + MIB.setMemRefs(MMOBegin, MMOEnd); + + // FIXME: When we also support base pointers, that register must also be + // restored here. + + // Reload TOC + if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) { + MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) + .addImm(TOCOffset / 4) + .addReg(BufReg); + + MIB.setMemRefs(MMOBegin, MMOEnd); + } + + // Jump + BuildMI(*MBB, MI, DL, + TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); + BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); + + MI->eraseFromParent(); + return MBB; +} + MachineBasicBlock * PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *BB) const { + if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || + MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { + return emitEHSjLjSetJmp(MI, BB); + } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || + MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { + return emitEHSjLjLongJmp(MI, BB); + } + const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); // To "insert" these instructions we actually have to insert their @@ -5887,29 +6267,13 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || MI->getOpcode() == PPC::SELECT_CC_I8)) { - unsigned OpCode = MI->getOpcode() == PPC::SELECT_CC_I8 ? - PPC::ISEL8 : PPC::ISEL; - unsigned SelectPred = MI->getOperand(4).getImm(); - DebugLoc dl = MI->getDebugLoc(); + SmallVector<MachineOperand, 2> Cond; + Cond.push_back(MI->getOperand(4)); + Cond.push_back(MI->getOperand(1)); - // The SelectPred is ((BI << 5) | BO) for a BCC - unsigned BO = SelectPred & 0xF; - assert((BO == 12 || BO == 4) && "invalid predicate BO field for isel"); - - unsigned TrueOpNo, FalseOpNo; - if (BO == 12) { - TrueOpNo = 2; - FalseOpNo = 3; - } else { - TrueOpNo = 3; - FalseOpNo = 2; - SelectPred = PPC::InvertPredicate((PPC::Predicate)SelectPred); - } - - BuildMI(*BB, MI, dl, TII->get(OpCode), MI->getOperand(0).getReg()) - .addReg(MI->getOperand(TrueOpNo).getReg()) - .addReg(MI->getOperand(FalseOpNo).getReg()) - .addImm(SelectPred).addReg(MI->getOperand(1).getReg()); + DebugLoc dl = MI->getDebugLoc(); + PPCII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), Cond, + MI->getOperand(2).getReg(), MI->getOperand(3).getReg()); } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || MI->getOpcode() == PPC::SELECT_CC_I8 || MI->getOpcode() == PPC::SELECT_CC_F4 || @@ -6142,7 +6506,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); unsigned Ptr1Reg; unsigned TmpReg = RegInfo.createVirtualRegister(RC); - unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0; + unsigned ZeroReg = is64bit ? 
PPC::ZERO8 : PPC::ZERO; // thisMBB: // ... // fallthrough --> loopMBB @@ -6245,6 +6609,75 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, BB = exitMBB; BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) .addReg(ShiftReg); + } else if (MI->getOpcode() == PPC::FADDrtz) { + // This pseudo performs an FADD with rounding mode temporarily forced + // to round-to-zero. We emit this via custom inserter since the FPSCR + // is not modeled at the SelectionDAG level. + unsigned Dest = MI->getOperand(0).getReg(); + unsigned Src1 = MI->getOperand(1).getReg(); + unsigned Src2 = MI->getOperand(2).getReg(); + DebugLoc dl = MI->getDebugLoc(); + + MachineRegisterInfo &RegInfo = F->getRegInfo(); + unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); + + // Save FPSCR value. + BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); + + // Set rounding mode to round-to-zero. + BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); + BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); + + // Perform addition. + BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); + + // Restore FPSCR value. + BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg); + } else if (MI->getOpcode() == PPC::FRINDrint || + MI->getOpcode() == PPC::FRINSrint) { + bool isf32 = MI->getOpcode() == PPC::FRINSrint; + unsigned Dest = MI->getOperand(0).getReg(); + unsigned Src = MI->getOperand(1).getReg(); + DebugLoc dl = MI->getDebugLoc(); + + MachineRegisterInfo &RegInfo = F->getRegInfo(); + unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); + + // Perform the rounding. + BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FRINS : PPC::FRIND), Dest) + .addReg(Src); + + // Compare the results. + BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FCMPUS : PPC::FCMPUD), CRReg) + .addReg(Dest).addReg(Src); + + // If the results were not equal, then set the FPSCR XX bit. + MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, midMBB); + F->insert(It, exitMBB); + exitMBB->splice(exitMBB->begin(), BB, + llvm::next(MachineBasicBlock::iterator(MI)), + BB->end()); + exitMBB->transferSuccessorsAndUpdatePHIs(BB); + + BuildMI(*BB, MI, dl, TII->get(PPC::BCC)) + .addImm(PPC::PRED_EQ).addReg(CRReg).addMBB(exitMBB); + + BB->addSuccessor(midMBB); + BB->addSuccessor(exitMBB); + + BB = midMBB; + + // Set the FPSCR XX bit (FE_INEXACT). Note that we cannot just set + // the FI bit here because that will not automatically set XX also, + // and XX is what libm interprets as the FE_INEXACT flag. 
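// Standalone C++ analogue, not part of this patch, of the two custom inserters
// in this hunk: FADDrtz saves the FP state, forces round-to-zero around a
// single add (the MTFSB1 31 / MTFSB0 30 pair sets RN to 0b01), then restores
// the state; the frin path raises FE_INEXACT by hand when the rounded result
// differs from the input. Function names are illustrative.
#include <cassert>
#include <cfenv>
#include <cmath>

#pragma STDC FENV_ACCESS ON   // honoured where supported; harmless elsewhere

double fadd_rtz(double a, double b) {
  volatile double va = a, vb = b;      // keep the add out of the constant folder
  std::fenv_t Saved;
  std::fegetenv(&Saved);               // MFFS: save the current FP state
  std::fesetround(FE_TOWARDZERO);      // MTFSB1 31 / MTFSB0 30
  double Sum = va + vb;                // the FADD under the temporary mode
  std::fesetenv(&Saved);               // MTFSF: put the state back
  return Sum;
}

double frin_with_inexact(double x) {
  double R = std::round(x);            // frin: nearest, ties away from zero
  if (R != x)
    std::feraiseexcept(FE_INEXACT);    // the MTFSB1 on the XX bit
  return R;
}

int main() {
  double OneUlp = std::ldexp(1.0, -52);
  assert(fadd_rtz(1.0, 1.5 * OneUlp) == 1.0 + OneUlp);   // truncated toward zero
  assert(1.0 + 1.5 * OneUlp == 1.0 + 2.0 * OneUlp);      // default mode (ties to even) lands on 1 + 2 ulp

  std::feclearexcept(FE_ALL_EXCEPT);
  frin_with_inexact(2.5);
  assert(std::fetestexcept(FE_INEXACT));
  return 0;
}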
+ BuildMI(BB, dl, TII->get(PPC::MTFSB1)).addImm(/* 38 - 32 = */ 6); + BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); + + BB->addSuccessor(exitMBB); + + BB = exitMBB; } else { llvm_unreachable("Unexpected instr type to insert"); } @@ -6257,6 +6690,139 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Target Optimization Hooks //===----------------------------------------------------------------------===// +SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op, + DAGCombinerInfo &DCI) const { + if (DCI.isAfterLegalizeVectorOps()) + return SDValue(); + + EVT VT = Op.getValueType(); + + if ((VT == MVT::f32 && PPCSubTarget.hasFRES()) || + (VT == MVT::f64 && PPCSubTarget.hasFRE()) || + (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { + + // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) + // For the reciprocal, we need to find the zero of the function: + // F(X) = A X - 1 [which has a zero at X = 1/A] + // => + // X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form + // does not require additional intermediate precision] + + // Convergence is quadratic, so we essentially double the number of digits + // correct after every iteration. The minimum architected relative + // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has + // 23 digits and double has 52 digits. + int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3; + if (VT.getScalarType() == MVT::f64) + ++Iterations; + + SelectionDAG &DAG = DCI.DAG; + DebugLoc dl = Op.getDebugLoc(); + + SDValue FPOne = + DAG.getConstantFP(1.0, VT.getScalarType()); + if (VT.isVector()) { + assert(VT.getVectorNumElements() == 4 && + "Unknown vector type"); + FPOne = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, + FPOne, FPOne, FPOne, FPOne); + } + + SDValue Est = DAG.getNode(PPCISD::FRE, dl, VT, Op); + DCI.AddToWorklist(Est.getNode()); + + // Newton iterations: Est = Est + Est (1 - Arg * Est) + for (int i = 0; i < Iterations; ++i) { + SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Op, Est); + DCI.AddToWorklist(NewEst.getNode()); + + NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPOne, NewEst); + DCI.AddToWorklist(NewEst.getNode()); + + NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst); + DCI.AddToWorklist(NewEst.getNode()); + + Est = DAG.getNode(ISD::FADD, dl, VT, Est, NewEst); + DCI.AddToWorklist(Est.getNode()); + } + + return Est; + } + + return SDValue(); +} + +SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op, + DAGCombinerInfo &DCI) const { + if (DCI.isAfterLegalizeVectorOps()) + return SDValue(); + + EVT VT = Op.getValueType(); + + if ((VT == MVT::f32 && PPCSubTarget.hasFRSQRTES()) || + (VT == MVT::f64 && PPCSubTarget.hasFRSQRTE()) || + (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { + + // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) + // For the reciprocal sqrt, we need to find the zero of the function: + // F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] + // => + // X_{i+1} = X_i (1.5 - A X_i^2 / 2) + // As a result, we precompute A/2 prior to the iteration loop. + + // Convergence is quadratic, so we essentially double the number of digits + // correct after every iteration. The minimum architected relative + // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has + // 23 digits and double has 52 digits. + int Iterations = PPCSubTarget.hasRecipPrec() ? 
1 : 3; + if (VT.getScalarType() == MVT::f64) + ++Iterations; + + SelectionDAG &DAG = DCI.DAG; + DebugLoc dl = Op.getDebugLoc(); + + SDValue FPThreeHalves = + DAG.getConstantFP(1.5, VT.getScalarType()); + if (VT.isVector()) { + assert(VT.getVectorNumElements() == 4 && + "Unknown vector type"); + FPThreeHalves = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, + FPThreeHalves, FPThreeHalves, + FPThreeHalves, FPThreeHalves); + } + + SDValue Est = DAG.getNode(PPCISD::FRSQRTE, dl, VT, Op); + DCI.AddToWorklist(Est.getNode()); + + // We now need 0.5*Arg which we can write as (1.5*Arg - Arg) so that + // this entire sequence requires only one FP constant. + SDValue HalfArg = DAG.getNode(ISD::FMUL, dl, VT, FPThreeHalves, Op); + DCI.AddToWorklist(HalfArg.getNode()); + + HalfArg = DAG.getNode(ISD::FSUB, dl, VT, HalfArg, Op); + DCI.AddToWorklist(HalfArg.getNode()); + + // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est) + for (int i = 0; i < Iterations; ++i) { + SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, Est); + DCI.AddToWorklist(NewEst.getNode()); + + NewEst = DAG.getNode(ISD::FMUL, dl, VT, HalfArg, NewEst); + DCI.AddToWorklist(NewEst.getNode()); + + NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPThreeHalves, NewEst); + DCI.AddToWorklist(NewEst.getNode()); + + Est = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst); + DCI.AddToWorklist(Est.getNode()); + } + + return Est; + } + + return SDValue(); +} + SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { const TargetMachine &TM = getTargetMachine(); @@ -6283,7 +6849,72 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, return N->getOperand(0); } break; + case ISD::FDIV: { + assert(TM.Options.UnsafeFPMath && + "Reciprocal estimates require UnsafeFPMath"); + + if (N->getOperand(1).getOpcode() == ISD::FSQRT) { + SDValue RV = + DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI); + if (RV.getNode() != 0) { + DCI.AddToWorklist(RV.getNode()); + return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), + N->getOperand(0), RV); + } + } else if (N->getOperand(1).getOpcode() == ISD::FP_EXTEND && + N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { + SDValue RV = + DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), + DCI); + if (RV.getNode() != 0) { + DCI.AddToWorklist(RV.getNode()); + RV = DAG.getNode(ISD::FP_EXTEND, N->getOperand(1).getDebugLoc(), + N->getValueType(0), RV); + DCI.AddToWorklist(RV.getNode()); + return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), + N->getOperand(0), RV); + } + } else if (N->getOperand(1).getOpcode() == ISD::FP_ROUND && + N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { + SDValue RV = + DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), + DCI); + if (RV.getNode() != 0) { + DCI.AddToWorklist(RV.getNode()); + RV = DAG.getNode(ISD::FP_ROUND, N->getOperand(1).getDebugLoc(), + N->getValueType(0), RV, + N->getOperand(1).getOperand(1)); + DCI.AddToWorklist(RV.getNode()); + return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), + N->getOperand(0), RV); + } + } + SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI); + if (RV.getNode() != 0) { + DCI.AddToWorklist(RV.getNode()); + return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), + N->getOperand(0), RV); + } + + } + break; + case ISD::FSQRT: { + assert(TM.Options.UnsafeFPMath && + "Reciprocal estimates require UnsafeFPMath"); + + // Compute this as 1/(1/sqrt(X)), which is the reciprocal of the + // reciprocal sqrt. 
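
As a side note to the two combines above: they refine the hardware FRE/FRSQRTE estimates with exactly the Newton iterations described in their comments. Below is a minimal scalar C++ sketch of the same arithmetic, not taken from the patch; the iteration form, the roughly 2^-5 starting accuracy, and the recip(rsqrt(x)) formulation of sqrt come from the comments above, while the estimate values and helper names are illustrative.

#include <cmath>
#include <cstdio>

// x_{i+1} = x_i + x_i*(1 - a*x_i): refines an estimate of 1/a.
double refineRecip(double a, double est, int iterations) {
  double x = est;
  for (int i = 0; i < iterations; ++i)
    x = x + x * (1.0 - a * x);
  return x;
}

// x_{i+1} = x_i*(1.5 - (a/2)*x_i*x_i): refines an estimate of 1/sqrt(a).
// a/2 is formed as 1.5*a - a so only the single 1.5 constant is needed.
double refineRSqrt(double a, double est, int iterations) {
  double halfA = 1.5 * a - a;
  double x = est;
  for (int i = 0; i < iterations; ++i)
    x = x * (1.5 - halfA * x * x);
  return x;
}

int main() {
  double a = 3.0;
  // Stand-ins for the FRE/FRSQRTE results, assumed accurate to about 2^-5.
  double recipEst = (1.0 / a) * (1.0 + 1.0 / 32.0);
  double rsqrtEst = (1.0 / std::sqrt(a)) * (1.0 - 1.0 / 32.0);
  std::printf("1/a     ~ %.17g (exact %.17g)\n",
              refineRecip(a, recipEst, 3), 1.0 / a);
  // The FSQRT combine then forms sqrt(a) as the reciprocal of 1/sqrt(a).
  double r = refineRSqrt(a, rsqrtEst, 3);
  std::printf("sqrt(a) ~ %.17g (exact %.17g)\n", 1.0 / r, std::sqrt(a));
  return 0;
}

Each pass roughly doubles the number of correct digits, which is why the code above adds one extra iteration for f64 operands.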
+ SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI); + if (RV.getNode() != 0) { + DCI.AddToWorklist(RV.getNode()); + RV = DAGCombineFastRecip(RV, DCI); + if (RV.getNode() != 0) + return RV; + } + + } + break; case ISD::SINT_TO_FP: if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { @@ -6330,8 +6961,15 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); DCI.AddToWorklist(Val.getNode()); - Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val, - N->getOperand(2), N->getOperand(3)); + SDValue Ops[] = { + N->getOperand(0), Val, N->getOperand(2), + DAG.getValueType(N->getOperand(1).getValueType()) + }; + + Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, + DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), + cast<StoreSDNode>(N)->getMemoryVT(), + cast<StoreSDNode>(N)->getMemOperand()); DCI.AddToWorklist(Val.getNode()); return Val; } @@ -6341,7 +6979,10 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, N->getOperand(1).getOpcode() == ISD::BSWAP && N->getOperand(1).getNode()->hasOneUse() && (N->getOperand(1).getValueType() == MVT::i32 || - N->getOperand(1).getValueType() == MVT::i16)) { + N->getOperand(1).getValueType() == MVT::i16 || + (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && + TM.getSubtarget<PPCSubtarget>().isPPC64() && + N->getOperand(1).getValueType() == MVT::i64))) { SDValue BSwapOp = N->getOperand(1).getOperand(0); // Do an any-extend to 32-bits if this is a half-word input. if (BSwapOp.getValueType() == MVT::i16) @@ -6362,7 +7003,10 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, // Turn BSWAP (LOAD) -> lhbrx/lwbrx. if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && N->getOperand(0).hasOneUse() && - (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { + (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || + (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && + TM.getSubtarget<PPCSubtarget>().isPPC64() && + N->getValueType(0) == MVT::i64))) { SDValue Load = N->getOperand(0); LoadSDNode *LD = cast<LoadSDNode>(Load); // Create the byte-swapping load. @@ -6373,8 +7017,9 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, }; SDValue BSLoad = DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, - DAG.getVTList(MVT::i32, MVT::Other), Ops, 3, - LD->getMemoryVT(), LD->getMemOperand()); + DAG.getVTList(N->getValueType(0) == MVT::i64 ? + MVT::i64 : MVT::i32, MVT::Other), + Ops, 3, LD->getMemoryVT(), LD->getMemOperand()); // If this is an i16 load, insert the truncate. SDValue ResVal = BSLoad; @@ -6631,6 +7276,9 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, // GCC RS6000 Constraint Letters switch (Constraint[0]) { case 'b': // R1-R31 + if (VT == MVT::i64 && PPCSubTarget.isPPC64()) + return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); + return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); case 'r': // R0-R31 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) return std::make_pair(0U, &PPC::G8RCRegClass); @@ -6815,13 +7463,16 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); MFI->setFrameAddressIsTaken(true); - bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) || - MFI->hasVarSizedObjects()) && - MFI->getStackSize() && - !MF.getFunction()->getAttributes(). - hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked); - unsigned FrameReg = isPPC64 ? (is31 ? 
PPC::X31 : PPC::X1) : - (is31 ? PPC::R31 : PPC::R1); + + // Naked functions never have a frame pointer, and so we use r1. For all + // other functions, this decision must be delayed until during PEI. + unsigned FrameReg; + if (MF.getFunction()->getAttributes().hasAttribute( + AttributeSet::FunctionIndex, Attribute::Naked)) + FrameReg = isPPC64 ? PPC::X1 : PPC::R1; + else + FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; + SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT); while (Depth--) diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index 8d44d9f..423e983 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -16,6 +16,8 @@ #define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H #include "PPC.h" +#include "PPCInstrInfo.h" +#include "PPCRegisterInfo.h" #include "PPCSubtarget.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/Target/TargetLowering.h" @@ -35,14 +37,21 @@ namespace llvm { /// was temporarily in the f64 operand. FCFID, + /// Newer FCFID[US] integer-to-floating-point conversion instructions for + /// unsigned integers and single-precision outputs. + FCFIDU, FCFIDS, FCFIDUS, + /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 /// operand, producing an f64 value containing the integer representation /// of that FP value. FCTIDZ, FCTIWZ, - /// STFIWX - The STFIWX instruction. The first operand is an input token - /// chain, then an f64 value to store, then an address to store it to. - STFIWX, + /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for + /// unsigned integers. + FCTIDUZ, FCTIWUZ, + + /// Reciprocal estimate instructions (unary FP ops). + FRE, FRSQRTE, // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking // three v4f32 operands and producing a v4f32 result. @@ -90,17 +99,10 @@ namespace llvm { /// code. SRL, SRA, SHL, - /// EXTSW_32 - This is the EXTSW instruction for use with "32-bit" - /// registers. - EXTSW_32, - /// CALL - A direct function call. - /// CALL_NOP_SVR4 is a call with the special NOP which follows 64-bit + /// CALL_NOP is a call with the special NOP which follows 64-bit /// SVR4 calls. - CALL_Darwin, CALL_SVR4, CALL_NOP_SVR4, - - /// NOP - Special NOP which follows 64-bit SVR4 calls. - NOP, + CALL, CALL_NOP, /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a /// MTCTR instruction. @@ -108,7 +110,7 @@ namespace llvm { /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a /// BCTRL instruction. - BCTRL_Darwin, BCTRL_SVR4, + BCTRL, /// Return with a flag operand, matched by 'blr' RET_FLAG, @@ -119,6 +121,12 @@ namespace llvm { /// are undefined. MFCR, + // EH_SJLJ_SETJMP - SjLj exception handling setjmp. + EH_SJLJ_SETJMP, + + // EH_SJLJ_LONGJMP - SjLj exception handling longjmp. + EH_SJLJ_LONGJMP, + /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* /// instructions. For lack of better number, we use the opcode number /// encoding for the OPC field to identify the compare. For example, 838 @@ -138,26 +146,13 @@ namespace llvm { /// an optional input flag argument. COND_BRANCH, - // The following 5 instructions are used only as part of the - // long double-to-int conversion sequence. - - /// OUTFLAG = MFFS F8RC - This moves the FPSCR (not modelled) into the - /// register. - MFFS, - - /// OUTFLAG = MTFSB0 INFLAG - This clears a bit in the FPSCR. - MTFSB0, - - /// OUTFLAG = MTFSB1 INFLAG - This sets a bit in the FPSCR. 
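
A note on the EH_SJLJ_SETJMP/EH_SJLJ_LONGJMP nodes introduced here: the longjmp custom inserter shown earlier reloads the frame pointer from offset 0 of the buffer, the resume address from LabelOffset, the stack pointer from SPOffset, and, on 64-bit SVR4, the TOC pointer (X2) from TOCOffset; the /4 on the 64-bit loads reflects the DS-form ld displacement being word-scaled in this representation. Those offset constants are defined elsewhere in PPCISelLowering.cpp, so the sketch below only mirrors the reload order and keeps them symbolic.

#include <cstdint>

// Illustrative only: the state the EH_SjLj_LongJmp expansion restores from
// the buffer. The concrete LabelOffset/SPOffset/TOCOffset values are not
// shown in this hunk.
struct PPCSjLjState {
  uint64_t FramePointer;  // buf + 0            -> FP
  uint64_t ResumeIP;      // buf + LabelOffset  -> Tmp, then mtctr + bctr
  uint64_t StackPointer;  // buf + SPOffset     -> SP
  uint64_t TOCPointer;    // buf + TOCOffset    -> X2 (64-bit SVR4 only)
};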
- MTFSB1, - - /// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done with - /// rounding towards zero. It has flags added so it won't move past the - /// FPSCR-setting instructions. + /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding + /// towards zero. Used only as part of the long double-to-int + /// conversion sequence. FADDRTZ, - /// MTFSF = F8RC, INFLAG - This moves the register into the FPSCR. - MTFSF, + /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register. + MFFS, /// LARX = This corresponds to PPC l{w|d}arx instrcution: load and /// reserve indexed. This is used to implement atomic operations. @@ -243,14 +238,11 @@ namespace llvm { /// optimizations due to constant folding. VADD_SPLAT, - /// STD_32 - This is the STD instruction for use with "32-bit" registers. - STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE, - /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a /// byte-swapping store instruction. It byte-swaps the low "Type" bits of /// the GPRC input, then stores it through Ptr. Type can be either i16 or /// i32. - STBRX, + STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE, /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a /// byte-swapping load instruction. It loads "Type" bits, byte swaps it, @@ -258,6 +250,20 @@ namespace llvm { /// or i32. LBRX, + /// STFIWX - The STFIWX instruction. The first operand is an input token + /// chain, then an f64 value to store, then an address to store it to. + STFIWX, + + /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point + /// load which sign-extends from a 32-bit integer value into the + /// destination 64-bit register. + LFIWAX, + + /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point + /// load which zero-extends from a 32-bit integer value into the + /// destination 64-bit register. + LFIWZX, + /// G8RC = ADDIS_TOC_HA %X2, Symbol - For medium and large code model, /// produces an ADDIS8 instruction that adds the TOC base register to /// sym@toc@ha. @@ -321,6 +327,8 @@ namespace llvm { class PPCTargetLowering : public TargetLowering { const PPCSubtarget &PPCSubTarget; + const PPCRegisterInfo *PPCRegInfo; + const PPCInstrInfo *PPCII; public: explicit PPCTargetLowering(PPCTargetMachine &TM); @@ -395,6 +403,12 @@ namespace llvm { MachineBasicBlock *MBB, bool is8bit, unsigned Opcode) const; + MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const; + + MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const; + ConstraintType getConstraintType(const std::string &Constraint) const; /// Examine constraint string and operand type and determine a weight value. 
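
The re-specified FADDRTZ node above corresponds to the custom-inserter expansion shown earlier: mffs to save the FPSCR, mtfsb1 31 / mtfsb0 30 to select round-toward-zero, the fadd itself, then mtfsf to restore. At the C level the operation is roughly the following sketch, assuming the standard <cfenv> rounding-mode interface; it is an illustration, not the patch's code.

#include <cfenv>

// Rough equivalent of the FADDRTZ expansion: one FP add performed with the
// rounding mode temporarily forced to round-toward-zero.
double faddRTZ(double a, double b) {
  const int Saved = std::fegetround();   // mffs: save rounding state
  std::fesetround(FE_TOWARDZERO);        // mtfsb1 31 / mtfsb0 30: RN = 0b01
  double Sum = a + b;                    // fadd
  std::fesetround(Saved);                // mtfsf 1, saved: restore
  return Sum;
}

Strictly portable code would also need #pragma STDC FENV_ACCESS ON; the sketch leaves that out.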
@@ -498,7 +512,7 @@ namespace llvm { const PPCSubtarget &Subtarget) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const; - SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const; @@ -608,6 +622,12 @@ namespace llvm { const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; + + SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const; + + SDValue DAGCombineFastRecip(SDValue Op, DAGCombinerInfo &DCI) const; + SDValue DAGCombineFastRecipFSQRT(SDValue Op, DAGCombinerInfo &DCI) const; }; } diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td index bca1bd5..e5d0b91 100644 --- a/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/lib/Target/PowerPC/PPCInstr64Bit.td @@ -30,12 +30,7 @@ def symbolLo64 : Operand<i64> { let EncoderMethod = "getLO16Encoding"; } def tocentry : Operand<iPTR> { - let MIOperandInfo = (ops i32imm:$imm); -} -def memrs : Operand<iPTR> { // memri where the immediate is a symbolLo64 - let PrintMethod = "printMemRegImm"; - let EncoderMethod = "getMemRIXEncoding"; - let MIOperandInfo = (ops symbolLo64:$off, ptr_rc:$reg); + let MIOperandInfo = (ops i64imm:$imm); } def tlsreg : Operand<i64> { let EncoderMethod = "getTLSRegEncoding"; @@ -71,135 +66,136 @@ def HI48_64 : SDNodeXForm<imm, [{ // Calls. // +let Interpretation64Bit = 1 in { +let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in { + let isBranch = 1, isIndirectBranch = 1, Uses = [CTR8] in { + def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>, + Requires<[In64BitMode]>; + + let isCodeGenOnly = 1 in + def BCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond), + "b${cond:cc}ctr ${cond:reg}", BrB, []>, + Requires<[In64BitMode]>; + } +} + let Defs = [LR8] in def MovePCtoLR8 : Pseudo<(outs), (ins), "#MovePCtoLR8", []>, PPC970_Unit_BRU; -// Darwin ABI Calls. -let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in { - // Convenient aliases for call instructions - let Uses = [RM] in { - def BL8_Darwin : IForm<18, 0, 1, - (outs), (ins calltarget:$func), - "bl $func", BrB, []>; // See Pat patterns below. 
- def BLA8_Darwin : IForm<18, 1, 1, - (outs), (ins aaddr:$func), - "bla $func", BrB, [(PPCcall_Darwin (i64 imm:$func))]>; +let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { + let Defs = [CTR8], Uses = [CTR8] in { + def BDZ8 : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst), + "bdz $dst">; + def BDNZ8 : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst), + "bdnz $dst">; } - let Uses = [CTR8, RM] in { - def BCTRL8_Darwin : XLForm_2_ext<19, 528, 20, 0, 1, - (outs), (ins), - "bctrl", BrB, - [(PPCbctrl_Darwin)]>, Requires<[In64BitMode]>; + + let isReturn = 1, Defs = [CTR8], Uses = [CTR8, LR8, RM] in { + def BDZLR8 : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins), + "bdzlr", BrB, []>; + def BDNZLR8 : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins), + "bdnzlr", BrB, []>; } } -// ELF 64 ABI Calls = Darwin ABI Calls -// Used to define BL8_ELF and BLA8_ELF + + let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in { // Convenient aliases for call instructions let Uses = [RM] in { - def BL8_ELF : IForm<18, 0, 1, - (outs), (ins calltarget:$func), - "bl $func", BrB, []>; // See Pat patterns below. + def BL8 : IForm<18, 0, 1, (outs), (ins calltarget:$func), + "bl $func", BrB, []>; // See Pat patterns below. - let isCodeGenOnly = 1 in - def BL8_NOP_ELF : IForm_and_DForm_4_zero<18, 0, 1, 24, + def BLA8 : IForm<18, 1, 1, (outs), (ins aaddr:$func), + "bla $func", BrB, [(PPCcall (i64 imm:$func))]>; + } + let Uses = [RM], isCodeGenOnly = 1 in { + def BL8_NOP : IForm_and_DForm_4_zero<18, 0, 1, 24, (outs), (ins calltarget:$func), "bl $func\n\tnop", BrB, []>; - let isCodeGenOnly = 1 in - def BL8_NOP_ELF_TLSGD : IForm_and_DForm_4_zero<18, 0, 1, 24, + def BL8_NOP_TLSGD : IForm_and_DForm_4_zero<18, 0, 1, 24, (outs), (ins calltarget:$func, tlsgd:$sym), "bl $func($sym)\n\tnop", BrB, []>; - let isCodeGenOnly = 1 in - def BL8_NOP_ELF_TLSLD : IForm_and_DForm_4_zero<18, 0, 1, 24, + def BL8_NOP_TLSLD : IForm_and_DForm_4_zero<18, 0, 1, 24, (outs), (ins calltarget:$func, tlsgd:$sym), "bl $func($sym)\n\tnop", BrB, []>; - def BLA8_ELF : IForm<18, 1, 1, - (outs), (ins aaddr:$func), - "bla $func", BrB, [(PPCcall_SVR4 (i64 imm:$func))]>; - - let isCodeGenOnly = 1 in - def BLA8_NOP_ELF : IForm_and_DForm_4_zero<18, 1, 1, 24, + def BLA8_NOP : IForm_and_DForm_4_zero<18, 1, 1, 24, (outs), (ins aaddr:$func), "bla $func\n\tnop", BrB, - [(PPCcall_nop_SVR4 (i64 imm:$func))]>; + [(PPCcall_nop (i64 imm:$func))]>; } - let Uses = [X11, CTR8, RM] in { - def BCTRL8_ELF : XLForm_2_ext<19, 528, 20, 0, 1, - (outs), (ins), - "bctrl", BrB, - [(PPCbctrl_SVR4)]>, Requires<[In64BitMode]>; + let Uses = [CTR8, RM] in { + def BCTRL8 : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), + "bctrl", BrB, [(PPCbctrl)]>, + Requires<[In64BitMode]>; + + let isCodeGenOnly = 1 in + def BCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond), + "b${cond:cc}ctrl ${cond:reg}", BrB, []>, + Requires<[In64BitMode]>; } } - +} // Interpretation64Bit // Calls -def : Pat<(PPCcall_Darwin (i64 tglobaladdr:$dst)), - (BL8_Darwin tglobaladdr:$dst)>; -def : Pat<(PPCcall_Darwin (i64 texternalsym:$dst)), - (BL8_Darwin texternalsym:$dst)>; - -def : Pat<(PPCcall_SVR4 (i64 tglobaladdr:$dst)), - (BL8_ELF tglobaladdr:$dst)>; -def : Pat<(PPCcall_nop_SVR4 (i64 tglobaladdr:$dst)), - (BL8_NOP_ELF tglobaladdr:$dst)>; - -def : Pat<(PPCcall_SVR4 (i64 texternalsym:$dst)), - (BL8_ELF texternalsym:$dst)>; -def : Pat<(PPCcall_nop_SVR4 (i64 texternalsym:$dst)), - (BL8_NOP_ELF texternalsym:$dst)>; +def : Pat<(PPCcall (i64 tglobaladdr:$dst)), + (BL8 tglobaladdr:$dst)>; +def : 
Pat<(PPCcall_nop (i64 tglobaladdr:$dst)), + (BL8_NOP tglobaladdr:$dst)>; -def : Pat<(PPCnop), - (NOP)>; +def : Pat<(PPCcall (i64 texternalsym:$dst)), + (BL8 texternalsym:$dst)>; +def : Pat<(PPCcall_nop (i64 texternalsym:$dst)), + (BL8_NOP texternalsym:$dst)>; // Atomic operations let usesCustomInserter = 1 in { let Defs = [CR0] in { def ATOMIC_LOAD_ADD_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_ADD_I64", - [(set G8RC:$dst, (atomic_load_add_64 xoaddr:$ptr, G8RC:$incr))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_ADD_I64", + [(set i64:$dst, (atomic_load_add_64 xoaddr:$ptr, i64:$incr))]>; def ATOMIC_LOAD_SUB_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_SUB_I64", - [(set G8RC:$dst, (atomic_load_sub_64 xoaddr:$ptr, G8RC:$incr))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_SUB_I64", + [(set i64:$dst, (atomic_load_sub_64 xoaddr:$ptr, i64:$incr))]>; def ATOMIC_LOAD_OR_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_OR_I64", - [(set G8RC:$dst, (atomic_load_or_64 xoaddr:$ptr, G8RC:$incr))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_OR_I64", + [(set i64:$dst, (atomic_load_or_64 xoaddr:$ptr, i64:$incr))]>; def ATOMIC_LOAD_XOR_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_XOR_I64", - [(set G8RC:$dst, (atomic_load_xor_64 xoaddr:$ptr, G8RC:$incr))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_XOR_I64", + [(set i64:$dst, (atomic_load_xor_64 xoaddr:$ptr, i64:$incr))]>; def ATOMIC_LOAD_AND_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_AND_i64", - [(set G8RC:$dst, (atomic_load_and_64 xoaddr:$ptr, G8RC:$incr))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_AND_i64", + [(set i64:$dst, (atomic_load_and_64 xoaddr:$ptr, i64:$incr))]>; def ATOMIC_LOAD_NAND_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_NAND_I64", - [(set G8RC:$dst, (atomic_load_nand_64 xoaddr:$ptr, G8RC:$incr))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_NAND_I64", + [(set i64:$dst, (atomic_load_nand_64 xoaddr:$ptr, i64:$incr))]>; def ATOMIC_CMP_SWAP_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new), "#ATOMIC_CMP_SWAP_I64", - [(set G8RC:$dst, - (atomic_cmp_swap_64 xoaddr:$ptr, G8RC:$old, G8RC:$new))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$old, g8rc:$new), "#ATOMIC_CMP_SWAP_I64", + [(set i64:$dst, (atomic_cmp_swap_64 xoaddr:$ptr, i64:$old, i64:$new))]>; def ATOMIC_SWAP_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new), "#ATOMIC_SWAP_I64", - [(set G8RC:$dst, (atomic_swap_64 xoaddr:$ptr, G8RC:$new))]>; + (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$new), "#ATOMIC_SWAP_I64", + [(set i64:$dst, (atomic_swap_64 xoaddr:$ptr, i64:$new))]>; } } // Instructions to support atomic operations -def LDARX : XForm_1<31, 84, (outs G8RC:$rD), (ins memrr:$ptr), +def LDARX : XForm_1<31, 84, (outs g8rc:$rD), (ins memrr:$ptr), "ldarx $rD, $ptr", LdStLDARX, - [(set G8RC:$rD, (PPClarx xoaddr:$ptr))]>; + [(set i64:$rD, (PPClarx xoaddr:$ptr))]>; let Defs = [CR0] in -def STDCX : XForm_1<31, 214, (outs), (ins G8RC:$rS, memrr:$dst), +def STDCX : XForm_1<31, 214, (outs), (ins g8rc:$rS, memrr:$dst), "stdcx. 
$rS, $dst", LdStSTDCX, - [(PPCstcx G8RC:$rS, xoaddr:$dst)]>, + [(PPCstcx i64:$rS, xoaddr:$dst)]>, isDOT; +let Interpretation64Bit = 1 in { let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in def TCRETURNdi8 :Pseudo< (outs), (ins calltarget:$dst, i32imm:$offset), @@ -216,17 +212,12 @@ def TCRETURNri8 : Pseudo<(outs), (ins CTRRC8:$dst, i32imm:$offset), "#TC_RETURNr8 $dst $offset", []>; +let isCodeGenOnly = 1 in { let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1, - isIndirectBranch = 1, isCall = 1, Uses = [CTR8, RM] in { - let isReturn = 1 in { - def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>, - Requires<[In64BitMode]>; - } - - def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>, - Requires<[In64BitMode]>; -} + isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR8, RM] in +def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>, + Requires<[In64BitMode]>; let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, @@ -242,6 +233,9 @@ def TAILBA8 : IForm<18, 0, 0, (outs), (ins aaddr:$dst), "ba $dst", BrB, []>; +} +} // Interpretation64Bit + def : Pat<(PPCtc_return (i64 tglobaladdr:$dst), imm:$imm), (TCRETURNdi8 tglobaladdr:$dst, imm:$imm)>; @@ -251,44 +245,53 @@ def : Pat<(PPCtc_return (i64 texternalsym:$dst), imm:$imm), def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm), (TCRETURNri8 CTRRC8:$dst, imm:$imm)>; -let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { - let Defs = [CTR8], Uses = [CTR8] in { - def BDZ8 : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst), - "bdz $dst">; - def BDNZ8 : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst), - "bdnz $dst">; - } -} -// 64-but CR instructions -def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins G8RC:$rS), +// 64-bit CR instructions +let Interpretation64Bit = 1 in { +let neverHasSideEffects = 1 in { +def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins g8rc:$rS), "mtcrf $FXM, $rS", BrMCRX>, PPC970_MicroCode, PPC970_Unit_CRU; -def MFCR8pseud: XFXForm_3<31, 19, (outs G8RC:$rT), (ins crbitm:$FXM), +let isCodeGenOnly = 1 in +def MFCR8pseud: XFXForm_3<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM), "#MFCR8pseud", SprMFCR>, PPC970_MicroCode, PPC970_Unit_CRU; - -def MFCR8 : XFXForm_3<31, 19, (outs G8RC:$rT), (ins), +} // neverHasSideEffects = 1 + +let neverHasSideEffects = 1 in +def MFCR8 : XFXForm_3<31, 19, (outs g8rc:$rT), (ins), "mfcr $rT", SprMFCR>, PPC970_MicroCode, PPC970_Unit_CRU; +let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in { + def EH_SjLj_SetJmp64 : Pseudo<(outs gprc:$dst), (ins memr:$buf), + "#EH_SJLJ_SETJMP64", + [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, + Requires<[In64BitMode]>; + let isTerminator = 1 in + def EH_SjLj_LongJmp64 : Pseudo<(outs), (ins memr:$buf), + "#EH_SJLJ_LONGJMP64", + [(PPCeh_sjlj_longjmp addr:$buf)]>, + Requires<[In64BitMode]>; +} + //===----------------------------------------------------------------------===// // 64-bit SPR manipulation instrs. 
let Uses = [CTR8] in { -def MFCTR8 : XFXForm_1_ext<31, 339, 9, (outs G8RC:$rT), (ins), +def MFCTR8 : XFXForm_1_ext<31, 339, 9, (outs g8rc:$rT), (ins), "mfctr $rT", SprMFSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } -let Pattern = [(PPCmtctr G8RC:$rS)], Defs = [CTR8] in { -def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins G8RC:$rS), +let Pattern = [(PPCmtctr i64:$rS)], Defs = [CTR8] in { +def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS), "mtctr $rS", SprMTSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } -let Pattern = [(set G8RC:$rT, readcyclecounter)] in -def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs G8RC:$rT), (ins), +let Pattern = [(set i64:$rT, readcyclecounter)] in +def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs g8rc:$rT), (ins), "mfspr $rT, 268", SprMFTB>, PPC970_DGroup_First, PPC970_Unit_FXU; // Note that encoding mftb using mfspr is now the preferred form, @@ -297,248 +300,265 @@ def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs G8RC:$rT), (ins), // the POWER3. let Defs = [X1], Uses = [X1] in -def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),"#DYNALLOC8", - [(set G8RC:$result, - (PPCdynalloc G8RC:$negsize, iaddr:$fpsi))]>; +def DYNALLOC8 : Pseudo<(outs g8rc:$result), (ins g8rc:$negsize, memri:$fpsi),"#DYNALLOC8", + [(set i64:$result, + (PPCdynalloc i64:$negsize, iaddr:$fpsi))]>; let Defs = [LR8] in { -def MTLR8 : XFXForm_7_ext<31, 467, 8, (outs), (ins G8RC:$rS), +def MTLR8 : XFXForm_7_ext<31, 467, 8, (outs), (ins g8rc:$rS), "mtlr $rS", SprMTSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } let Uses = [LR8] in { -def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs G8RC:$rT), (ins), +def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs g8rc:$rT), (ins), "mflr $rT", SprMFSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } +} // Interpretation64Bit //===----------------------------------------------------------------------===// // Fixed point instructions. // let PPC970_Unit = 1 in { // FXU Operations. +let Interpretation64Bit = 1 in { +let neverHasSideEffects = 1 in { let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { -def LI8 : DForm_2_r0<14, (outs G8RC:$rD), (ins symbolLo64:$imm), +def LI8 : DForm_2_r0<14, (outs g8rc:$rD), (ins symbolLo64:$imm), "li $rD, $imm", IntSimple, - [(set G8RC:$rD, immSExt16:$imm)]>; -def LIS8 : DForm_2_r0<15, (outs G8RC:$rD), (ins symbolHi64:$imm), + [(set i64:$rD, immSExt16:$imm)]>; +def LIS8 : DForm_2_r0<15, (outs g8rc:$rD), (ins symbolHi64:$imm), "lis $rD, $imm", IntSimple, - [(set G8RC:$rD, imm16ShiftedSExt:$imm)]>; + [(set i64:$rD, imm16ShiftedSExt:$imm)]>; } // Logical ops. 
-def NAND8: XForm_6<31, 476, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "nand $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (not (and G8RC:$rS, G8RC:$rB)))]>; -def AND8 : XForm_6<31, 28, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "and $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (and G8RC:$rS, G8RC:$rB))]>; -def ANDC8: XForm_6<31, 60, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "andc $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (and G8RC:$rS, (not G8RC:$rB)))]>; -def OR8 : XForm_6<31, 444, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "or $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (or G8RC:$rS, G8RC:$rB))]>; -def NOR8 : XForm_6<31, 124, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "nor $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (not (or G8RC:$rS, G8RC:$rB)))]>; -def ORC8 : XForm_6<31, 412, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "orc $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (or G8RC:$rS, (not G8RC:$rB)))]>; -def EQV8 : XForm_6<31, 284, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "eqv $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (not (xor G8RC:$rS, G8RC:$rB)))]>; -def XOR8 : XForm_6<31, 316, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), - "xor $rA, $rS, $rB", IntSimple, - [(set G8RC:$rA, (xor G8RC:$rS, G8RC:$rB))]>; +defm NAND8: XForm_6r<31, 476, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "nand", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (not (and i64:$rS, i64:$rB)))]>; +defm AND8 : XForm_6r<31, 28, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "and", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (and i64:$rS, i64:$rB))]>; +defm ANDC8: XForm_6r<31, 60, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "andc", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (and i64:$rS, (not i64:$rB)))]>; +defm OR8 : XForm_6r<31, 444, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "or", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (or i64:$rS, i64:$rB))]>; +defm NOR8 : XForm_6r<31, 124, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "nor", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (not (or i64:$rS, i64:$rB)))]>; +defm ORC8 : XForm_6r<31, 412, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "orc", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (or i64:$rS, (not i64:$rB)))]>; +defm EQV8 : XForm_6r<31, 284, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "eqv", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (not (xor i64:$rS, i64:$rB)))]>; +defm XOR8 : XForm_6r<31, 316, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), + "xor", "$rA, $rS, $rB", IntSimple, + [(set i64:$rA, (xor i64:$rS, i64:$rB))]>; // Logical ops with immediate. -def ANDIo8 : DForm_4<28, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2), +let Defs = [CR0] in { +def ANDIo8 : DForm_4<28, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2), "andi. $dst, $src1, $src2", IntGeneral, - [(set G8RC:$dst, (and G8RC:$src1, immZExt16:$src2))]>, + [(set i64:$dst, (and i64:$src1, immZExt16:$src2))]>, isDOT; -def ANDISo8 : DForm_4<29, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2), +def ANDISo8 : DForm_4<29, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2), "andis. 
$dst, $src1, $src2", IntGeneral, - [(set G8RC:$dst, (and G8RC:$src1,imm16ShiftedZExt:$src2))]>, + [(set i64:$dst, (and i64:$src1, imm16ShiftedZExt:$src2))]>, isDOT; -def ORI8 : DForm_4<24, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2), +} +def ORI8 : DForm_4<24, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2), "ori $dst, $src1, $src2", IntSimple, - [(set G8RC:$dst, (or G8RC:$src1, immZExt16:$src2))]>; -def ORIS8 : DForm_4<25, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2), + [(set i64:$dst, (or i64:$src1, immZExt16:$src2))]>; +def ORIS8 : DForm_4<25, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2), "oris $dst, $src1, $src2", IntSimple, - [(set G8RC:$dst, (or G8RC:$src1, imm16ShiftedZExt:$src2))]>; -def XORI8 : DForm_4<26, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2), + [(set i64:$dst, (or i64:$src1, imm16ShiftedZExt:$src2))]>; +def XORI8 : DForm_4<26, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2), "xori $dst, $src1, $src2", IntSimple, - [(set G8RC:$dst, (xor G8RC:$src1, immZExt16:$src2))]>; -def XORIS8 : DForm_4<27, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2), + [(set i64:$dst, (xor i64:$src1, immZExt16:$src2))]>; +def XORIS8 : DForm_4<27, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2), "xoris $dst, $src1, $src2", IntSimple, - [(set G8RC:$dst, (xor G8RC:$src1, imm16ShiftedZExt:$src2))]>; + [(set i64:$dst, (xor i64:$src1, imm16ShiftedZExt:$src2))]>; -def ADD8 : XOForm_1<31, 266, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "add $rT, $rA, $rB", IntSimple, - [(set G8RC:$rT, (add G8RC:$rA, G8RC:$rB))]>; +defm ADD8 : XOForm_1r<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "add", "$rT, $rA, $rB", IntSimple, + [(set i64:$rT, (add i64:$rA, i64:$rB))]>; // ADD8 has a special form: reg = ADD8(reg, sym@tls) for use by the // initial-exec thread-local storage model. 
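
On the initial-exec form mentioned in the comment above: the variable's offset from the thread pointer is a constant filled in at link/load time and fetched from the GOT, and ADD8TLS performs the final add against the thread pointer. A small sketch of that address computation follows; the function and parameter names are stand-ins, and the exact operand roles are hedged since only the patterns are visible in this patch.

#include <cstdint>

// Illustrative only: the address the ADDISgotTprelHA / LDgotTprelL /
// ADD8TLS sequence computes for an initial-exec TLS variable. On PPC64 the
// thread pointer is r13; "gotTprelSlot" stands for the GOT entry holding
// the variable's offset from it.
uint64_t initialExecAddress(uint64_t threadPointer,
                            const uint64_t *gotTprelSlot) {
  uint64_t tpOffset = *gotTprelSlot;   // ld ... @got@tprel (LDgotTprelL)
  return threadPointer + tpOffset;     // add rT, rOff, sym@tls (ADD8TLS)
}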
-def ADD8TLS : XOForm_1<31, 266, 0, (outs G8RC:$rT), (ins G8RC:$rA, tlsreg:$rB), +let isCodeGenOnly = 1 in +def ADD8TLS : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, tlsreg:$rB), "add $rT, $rA, $rB@tls", IntSimple, - [(set G8RC:$rT, (add G8RC:$rA, tglobaltlsaddr:$rB))]>; + [(set i64:$rT, (add i64:$rA, tglobaltlsaddr:$rB))]>; -let Defs = [CARRY] in { -def ADDC8 : XOForm_1<31, 10, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "addc $rT, $rA, $rB", IntGeneral, - [(set G8RC:$rT, (addc G8RC:$rA, G8RC:$rB))]>, - PPC970_DGroup_Cracked; -def ADDIC8 : DForm_2<12, (outs G8RC:$rD), (ins G8RC:$rA, s16imm64:$imm), +defm ADDC8 : XOForm_1rc<31, 10, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "addc", "$rT, $rA, $rB", IntGeneral, + [(set i64:$rT, (addc i64:$rA, i64:$rB))]>, + PPC970_DGroup_Cracked; +let Defs = [CARRY] in +def ADDIC8 : DForm_2<12, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm), "addic $rD, $rA, $imm", IntGeneral, - [(set G8RC:$rD, (addc G8RC:$rA, immSExt16:$imm))]>; -} -def ADDI8 : DForm_2<14, (outs G8RC:$rD), (ins G8RC:$rA, s16imm64:$imm), + [(set i64:$rD, (addc i64:$rA, immSExt16:$imm))]>; +def ADDI8 : DForm_2<14, (outs g8rc:$rD), (ins g8rc_nox0:$rA, symbolLo64:$imm), "addi $rD, $rA, $imm", IntSimple, - [(set G8RC:$rD, (add G8RC:$rA, immSExt16:$imm))]>; -def ADDI8L : DForm_2<14, (outs G8RC:$rD), (ins G8RC:$rA, symbolLo64:$imm), - "addi $rD, $rA, $imm", IntSimple, - [(set G8RC:$rD, (add G8RC:$rA, immSExt16:$imm))]>; -def ADDIS8 : DForm_2<15, (outs G8RC:$rD), (ins G8RC:$rA, symbolHi64:$imm), + [(set i64:$rD, (add i64:$rA, immSExt16:$imm))]>; +def ADDIS8 : DForm_2<15, (outs g8rc:$rD), (ins g8rc_nox0:$rA, symbolHi64:$imm), "addis $rD, $rA, $imm", IntSimple, - [(set G8RC:$rD, (add G8RC:$rA, imm16ShiftedSExt:$imm))]>; + [(set i64:$rD, (add i64:$rA, imm16ShiftedSExt:$imm))]>; let Defs = [CARRY] in { -def SUBFIC8: DForm_2< 8, (outs G8RC:$rD), (ins G8RC:$rA, s16imm64:$imm), +def SUBFIC8: DForm_2< 8, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm), "subfic $rD, $rA, $imm", IntGeneral, - [(set G8RC:$rD, (subc immSExt16:$imm, G8RC:$rA))]>; -def SUBFC8 : XOForm_1<31, 8, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "subfc $rT, $rA, $rB", IntGeneral, - [(set G8RC:$rT, (subc G8RC:$rB, G8RC:$rA))]>, - PPC970_DGroup_Cracked; -} -def SUBF8 : XOForm_1<31, 40, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "subf $rT, $rA, $rB", IntGeneral, - [(set G8RC:$rT, (sub G8RC:$rB, G8RC:$rA))]>; -def NEG8 : XOForm_3<31, 104, 0, (outs G8RC:$rT), (ins G8RC:$rA), - "neg $rT, $rA", IntSimple, - [(set G8RC:$rT, (ineg G8RC:$rA))]>; -let Uses = [CARRY], Defs = [CARRY] in { -def ADDE8 : XOForm_1<31, 138, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "adde $rT, $rA, $rB", IntGeneral, - [(set G8RC:$rT, (adde G8RC:$rA, G8RC:$rB))]>; -def ADDME8 : XOForm_3<31, 234, 0, (outs G8RC:$rT), (ins G8RC:$rA), - "addme $rT, $rA", IntGeneral, - [(set G8RC:$rT, (adde G8RC:$rA, -1))]>; -def ADDZE8 : XOForm_3<31, 202, 0, (outs G8RC:$rT), (ins G8RC:$rA), - "addze $rT, $rA", IntGeneral, - [(set G8RC:$rT, (adde G8RC:$rA, 0))]>; -def SUBFE8 : XOForm_1<31, 136, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "subfe $rT, $rA, $rB", IntGeneral, - [(set G8RC:$rT, (sube G8RC:$rB, G8RC:$rA))]>; -def SUBFME8 : XOForm_3<31, 232, 0, (outs G8RC:$rT), (ins G8RC:$rA), - "subfme $rT, $rA", IntGeneral, - [(set G8RC:$rT, (sube -1, G8RC:$rA))]>; -def SUBFZE8 : XOForm_3<31, 200, 0, (outs G8RC:$rT), (ins G8RC:$rA), - "subfze $rT, $rA", IntGeneral, - [(set G8RC:$rT, (sube 0, G8RC:$rA))]>; -} - - -def MULHD : XOForm_1<31, 73, 0, (outs G8RC:$rT), 
(ins G8RC:$rA, G8RC:$rB), - "mulhd $rT, $rA, $rB", IntMulHW, - [(set G8RC:$rT, (mulhs G8RC:$rA, G8RC:$rB))]>; -def MULHDU : XOForm_1<31, 9, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "mulhdu $rT, $rA, $rB", IntMulHWU, - [(set G8RC:$rT, (mulhu G8RC:$rA, G8RC:$rB))]>; - -def CMPD : XForm_16_ext<31, 0, (outs CRRC:$crD), (ins G8RC:$rA, G8RC:$rB), - "cmpd $crD, $rA, $rB", IntCompare>, isPPC64; -def CMPLD : XForm_16_ext<31, 32, (outs CRRC:$crD), (ins G8RC:$rA, G8RC:$rB), - "cmpld $crD, $rA, $rB", IntCompare>, isPPC64; -def CMPDI : DForm_5_ext<11, (outs CRRC:$crD), (ins G8RC:$rA, s16imm:$imm), - "cmpdi $crD, $rA, $imm", IntCompare>, isPPC64; -def CMPLDI : DForm_6_ext<10, (outs CRRC:$dst), (ins G8RC:$src1, u16imm:$src2), - "cmpldi $dst, $src1, $src2", IntCompare>, isPPC64; - -def SLD : XForm_6<31, 27, (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB), - "sld $rA, $rS, $rB", IntRotateD, - [(set G8RC:$rA, (PPCshl G8RC:$rS, GPRC:$rB))]>, isPPC64; -def SRD : XForm_6<31, 539, (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB), - "srd $rA, $rS, $rB", IntRotateD, - [(set G8RC:$rA, (PPCsrl G8RC:$rS, GPRC:$rB))]>, isPPC64; -let Defs = [CARRY] in { -def SRAD : XForm_6<31, 794, (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB), - "srad $rA, $rS, $rB", IntRotateD, - [(set G8RC:$rA, (PPCsra G8RC:$rS, GPRC:$rB))]>, isPPC64; + [(set i64:$rD, (subc immSExt16:$imm, i64:$rA))]>; +defm SUBFC8 : XOForm_1r<31, 8, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "subfc", "$rT, $rA, $rB", IntGeneral, + [(set i64:$rT, (subc i64:$rB, i64:$rA))]>, + PPC970_DGroup_Cracked; +} +defm SUBF8 : XOForm_1r<31, 40, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "subf", "$rT, $rA, $rB", IntGeneral, + [(set i64:$rT, (sub i64:$rB, i64:$rA))]>; +defm NEG8 : XOForm_3r<31, 104, 0, (outs g8rc:$rT), (ins g8rc:$rA), + "neg", "$rT, $rA", IntSimple, + [(set i64:$rT, (ineg i64:$rA))]>; +let Uses = [CARRY] in { +defm ADDE8 : XOForm_1rc<31, 138, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "adde", "$rT, $rA, $rB", IntGeneral, + [(set i64:$rT, (adde i64:$rA, i64:$rB))]>; +defm ADDME8 : XOForm_3rc<31, 234, 0, (outs g8rc:$rT), (ins g8rc:$rA), + "addme", "$rT, $rA", IntGeneral, + [(set i64:$rT, (adde i64:$rA, -1))]>; +defm ADDZE8 : XOForm_3rc<31, 202, 0, (outs g8rc:$rT), (ins g8rc:$rA), + "addze", "$rT, $rA", IntGeneral, + [(set i64:$rT, (adde i64:$rA, 0))]>; +defm SUBFE8 : XOForm_1rc<31, 136, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "subfe", "$rT, $rA, $rB", IntGeneral, + [(set i64:$rT, (sube i64:$rB, i64:$rA))]>; +defm SUBFME8 : XOForm_3rc<31, 232, 0, (outs g8rc:$rT), (ins g8rc:$rA), + "subfme", "$rT, $rA", IntGeneral, + [(set i64:$rT, (sube -1, i64:$rA))]>; +defm SUBFZE8 : XOForm_3rc<31, 200, 0, (outs g8rc:$rT), (ins g8rc:$rA), + "subfze", "$rT, $rA", IntGeneral, + [(set i64:$rT, (sube 0, i64:$rA))]>; } - -def EXTSB8 : XForm_11<31, 954, (outs G8RC:$rA), (ins G8RC:$rS), - "extsb $rA, $rS", IntSimple, - [(set G8RC:$rA, (sext_inreg G8RC:$rS, i8))]>; -def EXTSH8 : XForm_11<31, 922, (outs G8RC:$rA), (ins G8RC:$rS), - "extsh $rA, $rS", IntSimple, - [(set G8RC:$rA, (sext_inreg G8RC:$rS, i16))]>; - -def EXTSW : XForm_11<31, 986, (outs G8RC:$rA), (ins G8RC:$rS), - "extsw $rA, $rS", IntSimple, - [(set G8RC:$rA, (sext_inreg G8RC:$rS, i32))]>, isPPC64; -/// EXTSW_32 - Just like EXTSW, but works on '32-bit' registers. 
-def EXTSW_32 : XForm_11<31, 986, (outs GPRC:$rA), (ins GPRC:$rS), - "extsw $rA, $rS", IntSimple, - [(set GPRC:$rA, (PPCextsw_32 GPRC:$rS))]>, isPPC64; -def EXTSW_32_64 : XForm_11<31, 986, (outs G8RC:$rA), (ins GPRC:$rS), - "extsw $rA, $rS", IntSimple, - [(set G8RC:$rA, (sext GPRC:$rS))]>, isPPC64; -let Defs = [CARRY] in { -def SRADI : XSForm_1<31, 413, (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH), - "sradi $rA, $rS, $SH", IntRotateDI, - [(set G8RC:$rA, (sra G8RC:$rS, (i32 imm:$SH)))]>, isPPC64; -} -def CNTLZD : XForm_11<31, 58, (outs G8RC:$rA), (ins G8RC:$rS), - "cntlzd $rA, $rS", IntGeneral, - [(set G8RC:$rA, (ctlz G8RC:$rS))]>; - -def DIVD : XOForm_1<31, 489, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "divd $rT, $rA, $rB", IntDivD, - [(set G8RC:$rT, (sdiv G8RC:$rA, G8RC:$rB))]>, isPPC64, - PPC970_DGroup_First, PPC970_DGroup_Cracked; -def DIVDU : XOForm_1<31, 457, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "divdu $rT, $rA, $rB", IntDivD, - [(set G8RC:$rT, (udiv G8RC:$rA, G8RC:$rB))]>, isPPC64, - PPC970_DGroup_First, PPC970_DGroup_Cracked; -def MULLD : XOForm_1<31, 233, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), - "mulld $rT, $rA, $rB", IntMulHD, - [(set G8RC:$rT, (mul G8RC:$rA, G8RC:$rB))]>, isPPC64; +defm MULHD : XOForm_1r<31, 73, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "mulhd", "$rT, $rA, $rB", IntMulHW, + [(set i64:$rT, (mulhs i64:$rA, i64:$rB))]>; +defm MULHDU : XOForm_1r<31, 9, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "mulhdu", "$rT, $rA, $rB", IntMulHWU, + [(set i64:$rT, (mulhu i64:$rA, i64:$rB))]>; +} +} // Interpretation64Bit + +let isCompare = 1, neverHasSideEffects = 1 in { + def CMPD : XForm_16_ext<31, 0, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB), + "cmpd $crD, $rA, $rB", IntCompare>, isPPC64; + def CMPLD : XForm_16_ext<31, 32, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB), + "cmpld $crD, $rA, $rB", IntCompare>, isPPC64; + def CMPDI : DForm_5_ext<11, (outs crrc:$crD), (ins g8rc:$rA, s16imm:$imm), + "cmpdi $crD, $rA, $imm", IntCompare>, isPPC64; + def CMPLDI : DForm_6_ext<10, (outs crrc:$dst), (ins g8rc:$src1, u16imm:$src2), + "cmpldi $dst, $src1, $src2", IntCompare>, isPPC64; +} +let neverHasSideEffects = 1 in { +defm SLD : XForm_6r<31, 27, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB), + "sld", "$rA, $rS, $rB", IntRotateD, + [(set i64:$rA, (PPCshl i64:$rS, i32:$rB))]>, isPPC64; +defm SRD : XForm_6r<31, 539, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB), + "srd", "$rA, $rS, $rB", IntRotateD, + [(set i64:$rA, (PPCsrl i64:$rS, i32:$rB))]>, isPPC64; +defm SRAD : XForm_6rc<31, 794, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB), + "srad", "$rA, $rS, $rB", IntRotateD, + [(set i64:$rA, (PPCsra i64:$rS, i32:$rB))]>, isPPC64; + +let Interpretation64Bit = 1 in { +defm EXTSB8 : XForm_11r<31, 954, (outs g8rc:$rA), (ins g8rc:$rS), + "extsb", "$rA, $rS", IntSimple, + [(set i64:$rA, (sext_inreg i64:$rS, i8))]>; +defm EXTSH8 : XForm_11r<31, 922, (outs g8rc:$rA), (ins g8rc:$rS), + "extsh", "$rA, $rS", IntSimple, + [(set i64:$rA, (sext_inreg i64:$rS, i16))]>; +} // Interpretation64Bit + +defm EXTSW : XForm_11r<31, 986, (outs g8rc:$rA), (ins g8rc:$rS), + "extsw", "$rA, $rS", IntSimple, + [(set i64:$rA, (sext_inreg i64:$rS, i32))]>, isPPC64; +let Interpretation64Bit = 1 in +defm EXTSW_32_64 : XForm_11r<31, 986, (outs g8rc:$rA), (ins gprc:$rS), + "extsw", "$rA, $rS", IntSimple, + [(set i64:$rA, (sext i32:$rS))]>, isPPC64; + +defm SRADI : XSForm_1rc<31, 413, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH), + "sradi", "$rA, $rS, $SH", IntRotateDI, + [(set i64:$rA, (sra i64:$rS, (i32 
imm:$SH)))]>, isPPC64; +defm CNTLZD : XForm_11r<31, 58, (outs g8rc:$rA), (ins g8rc:$rS), + "cntlzd", "$rA, $rS", IntGeneral, + [(set i64:$rA, (ctlz i64:$rS))]>; +defm POPCNTD : XForm_11r<31, 506, (outs g8rc:$rA), (ins g8rc:$rS), + "popcntd", "$rA, $rS", IntGeneral, + [(set i64:$rA, (ctpop i64:$rS))]>; + +// popcntw also does a population count on the high 32 bits (storing the +// results in the high 32-bits of the output). We'll ignore that here (which is +// safe because we never separately use the high part of the 64-bit registers). +defm POPCNTW : XForm_11r<31, 378, (outs gprc:$rA), (ins gprc:$rS), + "popcntw", "$rA, $rS", IntGeneral, + [(set i32:$rA, (ctpop i32:$rS))]>; + +defm DIVD : XOForm_1r<31, 489, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "divd", "$rT, $rA, $rB", IntDivD, + [(set i64:$rT, (sdiv i64:$rA, i64:$rB))]>, isPPC64, + PPC970_DGroup_First, PPC970_DGroup_Cracked; +defm DIVDU : XOForm_1r<31, 457, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "divdu", "$rT, $rA, $rB", IntDivD, + [(set i64:$rT, (udiv i64:$rA, i64:$rB))]>, isPPC64, + PPC970_DGroup_First, PPC970_DGroup_Cracked; +defm MULLD : XOForm_1r<31, 233, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB), + "mulld", "$rT, $rA, $rB", IntMulHD, + [(set i64:$rT, (mul i64:$rA, i64:$rB))]>, isPPC64; +} + +let neverHasSideEffects = 1 in { let isCommutable = 1 in { -def RLDIMI : MDForm_1<30, 3, - (outs G8RC:$rA), (ins G8RC:$rSi, G8RC:$rS, u6imm:$SH, u6imm:$MB), - "rldimi $rA, $rS, $SH, $MB", IntRotateDI, - []>, isPPC64, RegConstraint<"$rSi = $rA">, - NoEncode<"$rSi">; +defm RLDIMI : MDForm_1r<30, 3, (outs g8rc:$rA), + (ins g8rc:$rSi, g8rc:$rS, u6imm:$SH, u6imm:$MBE), + "rldimi", "$rA, $rS, $SH, $MBE", IntRotateDI, + []>, isPPC64, RegConstraint<"$rSi = $rA">, + NoEncode<"$rSi">; } // Rotate instructions. -def RLDCL : MDForm_1<30, 0, - (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB, u6imm:$MBE), - "rldcl $rA, $rS, $rB, $MBE", IntRotateD, - []>, isPPC64; -def RLDICL : MDForm_1<30, 0, - (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MBE), - "rldicl $rA, $rS, $SH, $MBE", IntRotateDI, - []>, isPPC64; -def RLDICR : MDForm_1<30, 1, - (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MBE), - "rldicr $rA, $rS, $SH, $MBE", IntRotateDI, - []>, isPPC64; - -def RLWINM8 : MForm_2<21, - (outs G8RC:$rA), (ins G8RC:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME), - "rlwinm $rA, $rS, $SH, $MB, $ME", IntGeneral, - []>; - +defm RLDCL : MDSForm_1r<30, 8, + (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE), + "rldcl", "$rA, $rS, $rB, $MBE", IntRotateD, + []>, isPPC64; +defm RLDICL : MDForm_1r<30, 0, + (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE), + "rldicl", "$rA, $rS, $SH, $MBE", IntRotateDI, + []>, isPPC64; +defm RLDICR : MDForm_1r<30, 1, + (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE), + "rldicr", "$rA, $rS, $SH, $MBE", IntRotateDI, + []>, isPPC64; + +let Interpretation64Bit = 1 in { +defm RLWINM8 : MForm_2r<21, (outs g8rc:$rA), + (ins g8rc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME), + "rlwinm", "$rA, $rS, $SH, $MB, $ME", IntGeneral, + []>; + +let isSelect = 1 in def ISEL8 : AForm_4<31, 15, - (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB, pred:$cond), + (outs g8rc:$rT), (ins g8rc_nox0:$rA, g8rc:$rB, crbitrc:$cond), "isel $rT, $rA, $rB, $cond", IntGeneral, []>; +} // Interpretation64Bit +} // neverHasSideEffects = 1 } // End FXU Operations. @@ -549,157 +569,161 @@ def ISEL8 : AForm_4<31, 15, // Sign extending loads. 
let canFoldAsLoad = 1, PPC970_Unit = 2 in { -def LHA8: DForm_1<42, (outs G8RC:$rD), (ins memri:$src), +let Interpretation64Bit = 1 in +def LHA8: DForm_1<42, (outs g8rc:$rD), (ins memri:$src), "lha $rD, $src", LdStLHA, - [(set G8RC:$rD, (sextloadi16 iaddr:$src))]>, + [(set i64:$rD, (sextloadi16 iaddr:$src))]>, PPC970_DGroup_Cracked; -def LWA : DSForm_1<58, 2, (outs G8RC:$rD), (ins memrix:$src), +def LWA : DSForm_1<58, 2, (outs g8rc:$rD), (ins memrix:$src), "lwa $rD, $src", LdStLWA, - [(set G8RC:$rD, + [(set i64:$rD, (aligned4sextloadi32 ixaddr:$src))]>, isPPC64, PPC970_DGroup_Cracked; -def LHAX8: XForm_1<31, 343, (outs G8RC:$rD), (ins memrr:$src), +let Interpretation64Bit = 1 in +def LHAX8: XForm_1<31, 343, (outs g8rc:$rD), (ins memrr:$src), "lhax $rD, $src", LdStLHA, - [(set G8RC:$rD, (sextloadi16 xaddr:$src))]>, + [(set i64:$rD, (sextloadi16 xaddr:$src))]>, PPC970_DGroup_Cracked; -def LWAX : XForm_1<31, 341, (outs G8RC:$rD), (ins memrr:$src), +def LWAX : XForm_1<31, 341, (outs g8rc:$rD), (ins memrr:$src), "lwax $rD, $src", LdStLHA, - [(set G8RC:$rD, (sextloadi32 xaddr:$src))]>, isPPC64, + [(set i64:$rD, (sextloadi32 xaddr:$src))]>, isPPC64, PPC970_DGroup_Cracked; // Update forms. -let mayLoad = 1 in -def LHAU8 : DForm_1a<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp, - ptr_rc:$rA), - "lhau $rD, $disp($rA)", LdStLHAU, - []>, RegConstraint<"$rA = $ea_result">, +let mayLoad = 1, neverHasSideEffects = 1 in { +let Interpretation64Bit = 1 in +def LHAU8 : DForm_1<43, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), + (ins memri:$addr), + "lhau $rD, $addr", LdStLHAU, + []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; // NO LWAU! -def LHAUX8 : XForm_1<31, 375, (outs G8RC:$rD, ptr_rc:$ea_result), +let Interpretation64Bit = 1 in +def LHAUX8 : XForm_1<31, 375, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lhaux $rD, $addr", LdStLHAU, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LWAUX : XForm_1<31, 373, (outs G8RC:$rD, ptr_rc:$ea_result), +def LWAUX : XForm_1<31, 373, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lwaux $rD, $addr", LdStLHAU, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">, isPPC64; } +} +let Interpretation64Bit = 1 in { // Zero extending loads. 
let canFoldAsLoad = 1, PPC970_Unit = 2 in { -def LBZ8 : DForm_1<34, (outs G8RC:$rD), (ins memri:$src), +def LBZ8 : DForm_1<34, (outs g8rc:$rD), (ins memri:$src), "lbz $rD, $src", LdStLoad, - [(set G8RC:$rD, (zextloadi8 iaddr:$src))]>; -def LHZ8 : DForm_1<40, (outs G8RC:$rD), (ins memri:$src), + [(set i64:$rD, (zextloadi8 iaddr:$src))]>; +def LHZ8 : DForm_1<40, (outs g8rc:$rD), (ins memri:$src), "lhz $rD, $src", LdStLoad, - [(set G8RC:$rD, (zextloadi16 iaddr:$src))]>; -def LWZ8 : DForm_1<32, (outs G8RC:$rD), (ins memri:$src), + [(set i64:$rD, (zextloadi16 iaddr:$src))]>; +def LWZ8 : DForm_1<32, (outs g8rc:$rD), (ins memri:$src), "lwz $rD, $src", LdStLoad, - [(set G8RC:$rD, (zextloadi32 iaddr:$src))]>, isPPC64; + [(set i64:$rD, (zextloadi32 iaddr:$src))]>, isPPC64; -def LBZX8 : XForm_1<31, 87, (outs G8RC:$rD), (ins memrr:$src), +def LBZX8 : XForm_1<31, 87, (outs g8rc:$rD), (ins memrr:$src), "lbzx $rD, $src", LdStLoad, - [(set G8RC:$rD, (zextloadi8 xaddr:$src))]>; -def LHZX8 : XForm_1<31, 279, (outs G8RC:$rD), (ins memrr:$src), + [(set i64:$rD, (zextloadi8 xaddr:$src))]>; +def LHZX8 : XForm_1<31, 279, (outs g8rc:$rD), (ins memrr:$src), "lhzx $rD, $src", LdStLoad, - [(set G8RC:$rD, (zextloadi16 xaddr:$src))]>; -def LWZX8 : XForm_1<31, 23, (outs G8RC:$rD), (ins memrr:$src), + [(set i64:$rD, (zextloadi16 xaddr:$src))]>; +def LWZX8 : XForm_1<31, 23, (outs g8rc:$rD), (ins memrr:$src), "lwzx $rD, $src", LdStLoad, - [(set G8RC:$rD, (zextloadi32 xaddr:$src))]>; + [(set i64:$rD, (zextloadi32 xaddr:$src))]>; // Update forms. -let mayLoad = 1 in { -def LBZU8 : DForm_1<35, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +let mayLoad = 1, neverHasSideEffects = 1 in { +def LBZU8 : DForm_1<35, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lbzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LHZU8 : DForm_1<41, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +def LHZU8 : DForm_1<41, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lhzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LWZU8 : DForm_1<33, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +def LWZU8 : DForm_1<33, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lwzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LBZUX8 : XForm_1<31, 119, (outs G8RC:$rD, ptr_rc:$ea_result), +def LBZUX8 : XForm_1<31, 119, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lbzux $rD, $addr", LdStLoadUpd, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LHZUX8 : XForm_1<31, 311, (outs G8RC:$rD, ptr_rc:$ea_result), +def LHZUX8 : XForm_1<31, 311, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lhzux $rD, $addr", LdStLoadUpd, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LWZUX8 : XForm_1<31, 55, (outs G8RC:$rD, ptr_rc:$ea_result), +def LWZUX8 : XForm_1<31, 55, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lwzux $rD, $addr", LdStLoadUpd, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; } } +} // Interpretation64Bit // Full 8-byte loads. 
let canFoldAsLoad = 1, PPC970_Unit = 2 in { -def LD : DSForm_1<58, 0, (outs G8RC:$rD), (ins memrix:$src), +def LD : DSForm_1<58, 0, (outs g8rc:$rD), (ins memrix:$src), "ld $rD, $src", LdStLD, - [(set G8RC:$rD, (aligned4load ixaddr:$src))]>, isPPC64; -def LDrs : DSForm_1<58, 0, (outs G8RC:$rD), (ins memrs:$src), - "ld $rD, $src", LdStLD, - []>, isPPC64; + [(set i64:$rD, (aligned4load ixaddr:$src))]>, isPPC64; // The following three definitions are selected for small code model only. // Otherwise, we need to create two instructions to form a 32-bit offset, // so we have a custom matcher for TOC_ENTRY in PPCDAGToDAGIsel::Select(). -def LDtoc: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), +def LDtoc: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg), "#LDtoc", - [(set G8RC:$rD, - (PPCtoc_entry tglobaladdr:$disp, G8RC:$reg))]>, isPPC64; -def LDtocJTI: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), + [(set i64:$rD, + (PPCtoc_entry tglobaladdr:$disp, i64:$reg))]>, isPPC64; +def LDtocJTI: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg), "#LDtocJTI", - [(set G8RC:$rD, - (PPCtoc_entry tjumptable:$disp, G8RC:$reg))]>, isPPC64; -def LDtocCPT: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), + [(set i64:$rD, + (PPCtoc_entry tjumptable:$disp, i64:$reg))]>, isPPC64; +def LDtocCPT: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg), "#LDtocCPT", - [(set G8RC:$rD, - (PPCtoc_entry tconstpool:$disp, G8RC:$reg))]>, isPPC64; + [(set i64:$rD, + (PPCtoc_entry tconstpool:$disp, i64:$reg))]>, isPPC64; -let hasSideEffects = 1 in { +let hasSideEffects = 1, isCodeGenOnly = 1 in { let RST = 2, DS = 2 in -def LDinto_toc: DSForm_1a<58, 0, (outs), (ins G8RC:$reg), +def LDinto_toc: DSForm_1a<58, 0, (outs), (ins g8rc:$reg), "ld 2, 8($reg)", LdStLD, - [(PPCload_toc G8RC:$reg)]>, isPPC64; + [(PPCload_toc i64:$reg)]>, isPPC64; let RST = 2, DS = 10, RA = 1 in def LDtoc_restore : DSForm_1a<58, 0, (outs), (ins), "ld 2, 40(1)", LdStLD, [(PPCtoc_restore)]>, isPPC64; } -def LDX : XForm_1<31, 21, (outs G8RC:$rD), (ins memrr:$src), +def LDX : XForm_1<31, 21, (outs g8rc:$rD), (ins memrr:$src), "ldx $rD, $src", LdStLD, - [(set G8RC:$rD, (load xaddr:$src))]>, isPPC64; -let isCodeGenOnly = 1 in -def LDXu : XForm_1<31, 21, (outs G8RC:$rD), (ins memrr:$src), - "ldx $rD, $src", LdStLD, - [(set G8RC:$rD, (load xaddr:$src))]>, isPPC64; - -let mayLoad = 1 in -def LDU : DSForm_1<58, 1, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrix:$addr), + [(set i64:$rD, (load xaddr:$src))]>, isPPC64; +def LDBRX : XForm_1<31, 532, (outs g8rc:$rD), (ins memrr:$src), + "ldbrx $rD, $src", LdStLoad, + [(set i64:$rD, (PPClbrx xoaddr:$src, i64))]>, isPPC64; + +let mayLoad = 1, neverHasSideEffects = 1 in { +def LDU : DSForm_1<58, 1, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrix:$addr), "ldu $rD, $addr", LdStLDU, []>, RegConstraint<"$addr.reg = $ea_result">, isPPC64, NoEncode<"$ea_result">; -def LDUX : XForm_1<31, 53, (outs G8RC:$rD, ptr_rc:$ea_result), +def LDUX : XForm_1<31, 53, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "ldux $rD, $addr", LdStLDU, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">, isPPC64; } +} def : Pat<(PPCload ixaddr:$src), (LD ixaddr:$src)>; @@ -707,189 +731,173 @@ def : Pat<(PPCload xaddr:$src), (LDX xaddr:$src)>; // Support for medium and large code model. 
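
On the "two instructions to form a 32-bit offset" mentioned just above: ADDIStocHA materializes the high-adjusted half of a TOC-relative offset and LDtocL/ADDItocL supply the sign-extended low half. The split works because the high part is rounded up by 0x8000 to compensate for the low part being sign-extended when added back. A self-contained sketch of that arithmetic, with illustrative offsets only:

#include <cassert>
#include <cstdint>

// How a 32-bit offset is split for an addis/ld (or addis/addi) pair.
int32_t ha16(int32_t off) { return (off + 0x8000) >> 16; }    // addis part
int16_t lo16(int32_t off) { return (int16_t)(off & 0xFFFF); } // ld/addi part

int main() {
  for (int32_t off : {0x12345678, -0x1234, 0x0000FFFF}) {
    int64_t rebuilt = ((int64_t)ha16(off) << 16) + lo16(off);
    assert(rebuilt == off);  // high-adjusted + sign-extended low == original
  }
  return 0;
}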
-def ADDIStocHA: Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, tocentry:$disp), +def ADDIStocHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, tocentry:$disp), "#ADDIStocHA", - [(set G8RC:$rD, - (PPCaddisTocHA G8RC:$reg, tglobaladdr:$disp))]>, + [(set i64:$rD, + (PPCaddisTocHA i64:$reg, tglobaladdr:$disp))]>, isPPC64; -def LDtocL: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), +def LDtocL: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc_nox0:$reg), "#LDtocL", - [(set G8RC:$rD, - (PPCldTocL tglobaladdr:$disp, G8RC:$reg))]>, isPPC64; -def ADDItocL: Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, tocentry:$disp), + [(set i64:$rD, + (PPCldTocL tglobaladdr:$disp, i64:$reg))]>, isPPC64; +def ADDItocL: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, tocentry:$disp), "#ADDItocL", - [(set G8RC:$rD, - (PPCaddiTocL G8RC:$reg, tglobaladdr:$disp))]>, isPPC64; + [(set i64:$rD, + (PPCaddiTocL i64:$reg, tglobaladdr:$disp))]>, isPPC64; // Support for thread-local storage. -def ADDISgotTprelHA: Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, symbolHi64:$disp), +def ADDISgotTprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp), "#ADDISgotTprelHA", - [(set G8RC:$rD, - (PPCaddisGotTprelHA G8RC:$reg, + [(set i64:$rD, + (PPCaddisGotTprelHA i64:$reg, tglobaltlsaddr:$disp))]>, isPPC64; -def LDgotTprelL: Pseudo<(outs G8RC:$rD), (ins symbolLo64:$disp, G8RC:$reg), +def LDgotTprelL: Pseudo<(outs g8rc:$rD), (ins symbolLo64:$disp, g8rc_nox0:$reg), "#LDgotTprelL", - [(set G8RC:$rD, - (PPCldGotTprelL tglobaltlsaddr:$disp, G8RC:$reg))]>, + [(set i64:$rD, + (PPCldGotTprelL tglobaltlsaddr:$disp, i64:$reg))]>, isPPC64; -def : Pat<(PPCaddTls G8RC:$in, tglobaltlsaddr:$g), - (ADD8TLS G8RC:$in, tglobaltlsaddr:$g)>; -def ADDIStlsgdHA: Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, symbolHi64:$disp), +def : Pat<(PPCaddTls i64:$in, tglobaltlsaddr:$g), + (ADD8TLS $in, tglobaltlsaddr:$g)>; +def ADDIStlsgdHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp), "#ADDIStlsgdHA", - [(set G8RC:$rD, - (PPCaddisTlsgdHA G8RC:$reg, tglobaltlsaddr:$disp))]>, + [(set i64:$rD, + (PPCaddisTlsgdHA i64:$reg, tglobaltlsaddr:$disp))]>, isPPC64; -def ADDItlsgdL : Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, symbolLo64:$disp), +def ADDItlsgdL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolLo64:$disp), "#ADDItlsgdL", - [(set G8RC:$rD, - (PPCaddiTlsgdL G8RC:$reg, tglobaltlsaddr:$disp))]>, + [(set i64:$rD, + (PPCaddiTlsgdL i64:$reg, tglobaltlsaddr:$disp))]>, isPPC64; -def GETtlsADDR : Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, tlsgd:$sym), +def GETtlsADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym), "#GETtlsADDR", - [(set G8RC:$rD, - (PPCgetTlsAddr G8RC:$reg, tglobaltlsaddr:$sym))]>, + [(set i64:$rD, + (PPCgetTlsAddr i64:$reg, tglobaltlsaddr:$sym))]>, isPPC64; -def ADDIStlsldHA: Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, symbolHi64:$disp), +def ADDIStlsldHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp), "#ADDIStlsldHA", - [(set G8RC:$rD, - (PPCaddisTlsldHA G8RC:$reg, tglobaltlsaddr:$disp))]>, + [(set i64:$rD, + (PPCaddisTlsldHA i64:$reg, tglobaltlsaddr:$disp))]>, isPPC64; -def ADDItlsldL : Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, symbolLo64:$disp), +def ADDItlsldL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolLo64:$disp), "#ADDItlsldL", - [(set G8RC:$rD, - (PPCaddiTlsldL G8RC:$reg, tglobaltlsaddr:$disp))]>, + [(set i64:$rD, + (PPCaddiTlsldL i64:$reg, tglobaltlsaddr:$disp))]>, isPPC64; -def GETtlsldADDR : Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, tlsgd:$sym), +def GETtlsldADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, 
tlsgd:$sym), "#GETtlsldADDR", - [(set G8RC:$rD, - (PPCgetTlsldAddr G8RC:$reg, tglobaltlsaddr:$sym))]>, + [(set i64:$rD, + (PPCgetTlsldAddr i64:$reg, tglobaltlsaddr:$sym))]>, isPPC64; -def ADDISdtprelHA: Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, symbolHi64:$disp), +def ADDISdtprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolHi64:$disp), "#ADDISdtprelHA", - [(set G8RC:$rD, - (PPCaddisDtprelHA G8RC:$reg, + [(set i64:$rD, + (PPCaddisDtprelHA i64:$reg, tglobaltlsaddr:$disp))]>, isPPC64; -def ADDIdtprelL : Pseudo<(outs G8RC:$rD), (ins G8RC:$reg, symbolLo64:$disp), +def ADDIdtprelL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, symbolLo64:$disp), "#ADDIdtprelL", - [(set G8RC:$rD, - (PPCaddiDtprelL G8RC:$reg, tglobaltlsaddr:$disp))]>, + [(set i64:$rD, + (PPCaddiDtprelL i64:$reg, tglobaltlsaddr:$disp))]>, isPPC64; let PPC970_Unit = 2 in { +let Interpretation64Bit = 1 in { // Truncating stores. -def STB8 : DForm_1<38, (outs), (ins G8RC:$rS, memri:$src), +def STB8 : DForm_1<38, (outs), (ins g8rc:$rS, memri:$src), "stb $rS, $src", LdStStore, - [(truncstorei8 G8RC:$rS, iaddr:$src)]>; -def STH8 : DForm_1<44, (outs), (ins G8RC:$rS, memri:$src), + [(truncstorei8 i64:$rS, iaddr:$src)]>; +def STH8 : DForm_1<44, (outs), (ins g8rc:$rS, memri:$src), "sth $rS, $src", LdStStore, - [(truncstorei16 G8RC:$rS, iaddr:$src)]>; -def STW8 : DForm_1<36, (outs), (ins G8RC:$rS, memri:$src), + [(truncstorei16 i64:$rS, iaddr:$src)]>; +def STW8 : DForm_1<36, (outs), (ins g8rc:$rS, memri:$src), "stw $rS, $src", LdStStore, - [(truncstorei32 G8RC:$rS, iaddr:$src)]>; -def STBX8 : XForm_8<31, 215, (outs), (ins G8RC:$rS, memrr:$dst), + [(truncstorei32 i64:$rS, iaddr:$src)]>; +def STBX8 : XForm_8<31, 215, (outs), (ins g8rc:$rS, memrr:$dst), "stbx $rS, $dst", LdStStore, - [(truncstorei8 G8RC:$rS, xaddr:$dst)]>, + [(truncstorei8 i64:$rS, xaddr:$dst)]>, PPC970_DGroup_Cracked; -def STHX8 : XForm_8<31, 407, (outs), (ins G8RC:$rS, memrr:$dst), +def STHX8 : XForm_8<31, 407, (outs), (ins g8rc:$rS, memrr:$dst), "sthx $rS, $dst", LdStStore, - [(truncstorei16 G8RC:$rS, xaddr:$dst)]>, + [(truncstorei16 i64:$rS, xaddr:$dst)]>, PPC970_DGroup_Cracked; -def STWX8 : XForm_8<31, 151, (outs), (ins G8RC:$rS, memrr:$dst), +def STWX8 : XForm_8<31, 151, (outs), (ins g8rc:$rS, memrr:$dst), "stwx $rS, $dst", LdStStore, - [(truncstorei32 G8RC:$rS, xaddr:$dst)]>, + [(truncstorei32 i64:$rS, xaddr:$dst)]>, PPC970_DGroup_Cracked; +} // Interpretation64Bit + // Normal 8-byte stores. -def STD : DSForm_1<62, 0, (outs), (ins G8RC:$rS, memrix:$dst), +def STD : DSForm_1<62, 0, (outs), (ins g8rc:$rS, memrix:$dst), "std $rS, $dst", LdStSTD, - [(aligned4store G8RC:$rS, ixaddr:$dst)]>, isPPC64; -def STDX : XForm_8<31, 149, (outs), (ins G8RC:$rS, memrr:$dst), + [(aligned4store i64:$rS, ixaddr:$dst)]>, isPPC64; +def STDX : XForm_8<31, 149, (outs), (ins g8rc:$rS, memrr:$dst), "stdx $rS, $dst", LdStSTD, - [(store G8RC:$rS, xaddr:$dst)]>, isPPC64, + [(store i64:$rS, xaddr:$dst)]>, isPPC64, + PPC970_DGroup_Cracked; +def STDBRX: XForm_8<31, 660, (outs), (ins g8rc:$rS, memrr:$dst), + "stdbrx $rS, $dst", LdStStore, + [(PPCstbrx i64:$rS, xoaddr:$dst, i64)]>, isPPC64, PPC970_DGroup_Cracked; } -let PPC970_Unit = 2 in { +// Stores with Update (pre-inc). 
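// The update-form stores defined below return the computed effective address
// through $ea_res, which RegConstraint ties back to the incoming base
// register; the matching patterns are kept separate further down because
// ISel wants the base and offset as distinct operands rather than one
// complex memory operand. As a rough sketch (operand order as in the Pat
// results below, register names illustrative only), a pre-incremented store
// such as
//
//   *(p + 8) = x; p += 8;
//
// would be selected as something like (STDU $x, 8, $p), with the updated
// value of p coming back through the instruction's $ea_res result.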
+let PPC970_Unit = 2, mayStore = 1 in { +let Interpretation64Bit = 1 in { +def STBU8 : DForm_1<39, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst), + "stbu $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; +def STHU8 : DForm_1<45, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst), + "sthu $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; +def STWU8 : DForm_1<37, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst), + "stwu $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; +def STDU : DSForm_1<62, 1, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrix:$dst), + "stdu $rS, $dst", LdStSTDU, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">, + isPPC64; -def STBU8 : DForm_1a<39, (outs ptr_rc:$ea_res), (ins G8RC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "stbu $rS, $ptroff($ptrreg)", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti8 G8RC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; -def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "sthu $rS, $ptroff($ptrreg)", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti16 G8RC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; - -def STWU8 : DForm_1a<37, (outs ptr_rc:$ea_res), (ins G8RC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "stwu $rS, $ptroff($ptrreg)", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti32 G8RC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; - -def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS, - s16immX4:$ptroff, ptr_rc:$ptrreg), - "stdu $rS, $ptroff($ptrreg)", LdStSTDU, - [(set ptr_rc:$ea_res, - (aligned4pre_store G8RC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">, - isPPC64; - - -def STBUX8 : XForm_8<31, 247, (outs ptr_rc:$ea_res), - (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stbux $rS, $ptroff, $ptrreg", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti8 G8RC:$rS, - ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, +def STBUX8: XForm_8<31, 247, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst), + "stbux $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, PPC970_DGroup_Cracked; - -def STHUX8 : XForm_8<31, 439, (outs ptr_rc:$ea_res), - (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "sthux $rS, $ptroff, $ptrreg", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti16 G8RC:$rS, - ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, +def STHUX8: XForm_8<31, 439, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst), + "sthux $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, PPC970_DGroup_Cracked; - -def STWUX8 : XForm_8<31, 183, (outs ptr_rc:$ea_res), - (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stwux $rS, $ptroff, $ptrreg", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti32 G8RC:$rS, - ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, +def STWUX8: XForm_8<31, 183, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst), + "stwux $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, 
PPC970_DGroup_Cracked; +} // Interpretation64Bit -def STDUX : XForm_8<31, 181, (outs ptr_rc:$ea_res), - (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stdux $rS, $ptroff, $ptrreg", LdStSTDU, - [(set ptr_rc:$ea_res, - (pre_store G8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, +def STDUX : XForm_8<31, 181, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst), + "stdux $rS, $dst", LdStSTDU, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, PPC970_DGroup_Cracked, isPPC64; - -// STD_32/STDX_32 - Just like STD/STDX, but uses a '32-bit' input register. -def STD_32 : DSForm_1<62, 0, (outs), (ins GPRC:$rT, memrix:$dst), - "std $rT, $dst", LdStSTD, - [(PPCstd_32 GPRC:$rT, ixaddr:$dst)]>, isPPC64; -def STDX_32 : XForm_8<31, 149, (outs), (ins GPRC:$rT, memrr:$dst), - "stdx $rT, $dst", LdStSTD, - [(PPCstd_32 GPRC:$rT, xaddr:$dst)]>, isPPC64, - PPC970_DGroup_Cracked; } +// Patterns to match the pre-inc stores. We can't put the patterns on +// the instruction definitions directly as ISel wants the address base +// and offset to be separate operands, not a single complex operand. +def : Pat<(pre_truncsti8 i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STBU8 $rS, iaddroff:$ptroff, $ptrreg)>; +def : Pat<(pre_truncsti16 i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STHU8 $rS, iaddroff:$ptroff, $ptrreg)>; +def : Pat<(pre_truncsti32 i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STWU8 $rS, iaddroff:$ptroff, $ptrreg)>; +def : Pat<(aligned4pre_store i64:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STDU $rS, iaddroff:$ptroff, $ptrreg)>; + +def : Pat<(pre_truncsti8 i64:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STBUX8 $rS, $ptrreg, $ptroff)>; +def : Pat<(pre_truncsti16 i64:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STHUX8 $rS, $ptrreg, $ptroff)>; +def : Pat<(pre_truncsti32 i64:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STWUX8 $rS, $ptrreg, $ptroff)>; +def : Pat<(pre_store i64:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STDUX $rS, $ptrreg, $ptroff)>; //===----------------------------------------------------------------------===// @@ -897,13 +905,30 @@ def STDX_32 : XForm_8<31, 149, (outs), (ins GPRC:$rT, memrr:$dst), // -let PPC970_Unit = 3, Uses = [RM] in { // FPU Operations. -def FCFID : XForm_26<63, 846, (outs F8RC:$frD), (ins F8RC:$frB), - "fcfid $frD, $frB", FPGeneral, - [(set F8RC:$frD, (PPCfcfid F8RC:$frB))]>, isPPC64; -def FCTIDZ : XForm_26<63, 815, (outs F8RC:$frD), (ins F8RC:$frB), - "fctidz $frD, $frB", FPGeneral, - [(set F8RC:$frD, (PPCfctidz F8RC:$frB))]>, isPPC64; +let PPC970_Unit = 3, neverHasSideEffects = 1, + Uses = [RM] in { // FPU Operations. 
+defm FCFID : XForm_26r<63, 846, (outs f8rc:$frD), (ins f8rc:$frB), + "fcfid", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfcfid f64:$frB))]>, isPPC64; +defm FCTIDZ : XForm_26r<63, 815, (outs f8rc:$frD), (ins f8rc:$frB), + "fctidz", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfctidz f64:$frB))]>, isPPC64; + +defm FCFIDU : XForm_26r<63, 974, (outs f8rc:$frD), (ins f8rc:$frB), + "fcfidu", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfcfidu f64:$frB))]>, isPPC64; +defm FCFIDS : XForm_26r<59, 846, (outs f4rc:$frD), (ins f8rc:$frB), + "fcfids", "$frD, $frB", FPGeneral, + [(set f32:$frD, (PPCfcfids f64:$frB))]>, isPPC64; +defm FCFIDUS : XForm_26r<59, 974, (outs f4rc:$frD), (ins f8rc:$frB), + "fcfidus", "$frD, $frB", FPGeneral, + [(set f32:$frD, (PPCfcfidus f64:$frB))]>, isPPC64; +defm FCTIDUZ : XForm_26r<63, 943, (outs f8rc:$frD), (ins f8rc:$frB), + "fctiduz", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfctiduz f64:$frB))]>, isPPC64; +defm FCTIWUZ : XForm_26r<63, 143, (outs f8rc:$frD), (ins f8rc:$frB), + "fctiwuz", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfctiwuz f64:$frB))]>, isPPC64; } @@ -912,13 +937,13 @@ def FCTIDZ : XForm_26<63, 815, (outs F8RC:$frD), (ins F8RC:$frB), // // Extensions and truncates to/from 32-bit regs. -def : Pat<(i64 (zext GPRC:$in)), - (RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPRC:$in, sub_32), +def : Pat<(i64 (zext i32:$in)), + (RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32), 0, 32)>; -def : Pat<(i64 (anyext GPRC:$in)), - (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPRC:$in, sub_32)>; -def : Pat<(i32 (trunc G8RC:$in)), - (EXTRACT_SUBREG G8RC:$in, sub_32)>; +def : Pat<(i64 (anyext i32:$in)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32)>; +def : Pat<(i32 (trunc i64:$in)), + (EXTRACT_SUBREG $in, sub_32)>; // Extending loads with i64 targets. def : Pat<(zextloadi1 iaddr:$src), @@ -945,24 +970,24 @@ def : Pat<(extloadi32 xaddr:$src), // Standard shifts. These are represented separately from the real shifts above // so that we can distinguish between shifts that allow 6-bit and 7-bit shift // amounts. -def : Pat<(sra G8RC:$rS, GPRC:$rB), - (SRAD G8RC:$rS, GPRC:$rB)>; -def : Pat<(srl G8RC:$rS, GPRC:$rB), - (SRD G8RC:$rS, GPRC:$rB)>; -def : Pat<(shl G8RC:$rS, GPRC:$rB), - (SLD G8RC:$rS, GPRC:$rB)>; +def : Pat<(sra i64:$rS, i32:$rB), + (SRAD $rS, $rB)>; +def : Pat<(srl i64:$rS, i32:$rB), + (SRD $rS, $rB)>; +def : Pat<(shl i64:$rS, i32:$rB), + (SLD $rS, $rB)>; // SHL/SRL -def : Pat<(shl G8RC:$in, (i32 imm:$imm)), - (RLDICR G8RC:$in, imm:$imm, (SHL64 imm:$imm))>; -def : Pat<(srl G8RC:$in, (i32 imm:$imm)), - (RLDICL G8RC:$in, (SRL64 imm:$imm), imm:$imm)>; +def : Pat<(shl i64:$in, (i32 imm:$imm)), + (RLDICR $in, imm:$imm, (SHL64 imm:$imm))>; +def : Pat<(srl i64:$in, (i32 imm:$imm)), + (RLDICL $in, (SRL64 imm:$imm), imm:$imm)>; // ROTL -def : Pat<(rotl G8RC:$in, GPRC:$sh), - (RLDCL G8RC:$in, GPRC:$sh, 0)>; -def : Pat<(rotl G8RC:$in, (i32 imm:$imm)), - (RLDICL G8RC:$in, imm:$imm, 0)>; +def : Pat<(rotl i64:$in, i32:$sh), + (RLDCL $in, $sh, 0)>; +def : Pat<(rotl i64:$in, (i32 imm:$imm)), + (RLDICL $in, imm:$imm, 0)>; // Hi and Lo for Darwin Global Addresses. 
def : Pat<(PPChi tglobaladdr:$in, 0), (LIS8 tglobaladdr:$in)>; @@ -973,18 +998,18 @@ def : Pat<(PPChi tjumptable:$in , 0), (LIS8 tjumptable:$in)>; def : Pat<(PPClo tjumptable:$in , 0), (LI8 tjumptable:$in)>; def : Pat<(PPChi tblockaddress:$in, 0), (LIS8 tblockaddress:$in)>; def : Pat<(PPClo tblockaddress:$in, 0), (LI8 tblockaddress:$in)>; -def : Pat<(PPChi tglobaltlsaddr:$g, G8RC:$in), - (ADDIS8 G8RC:$in, tglobaltlsaddr:$g)>; -def : Pat<(PPClo tglobaltlsaddr:$g, G8RC:$in), - (ADDI8L G8RC:$in, tglobaltlsaddr:$g)>; -def : Pat<(add G8RC:$in, (PPChi tglobaladdr:$g, 0)), - (ADDIS8 G8RC:$in, tglobaladdr:$g)>; -def : Pat<(add G8RC:$in, (PPChi tconstpool:$g, 0)), - (ADDIS8 G8RC:$in, tconstpool:$g)>; -def : Pat<(add G8RC:$in, (PPChi tjumptable:$g, 0)), - (ADDIS8 G8RC:$in, tjumptable:$g)>; -def : Pat<(add G8RC:$in, (PPChi tblockaddress:$g, 0)), - (ADDIS8 G8RC:$in, tblockaddress:$g)>; +def : Pat<(PPChi tglobaltlsaddr:$g, i64:$in), + (ADDIS8 $in, tglobaltlsaddr:$g)>; +def : Pat<(PPClo tglobaltlsaddr:$g, i64:$in), + (ADDI8 $in, tglobaltlsaddr:$g)>; +def : Pat<(add i64:$in, (PPChi tglobaladdr:$g, 0)), + (ADDIS8 $in, tglobaladdr:$g)>; +def : Pat<(add i64:$in, (PPChi tconstpool:$g, 0)), + (ADDIS8 $in, tconstpool:$g)>; +def : Pat<(add i64:$in, (PPChi tjumptable:$g, 0)), + (ADDIS8 $in, tjumptable:$g)>; +def : Pat<(add i64:$in, (PPChi tblockaddress:$g, 0)), + (ADDIS8 $in, tblockaddress:$g)>; // Patterns to match r+r indexed loads and stores for // addresses without at least 4-byte alignment. @@ -992,6 +1017,6 @@ def : Pat<(i64 (unaligned4sextloadi32 xoaddr:$src)), (LWAX xoaddr:$src)>; def : Pat<(i64 (unaligned4load xoaddr:$src)), (LDX xoaddr:$src)>; -def : Pat<(unaligned4store G8RC:$rS, xoaddr:$dst), - (STDX G8RC:$rS, xoaddr:$dst)>; +def : Pat<(unaligned4store i64:$rS, xoaddr:$dst), + (STDX $rS, xoaddr:$dst)>; diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td index 0ed7ff2..cc9cf0a 100644 --- a/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/lib/Target/PowerPC/PPCInstrAltivec.td @@ -161,23 +161,64 @@ def vecspltisw : PatLeaf<(build_vector), [{ //===----------------------------------------------------------------------===// // Helpers for defining instructions that directly correspond to intrinsics. -// VA1a_Int - A VAForm_1a intrinsic definition. -class VA1a_Int<bits<6> xo, string opc, Intrinsic IntID> - : VAForm_1a<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB, VRRC:$vC), +// VA1a_Int_Ty - A VAForm_1a intrinsic definition of specific type. +class VA1a_Int_Ty<bits<6> xo, string opc, Intrinsic IntID, ValueType Ty> + : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC), !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP, - [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB, VRRC:$vC))]>; + [(set Ty:$vD, (IntID Ty:$vA, Ty:$vB, Ty:$vC))]>; -// VX1_Int - A VXForm_1 intrinsic definition. -class VX1_Int<bits<11> xo, string opc, Intrinsic IntID> - : VXForm_1<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +// VA1a_Int_Ty2 - A VAForm_1a intrinsic definition where the type of the +// inputs doesn't match the type of the output. +class VA1a_Int_Ty2<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy, + ValueType InTy> + : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC), + !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP, + [(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB, InTy:$vC))]>; + +// VA1a_Int_Ty3 - A VAForm_1a intrinsic definition where there are two +// input types and an output type. 
+class VA1a_Int_Ty3<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy, + ValueType In1Ty, ValueType In2Ty> + : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC), + !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP, + [(set OutTy:$vD, + (IntID In1Ty:$vA, In1Ty:$vB, In2Ty:$vC))]>; + +// VX1_Int_Ty - A VXForm_1 intrinsic definition of specific type. +class VX1_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty> + : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), + !strconcat(opc, " $vD, $vA, $vB"), VecFP, + [(set Ty:$vD, (IntID Ty:$vA, Ty:$vB))]>; + +// VX1_Int_Ty2 - A VXForm_1 intrinsic definition where the type of the +// inputs doesn't match the type of the output. +class VX1_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy, + ValueType InTy> + : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), + !strconcat(opc, " $vD, $vA, $vB"), VecFP, + [(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB))]>; + +// VX1_Int_Ty3 - A VXForm_1 intrinsic definition where there are two +// input types and an output type. +class VX1_Int_Ty3<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy, + ValueType In1Ty, ValueType In2Ty> + : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), !strconcat(opc, " $vD, $vA, $vB"), VecFP, - [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB))]>; + [(set OutTy:$vD, (IntID In1Ty:$vA, In2Ty:$vB))]>; -// VX2_Int - A VXForm_2 intrinsic definition. -class VX2_Int<bits<11> xo, string opc, Intrinsic IntID> - : VXForm_2<xo, (outs VRRC:$vD), (ins VRRC:$vB), +// VX2_Int_SP - A VXForm_2 intrinsic definition of vector single-precision type. +class VX2_Int_SP<bits<11> xo, string opc, Intrinsic IntID> + : VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB), !strconcat(opc, " $vD, $vB"), VecFP, - [(set VRRC:$vD, (IntID VRRC:$vB))]>; + [(set v4f32:$vD, (IntID v4f32:$vB))]>; + +// VX2_Int_Ty2 - A VXForm_2 intrinsic definition where the type of the +// inputs doesn't match the type of the output. +class VX2_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy, + ValueType InTy> + : VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB), + !strconcat(opc, " $vD, $vB"), VecFP, + [(set OutTy:$vD, (IntID InTy:$vB))]>; //===----------------------------------------------------------------------===// // Instruction Definitions. 
@@ -185,6 +226,7 @@ class VX2_Int<bits<11> xo, string opc, Intrinsic IntID> def HasAltivec : Predicate<"PPCSubTarget.hasAltivec()">; let Predicates = [HasAltivec] in { +let isCodeGenOnly = 1 in { def DSS : DSS_Form<822, (outs), (ins u5imm:$ZERO0, u5imm:$STRM,u5imm:$ZERO1,u5imm:$ZERO2), "dss $STRM", LdStLoad /*FIXME*/, []>; @@ -192,357 +234,398 @@ def DSSALL : DSS_Form<822, (outs), (ins u5imm:$ONE, u5imm:$ZERO0,u5imm:$ZERO1,u5imm:$ZERO2), "dssall", LdStLoad /*FIXME*/, []>; def DST : DSS_Form<342, (outs), - (ins u5imm:$ZERO, u5imm:$STRM, GPRC:$rA, GPRC:$rB), + (ins u5imm:$ZERO, u5imm:$STRM, gprc:$rA, gprc:$rB), "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; def DSTT : DSS_Form<342, (outs), - (ins u5imm:$ONE, u5imm:$STRM, GPRC:$rA, GPRC:$rB), + (ins u5imm:$ONE, u5imm:$STRM, gprc:$rA, gprc:$rB), "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; def DSTST : DSS_Form<374, (outs), - (ins u5imm:$ZERO, u5imm:$STRM, GPRC:$rA, GPRC:$rB), + (ins u5imm:$ZERO, u5imm:$STRM, gprc:$rA, gprc:$rB), "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; def DSTSTT : DSS_Form<374, (outs), - (ins u5imm:$ONE, u5imm:$STRM, GPRC:$rA, GPRC:$rB), + (ins u5imm:$ONE, u5imm:$STRM, gprc:$rA, gprc:$rB), "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; def DST64 : DSS_Form<342, (outs), - (ins u5imm:$ZERO, u5imm:$STRM, G8RC:$rA, GPRC:$rB), + (ins u5imm:$ZERO, u5imm:$STRM, g8rc:$rA, gprc:$rB), "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; def DSTT64 : DSS_Form<342, (outs), - (ins u5imm:$ONE, u5imm:$STRM, G8RC:$rA, GPRC:$rB), + (ins u5imm:$ONE, u5imm:$STRM, g8rc:$rA, gprc:$rB), "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; def DSTST64 : DSS_Form<374, (outs), - (ins u5imm:$ZERO, u5imm:$STRM, G8RC:$rA, GPRC:$rB), + (ins u5imm:$ZERO, u5imm:$STRM, g8rc:$rA, gprc:$rB), "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; def DSTSTT64 : DSS_Form<374, (outs), - (ins u5imm:$ONE, u5imm:$STRM, G8RC:$rA, GPRC:$rB), + (ins u5imm:$ONE, u5imm:$STRM, g8rc:$rA, gprc:$rB), "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>; +} -def MFVSCR : VXForm_4<1540, (outs VRRC:$vD), (ins), +def MFVSCR : VXForm_4<1540, (outs vrrc:$vD), (ins), "mfvscr $vD", LdStStore, - [(set VRRC:$vD, (int_ppc_altivec_mfvscr))]>; -def MTVSCR : VXForm_5<1604, (outs), (ins VRRC:$vB), + [(set v8i16:$vD, (int_ppc_altivec_mfvscr))]>; +def MTVSCR : VXForm_5<1604, (outs), (ins vrrc:$vB), "mtvscr $vB", LdStLoad, - [(int_ppc_altivec_mtvscr VRRC:$vB)]>; + [(int_ppc_altivec_mtvscr v4i32:$vB)]>; let canFoldAsLoad = 1, PPC970_Unit = 2 in { // Loads. 
-def LVEBX: XForm_1<31, 7, (outs VRRC:$vD), (ins memrr:$src), +def LVEBX: XForm_1<31, 7, (outs vrrc:$vD), (ins memrr:$src), "lvebx $vD, $src", LdStLoad, - [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>; -def LVEHX: XForm_1<31, 39, (outs VRRC:$vD), (ins memrr:$src), + [(set v16i8:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>; +def LVEHX: XForm_1<31, 39, (outs vrrc:$vD), (ins memrr:$src), "lvehx $vD, $src", LdStLoad, - [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>; -def LVEWX: XForm_1<31, 71, (outs VRRC:$vD), (ins memrr:$src), + [(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>; +def LVEWX: XForm_1<31, 71, (outs vrrc:$vD), (ins memrr:$src), "lvewx $vD, $src", LdStLoad, - [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>; -def LVX : XForm_1<31, 103, (outs VRRC:$vD), (ins memrr:$src), + [(set v4i32:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>; +def LVX : XForm_1<31, 103, (outs vrrc:$vD), (ins memrr:$src), "lvx $vD, $src", LdStLoad, - [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>; -def LVXL : XForm_1<31, 359, (outs VRRC:$vD), (ins memrr:$src), + [(set v4i32:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>; +def LVXL : XForm_1<31, 359, (outs vrrc:$vD), (ins memrr:$src), "lvxl $vD, $src", LdStLoad, - [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>; + [(set v4i32:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>; } -def LVSL : XForm_1<31, 6, (outs VRRC:$vD), (ins memrr:$src), +def LVSL : XForm_1<31, 6, (outs vrrc:$vD), (ins memrr:$src), "lvsl $vD, $src", LdStLoad, - [(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>, + [(set v16i8:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>, PPC970_Unit_LSU; -def LVSR : XForm_1<31, 38, (outs VRRC:$vD), (ins memrr:$src), +def LVSR : XForm_1<31, 38, (outs vrrc:$vD), (ins memrr:$src), "lvsr $vD, $src", LdStLoad, - [(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>, + [(set v16i8:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>, PPC970_Unit_LSU; let PPC970_Unit = 2 in { // Stores. -def STVEBX: XForm_8<31, 135, (outs), (ins VRRC:$rS, memrr:$dst), +def STVEBX: XForm_8<31, 135, (outs), (ins vrrc:$rS, memrr:$dst), "stvebx $rS, $dst", LdStStore, - [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>; -def STVEHX: XForm_8<31, 167, (outs), (ins VRRC:$rS, memrr:$dst), + [(int_ppc_altivec_stvebx v16i8:$rS, xoaddr:$dst)]>; +def STVEHX: XForm_8<31, 167, (outs), (ins vrrc:$rS, memrr:$dst), "stvehx $rS, $dst", LdStStore, - [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>; -def STVEWX: XForm_8<31, 199, (outs), (ins VRRC:$rS, memrr:$dst), + [(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>; +def STVEWX: XForm_8<31, 199, (outs), (ins vrrc:$rS, memrr:$dst), "stvewx $rS, $dst", LdStStore, - [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>; -def STVX : XForm_8<31, 231, (outs), (ins VRRC:$rS, memrr:$dst), + [(int_ppc_altivec_stvewx v4i32:$rS, xoaddr:$dst)]>; +def STVX : XForm_8<31, 231, (outs), (ins vrrc:$rS, memrr:$dst), "stvx $rS, $dst", LdStStore, - [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>; -def STVXL : XForm_8<31, 487, (outs), (ins VRRC:$rS, memrr:$dst), + [(int_ppc_altivec_stvx v4i32:$rS, xoaddr:$dst)]>; +def STVXL : XForm_8<31, 487, (outs), (ins vrrc:$rS, memrr:$dst), "stvxl $rS, $dst", LdStStore, - [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>; + [(int_ppc_altivec_stvxl v4i32:$rS, xoaddr:$dst)]>; } let PPC970_Unit = 5 in { // VALU Operations. // VA-Form instructions. 3-input AltiVec ops. 
-def VMADDFP : VAForm_1<46, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vC, VRRC:$vB), +def VMADDFP : VAForm_1<46, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB), "vmaddfp $vD, $vA, $vC, $vB", VecFP, - [(set VRRC:$vD, (fma VRRC:$vA, VRRC:$vC, VRRC:$vB))]>; -def VNMSUBFP: VAForm_1<47, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vC, VRRC:$vB), + [(set v4f32:$vD, + (fma v4f32:$vA, v4f32:$vC, v4f32:$vB))]>; + +// FIXME: The fma+fneg pattern won't match because fneg is not legal. +def VNMSUBFP: VAForm_1<47, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB), "vnmsubfp $vD, $vA, $vC, $vB", VecFP, - [(set VRRC:$vD, (fneg (fma VRRC:$vA, VRRC:$vC, - (fneg VRRC:$vB))))]>; + [(set v4f32:$vD, (fneg (fma v4f32:$vA, v4f32:$vC, + (fneg v4f32:$vB))))]>; -def VMHADDSHS : VA1a_Int<32, "vmhaddshs", int_ppc_altivec_vmhaddshs>; -def VMHRADDSHS : VA1a_Int<33, "vmhraddshs", int_ppc_altivec_vmhraddshs>; -def VMLADDUHM : VA1a_Int<34, "vmladduhm", int_ppc_altivec_vmladduhm>; -def VPERM : VA1a_Int<43, "vperm", int_ppc_altivec_vperm>; -def VSEL : VA1a_Int<42, "vsel", int_ppc_altivec_vsel>; +def VMHADDSHS : VA1a_Int_Ty<32, "vmhaddshs", int_ppc_altivec_vmhaddshs, v8i16>; +def VMHRADDSHS : VA1a_Int_Ty<33, "vmhraddshs", int_ppc_altivec_vmhraddshs, + v8i16>; +def VMLADDUHM : VA1a_Int_Ty<34, "vmladduhm", int_ppc_altivec_vmladduhm, v8i16>; + +def VPERM : VA1a_Int_Ty3<43, "vperm", int_ppc_altivec_vperm, + v4i32, v4i32, v16i8>; +def VSEL : VA1a_Int_Ty<42, "vsel", int_ppc_altivec_vsel, v4i32>; // Shuffles. -def VSLDOI : VAForm_2<44, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB, u5imm:$SH), +def VSLDOI : VAForm_2<44, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, u5imm:$SH), "vsldoi $vD, $vA, $vB, $SH", VecFP, - [(set VRRC:$vD, - (vsldoi_shuffle:$SH (v16i8 VRRC:$vA), VRRC:$vB))]>; + [(set v16i8:$vD, + (vsldoi_shuffle:$SH v16i8:$vA, v16i8:$vB))]>; // VX-Form instructions. AltiVec arithmetic ops. 
-def VADDFP : VXForm_1<10, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +def VADDFP : VXForm_1<10, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vaddfp $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>; + [(set v4f32:$vD, (fadd v4f32:$vA, v4f32:$vB))]>; -def VADDUBM : VXForm_1<0, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +def VADDUBM : VXForm_1<0, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vaddubm $vD, $vA, $vB", VecGeneral, - [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>; -def VADDUHM : VXForm_1<64, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, (add v16i8:$vA, v16i8:$vB))]>; +def VADDUHM : VXForm_1<64, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vadduhm $vD, $vA, $vB", VecGeneral, - [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>; -def VADDUWM : VXForm_1<128, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v8i16:$vD, (add v8i16:$vA, v8i16:$vB))]>; +def VADDUWM : VXForm_1<128, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vadduwm $vD, $vA, $vB", VecGeneral, - [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>; + [(set v4i32:$vD, (add v4i32:$vA, v4i32:$vB))]>; -def VADDCUW : VX1_Int<384, "vaddcuw", int_ppc_altivec_vaddcuw>; -def VADDSBS : VX1_Int<768, "vaddsbs", int_ppc_altivec_vaddsbs>; -def VADDSHS : VX1_Int<832, "vaddshs", int_ppc_altivec_vaddshs>; -def VADDSWS : VX1_Int<896, "vaddsws", int_ppc_altivec_vaddsws>; -def VADDUBS : VX1_Int<512, "vaddubs", int_ppc_altivec_vaddubs>; -def VADDUHS : VX1_Int<576, "vadduhs", int_ppc_altivec_vadduhs>; -def VADDUWS : VX1_Int<640, "vadduws", int_ppc_altivec_vadduws>; +def VADDCUW : VX1_Int_Ty<384, "vaddcuw", int_ppc_altivec_vaddcuw, v4i32>; +def VADDSBS : VX1_Int_Ty<768, "vaddsbs", int_ppc_altivec_vaddsbs, v16i8>; +def VADDSHS : VX1_Int_Ty<832, "vaddshs", int_ppc_altivec_vaddshs, v8i16>; +def VADDSWS : VX1_Int_Ty<896, "vaddsws", int_ppc_altivec_vaddsws, v4i32>; +def VADDUBS : VX1_Int_Ty<512, "vaddubs", int_ppc_altivec_vaddubs, v16i8>; +def VADDUHS : VX1_Int_Ty<576, "vadduhs", int_ppc_altivec_vadduhs, v8i16>; +def VADDUWS : VX1_Int_Ty<640, "vadduws", int_ppc_altivec_vadduws, v4i32>; -def VAND : VXForm_1<1028, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +def VAND : VXForm_1<1028, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vand $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>; -def VANDC : VXForm_1<1092, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v4i32:$vD, (and v4i32:$vA, v4i32:$vB))]>; +def VANDC : VXForm_1<1092, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vandc $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (and (v4i32 VRRC:$vA), - (vnot_ppc VRRC:$vB)))]>; + [(set v4i32:$vD, (and v4i32:$vA, + (vnot_ppc v4i32:$vB)))]>; -def VCFSX : VXForm_1<842, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB), +def VCFSX : VXForm_1<842, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), "vcfsx $vD, $vB, $UIMM", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>; -def VCFUX : VXForm_1<778, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB), + [(set v4f32:$vD, + (int_ppc_altivec_vcfsx v4i32:$vB, imm:$UIMM))]>; +def VCFUX : VXForm_1<778, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), "vcfux $vD, $vB, $UIMM", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>; -def VCTSXS : VXForm_1<970, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB), + [(set v4f32:$vD, + (int_ppc_altivec_vcfux v4i32:$vB, imm:$UIMM))]>; +def VCTSXS : VXForm_1<970, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), "vctsxs $vD, $vB, $UIMM", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vctsxs VRRC:$vB, 
imm:$UIMM))]>; -def VCTUXS : VXForm_1<906, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB), + [(set v4i32:$vD, + (int_ppc_altivec_vctsxs v4f32:$vB, imm:$UIMM))]>; +def VCTUXS : VXForm_1<906, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), "vctuxs $vD, $vB, $UIMM", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>; + [(set v4i32:$vD, + (int_ppc_altivec_vctuxs v4f32:$vB, imm:$UIMM))]>; // Defines with the UIM field set to 0 for floating-point // to integer (fp_to_sint/fp_to_uint) conversions and integer // to floating-point (sint_to_fp/uint_to_fp) conversions. let VA = 0 in { -def VCFSX_0 : VXForm_1<842, (outs VRRC:$vD), (ins VRRC:$vB), +def VCFSX_0 : VXForm_1<842, (outs vrrc:$vD), (ins vrrc:$vB), "vcfsx $vD, $vB, 0", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vcfsx VRRC:$vB, 0))]>; -def VCTUXS_0 : VXForm_1<906, (outs VRRC:$vD), (ins VRRC:$vB), + [(set v4f32:$vD, + (int_ppc_altivec_vcfsx v4i32:$vB, 0))]>; +def VCTUXS_0 : VXForm_1<906, (outs vrrc:$vD), (ins vrrc:$vB), "vctuxs $vD, $vB, 0", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vctuxs VRRC:$vB, 0))]>; -def VCFUX_0 : VXForm_1<778, (outs VRRC:$vD), (ins VRRC:$vB), + [(set v4i32:$vD, + (int_ppc_altivec_vctuxs v4f32:$vB, 0))]>; +def VCFUX_0 : VXForm_1<778, (outs vrrc:$vD), (ins vrrc:$vB), "vcfux $vD, $vB, 0", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vcfux VRRC:$vB, 0))]>; -def VCTSXS_0 : VXForm_1<970, (outs VRRC:$vD), (ins VRRC:$vB), + [(set v4f32:$vD, + (int_ppc_altivec_vcfux v4i32:$vB, 0))]>; +def VCTSXS_0 : VXForm_1<970, (outs vrrc:$vD), (ins vrrc:$vB), "vctsxs $vD, $vB, 0", VecFP, - [(set VRRC:$vD, - (int_ppc_altivec_vctsxs VRRC:$vB, 0))]>; + [(set v4i32:$vD, + (int_ppc_altivec_vctsxs v4f32:$vB, 0))]>; } -def VEXPTEFP : VX2_Int<394, "vexptefp", int_ppc_altivec_vexptefp>; -def VLOGEFP : VX2_Int<458, "vlogefp", int_ppc_altivec_vlogefp>; - -def VAVGSB : VX1_Int<1282, "vavgsb", int_ppc_altivec_vavgsb>; -def VAVGSH : VX1_Int<1346, "vavgsh", int_ppc_altivec_vavgsh>; -def VAVGSW : VX1_Int<1410, "vavgsw", int_ppc_altivec_vavgsw>; -def VAVGUB : VX1_Int<1026, "vavgub", int_ppc_altivec_vavgub>; -def VAVGUH : VX1_Int<1090, "vavguh", int_ppc_altivec_vavguh>; -def VAVGUW : VX1_Int<1154, "vavguw", int_ppc_altivec_vavguw>; - -def VMAXFP : VX1_Int<1034, "vmaxfp", int_ppc_altivec_vmaxfp>; -def VMAXSB : VX1_Int< 258, "vmaxsb", int_ppc_altivec_vmaxsb>; -def VMAXSH : VX1_Int< 322, "vmaxsh", int_ppc_altivec_vmaxsh>; -def VMAXSW : VX1_Int< 386, "vmaxsw", int_ppc_altivec_vmaxsw>; -def VMAXUB : VX1_Int< 2, "vmaxub", int_ppc_altivec_vmaxub>; -def VMAXUH : VX1_Int< 66, "vmaxuh", int_ppc_altivec_vmaxuh>; -def VMAXUW : VX1_Int< 130, "vmaxuw", int_ppc_altivec_vmaxuw>; -def VMINFP : VX1_Int<1098, "vminfp", int_ppc_altivec_vminfp>; -def VMINSB : VX1_Int< 770, "vminsb", int_ppc_altivec_vminsb>; -def VMINSH : VX1_Int< 834, "vminsh", int_ppc_altivec_vminsh>; -def VMINSW : VX1_Int< 898, "vminsw", int_ppc_altivec_vminsw>; -def VMINUB : VX1_Int< 514, "vminub", int_ppc_altivec_vminub>; -def VMINUH : VX1_Int< 578, "vminuh", int_ppc_altivec_vminuh>; -def VMINUW : VX1_Int< 642, "vminuw", int_ppc_altivec_vminuw>; - -def VMRGHB : VXForm_1< 12, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +def VEXPTEFP : VX2_Int_SP<394, "vexptefp", int_ppc_altivec_vexptefp>; +def VLOGEFP : VX2_Int_SP<458, "vlogefp", int_ppc_altivec_vlogefp>; + +def VAVGSB : VX1_Int_Ty<1282, "vavgsb", int_ppc_altivec_vavgsb, v16i8>; +def VAVGSH : VX1_Int_Ty<1346, "vavgsh", int_ppc_altivec_vavgsh, v8i16>; +def VAVGSW : VX1_Int_Ty<1410, "vavgsw", int_ppc_altivec_vavgsw, v4i32>; +def VAVGUB : 
VX1_Int_Ty<1026, "vavgub", int_ppc_altivec_vavgub, v16i8>; +def VAVGUH : VX1_Int_Ty<1090, "vavguh", int_ppc_altivec_vavguh, v8i16>; +def VAVGUW : VX1_Int_Ty<1154, "vavguw", int_ppc_altivec_vavguw, v4i32>; + +def VMAXFP : VX1_Int_Ty<1034, "vmaxfp", int_ppc_altivec_vmaxfp, v4f32>; +def VMAXSB : VX1_Int_Ty< 258, "vmaxsb", int_ppc_altivec_vmaxsb, v16i8>; +def VMAXSH : VX1_Int_Ty< 322, "vmaxsh", int_ppc_altivec_vmaxsh, v8i16>; +def VMAXSW : VX1_Int_Ty< 386, "vmaxsw", int_ppc_altivec_vmaxsw, v4i32>; +def VMAXUB : VX1_Int_Ty< 2, "vmaxub", int_ppc_altivec_vmaxub, v16i8>; +def VMAXUH : VX1_Int_Ty< 66, "vmaxuh", int_ppc_altivec_vmaxuh, v8i16>; +def VMAXUW : VX1_Int_Ty< 130, "vmaxuw", int_ppc_altivec_vmaxuw, v4i32>; +def VMINFP : VX1_Int_Ty<1098, "vminfp", int_ppc_altivec_vminfp, v4f32>; +def VMINSB : VX1_Int_Ty< 770, "vminsb", int_ppc_altivec_vminsb, v16i8>; +def VMINSH : VX1_Int_Ty< 834, "vminsh", int_ppc_altivec_vminsh, v8i16>; +def VMINSW : VX1_Int_Ty< 898, "vminsw", int_ppc_altivec_vminsw, v4i32>; +def VMINUB : VX1_Int_Ty< 514, "vminub", int_ppc_altivec_vminub, v16i8>; +def VMINUH : VX1_Int_Ty< 578, "vminuh", int_ppc_altivec_vminuh, v8i16>; +def VMINUW : VX1_Int_Ty< 642, "vminuw", int_ppc_altivec_vminuw, v4i32>; + +def VMRGHB : VXForm_1< 12, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vmrghb $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (vmrghb_shuffle VRRC:$vA, VRRC:$vB))]>; -def VMRGHH : VXForm_1< 76, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, (vmrghb_shuffle v16i8:$vA, v16i8:$vB))]>; +def VMRGHH : VXForm_1< 76, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vmrghh $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (vmrghh_shuffle VRRC:$vA, VRRC:$vB))]>; -def VMRGHW : VXForm_1<140, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, (vmrghh_shuffle v16i8:$vA, v16i8:$vB))]>; +def VMRGHW : VXForm_1<140, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vmrghw $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (vmrghw_shuffle VRRC:$vA, VRRC:$vB))]>; -def VMRGLB : VXForm_1<268, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, (vmrghw_shuffle v16i8:$vA, v16i8:$vB))]>; +def VMRGLB : VXForm_1<268, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vmrglb $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (vmrglb_shuffle VRRC:$vA, VRRC:$vB))]>; -def VMRGLH : VXForm_1<332, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, (vmrglb_shuffle v16i8:$vA, v16i8:$vB))]>; +def VMRGLH : VXForm_1<332, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vmrglh $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (vmrglh_shuffle VRRC:$vA, VRRC:$vB))]>; -def VMRGLW : VXForm_1<396, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, (vmrglh_shuffle v16i8:$vA, v16i8:$vB))]>; +def VMRGLW : VXForm_1<396, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vmrglw $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (vmrglw_shuffle VRRC:$vA, VRRC:$vB))]>; - -def VMSUMMBM : VA1a_Int<37, "vmsummbm", int_ppc_altivec_vmsummbm>; -def VMSUMSHM : VA1a_Int<40, "vmsumshm", int_ppc_altivec_vmsumshm>; -def VMSUMSHS : VA1a_Int<41, "vmsumshs", int_ppc_altivec_vmsumshs>; -def VMSUMUBM : VA1a_Int<36, "vmsumubm", int_ppc_altivec_vmsumubm>; -def VMSUMUHM : VA1a_Int<38, "vmsumuhm", int_ppc_altivec_vmsumuhm>; -def VMSUMUHS : VA1a_Int<39, "vmsumuhs", int_ppc_altivec_vmsumuhs>; - -def VMULESB : VX1_Int<776, "vmulesb", int_ppc_altivec_vmulesb>; -def VMULESH : VX1_Int<840, "vmulesh", int_ppc_altivec_vmulesh>; -def VMULEUB : VX1_Int<520, "vmuleub", int_ppc_altivec_vmuleub>; -def VMULEUH : VX1_Int<584, "vmuleuh", int_ppc_altivec_vmuleuh>; -def VMULOSB : VX1_Int<264, 
"vmulosb", int_ppc_altivec_vmulosb>; -def VMULOSH : VX1_Int<328, "vmulosh", int_ppc_altivec_vmulosh>; -def VMULOUB : VX1_Int< 8, "vmuloub", int_ppc_altivec_vmuloub>; -def VMULOUH : VX1_Int< 72, "vmulouh", int_ppc_altivec_vmulouh>; + [(set v16i8:$vD, (vmrglw_shuffle v16i8:$vA, v16i8:$vB))]>; + +def VMSUMMBM : VA1a_Int_Ty3<37, "vmsummbm", int_ppc_altivec_vmsummbm, + v4i32, v16i8, v4i32>; +def VMSUMSHM : VA1a_Int_Ty3<40, "vmsumshm", int_ppc_altivec_vmsumshm, + v4i32, v8i16, v4i32>; +def VMSUMSHS : VA1a_Int_Ty3<41, "vmsumshs", int_ppc_altivec_vmsumshs, + v4i32, v8i16, v4i32>; +def VMSUMUBM : VA1a_Int_Ty3<36, "vmsumubm", int_ppc_altivec_vmsumubm, + v4i32, v16i8, v4i32>; +def VMSUMUHM : VA1a_Int_Ty3<38, "vmsumuhm", int_ppc_altivec_vmsumuhm, + v4i32, v8i16, v4i32>; +def VMSUMUHS : VA1a_Int_Ty3<39, "vmsumuhs", int_ppc_altivec_vmsumuhs, + v4i32, v8i16, v4i32>; + +def VMULESB : VX1_Int_Ty2<776, "vmulesb", int_ppc_altivec_vmulesb, + v8i16, v16i8>; +def VMULESH : VX1_Int_Ty2<840, "vmulesh", int_ppc_altivec_vmulesh, + v4i32, v8i16>; +def VMULEUB : VX1_Int_Ty2<520, "vmuleub", int_ppc_altivec_vmuleub, + v8i16, v16i8>; +def VMULEUH : VX1_Int_Ty2<584, "vmuleuh", int_ppc_altivec_vmuleuh, + v4i32, v8i16>; +def VMULOSB : VX1_Int_Ty2<264, "vmulosb", int_ppc_altivec_vmulosb, + v8i16, v16i8>; +def VMULOSH : VX1_Int_Ty2<328, "vmulosh", int_ppc_altivec_vmulosh, + v4i32, v8i16>; +def VMULOUB : VX1_Int_Ty2< 8, "vmuloub", int_ppc_altivec_vmuloub, + v8i16, v16i8>; +def VMULOUH : VX1_Int_Ty2< 72, "vmulouh", int_ppc_altivec_vmulouh, + v4i32, v8i16>; -def VREFP : VX2_Int<266, "vrefp", int_ppc_altivec_vrefp>; -def VRFIM : VX2_Int<714, "vrfim", int_ppc_altivec_vrfim>; -def VRFIN : VX2_Int<522, "vrfin", int_ppc_altivec_vrfin>; -def VRFIP : VX2_Int<650, "vrfip", int_ppc_altivec_vrfip>; -def VRFIZ : VX2_Int<586, "vrfiz", int_ppc_altivec_vrfiz>; -def VRSQRTEFP : VX2_Int<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>; +def VREFP : VX2_Int_SP<266, "vrefp", int_ppc_altivec_vrefp>; +def VRFIM : VX2_Int_SP<714, "vrfim", int_ppc_altivec_vrfim>; +def VRFIN : VX2_Int_SP<522, "vrfin", int_ppc_altivec_vrfin>; +def VRFIP : VX2_Int_SP<650, "vrfip", int_ppc_altivec_vrfip>; +def VRFIZ : VX2_Int_SP<586, "vrfiz", int_ppc_altivec_vrfiz>; +def VRSQRTEFP : VX2_Int_SP<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>; -def VSUBCUW : VX1_Int<74, "vsubcuw", int_ppc_altivec_vsubcuw>; +def VSUBCUW : VX1_Int_Ty<1408, "vsubcuw", int_ppc_altivec_vsubcuw, v4i32>; -def VSUBFP : VXForm_1<74, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +def VSUBFP : VXForm_1<74, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vsubfp $vD, $vA, $vB", VecGeneral, - [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>; -def VSUBUBM : VXForm_1<1024, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v4f32:$vD, (fsub v4f32:$vA, v4f32:$vB))]>; +def VSUBUBM : VXForm_1<1024, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vsububm $vD, $vA, $vB", VecGeneral, - [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>; -def VSUBUHM : VXForm_1<1088, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, (sub v16i8:$vA, v16i8:$vB))]>; +def VSUBUHM : VXForm_1<1088, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vsubuhm $vD, $vA, $vB", VecGeneral, - [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>; -def VSUBUWM : VXForm_1<1152, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v8i16:$vD, (sub v8i16:$vA, v8i16:$vB))]>; +def VSUBUWM : VXForm_1<1152, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vsubuwm $vD, $vA, $vB", VecGeneral, - [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>; + [(set v4i32:$vD, (sub 
v4i32:$vA, v4i32:$vB))]>; -def VSUBSBS : VX1_Int<1792, "vsubsbs" , int_ppc_altivec_vsubsbs>; -def VSUBSHS : VX1_Int<1856, "vsubshs" , int_ppc_altivec_vsubshs>; -def VSUBSWS : VX1_Int<1920, "vsubsws" , int_ppc_altivec_vsubsws>; -def VSUBUBS : VX1_Int<1536, "vsububs" , int_ppc_altivec_vsububs>; -def VSUBUHS : VX1_Int<1600, "vsubuhs" , int_ppc_altivec_vsubuhs>; -def VSUBUWS : VX1_Int<1664, "vsubuws" , int_ppc_altivec_vsubuws>; -def VSUMSWS : VX1_Int<1928, "vsumsws" , int_ppc_altivec_vsumsws>; -def VSUM2SWS: VX1_Int<1672, "vsum2sws", int_ppc_altivec_vsum2sws>; -def VSUM4SBS: VX1_Int<1672, "vsum4sbs", int_ppc_altivec_vsum4sbs>; -def VSUM4SHS: VX1_Int<1608, "vsum4shs", int_ppc_altivec_vsum4shs>; -def VSUM4UBS: VX1_Int<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs>; - -def VNOR : VXForm_1<1284, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +def VSUBSBS : VX1_Int_Ty<1792, "vsubsbs" , int_ppc_altivec_vsubsbs, v16i8>; +def VSUBSHS : VX1_Int_Ty<1856, "vsubshs" , int_ppc_altivec_vsubshs, v8i16>; +def VSUBSWS : VX1_Int_Ty<1920, "vsubsws" , int_ppc_altivec_vsubsws, v4i32>; +def VSUBUBS : VX1_Int_Ty<1536, "vsububs" , int_ppc_altivec_vsububs, v16i8>; +def VSUBUHS : VX1_Int_Ty<1600, "vsubuhs" , int_ppc_altivec_vsubuhs, v8i16>; +def VSUBUWS : VX1_Int_Ty<1664, "vsubuws" , int_ppc_altivec_vsubuws, v4i32>; + +def VSUMSWS : VX1_Int_Ty<1928, "vsumsws" , int_ppc_altivec_vsumsws, v4i32>; +def VSUM2SWS: VX1_Int_Ty<1672, "vsum2sws", int_ppc_altivec_vsum2sws, v4i32>; + +def VSUM4SBS: VX1_Int_Ty3<1800, "vsum4sbs", int_ppc_altivec_vsum4sbs, + v4i32, v16i8, v4i32>; +def VSUM4SHS: VX1_Int_Ty3<1608, "vsum4shs", int_ppc_altivec_vsum4shs, + v4i32, v8i16, v4i32>; +def VSUM4UBS: VX1_Int_Ty3<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs, + v4i32, v16i8, v4i32>; + +def VNOR : VXForm_1<1284, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vnor $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (vnot_ppc (or (v4i32 VRRC:$vA), - VRRC:$vB)))]>; -def VOR : VXForm_1<1156, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v4i32:$vD, (vnot_ppc (or v4i32:$vA, + v4i32:$vB)))]>; +def VOR : VXForm_1<1156, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vor $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>; -def VXOR : VXForm_1<1220, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v4i32:$vD, (or v4i32:$vA, v4i32:$vB))]>; +def VXOR : VXForm_1<1220, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vxor $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>; + [(set v4i32:$vD, (xor v4i32:$vA, v4i32:$vB))]>; + +def VRLB : VX1_Int_Ty< 4, "vrlb", int_ppc_altivec_vrlb, v16i8>; +def VRLH : VX1_Int_Ty< 68, "vrlh", int_ppc_altivec_vrlh, v8i16>; +def VRLW : VX1_Int_Ty< 132, "vrlw", int_ppc_altivec_vrlw, v4i32>; -def VRLB : VX1_Int< 4, "vrlb", int_ppc_altivec_vrlb>; -def VRLH : VX1_Int< 68, "vrlh", int_ppc_altivec_vrlh>; -def VRLW : VX1_Int< 132, "vrlw", int_ppc_altivec_vrlw>; +def VSL : VX1_Int_Ty< 452, "vsl" , int_ppc_altivec_vsl, v4i32 >; +def VSLO : VX1_Int_Ty<1036, "vslo", int_ppc_altivec_vslo, v4i32>; -def VSL : VX1_Int< 452, "vsl" , int_ppc_altivec_vsl >; -def VSLO : VX1_Int<1036, "vslo", int_ppc_altivec_vslo>; -def VSLB : VX1_Int< 260, "vslb", int_ppc_altivec_vslb>; -def VSLH : VX1_Int< 324, "vslh", int_ppc_altivec_vslh>; -def VSLW : VX1_Int< 388, "vslw", int_ppc_altivec_vslw>; +def VSLB : VX1_Int_Ty< 260, "vslb", int_ppc_altivec_vslb, v16i8>; +def VSLH : VX1_Int_Ty< 324, "vslh", int_ppc_altivec_vslh, v8i16>; +def VSLW : VX1_Int_Ty< 388, "vslw", int_ppc_altivec_vslw, v4i32>; -def VSPLTB : VXForm_1<524, (outs VRRC:$vD), (ins 
u5imm:$UIMM, VRRC:$vB), +def VSPLTB : VXForm_1<524, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), "vspltb $vD, $vB, $UIMM", VecPerm, - [(set VRRC:$vD, - (vspltb_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>; -def VSPLTH : VXForm_1<588, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB), + [(set v16i8:$vD, + (vspltb_shuffle:$UIMM v16i8:$vB, (undef)))]>; +def VSPLTH : VXForm_1<588, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), "vsplth $vD, $vB, $UIMM", VecPerm, - [(set VRRC:$vD, - (vsplth_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>; -def VSPLTW : VXForm_1<652, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB), + [(set v16i8:$vD, + (vsplth_shuffle:$UIMM v16i8:$vB, (undef)))]>; +def VSPLTW : VXForm_1<652, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB), "vspltw $vD, $vB, $UIMM", VecPerm, - [(set VRRC:$vD, - (vspltw_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>; + [(set v16i8:$vD, + (vspltw_shuffle:$UIMM v16i8:$vB, (undef)))]>; -def VSR : VX1_Int< 708, "vsr" , int_ppc_altivec_vsr>; -def VSRO : VX1_Int<1100, "vsro" , int_ppc_altivec_vsro>; -def VSRAB : VX1_Int< 772, "vsrab", int_ppc_altivec_vsrab>; -def VSRAH : VX1_Int< 836, "vsrah", int_ppc_altivec_vsrah>; -def VSRAW : VX1_Int< 900, "vsraw", int_ppc_altivec_vsraw>; -def VSRB : VX1_Int< 516, "vsrb" , int_ppc_altivec_vsrb>; -def VSRH : VX1_Int< 580, "vsrh" , int_ppc_altivec_vsrh>; -def VSRW : VX1_Int< 644, "vsrw" , int_ppc_altivec_vsrw>; +def VSR : VX1_Int_Ty< 708, "vsr" , int_ppc_altivec_vsr, v4i32>; +def VSRO : VX1_Int_Ty<1100, "vsro" , int_ppc_altivec_vsro, v4i32>; +def VSRAB : VX1_Int_Ty< 772, "vsrab", int_ppc_altivec_vsrab, v16i8>; +def VSRAH : VX1_Int_Ty< 836, "vsrah", int_ppc_altivec_vsrah, v8i16>; +def VSRAW : VX1_Int_Ty< 900, "vsraw", int_ppc_altivec_vsraw, v4i32>; +def VSRB : VX1_Int_Ty< 516, "vsrb" , int_ppc_altivec_vsrb , v16i8>; +def VSRH : VX1_Int_Ty< 580, "vsrh" , int_ppc_altivec_vsrh , v8i16>; +def VSRW : VX1_Int_Ty< 644, "vsrw" , int_ppc_altivec_vsrw , v4i32>; -def VSPLTISB : VXForm_3<780, (outs VRRC:$vD), (ins s5imm:$SIMM), + +def VSPLTISB : VXForm_3<780, (outs vrrc:$vD), (ins s5imm:$SIMM), "vspltisb $vD, $SIMM", VecPerm, - [(set VRRC:$vD, (v16i8 vecspltisb:$SIMM))]>; -def VSPLTISH : VXForm_3<844, (outs VRRC:$vD), (ins s5imm:$SIMM), + [(set v16i8:$vD, (v16i8 vecspltisb:$SIMM))]>; +def VSPLTISH : VXForm_3<844, (outs vrrc:$vD), (ins s5imm:$SIMM), "vspltish $vD, $SIMM", VecPerm, - [(set VRRC:$vD, (v8i16 vecspltish:$SIMM))]>; -def VSPLTISW : VXForm_3<908, (outs VRRC:$vD), (ins s5imm:$SIMM), + [(set v8i16:$vD, (v8i16 vecspltish:$SIMM))]>; +def VSPLTISW : VXForm_3<908, (outs vrrc:$vD), (ins s5imm:$SIMM), "vspltisw $vD, $SIMM", VecPerm, - [(set VRRC:$vD, (v4i32 vecspltisw:$SIMM))]>; + [(set v4i32:$vD, (v4i32 vecspltisw:$SIMM))]>; // Vector Pack. 
-def VPKPX : VX1_Int<782, "vpkpx", int_ppc_altivec_vpkpx>; -def VPKSHSS : VX1_Int<398, "vpkshss", int_ppc_altivec_vpkshss>; -def VPKSHUS : VX1_Int<270, "vpkshus", int_ppc_altivec_vpkshus>; -def VPKSWSS : VX1_Int<462, "vpkswss", int_ppc_altivec_vpkswss>; -def VPKSWUS : VX1_Int<334, "vpkswus", int_ppc_altivec_vpkswus>; -def VPKUHUM : VXForm_1<14, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), +def VPKPX : VX1_Int_Ty2<782, "vpkpx", int_ppc_altivec_vpkpx, + v8i16, v4i32>; +def VPKSHSS : VX1_Int_Ty2<398, "vpkshss", int_ppc_altivec_vpkshss, + v16i8, v8i16>; +def VPKSHUS : VX1_Int_Ty2<270, "vpkshus", int_ppc_altivec_vpkshus, + v16i8, v8i16>; +def VPKSWSS : VX1_Int_Ty2<462, "vpkswss", int_ppc_altivec_vpkswss, + v16i8, v4i32>; +def VPKSWUS : VX1_Int_Ty2<334, "vpkswus", int_ppc_altivec_vpkswus, + v8i16, v4i32>; +def VPKUHUM : VXForm_1<14, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vpkuhum $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (vpkuhum_shuffle (v16i8 VRRC:$vA), VRRC:$vB))]>; -def VPKUHUS : VX1_Int<142, "vpkuhus", int_ppc_altivec_vpkuhus>; -def VPKUWUM : VXForm_1<78, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), + [(set v16i8:$vD, + (vpkuhum_shuffle v16i8:$vA, v16i8:$vB))]>; +def VPKUHUS : VX1_Int_Ty2<142, "vpkuhus", int_ppc_altivec_vpkuhus, + v16i8, v8i16>; +def VPKUWUM : VXForm_1<78, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), "vpkuwum $vD, $vA, $vB", VecFP, - [(set VRRC:$vD, - (vpkuwum_shuffle (v16i8 VRRC:$vA), VRRC:$vB))]>; -def VPKUWUS : VX1_Int<206, "vpkuwus", int_ppc_altivec_vpkuwus>; + [(set v16i8:$vD, + (vpkuwum_shuffle v16i8:$vA, v16i8:$vB))]>; +def VPKUWUS : VX1_Int_Ty2<206, "vpkuwus", int_ppc_altivec_vpkuwus, + v8i16, v4i32>; // Vector Unpack. -def VUPKHPX : VX2_Int<846, "vupkhpx", int_ppc_altivec_vupkhpx>; -def VUPKHSB : VX2_Int<526, "vupkhsb", int_ppc_altivec_vupkhsb>; -def VUPKHSH : VX2_Int<590, "vupkhsh", int_ppc_altivec_vupkhsh>; -def VUPKLPX : VX2_Int<974, "vupklpx", int_ppc_altivec_vupklpx>; -def VUPKLSB : VX2_Int<654, "vupklsb", int_ppc_altivec_vupklsb>; -def VUPKLSH : VX2_Int<718, "vupklsh", int_ppc_altivec_vupklsh>; +def VUPKHPX : VX2_Int_Ty2<846, "vupkhpx", int_ppc_altivec_vupkhpx, + v4i32, v8i16>; +def VUPKHSB : VX2_Int_Ty2<526, "vupkhsb", int_ppc_altivec_vupkhsb, + v8i16, v16i8>; +def VUPKHSH : VX2_Int_Ty2<590, "vupkhsh", int_ppc_altivec_vupkhsh, + v4i32, v8i16>; +def VUPKLPX : VX2_Int_Ty2<974, "vupklpx", int_ppc_altivec_vupklpx, + v4i32, v8i16>; +def VUPKLSB : VX2_Int_Ty2<654, "vupklsb", int_ppc_altivec_vupklsb, + v8i16, v16i8>; +def VUPKLSH : VX2_Int_Ty2<718, "vupklsh", int_ppc_altivec_vupklsh, + v4i32, v8i16>; // Altivec Comparisons. class VCMP<bits<10> xo, string asmstr, ValueType Ty> - : VXRForm_1<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),asmstr,VecFPCompare, - [(set VRRC:$vD, (Ty (PPCvcmp VRRC:$vA, VRRC:$vB, xo)))]>; + : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),asmstr,VecFPCompare, + [(set Ty:$vD, (Ty (PPCvcmp Ty:$vA, Ty:$vB, xo)))]>; class VCMPo<bits<10> xo, string asmstr, ValueType Ty> - : VXRForm_1<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),asmstr,VecFPCompare, - [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]> { + : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),asmstr,VecFPCompare, + [(set Ty:$vD, (Ty (PPCvcmp_o Ty:$vA, Ty:$vB, xo)))]> { let Defs = [CR6]; let RC = 1; } @@ -581,13 +664,14 @@ def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>; def VCMPGTUW : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>; def VCMPGTUWo : VCMPo<646, "vcmpgtuw. 
$vD, $vA, $vB", v4i32>; -def V_SET0 : VXForm_setzero<1220, (outs VRRC:$vD), (ins), +let isCodeGenOnly = 1 in +def V_SET0 : VXForm_setzero<1220, (outs vrrc:$vD), (ins), "vxor $vD, $vD, $vD", VecFP, - [(set VRRC:$vD, (v4i32 immAllZerosV))]>; + [(set v4i32:$vD, (v4i32 immAllZerosV))]>; let IMM=-1 in { -def V_SETALLONES : VXForm_3<908, (outs VRRC:$vD), (ins), +def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins), "vspltisw $vD, -1", VecFP, - [(set VRRC:$vD, (v4i32 immAllOnesV))]>; + [(set v4i32:$vD, (v4i32 immAllOnesV))]>; } } // VALU Operations. @@ -600,31 +684,31 @@ def : Pat<(int_ppc_altivec_dssall), (DSSALL 1, 0, 0, 0)>; def : Pat<(int_ppc_altivec_dss imm:$STRM), (DSS 0, imm:$STRM, 0, 0)>; // * 32-bit -def : Pat<(int_ppc_altivec_dst GPRC:$rA, GPRC:$rB, imm:$STRM), - (DST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>; -def : Pat<(int_ppc_altivec_dstt GPRC:$rA, GPRC:$rB, imm:$STRM), - (DSTT 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>; -def : Pat<(int_ppc_altivec_dstst GPRC:$rA, GPRC:$rB, imm:$STRM), - (DSTST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>; -def : Pat<(int_ppc_altivec_dststt GPRC:$rA, GPRC:$rB, imm:$STRM), - (DSTSTT 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dst i32:$rA, i32:$rB, imm:$STRM), + (DST 0, imm:$STRM, $rA, $rB)>; +def : Pat<(int_ppc_altivec_dstt i32:$rA, i32:$rB, imm:$STRM), + (DSTT 1, imm:$STRM, $rA, $rB)>; +def : Pat<(int_ppc_altivec_dstst i32:$rA, i32:$rB, imm:$STRM), + (DSTST 0, imm:$STRM, $rA, $rB)>; +def : Pat<(int_ppc_altivec_dststt i32:$rA, i32:$rB, imm:$STRM), + (DSTSTT 1, imm:$STRM, $rA, $rB)>; // * 64-bit -def : Pat<(int_ppc_altivec_dst G8RC:$rA, GPRC:$rB, imm:$STRM), - (DST64 0, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; -def : Pat<(int_ppc_altivec_dstt G8RC:$rA, GPRC:$rB, imm:$STRM), - (DSTT64 1, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; -def : Pat<(int_ppc_altivec_dstst G8RC:$rA, GPRC:$rB, imm:$STRM), - (DSTST64 0, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; -def : Pat<(int_ppc_altivec_dststt G8RC:$rA, GPRC:$rB, imm:$STRM), - (DSTSTT64 1, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dst i64:$rA, i32:$rB, imm:$STRM), + (DST64 0, imm:$STRM, $rA, $rB)>; +def : Pat<(int_ppc_altivec_dstt i64:$rA, i32:$rB, imm:$STRM), + (DSTT64 1, imm:$STRM, $rA, $rB)>; +def : Pat<(int_ppc_altivec_dstst i64:$rA, i32:$rB, imm:$STRM), + (DSTST64 0, imm:$STRM, $rA, $rB)>; +def : Pat<(int_ppc_altivec_dststt i64:$rA, i32:$rB, imm:$STRM), + (DSTSTT64 1, imm:$STRM, $rA, $rB)>; // Loads. def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>; // Stores. -def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst), - (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>; +def : Pat<(store v4i32:$rS, xoaddr:$dst), + (STVX $rS, xoaddr:$dst)>; // Bit conversions. def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>; @@ -646,96 +730,99 @@ def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>; // Shuffles. 
// Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x) -def:Pat<(vsldoi_unary_shuffle:$in (v16i8 VRRC:$vA), undef), - (VSLDOI VRRC:$vA, VRRC:$vA, (VSLDOI_unary_get_imm VRRC:$in))>; -def:Pat<(vpkuwum_unary_shuffle (v16i8 VRRC:$vA), undef), - (VPKUWUM VRRC:$vA, VRRC:$vA)>; -def:Pat<(vpkuhum_unary_shuffle (v16i8 VRRC:$vA), undef), - (VPKUHUM VRRC:$vA, VRRC:$vA)>; +def:Pat<(vsldoi_unary_shuffle:$in v16i8:$vA, undef), + (VSLDOI $vA, $vA, (VSLDOI_unary_get_imm $in))>; +def:Pat<(vpkuwum_unary_shuffle v16i8:$vA, undef), + (VPKUWUM $vA, $vA)>; +def:Pat<(vpkuhum_unary_shuffle v16i8:$vA, undef), + (VPKUHUM $vA, $vA)>; // Match vmrg*(x,x) -def:Pat<(vmrglb_unary_shuffle (v16i8 VRRC:$vA), undef), - (VMRGLB VRRC:$vA, VRRC:$vA)>; -def:Pat<(vmrglh_unary_shuffle (v16i8 VRRC:$vA), undef), - (VMRGLH VRRC:$vA, VRRC:$vA)>; -def:Pat<(vmrglw_unary_shuffle (v16i8 VRRC:$vA), undef), - (VMRGLW VRRC:$vA, VRRC:$vA)>; -def:Pat<(vmrghb_unary_shuffle (v16i8 VRRC:$vA), undef), - (VMRGHB VRRC:$vA, VRRC:$vA)>; -def:Pat<(vmrghh_unary_shuffle (v16i8 VRRC:$vA), undef), - (VMRGHH VRRC:$vA, VRRC:$vA)>; -def:Pat<(vmrghw_unary_shuffle (v16i8 VRRC:$vA), undef), - (VMRGHW VRRC:$vA, VRRC:$vA)>; +def:Pat<(vmrglb_unary_shuffle v16i8:$vA, undef), + (VMRGLB $vA, $vA)>; +def:Pat<(vmrglh_unary_shuffle v16i8:$vA, undef), + (VMRGLH $vA, $vA)>; +def:Pat<(vmrglw_unary_shuffle v16i8:$vA, undef), + (VMRGLW $vA, $vA)>; +def:Pat<(vmrghb_unary_shuffle v16i8:$vA, undef), + (VMRGHB $vA, $vA)>; +def:Pat<(vmrghh_unary_shuffle v16i8:$vA, undef), + (VMRGHH $vA, $vA)>; +def:Pat<(vmrghw_unary_shuffle v16i8:$vA, undef), + (VMRGHW $vA, $vA)>; // Logical Operations -def : Pat<(v4i32 (vnot_ppc VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>; +def : Pat<(vnot_ppc v4i32:$vA), (VNOR $vA, $vA)>; -def : Pat<(v4i32 (vnot_ppc (or VRRC:$A, VRRC:$B))), - (VNOR VRRC:$A, VRRC:$B)>; -def : Pat<(v4i32 (and VRRC:$A, (vnot_ppc VRRC:$B))), - (VANDC VRRC:$A, VRRC:$B)>; +def : Pat<(vnot_ppc (or v4i32:$A, v4i32:$B)), + (VNOR $A, $B)>; +def : Pat<(and v4i32:$A, (vnot_ppc v4i32:$B)), + (VANDC $A, $B)>; -def : Pat<(fmul VRRC:$vA, VRRC:$vB), - (VMADDFP VRRC:$vA, VRRC:$vB, +def : Pat<(fmul v4f32:$vA, v4f32:$vB), + (VMADDFP $vA, $vB, (v4i32 (VSLW (V_SETALLONES), (V_SETALLONES))))>; // Fused multiply add and multiply sub for packed float. 
These are represented // separately from the real instructions above, for operations that must have // the additional precision, such as Newton-Rhapson (used by divide, sqrt) -def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C), - (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>; -def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C), - (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>; +def : Pat<(PPCvmaddfp v4f32:$A, v4f32:$B, v4f32:$C), + (VMADDFP $A, $B, $C)>; +def : Pat<(PPCvnmsubfp v4f32:$A, v4f32:$B, v4f32:$C), + (VNMSUBFP $A, $B, $C)>; + +def : Pat<(int_ppc_altivec_vmaddfp v4f32:$A, v4f32:$B, v4f32:$C), + (VMADDFP $A, $B, $C)>; +def : Pat<(int_ppc_altivec_vnmsubfp v4f32:$A, v4f32:$B, v4f32:$C), + (VNMSUBFP $A, $B, $C)>; -def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C), - (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>; -def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C), - (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>; +def : Pat<(PPCvperm v16i8:$vA, v16i8:$vB, v16i8:$vC), + (VPERM $vA, $vB, $vC)>; -def : Pat<(PPCvperm (v16i8 VRRC:$vA), VRRC:$vB, VRRC:$vC), - (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC)>; +def : Pat<(PPCfre v4f32:$A), (VREFP $A)>; +def : Pat<(PPCfrsqrte v4f32:$A), (VRSQRTEFP $A)>; // Vector shifts -def : Pat<(v16i8 (shl (v16i8 VRRC:$vA), (v16i8 VRRC:$vB))), - (v16i8 (VSLB VRRC:$vA, VRRC:$vB))>; -def : Pat<(v8i16 (shl (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))), - (v8i16 (VSLH VRRC:$vA, VRRC:$vB))>; -def : Pat<(v4i32 (shl (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))), - (v4i32 (VSLW VRRC:$vA, VRRC:$vB))>; - -def : Pat<(v16i8 (srl (v16i8 VRRC:$vA), (v16i8 VRRC:$vB))), - (v16i8 (VSRB VRRC:$vA, VRRC:$vB))>; -def : Pat<(v8i16 (srl (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))), - (v8i16 (VSRH VRRC:$vA, VRRC:$vB))>; -def : Pat<(v4i32 (srl (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))), - (v4i32 (VSRW VRRC:$vA, VRRC:$vB))>; - -def : Pat<(v16i8 (sra (v16i8 VRRC:$vA), (v16i8 VRRC:$vB))), - (v16i8 (VSRAB VRRC:$vA, VRRC:$vB))>; -def : Pat<(v8i16 (sra (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))), - (v8i16 (VSRAH VRRC:$vA, VRRC:$vB))>; -def : Pat<(v4i32 (sra (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))), - (v4i32 (VSRAW VRRC:$vA, VRRC:$vB))>; +def : Pat<(v16i8 (shl v16i8:$vA, v16i8:$vB)), + (v16i8 (VSLB $vA, $vB))>; +def : Pat<(v8i16 (shl v8i16:$vA, v8i16:$vB)), + (v8i16 (VSLH $vA, $vB))>; +def : Pat<(v4i32 (shl v4i32:$vA, v4i32:$vB)), + (v4i32 (VSLW $vA, $vB))>; + +def : Pat<(v16i8 (srl v16i8:$vA, v16i8:$vB)), + (v16i8 (VSRB $vA, $vB))>; +def : Pat<(v8i16 (srl v8i16:$vA, v8i16:$vB)), + (v8i16 (VSRH $vA, $vB))>; +def : Pat<(v4i32 (srl v4i32:$vA, v4i32:$vB)), + (v4i32 (VSRW $vA, $vB))>; + +def : Pat<(v16i8 (sra v16i8:$vA, v16i8:$vB)), + (v16i8 (VSRAB $vA, $vB))>; +def : Pat<(v8i16 (sra v8i16:$vA, v8i16:$vB)), + (v8i16 (VSRAH $vA, $vB))>; +def : Pat<(v4i32 (sra v4i32:$vA, v4i32:$vB)), + (v4i32 (VSRAW $vA, $vB))>; // Float to integer and integer to float conversions -def : Pat<(v4i32 (fp_to_sint (v4f32 VRRC:$vA))), - (VCTSXS_0 VRRC:$vA)>; -def : Pat<(v4i32 (fp_to_uint (v4f32 VRRC:$vA))), - (VCTUXS_0 VRRC:$vA)>; -def : Pat<(v4f32 (sint_to_fp (v4i32 VRRC:$vA))), - (VCFSX_0 VRRC:$vA)>; -def : Pat<(v4f32 (uint_to_fp (v4i32 VRRC:$vA))), - (VCFUX_0 VRRC:$vA)>; +def : Pat<(v4i32 (fp_to_sint v4f32:$vA)), + (VCTSXS_0 $vA)>; +def : Pat<(v4i32 (fp_to_uint v4f32:$vA)), + (VCTUXS_0 $vA)>; +def : Pat<(v4f32 (sint_to_fp v4i32:$vA)), + (VCFSX_0 $vA)>; +def : Pat<(v4f32 (uint_to_fp v4i32:$vA)), + (VCFUX_0 $vA)>; // Floating-point rounding -def : Pat<(v4f32 (ffloor (v4f32 VRRC:$vA))), - (VRFIM VRRC:$vA)>; -def : Pat<(v4f32 (fceil (v4f32 VRRC:$vA))), - (VRFIP VRRC:$vA)>; -def : 
Pat<(v4f32 (ftrunc (v4f32 VRRC:$vA))), - (VRFIZ VRRC:$vA)>; -def : Pat<(v4f32 (fnearbyint (v4f32 VRRC:$vA))), - (VRFIN VRRC:$vA)>; +def : Pat<(v4f32 (ffloor v4f32:$vA)), + (VRFIM $vA)>; +def : Pat<(v4f32 (fceil v4f32:$vA)), + (VRFIP $vA)>; +def : Pat<(v4f32 (ftrunc v4f32:$vA)), + (VRFIZ $vA)>; +def : Pat<(v4f32 (fnearbyint v4f32:$vA)), + (VRFIN $vA)>; } // end HasAltivec diff --git a/lib/Target/PowerPC/PPCInstrFormats.td b/lib/Target/PowerPC/PPCInstrFormats.td index c3c171c..41b4e01 100644 --- a/lib/Target/PowerPC/PPCInstrFormats.td +++ b/lib/Target/PowerPC/PPCInstrFormats.td @@ -35,6 +35,15 @@ class I<bits<6> opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin> let TSFlags{1} = PPC970_Single; let TSFlags{2} = PPC970_Cracked; let TSFlags{5-3} = PPC970_Unit; + + // Fields used for relation models. + string BaseName = ""; + + // For cases where multiple instruction definitions really represent the + // same underlying instruction but with one definition for 64-bit arguments + // and one for 32-bit arguments, this bit breaks the degeneracy between + // the two forms and allows TableGen to generate mapping tables. + bit Interpretation64Bit = 0; } class PPC970_DGroup_First { bits<1> PPC970_First = 1; } @@ -80,6 +89,10 @@ class I2<bits<6> opcode1, bits<6> opcode2, dag OOL, dag IOL, string asmstr, let TSFlags{1} = PPC970_Single; let TSFlags{2} = PPC970_Cracked; let TSFlags{5-3} = PPC970_Unit; + + // Fields used for relation models. + string BaseName = ""; + bit Interpretation64Bit = 0; } // 1.7.1 I-Form @@ -120,6 +133,18 @@ class BForm_1<bits<6> opcode, bits<5> bo, bit aa, bit lk, dag OOL, dag IOL, let CR = 0; } +class BForm_2<bits<6> opcode, bits<5> bo, bits<5> bi, bit aa, bit lk, + dag OOL, dag IOL, string asmstr> + : I<opcode, OOL, IOL, asmstr, BrB> { + bits<14> BD; + + let Inst{6-10} = bo; + let Inst{11-15} = bi; + let Inst{16-29} = BD; + let Inst{30} = aa; + let Inst{31} = lk; +} + // 1.7.4 D-Form class DForm_base<bits<6> opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list<dag> pattern> @@ -165,7 +190,12 @@ class DForm_1a<bits<6> opcode, dag OOL, dag IOL, string asmstr, class DForm_2<bits<6> opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list<dag> pattern> - : DForm_base<opcode, OOL, IOL, asmstr, itin, pattern>; + : DForm_base<opcode, OOL, IOL, asmstr, itin, pattern> { + + // Even though ADDICo does not really have an RC bit, provide + // the declaration of one here so that isDOT has something to set. + bit RC = 0; +} class DForm_2_r0<bits<6> opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list<dag> pattern> @@ -553,9 +583,9 @@ class XLForm_2_br<bits<6> opcode, bits<10> xo, bit lk, bits<7> BIBO; // 2 bits of BI and 5 bits of BO. bits<3> CR; - let BO = BIBO{2-6}; - let BI{0-1} = BIBO{0-1}; - let BI{2-4} = CR; + let BO = BIBO{4-0}; + let BI{0-1} = BIBO{5-6}; + let BI{2-4} = CR{0-2}; let BH = 0; } @@ -664,14 +694,13 @@ class XFXForm_7_ext<bits<6> opcode, bits<10> xo, bits<10> spr, // This is probably 1.7.9, but I don't have the reference that uses this // numbering scheme... 
class XFLForm<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, - string cstr, InstrItinClass itin, list<dag>pattern> + InstrItinClass itin, list<dag>pattern> : I<opcode, OOL, IOL, asmstr, itin> { bits<8> FM; bits<5> rT; bit RC = 0; // set by isDOT let Pattern = pattern; - let Constraints = cstr; let Inst{6} = 0; let Inst{7-14} = FM; @@ -765,16 +794,14 @@ class AForm_4<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr, bits<5> RT; bits<5> RA; bits<5> RB; - bits<7> BIBO; // 2 bits of BI and 5 bits of BO (must be 12). - bits<3> CR; + bits<5> COND; let Pattern = pattern; let Inst{6-10} = RT; let Inst{11-15} = RA; let Inst{16-20} = RB; - let Inst{21-23} = CR; - let Inst{24-25} = BIBO{6-5}; + let Inst{21-25} = COND; let Inst{26-30} = xo; let Inst{31} = 0; } @@ -828,6 +855,25 @@ class MDForm_1<bits<6> opcode, bits<3> xo, dag OOL, dag IOL, string asmstr, let Inst{31} = RC; } +class MDSForm_1<bits<6> opcode, bits<4> xo, dag OOL, dag IOL, string asmstr, + InstrItinClass itin, list<dag> pattern> + : I<opcode, OOL, IOL, asmstr, itin> { + bits<5> RA; + bits<5> RS; + bits<5> RB; + bits<6> MBE; + + let Pattern = pattern; + + bit RC = 0; // set by isDOT + + let Inst{6-10} = RS; + let Inst{11-15} = RA; + let Inst{16-20} = RB; + let Inst{21-26} = MBE{4,3,2,1,0,5}; + let Inst{27-30} = xo; + let Inst{31} = RC; +} // E-1 VA-Form @@ -987,6 +1033,7 @@ class VXRForm_1<bits<10> xo, dag OOL, dag IOL, string asmstr, //===----------------------------------------------------------------------===// class Pseudo<dag OOL, dag IOL, string asmstr, list<dag> pattern> : I<0, OOL, IOL, asmstr, NoItinerary> { + let isCodeGenOnly = 1; let PPC64 = 0; let Pattern = pattern; let Inst{31-0} = 0; diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp index 7fe7880..847bd22 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -18,8 +18,10 @@ #include "PPCInstrBuilder.h" #include "PPCMachineFunctionInfo.h" #include "PPCTargetMachine.h" +#include "llvm/ADT/Statistic.h" #include "llvm/ADT/STLExtras.h" #include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -30,6 +32,7 @@ #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" +#define GET_INSTRMAP_INFO #define GET_INSTRINFO_CTOR #include "PPCGenInstrInfo.inc" @@ -39,6 +42,9 @@ static cl:: opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden, cl::desc("Disable analysis for CTR loops")); +static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt", +cl::desc("Disable compare instruction optimization"), cl::Hidden); + PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm) : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP), TM(tm), RI(*TM.getSubtargetImpl(), *this) {} @@ -94,12 +100,18 @@ bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI, unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const { + // Note: This list must be kept consistent with LoadRegFromStackSlot. switch (MI->getOpcode()) { default: break; case PPC::LD: case PPC::LWZ: case PPC::LFS: case PPC::LFD: + case PPC::RESTORE_CR: + case PPC::LVX: + case PPC::RESTORE_VRSAVE: + // Check for the operands added by addFrameReference (the immediate is the + // offset which defaults to 0). 
if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() && MI->getOperand(2).isFI()) { FrameIndex = MI->getOperand(2).getIndex(); @@ -112,12 +124,18 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const { + // Note: This list must be kept consistent with StoreRegToStackSlot. switch (MI->getOpcode()) { default: break; case PPC::STD: case PPC::STW: case PPC::STFS: case PPC::STFD: + case PPC::SPILL_CR: + case PPC::STVX: + case PPC::SPILL_VRSAVE: + // Check for the operands added by addFrameReference (the immediate is the + // offset which defaults to 0). if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() && MI->getOperand(2).isFI()) { FrameIndex = MI->getOperand(2).getIndex(); @@ -135,7 +153,8 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { MachineFunction &MF = *MI->getParent()->getParent(); // Normal instructions can be commuted the obvious way. - if (MI->getOpcode() != PPC::RLWIMI) + if (MI->getOpcode() != PPC::RLWIMI && + MI->getOpcode() != PPC::RLWIMIo) return TargetInstrInfo::commuteInstruction(MI, NewMI); // Cannot commute if it has a non-zero rotate count. @@ -405,6 +424,105 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, return 2; } +// Select analysis. +bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, + const SmallVectorImpl<MachineOperand> &Cond, + unsigned TrueReg, unsigned FalseReg, + int &CondCycles, int &TrueCycles, int &FalseCycles) const { + if (!TM.getSubtargetImpl()->hasISEL()) + return false; + + if (Cond.size() != 2) + return false; + + // If this is really a bdnz-like condition, then it cannot be turned into a + // select. + if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8) + return false; + + // Check register classes. + const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); + const TargetRegisterClass *RC = + RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); + if (!RC) + return false; + + // isel is for regular integer GPRs only. + if (!PPC::GPRCRegClass.hasSubClassEq(RC) && + !PPC::G8RCRegClass.hasSubClassEq(RC)) + return false; + + // FIXME: These numbers are for the A2, how well they work for other cores is + // an open question. On the A2, the isel instruction has a 2-cycle latency + // but single-cycle throughput. These numbers are used in combination with + // the MispredictPenalty setting from the active SchedMachineModel. + CondCycles = 1; + TrueCycles = 1; + FalseCycles = 1; + + return true; +} + +void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, DebugLoc dl, + unsigned DestReg, + const SmallVectorImpl<MachineOperand> &Cond, + unsigned TrueReg, unsigned FalseReg) const { + assert(Cond.size() == 2 && + "PPC branch conditions have two components!"); + + assert(TM.getSubtargetImpl()->hasISEL() && + "Cannot insert select on target without ISEL support"); + + // Get the register classes. + MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); + const TargetRegisterClass *RC = + RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); + assert(RC && "TrueReg and FalseReg must have overlapping register classes"); + assert((PPC::GPRCRegClass.hasSubClassEq(RC) || + PPC::G8RCRegClass.hasSubClassEq(RC)) && + "isel is for regular integer GPRs only"); + + unsigned OpCode = + PPC::GPRCRegClass.hasSubClassEq(RC) ? 
PPC::ISEL : PPC::ISEL8; + unsigned SelectPred = Cond[0].getImm(); + + unsigned SubIdx; + bool SwapOps; + switch (SelectPred) { + default: llvm_unreachable("invalid predicate for isel"); + case PPC::PRED_EQ: SubIdx = PPC::sub_eq; SwapOps = false; break; + case PPC::PRED_NE: SubIdx = PPC::sub_eq; SwapOps = true; break; + case PPC::PRED_LT: SubIdx = PPC::sub_lt; SwapOps = false; break; + case PPC::PRED_GE: SubIdx = PPC::sub_lt; SwapOps = true; break; + case PPC::PRED_GT: SubIdx = PPC::sub_gt; SwapOps = false; break; + case PPC::PRED_LE: SubIdx = PPC::sub_gt; SwapOps = true; break; + case PPC::PRED_UN: SubIdx = PPC::sub_un; SwapOps = false; break; + case PPC::PRED_NU: SubIdx = PPC::sub_un; SwapOps = true; break; + } + + unsigned FirstReg = SwapOps ? FalseReg : TrueReg, + SecondReg = SwapOps ? TrueReg : FalseReg; + + // The first input register of isel cannot be r0. If it is a member + // of a register class that can be r0, then copy it first (the + // register allocator should eliminate the copy). + if (MRI.getRegClass(FirstReg)->contains(PPC::R0) || + MRI.getRegClass(FirstReg)->contains(PPC::X0)) { + const TargetRegisterClass *FirstRC = + MRI.getRegClass(FirstReg)->contains(PPC::X0) ? + &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass; + unsigned OldFirstReg = FirstReg; + FirstReg = MRI.createVirtualRegister(FirstRC); + BuildMI(MBB, MI, dl, get(TargetOpcode::COPY), FirstReg) + .addReg(OldFirstReg); + } + + BuildMI(MBB, MI, dl, get(OpCode), DestReg) + .addReg(FirstReg).addReg(SecondReg) + .addReg(Cond[1].getReg(), 0, SubIdx); +} + void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg, unsigned SrcReg, @@ -440,40 +558,21 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF, int FrameIdx, const TargetRegisterClass *RC, SmallVectorImpl<MachineInstr*> &NewMIs, - bool &NonRI) const{ + bool &NonRI, bool &SpillsVRS) const{ + // Note: If additional store instructions are added here, + // update isStoreToStackSlot. + DebugLoc DL; if (PPC::GPRCRegClass.hasSubClassEq(RC)) { - if (SrcReg != PPC::LR) { - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW)) - .addReg(SrcReg, - getKillRegState(isKill)), - FrameIdx)); - } else { - // FIXME: this spills LR immediately to memory in one step. To do this, - // we use R11, which we know cannot be used in the prolog/epilog. This is - // a hack. - NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFLR), PPC::R11)); - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW)) - .addReg(PPC::R11, - getKillRegState(isKill)), - FrameIdx)); - } + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) { - if (SrcReg != PPC::LR8) { - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD)) - .addReg(SrcReg, - getKillRegState(isKill)), - FrameIdx)); - } else { - // FIXME: this spills LR immediately to memory in one step. To do this, - // we use X11, which we know cannot be used in the prolog/epilog. This is - // a hack. 
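The insertSelect hook shown above maps a PPC branch predicate onto a single CR bit plus an optional swap of the true/false inputs before emitting ISEL/ISEL8. As a rough, LLVM-independent sketch of that mapping (the enums below are stand-ins, not the real PPC::Predicate or subregister values):

    #include <cassert>
    #include <utility>

    // Stand-ins for PPC::Predicate and the CR subregister indices.
    enum Pred { PRED_EQ, PRED_NE, PRED_LT, PRED_GE, PRED_GT, PRED_LE, PRED_UN, PRED_NU };
    enum CRBit { sub_lt, sub_gt, sub_eq, sub_un };

    // Which CR bit isel should test, and whether the true/false operands must
    // be swapped. isel selects its first input when the tested bit is set, so
    // the negated predicates are expressed by swapping the operands.
    std::pair<CRBit, bool> iselCondition(Pred P) {
      switch (P) {
      case PRED_EQ: return {sub_eq, false};
      case PRED_NE: return {sub_eq, true};
      case PRED_LT: return {sub_lt, false};
      case PRED_GE: return {sub_lt, true};
      case PRED_GT: return {sub_gt, false};
      case PRED_LE: return {sub_gt, true};
      case PRED_UN: return {sub_un, false};
      case PRED_NU: return {sub_un, true};
      }
      assert(false && "invalid predicate for isel");
      return {sub_eq, false};
    }

    int main() {
      // "r = (a >= b) ? t : f" tests the LT bit and swaps t and f.
      assert(iselCondition(PRED_GE) == std::make_pair(sub_lt, true));
      return 0;
    }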
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFLR8), PPC::X11)); - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD)) - .addReg(PPC::X11, - getKillRegState(isKill)), - FrameIdx)); - } + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) { NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFD)) .addReg(SrcReg, @@ -522,7 +621,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF, Reg = PPC::CR7; return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx, - &PPC::CRRCRegClass, NewMIs, NonRI); + &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS); } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) { NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STVX)) @@ -530,6 +629,14 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF, getKillRegState(isKill)), FrameIdx)); NonRI = true; + } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) { + assert(TM.getSubtargetImpl()->isDarwin() && + "VRSAVE only needs spill/restore on Darwin"); + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_VRSAVE)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); + SpillsVRS = true; } else { llvm_unreachable("Unknown regclass!"); } @@ -549,10 +656,14 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); FuncInfo->setHasSpills(); - bool NonRI = false; - if (StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs, NonRI)) + bool NonRI = false, SpillsVRS = false; + if (StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs, + NonRI, SpillsVRS)) FuncInfo->setSpillsCR(); + if (SpillsVRS) + FuncInfo->setSpillsVRSAVE(); + if (NonRI) FuncInfo->setHasNonRISpills(); @@ -573,25 +684,16 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL, unsigned DestReg, int FrameIdx, const TargetRegisterClass *RC, SmallVectorImpl<MachineInstr*> &NewMIs, - bool &NonRI) const{ + bool &NonRI, bool &SpillsVRS) const{ + // Note: If additional load instructions are added here, + // update isLoadFromStackSlot. 
+ if (PPC::GPRCRegClass.hasSubClassEq(RC)) { - if (DestReg != PPC::LR) { - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ), - DestReg), FrameIdx)); - } else { - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ), - PPC::R11), FrameIdx)); - NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR)).addReg(PPC::R11)); - } + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ), + DestReg), FrameIdx)); } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) { - if (DestReg != PPC::LR8) { - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg), - FrameIdx)); - } else { - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), - PPC::X11), FrameIdx)); - NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR8)).addReg(PPC::X11)); - } + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg), + FrameIdx)); } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) { NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFD), DestReg), FrameIdx)); @@ -632,12 +734,20 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL, Reg = PPC::CR7; return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx, - &PPC::CRRCRegClass, NewMIs, NonRI); + &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS); } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) { NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LVX), DestReg), FrameIdx)); NonRI = true; + } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) { + assert(TM.getSubtargetImpl()->isDarwin() && + "VRSAVE only needs spill/restore on Darwin"); + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, + get(PPC::RESTORE_VRSAVE), + DestReg), + FrameIdx)); + SpillsVRS = true; } else { llvm_unreachable("Unknown regclass!"); } @@ -659,10 +769,14 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); FuncInfo->setHasSpills(); - bool NonRI = false; - if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs, NonRI)) + bool NonRI = false, SpillsVRS = false; + if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs, + NonRI, SpillsVRS)) FuncInfo->setSpillsCR(); + if (SpillsVRS) + FuncInfo->setSpillsVRSAVE(); + if (NonRI) FuncInfo->setHasNonRISpills(); @@ -699,6 +813,562 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { return false; } +bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI, + unsigned Reg, MachineRegisterInfo *MRI) const { + // For some instructions, it is legal to fold ZERO into the RA register field. + // A zero immediate should always be loaded with a single li. + unsigned DefOpc = DefMI->getOpcode(); + if (DefOpc != PPC::LI && DefOpc != PPC::LI8) + return false; + if (!DefMI->getOperand(1).isImm()) + return false; + if (DefMI->getOperand(1).getImm() != 0) + return false; + + // Note that we cannot here invert the arguments of an isel in order to fold + // a ZERO into what is presented as the second argument. All we have here + // is the condition bit, and that might come from a CR-logical bit operation. + + const MCInstrDesc &UseMCID = UseMI->getDesc(); + + // Only fold into real machine instructions. 
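The FoldImmediate logic being set up here only substitutes the architected zero register for a known `li 0` when the use operand's class is one of the no-R0 classes (GPRC_NOR0/G8RC_NOX0, or the corresponding pointer-class kind), since only in those encoding positions does a zero in the RA field read as the constant 0 rather than the contents of r0. A minimal standalone sketch of that class check, using stand-in enums rather than the real register-class IDs:

    #include <cassert>

    // Stand-ins for the operand register-class IDs inspected by FoldImmediate.
    enum OpRegClass { GPRC, G8RC, GPRC_NOR0, G8RC_NOX0 };
    enum ZeroChoice { NoFold, ZERO /*32-bit zero reg*/, ZERO8 /*64-bit zero reg*/ };

    // Decide whether a zero definition feeding this operand may be folded,
    // and if so which zero register to substitute.
    ZeroChoice foldZeroInto(OpRegClass RC) {
      switch (RC) {
      case GPRC_NOR0: return ZERO;   // RA-style 32-bit operand, 0 means "constant 0"
      case G8RC_NOX0: return ZERO8;  // RA-style 64-bit operand
      default:        return NoFold; // r0 would be read as a real register here
      }
    }

    int main() {
      // A base-address style operand (declared with a _NOR0 class) takes the fold;
      assert(foldZeroInto(GPRC_NOR0) == ZERO);
      // a plain GPR operand does not.
      assert(foldZeroInto(GPRC) == NoFold);
      return 0;
    }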
+ if (UseMCID.isPseudo()) + return false; + + unsigned UseIdx; + for (UseIdx = 0; UseIdx < UseMI->getNumOperands(); ++UseIdx) + if (UseMI->getOperand(UseIdx).isReg() && + UseMI->getOperand(UseIdx).getReg() == Reg) + break; + + assert(UseIdx < UseMI->getNumOperands() && "Cannot find Reg in UseMI"); + assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg"); + + const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx]; + + // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0 + // register (which might also be specified as a pointer class kind). + if (UseInfo->isLookupPtrRegClass()) { + if (UseInfo->RegClass /* Kind */ != 1) + return false; + } else { + if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID && + UseInfo->RegClass != PPC::G8RC_NOX0RegClassID) + return false; + } + + // Make sure this is not tied to an output register (or otherwise + // constrained). This is true for ST?UX registers, for example, which + // are tied to their output registers. + if (UseInfo->Constraints != 0) + return false; + + unsigned ZeroReg; + if (UseInfo->isLookupPtrRegClass()) { + bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO; + } else { + ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ? + PPC::ZERO8 : PPC::ZERO; + } + + bool DeleteDef = MRI->hasOneNonDBGUse(Reg); + UseMI->getOperand(UseIdx).setReg(ZeroReg); + + if (DeleteDef) + DefMI->eraseFromParent(); + + return true; +} + +static bool MBBDefinesCTR(MachineBasicBlock &MBB) { + for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end(); + I != IE; ++I) + if (I->definesRegister(PPC::CTR) || I->definesRegister(PPC::CTR8)) + return true; + return false; +} + +// We should make sure that, if we're going to predicate both sides of a +// condition (a diamond), that both sides don't define the counter register. We +// can predicate counter-decrement-based branches, but while that predicates +// the branching, it does not predicate the counter decrement. If we tried to +// merge the triangle into one predicated block, we'd decrement the counter +// twice. +bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB, + unsigned NumT, unsigned ExtraT, + MachineBasicBlock &FMBB, + unsigned NumF, unsigned ExtraF, + const BranchProbability &Probability) const { + return !(MBBDefinesCTR(TMBB) && MBBDefinesCTR(FMBB)); +} + + +bool PPCInstrInfo::isPredicated(const MachineInstr *MI) const { + // The predicated branches are identified by their type, not really by the + // explicit presence of a predicate. Furthermore, some of them can be + // predicated more than once. Because if conversion won't try to predicate + // any instruction which already claims to be predicated (by returning true + // here), always return false. In doing so, we let isPredicable() be the + // final word on whether not the instruction can be (further) predicated. + + return false; +} + +bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const { + if (!MI->isTerminator()) + return false; + + // Conditional branch is a special case. + if (MI->isBranch() && !MI->isBarrier()) + return true; + + return !isPredicated(MI); +} + +bool PPCInstrInfo::PredicateInstruction( + MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const { + unsigned OpC = MI->getOpcode(); + if (OpC == PPC::BLR) { + if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) { + bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + MI->setDesc(get(Pred[0].getImm() ? + (isPPC64 ? 
PPC::BDNZLR8 : PPC::BDNZLR) : + (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR))); + } else { + MI->setDesc(get(PPC::BCLR)); + MachineInstrBuilder(*MI->getParent()->getParent(), MI) + .addImm(Pred[0].getImm()) + .addReg(Pred[1].getReg()); + } + + return true; + } else if (OpC == PPC::B) { + if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) { + bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + MI->setDesc(get(Pred[0].getImm() ? + (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) : + (isPPC64 ? PPC::BDZ8 : PPC::BDZ))); + } else { + MachineBasicBlock *MBB = MI->getOperand(0).getMBB(); + MI->RemoveOperand(0); + + MI->setDesc(get(PPC::BCC)); + MachineInstrBuilder(*MI->getParent()->getParent(), MI) + .addImm(Pred[0].getImm()) + .addReg(Pred[1].getReg()) + .addMBB(MBB); + } + + return true; + } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || + OpC == PPC::BCTRL || OpC == PPC::BCTRL8) { + if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) + llvm_unreachable("Cannot predicate bctr[l] on the ctr register"); + + bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8; + bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) : + (setLR ? PPC::BCCTRL : PPC::BCCTR))); + MachineInstrBuilder(*MI->getParent()->getParent(), MI) + .addImm(Pred[0].getImm()) + .addReg(Pred[1].getReg()); + return true; + } + + return false; +} + +bool PPCInstrInfo::SubsumesPredicate( + const SmallVectorImpl<MachineOperand> &Pred1, + const SmallVectorImpl<MachineOperand> &Pred2) const { + assert(Pred1.size() == 2 && "Invalid PPC first predicate"); + assert(Pred2.size() == 2 && "Invalid PPC second predicate"); + + if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR) + return false; + if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR) + return false; + + PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm(); + PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm(); + + if (P1 == P2) + return true; + + // Does P1 subsume P2, e.g. GE subsumes GT. + if (P1 == PPC::PRED_LE && + (P2 == PPC::PRED_LT || P2 == PPC::PRED_EQ)) + return true; + if (P1 == PPC::PRED_GE && + (P2 == PPC::PRED_GT || P2 == PPC::PRED_EQ)) + return true; + + return false; +} + +bool PPCInstrInfo::DefinesPredicate(MachineInstr *MI, + std::vector<MachineOperand> &Pred) const { + // Note: At the present time, the contents of Pred from this function is + // unused by IfConversion. This implementation follows ARM by pushing the + // CR-defining operand. Because the 'DZ' and 'DNZ' count as types of + // predicate, instructions defining CTR or CTR8 are also included as + // predicate-defining instructions. 
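SubsumesPredicate above accepts only the cases where one condition logically covers the other: LE covers LT and EQ, GE covers GT and EQ, and the CTR-based pseudo-predicates are never reasoned about. The same relation, as a self-contained sketch over a stand-in enum (not the real PPC::Predicate values):

    #include <cassert>

    enum Pred { PRED_EQ, PRED_NE, PRED_LT, PRED_GE, PRED_GT, PRED_LE, PRED_UN, PRED_NU };

    // True if every outcome accepted by P2 is also accepted by P1.
    bool subsumes(Pred P1, Pred P2) {
      if (P1 == P2)
        return true;
      if (P1 == PRED_LE)
        return P2 == PRED_LT || P2 == PRED_EQ;
      if (P1 == PRED_GE)
        return P2 == PRED_GT || P2 == PRED_EQ;
      return false;
    }

    int main() {
      assert(subsumes(PRED_GE, PRED_GT));   // "a >= b" holds whenever "a > b" does
      assert(!subsumes(PRED_GT, PRED_GE));  // but not the other way around
      return 0;
    }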
+ + const TargetRegisterClass *RCs[] = + { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass, + &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass }; + + bool Found = false; + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) { + const TargetRegisterClass *RC = RCs[c]; + if (MO.isReg()) { + if (MO.isDef() && RC->contains(MO.getReg())) { + Pred.push_back(MO); + Found = true; + } + } else if (MO.isRegMask()) { + for (TargetRegisterClass::iterator I = RC->begin(), + IE = RC->end(); I != IE; ++I) + if (MO.clobbersPhysReg(*I)) { + Pred.push_back(MO); + Found = true; + } + } + } + } + + return Found; +} + +bool PPCInstrInfo::isPredicable(MachineInstr *MI) const { + unsigned OpC = MI->getOpcode(); + switch (OpC) { + default: + return false; + case PPC::B: + case PPC::BLR: + case PPC::BCTR: + case PPC::BCTR8: + case PPC::BCTRL: + case PPC::BCTRL8: + return true; + } +} + +bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI, + unsigned &SrcReg, unsigned &SrcReg2, + int &Mask, int &Value) const { + unsigned Opc = MI->getOpcode(); + + switch (Opc) { + default: return false; + case PPC::CMPWI: + case PPC::CMPLWI: + case PPC::CMPDI: + case PPC::CMPLDI: + SrcReg = MI->getOperand(1).getReg(); + SrcReg2 = 0; + Value = MI->getOperand(2).getImm(); + Mask = 0xFFFF; + return true; + case PPC::CMPW: + case PPC::CMPLW: + case PPC::CMPD: + case PPC::CMPLD: + case PPC::FCMPUS: + case PPC::FCMPUD: + SrcReg = MI->getOperand(1).getReg(); + SrcReg2 = MI->getOperand(2).getReg(); + return true; + } +} + +bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr, + unsigned SrcReg, unsigned SrcReg2, + int Mask, int Value, + const MachineRegisterInfo *MRI) const { + if (DisableCmpOpt) + return false; + + int OpC = CmpInstr->getOpcode(); + unsigned CRReg = CmpInstr->getOperand(0).getReg(); + bool isFP = OpC == PPC::FCMPUS || OpC == PPC::FCMPUD; + unsigned CRRecReg = isFP ? PPC::CR1 : PPC::CR0; + + // The record forms set the condition register based on a signed comparison + // with zero (so says the ISA manual). This is not as straightforward as it + // seems, however, because this is always a 64-bit comparison on PPC64, even + // for instructions that are 32-bit in nature (like slw for example). + // So, on PPC32, for unsigned comparisons, we can use the record forms only + // for equality checks (as those don't depend on the sign). On PPC64, + // we are restricted to equality for unsigned 64-bit comparisons and for + // signed 32-bit comparisons the applicability is more restricted. + bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW; + bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW; + bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD; + + // Get the unique definition of SrcReg. + MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); + if (!MI) return false; + int MIOpC = MI->getOpcode(); + + bool equalityOnly = false; + bool noSub = false; + if (isPPC64) { + if (is32BitSignedCompare) { + // We can perform this optimization only if MI is sign-extending. 
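The restriction applied at this point follows from the record forms performing a signed, full-register-width comparison with zero: on PPC64 a 32-bit value whose 64-bit register copy is only zero-extended gives the correct answer for equality tests but not for ordering, which is why the zero-extending defs are accepted only as equalityOnly. A small worked example in plain C++ (independent of LLVM) illustrating the distinction:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t a = -1;

      // What a record form effectively compares on PPC64: the full 64-bit
      // register contents against zero, as a signed value.
      int64_t  sext = (int64_t)a;   // register holds a sign-extended copy
      uint64_t zext = (uint32_t)a;  // register holds a zero-extended copy

      // With a sign-extending def, the signed ordering matches the 32-bit view:
      assert((sext < 0) == (a < 0));

      // With a zero-extending def, ordering is wrong (0xFFFFFFFF looks positive)...
      assert(((int64_t)zext < 0) != (a < 0));
      // ...but equality against zero is still answered correctly, which is why
      // those cases are allowed with equalityOnly set.
      assert((zext == 0) == (a == 0));
      return 0;
    }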
+ if (MIOpC == PPC::SRAW || MIOpC == PPC::SRAWo || + MIOpC == PPC::SRAWI || MIOpC == PPC::SRAWIo || + MIOpC == PPC::EXTSB || MIOpC == PPC::EXTSBo || + MIOpC == PPC::EXTSH || MIOpC == PPC::EXTSHo || + MIOpC == PPC::EXTSW || MIOpC == PPC::EXTSWo) { + noSub = true; + } else + return false; + } else if (is32BitUnsignedCompare) { + // We can perform this optimization, equality only, if MI is + // zero-extending. + if (MIOpC == PPC::CNTLZW || MIOpC == PPC::CNTLZWo || + MIOpC == PPC::SLW || MIOpC == PPC::SLWo || + MIOpC == PPC::SRW || MIOpC == PPC::SRWo) { + noSub = true; + equalityOnly = true; + } else + return false; + } else if (!isFP) + equalityOnly = is64BitUnsignedCompare; + } else if (!isFP) + equalityOnly = is32BitUnsignedCompare; + + if (equalityOnly) { + // We need to check the uses of the condition register in order to reject + // non-equality comparisons. + for (MachineRegisterInfo::use_iterator I = MRI->use_begin(CRReg), + IE = MRI->use_end(); I != IE; ++I) { + MachineInstr *UseMI = &*I; + if (UseMI->getOpcode() == PPC::BCC) { + unsigned Pred = UseMI->getOperand(0).getImm(); + if (Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE) + continue; + + return false; + } else if (UseMI->getOpcode() == PPC::ISEL || + UseMI->getOpcode() == PPC::ISEL8) { + unsigned SubIdx = UseMI->getOperand(3).getSubReg(); + if (SubIdx == PPC::sub_eq) + continue; + + return false; + } else + return false; + } + } + + // Get ready to iterate backward from CmpInstr. + MachineBasicBlock::iterator I = CmpInstr, E = MI, + B = CmpInstr->getParent()->begin(); + + // Scan forward to find the first use of the compare. + for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end(); + I != EL; ++I) { + bool FoundUse = false; + for (MachineRegisterInfo::use_iterator J = MRI->use_begin(CRReg), + JE = MRI->use_end(); J != JE; ++J) + if (&*J == &*I) { + FoundUse = true; + break; + } + + if (FoundUse) + break; + } + + // Early exit if we're at the beginning of the BB. + if (I == B) return false; + + // There are two possible candidates which can be changed to set CR[01]. + // One is MI, the other is a SUB instruction. + // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1). + MachineInstr *Sub = NULL; + if (SrcReg2 != 0) + // MI is not a candidate for CMPrr. + MI = NULL; + // FIXME: Conservatively refuse to convert an instruction which isn't in the + // same BB as the comparison. This is to allow the check below to avoid calls + // (and other explicit clobbers); instead we should really check for these + // more explicitly (in at least a few predecessors). + else if (MI->getParent() != CmpInstr->getParent() || Value != 0) { + // PPC does not have a record-form SUBri. + return false; + } + + // Search for Sub. + const TargetRegisterInfo *TRI = &getRegisterInfo(); + --I; + for (; I != E && !noSub; --I) { + const MachineInstr &Instr = *I; + unsigned IOpC = Instr.getOpcode(); + + if (&*I != CmpInstr && ( + Instr.modifiesRegister(CRRecReg, TRI) || + Instr.readsRegister(CRRecReg, TRI))) + // This instruction modifies or uses the record condition register after + // the one we want to change. While we could do this transformation, it + // would likely not be profitable. This transformation removes one + // instruction, and so even forcing RA to generate one move probably + // makes it unprofitable. + return false; + + // Check whether CmpInstr can be made redundant by the current instruction. 
+ if ((OpC == PPC::CMPW || OpC == PPC::CMPLW || + OpC == PPC::CMPD || OpC == PPC::CMPLD) && + (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) && + ((Instr.getOperand(1).getReg() == SrcReg && + Instr.getOperand(2).getReg() == SrcReg2) || + (Instr.getOperand(1).getReg() == SrcReg2 && + Instr.getOperand(2).getReg() == SrcReg))) { + Sub = &*I; + break; + } + + if (isFP && (IOpC == PPC::FSUB || IOpC == PPC::FSUBS) && + ((Instr.getOperand(1).getReg() == SrcReg && + Instr.getOperand(2).getReg() == SrcReg2) || + (Instr.getOperand(1).getReg() == SrcReg2 && + Instr.getOperand(2).getReg() == SrcReg))) { + Sub = &*I; + break; + } + + if (I == B) + // The 'and' is below the comparison instruction. + return false; + } + + // Return false if no candidates exist. + if (!MI && !Sub) + return false; + + // The single candidate is called MI. + if (!MI) MI = Sub; + + int NewOpC = -1; + MIOpC = MI->getOpcode(); + if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8) + NewOpC = MIOpC; + else { + NewOpC = PPC::getRecordFormOpcode(MIOpC); + if (NewOpC == -1 && PPC::getNonRecordFormOpcode(MIOpC) != -1) + NewOpC = MIOpC; + } + + // FIXME: On the non-embedded POWER architectures, only some of the record + // forms are fast, and we should use only the fast ones. + + // The defining instruction has a record form (or is already a record + // form). It is possible, however, that we'll need to reverse the condition + // code of the users. + if (NewOpC == -1) + return false; + + SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate; + SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate; + + // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP + // needs to be updated to be based on SUB. Push the condition code + // operands to OperandsToUpdate. If it is safe to remove CmpInstr, the + // condition code of these operands will be modified. + bool ShouldSwap = false; + if (Sub) { + ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 && + Sub->getOperand(2).getReg() == SrcReg; + + // The operands to subf are the opposite of sub, so only in the fixed-point + // case, invert the order. + if (!isFP) + ShouldSwap = !ShouldSwap; + } + + if (ShouldSwap) + for (MachineRegisterInfo::use_iterator I = MRI->use_begin(CRReg), + IE = MRI->use_end(); I != IE; ++I) { + MachineInstr *UseMI = &*I; + if (UseMI->getOpcode() == PPC::BCC) { + PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm(); + assert((!equalityOnly || + Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE) && + "Invalid predicate for equality-only optimization"); + PredsToUpdate.push_back(std::make_pair(&((*I).getOperand(0)), + PPC::getSwappedPredicate(Pred))); + } else if (UseMI->getOpcode() == PPC::ISEL || + UseMI->getOpcode() == PPC::ISEL8) { + unsigned NewSubReg = UseMI->getOperand(3).getSubReg(); + assert((!equalityOnly || NewSubReg == PPC::sub_eq) && + "Invalid CR bit for equality-only optimization"); + + if (NewSubReg == PPC::sub_lt) + NewSubReg = PPC::sub_gt; + else if (NewSubReg == PPC::sub_gt) + NewSubReg = PPC::sub_lt; + + SubRegsToUpdate.push_back(std::make_pair(&((*I).getOperand(3)), + NewSubReg)); + } else // We need to abort on a user we don't understand. + return false; + } + + // Create a new virtual register to hold the value of the CR set by the + // record-form instruction. If the instruction was not previously in + // record form, then set the kill flag on the CR. 
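When the compare is made redundant by a subtraction whose operands appear in the opposite order, each user's condition has to be mirrored (and, analogously, the ISEL users swap sub_lt and sub_gt). A standalone sketch of that mirroring, with a stand-in enum rather than PPC::getSwappedPredicate itself:

    #include <cassert>

    enum Pred { PRED_EQ, PRED_NE, PRED_LT, PRED_GE, PRED_GT, PRED_LE, PRED_UN, PRED_NU };

    // A condition on "a <op> b" re-expressed in terms of "b <op'> a": the
    // ordering predicates flip, the symmetric ones (EQ, NE, UN, NU) stay put.
    Pred swapOperands(Pred P) {
      switch (P) {
      case PRED_LT: return PRED_GT;
      case PRED_GT: return PRED_LT;
      case PRED_LE: return PRED_GE;
      case PRED_GE: return PRED_LE;
      default:      return P;
      }
    }

    int main() {
      // A condition taken from cmp(r2, r1) re-expressed over a record-form
      // subtraction computing r1 - r2 must be mirrored: r2 > r1 becomes r1 < r2.
      assert(swapOperands(PRED_GT) == PRED_LT);
      assert(swapOperands(PRED_EQ) == PRED_EQ);
      return 0;
    }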
+ CmpInstr->eraseFromParent(); + + MachineBasicBlock::iterator MII = MI; + BuildMI(*MI->getParent(), llvm::next(MII), MI->getDebugLoc(), + get(TargetOpcode::COPY), CRReg) + .addReg(CRRecReg, MIOpC != NewOpC ? RegState::Kill : 0); + + if (MIOpC != NewOpC) { + // We need to be careful here: we're replacing one instruction with + // another, and we need to make sure that we get all of the right + // implicit uses and defs. On the other hand, the caller may be holding + // an iterator to this instruction, and so we can't delete it (this is + // specifically the case if this is the instruction directly after the + // compare). + + const MCInstrDesc &NewDesc = get(NewOpC); + MI->setDesc(NewDesc); + + if (NewDesc.ImplicitDefs) + for (const uint16_t *ImpDefs = NewDesc.getImplicitDefs(); + *ImpDefs; ++ImpDefs) + if (!MI->definesRegister(*ImpDefs)) + MI->addOperand(*MI->getParent()->getParent(), + MachineOperand::CreateReg(*ImpDefs, true, true)); + if (NewDesc.ImplicitUses) + for (const uint16_t *ImpUses = NewDesc.getImplicitUses(); + *ImpUses; ++ImpUses) + if (!MI->readsRegister(*ImpUses)) + MI->addOperand(*MI->getParent()->getParent(), + MachineOperand::CreateReg(*ImpUses, false, true)); + } + + // Modify the condition code of operands in OperandsToUpdate. + // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to + // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. + for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++) + PredsToUpdate[i].first->setImm(PredsToUpdate[i].second); + + for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++) + SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second); + + return true; +} + /// GetInstSize - Return the number of bytes of code the specified /// instruction may be. This returns the maximum number of bytes. /// @@ -714,10 +1384,159 @@ unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { case PPC::GC_LABEL: case PPC::DBG_VALUE: return 0; - case PPC::BL8_NOP_ELF: - case PPC::BLA8_NOP_ELF: + case PPC::BL8_NOP: + case PPC::BLA8_NOP: return 8; default: return 4; // PowerPC instructions are all 4 bytes } } + +#undef DEBUG_TYPE +#define DEBUG_TYPE "ppc-early-ret" +STATISTIC(NumBCLR, "Number of early conditional returns"); +STATISTIC(NumBLR, "Number of early returns"); + +namespace llvm { + void initializePPCEarlyReturnPass(PassRegistry&); +} + +namespace { + // PPCEarlyReturn pass - For simple functions without epilogue code, move + // returns up, and create conditional returns, to avoid unnecessary + // branch-to-blr sequences. + struct PPCEarlyReturn : public MachineFunctionPass { + static char ID; + PPCEarlyReturn() : MachineFunctionPass(ID) { + initializePPCEarlyReturnPass(*PassRegistry::getPassRegistry()); + } + + const PPCTargetMachine *TM; + const PPCInstrInfo *TII; + +protected: + bool processBlock(MachineBasicBlock &ReturnMBB) { + bool Changed = false; + + MachineBasicBlock::iterator I = ReturnMBB.begin(); + I = ReturnMBB.SkipPHIsAndLabels(I); + + // The block must be essentially empty except for the blr. 
+ if (I == ReturnMBB.end() || I->getOpcode() != PPC::BLR || + I != ReturnMBB.getLastNonDebugInstr()) + return Changed; + + SmallVector<MachineBasicBlock*, 8> PredToRemove; + for (MachineBasicBlock::pred_iterator PI = ReturnMBB.pred_begin(), + PIE = ReturnMBB.pred_end(); PI != PIE; ++PI) { + bool OtherReference = false, BlockChanged = false; + for (MachineBasicBlock::iterator J = (*PI)->getLastNonDebugInstr();;) { + if (J->getOpcode() == PPC::B) { + if (J->getOperand(0).getMBB() == &ReturnMBB) { + // This is an unconditional branch to the return. Replace the + // branch with a blr. + BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BLR)); + MachineBasicBlock::iterator K = J--; + K->eraseFromParent(); + BlockChanged = true; + ++NumBLR; + continue; + } + } else if (J->getOpcode() == PPC::BCC) { + if (J->getOperand(2).getMBB() == &ReturnMBB) { + // This is a conditional branch to the return. Replace the branch + // with a bclr. + BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCLR)) + .addImm(J->getOperand(0).getImm()) + .addReg(J->getOperand(1).getReg()); + MachineBasicBlock::iterator K = J--; + K->eraseFromParent(); + BlockChanged = true; + ++NumBCLR; + continue; + } + } else if (J->isBranch()) { + if (J->isIndirectBranch()) { + if (ReturnMBB.hasAddressTaken()) + OtherReference = true; + } else + for (unsigned i = 0; i < J->getNumOperands(); ++i) + if (J->getOperand(i).isMBB() && + J->getOperand(i).getMBB() == &ReturnMBB) + OtherReference = true; + } else if (!J->isTerminator() && !J->isDebugValue()) + break; + + if (J == (*PI)->begin()) + break; + + --J; + } + + if ((*PI)->canFallThrough() && (*PI)->isLayoutSuccessor(&ReturnMBB)) + OtherReference = true; + + // Predecessors are stored in a vector and can't be removed here. + if (!OtherReference && BlockChanged) { + PredToRemove.push_back(*PI); + } + + if (BlockChanged) + Changed = true; + } + + for (unsigned i = 0, ie = PredToRemove.size(); i != ie; ++i) + PredToRemove[i]->removeSuccessor(&ReturnMBB); + + if (Changed && !ReturnMBB.hasAddressTaken()) { + // We now might be able to merge this blr-only block into its + // by-layout predecessor. + if (ReturnMBB.pred_size() == 1 && + (*ReturnMBB.pred_begin())->isLayoutSuccessor(&ReturnMBB)) { + // Move the blr into the preceding block. + MachineBasicBlock &PrevMBB = **ReturnMBB.pred_begin(); + PrevMBB.splice(PrevMBB.end(), &ReturnMBB, I); + PrevMBB.removeSuccessor(&ReturnMBB); + } + + if (ReturnMBB.pred_empty()) + ReturnMBB.eraseFromParent(); + } + + return Changed; + } + +public: + virtual bool runOnMachineFunction(MachineFunction &MF) { + TM = static_cast<const PPCTargetMachine *>(&MF.getTarget()); + TII = TM->getInstrInfo(); + + bool Changed = false; + + // If the function does not have at least two blocks, then there is + // nothing to do. 
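The processBlock routine above rewrites branches whose target block consists of nothing but a blr into early returns in the predecessor: an unconditional B becomes BLR, a conditional BCC becomes BCLR with the same condition. As a toy, LLVM-free illustration of that rewrite over a flat instruction list (the struct and helper names below are made up for this sketch; only the opcode names mirror the PPC ones):

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy instruction: an opcode plus, for branches, the label they target.
    struct Inst { std::string Op; std::string Target; };

    // If RetLabel names a block that only returns (a lone BLR), turn branches
    // to it into early returns: B -> BLR, BCC -> BCLR. The condition operands
    // carried by BCC/BCLR are omitted in this toy model.
    void earlyReturn(std::vector<Inst> &Code, const std::string &RetLabel) {
      for (Inst &I : Code) {
        if (I.Target != RetLabel)
          continue;
        if (I.Op == "B")
          I = {"BLR", ""};
        else if (I.Op == "BCC")
          I = {"BCLR", ""};
      }
    }

    int main() {
      std::vector<Inst> Code = {{"ADD", ""}, {"BCC", "ret"}, {"MUL", ""}, {"B", "ret"}};
      earlyReturn(Code, "ret");
      assert(Code[1].Op == "BCLR" && Code[3].Op == "BLR");
      return 0;
    }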
+ if (MF.size() < 2) + return Changed; + + for (MachineFunction::iterator I = MF.begin(); I != MF.end();) { + MachineBasicBlock &B = *I++; + if (processBlock(B)) + Changed = true; + } + + return Changed; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + MachineFunctionPass::getAnalysisUsage(AU); + } + }; +} + +INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE, + "PowerPC Early-Return Creation", false, false) + +char PPCEarlyReturn::ID = 0; +FunctionPass* +llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); } + diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h index 5d4ae91..34a1a73 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.h +++ b/lib/Target/PowerPC/PPCInstrInfo.h @@ -72,12 +72,12 @@ class PPCInstrInfo : public PPCGenInstrInfo { unsigned SrcReg, bool isKill, int FrameIdx, const TargetRegisterClass *RC, SmallVectorImpl<MachineInstr*> &NewMIs, - bool &NonRI) const; + bool &NonRI, bool &SpillsVRS) const; bool LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL, unsigned DestReg, int FrameIdx, const TargetRegisterClass *RC, SmallVectorImpl<MachineInstr*> &NewMIs, - bool &NonRI) const; + bool &NonRI, bool &SpillsVRS) const; public: explicit PPCInstrInfo(PPCTargetMachine &TM); @@ -120,6 +120,17 @@ public: MachineBasicBlock *FBB, const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const; + + // Select analysis. + virtual bool canInsertSelect(const MachineBasicBlock&, + const SmallVectorImpl<MachineOperand> &Cond, + unsigned, unsigned, int&, int&, int&) const; + virtual void insertSelect(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, DebugLoc DL, + unsigned DstReg, + const SmallVectorImpl<MachineOperand> &Cond, + unsigned TrueReg, unsigned FalseReg) const; + virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg, unsigned SrcReg, @@ -146,6 +157,66 @@ public: virtual bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const; + virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI, + unsigned Reg, MachineRegisterInfo *MRI) const; + + // If conversion by predication (only supported by some branch instructions). + // All of the profitability checks always return true; it is always + // profitable to use the predicated branches. + virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, + unsigned NumCycles, unsigned ExtraPredCycles, + const BranchProbability &Probability) const { + return true; + } + + virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, + unsigned NumT, unsigned ExtraT, + MachineBasicBlock &FMBB, + unsigned NumF, unsigned ExtraF, + const BranchProbability &Probability) const; + + virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, + unsigned NumCycles, + const BranchProbability + &Probability) const { + return true; + } + + virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, + MachineBasicBlock &FMBB) const { + return false; + } + + // Predication support. 
+ bool isPredicated(const MachineInstr *MI) const; + + virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const; + + virtual + bool PredicateInstruction(MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const; + + virtual + bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, + const SmallVectorImpl<MachineOperand> &Pred2) const; + + virtual bool DefinesPredicate(MachineInstr *MI, + std::vector<MachineOperand> &Pred) const; + + virtual bool isPredicable(MachineInstr *MI) const; + + // Comparison optimization. + + + virtual bool analyzeCompare(const MachineInstr *MI, + unsigned &SrcReg, unsigned &SrcReg2, + int &Mask, int &Value) const; + + virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, + unsigned SrcReg, unsigned SrcReg2, + int Mask, int Value, + const MachineRegisterInfo *MRI) const; + /// GetInstSize - Return the number of bytes of code the specified /// instruction may be. This returns the maximum number of bytes. /// diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index 3f181aa..7d3540e 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -20,6 +20,10 @@ include "PPCInstrFormats.td" def SDT_PPCstfiwx : SDTypeProfile<0, 2, [ // stfiwx SDTCisVT<0, f64>, SDTCisPtrTy<1> ]>; +def SDT_PPClfiwx : SDTypeProfile<1, 1, [ // lfiw[az]x + SDTCisVT<0, f64>, SDTCisPtrTy<1> +]>; + def SDT_PPCCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>; def SDT_PPCCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, SDTCisVT<1, i32> ]>; @@ -36,10 +40,10 @@ def SDT_PPCcondbr : SDTypeProfile<0, 3, [ ]>; def SDT_PPClbrx : SDTypeProfile<1, 2, [ - SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT> + SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT> ]>; def SDT_PPCstbrx : SDTypeProfile<0, 3, [ - SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT> + SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT> ]>; def SDT_PPClarx : SDTypeProfile<1, 1, [ @@ -53,32 +57,36 @@ def SDT_PPCTC_ret : SDTypeProfile<0, 2, [ SDTCisPtrTy<0>, SDTCisVT<1, i32> ]>; -def SDT_PPCnop : SDTypeProfile<0, 0, []>; //===----------------------------------------------------------------------===// // PowerPC specific DAG Nodes. // -def PPCfcfid : SDNode<"PPCISD::FCFID" , SDTFPUnaryOp, []>; +def PPCfre : SDNode<"PPCISD::FRE", SDTFPUnaryOp, []>; +def PPCfrsqrte: SDNode<"PPCISD::FRSQRTE", SDTFPUnaryOp, []>; + +def PPCfcfid : SDNode<"PPCISD::FCFID", SDTFPUnaryOp, []>; +def PPCfcfidu : SDNode<"PPCISD::FCFIDU", SDTFPUnaryOp, []>; +def PPCfcfids : SDNode<"PPCISD::FCFIDS", SDTFPRoundOp, []>; +def PPCfcfidus: SDNode<"PPCISD::FCFIDUS", SDTFPRoundOp, []>; def PPCfctidz : SDNode<"PPCISD::FCTIDZ", SDTFPUnaryOp, []>; def PPCfctiwz : SDNode<"PPCISD::FCTIWZ", SDTFPUnaryOp, []>; +def PPCfctiduz: SDNode<"PPCISD::FCTIDUZ",SDTFPUnaryOp, []>; +def PPCfctiwuz: SDNode<"PPCISD::FCTIWUZ",SDTFPUnaryOp, []>; def PPCstfiwx : SDNode<"PPCISD::STFIWX", SDT_PPCstfiwx, [SDNPHasChain, SDNPMayStore]>; +def PPClfiwax : SDNode<"PPCISD::LFIWAX", SDT_PPClfiwx, + [SDNPHasChain, SDNPMayLoad]>; +def PPClfiwzx : SDNode<"PPCISD::LFIWZX", SDT_PPClfiwx, + [SDNPHasChain, SDNPMayLoad]>; + +// Extract FPSCR (not modeled at the DAG level). +def PPCmffs : SDNode<"PPCISD::MFFS", + SDTypeProfile<1, 0, [SDTCisVT<0, f64>]>, []>; + +// Perform FADD in round-to-zero mode. +def PPCfaddrtz: SDNode<"PPCISD::FADDRTZ", SDTFPBinOp, []>; -// This sequence is used for long double->int conversions. It changes the -// bits in the FPSCR which is not modelled. 
-def PPCmffs : SDNode<"PPCISD::MFFS", SDTypeProfile<1, 0, [SDTCisVT<0, f64>]>, - [SDNPOutGlue]>; -def PPCmtfsb0 : SDNode<"PPCISD::MTFSB0", SDTypeProfile<0, 1, [SDTCisInt<0>]>, - [SDNPInGlue, SDNPOutGlue]>; -def PPCmtfsb1 : SDNode<"PPCISD::MTFSB1", SDTypeProfile<0, 1, [SDTCisInt<0>]>, - [SDNPInGlue, SDNPOutGlue]>; -def PPCfaddrtz: SDNode<"PPCISD::FADDRTZ", SDTFPBinOp, - [SDNPInGlue, SDNPOutGlue]>; -def PPCmtfsf : SDNode<"PPCISD::MTFSF", SDTypeProfile<1, 3, - [SDTCisVT<0, f64>, SDTCisInt<1>, SDTCisVT<2, f64>, - SDTCisVT<3, f64>]>, - [SDNPInGlue]>; def PPCfsel : SDNode<"PPCISD::FSEL", // Type constraint for fsel. @@ -113,10 +121,6 @@ def PPCsrl : SDNode<"PPCISD::SRL" , SDTIntShiftOp>; def PPCsra : SDNode<"PPCISD::SRA" , SDTIntShiftOp>; def PPCshl : SDNode<"PPCISD::SHL" , SDTIntShiftOp>; -def PPCextsw_32 : SDNode<"PPCISD::EXTSW_32" , SDTIntUnaryOp>; -def PPCstd_32 : SDNode<"PPCISD::STD_32" , SDTStore, - [SDNPHasChain, SDNPMayStore]>; - // These are target-independent nodes, but have target-specific formats. def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_PPCCallSeqStart, [SDNPHasChain, SDNPOutGlue]>; @@ -124,16 +128,12 @@ def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_PPCCallSeqEnd, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; def SDT_PPCCall : SDTypeProfile<0, -1, [SDTCisInt<0>]>; -def PPCcall_Darwin : SDNode<"PPCISD::CALL_Darwin", SDT_PPCCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def PPCcall_SVR4 : SDNode<"PPCISD::CALL_SVR4", SDT_PPCCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def PPCcall_nop_SVR4 : SDNode<"PPCISD::CALL_NOP_SVR4", SDT_PPCCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInGlue, SDNPOutGlue]>; +def PPCcall : SDNode<"PPCISD::CALL", SDT_PPCCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; +def PPCcall_nop : SDNode<"PPCISD::CALL_NOP", SDT_PPCCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>, @@ -144,13 +144,9 @@ def PPCtoc_restore : SDNode<"PPCISD::TOC_RESTORE", SDTypeProfile<0, 0, []>, SDNPInGlue, SDNPOutGlue]>; def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; -def PPCbctrl_Darwin : SDNode<"PPCISD::BCTRL_Darwin", SDTNone, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; - -def PPCbctrl_SVR4 : SDNode<"PPCISD::BCTRL_SVR4", SDTNone, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; +def PPCbctrl : SDNode<"PPCISD::BCTRL", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone, [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; @@ -158,6 +154,14 @@ def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone, def PPCtc_return : SDNode<"PPCISD::TC_RETURN", SDT_PPCTC_ret, [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def PPCeh_sjlj_setjmp : SDNode<"PPCISD::EH_SJLJ_SETJMP", + SDTypeProfile<1, 1, [SDTCisInt<0>, + SDTCisPtrTy<1>]>, + [SDNPHasChain, SDNPSideEffect]>; +def PPCeh_sjlj_longjmp : SDNode<"PPCISD::EH_SJLJ_LONGJMP", + SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>, + [SDNPHasChain, SDNPSideEffect]>; + def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>; def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutGlue]>; @@ -315,10 +319,7 @@ def unaligned4sextloadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{ // 
PowerPC Flag Definitions. class isPPC64 { bit PPC64 = 1; } -class isDOT { - list<Register> Defs = [CR0]; - bit RC = 1; -} +class isDOT { bit RC = 1; } class RegConstraint<string C> { string Constraints = C; @@ -331,6 +332,26 @@ class NoEncode<string E> { //===----------------------------------------------------------------------===// // PowerPC Operand Definitions. +// In the default PowerPC assembler syntax, registers are specified simply +// by number, so they cannot be distinguished from immediate values (without +// looking at the opcode). This means that the default operand matching logic +// for the asm parser does not work, and we need to specify custom matchers. +// Since those can only be specified with RegisterOperand classes and not +// directly on the RegisterClass, all instructions patterns used by the asm +// parser need to use a RegisterOperand (instead of a RegisterClass) for +// all their register operands. +// For this purpose, we define one RegisterOperand for each RegisterClass, +// using the same name as the class, just in lower case. +def gprc : RegisterOperand<GPRC>; +def g8rc : RegisterOperand<G8RC>; +def gprc_nor0 : RegisterOperand<GPRC_NOR0>; +def g8rc_nox0 : RegisterOperand<G8RC_NOX0>; +def f8rc : RegisterOperand<F8RC>; +def f4rc : RegisterOperand<F4RC>; +def vrrc : RegisterOperand<VRRC>; +def crbitrc : RegisterOperand<CRBITRC>; +def crrc : RegisterOperand<CRRC>; + def s5imm : Operand<i32> { let PrintMethod = "printS5ImmOperand"; } @@ -346,9 +367,6 @@ def s16imm : Operand<i32> { def u16imm : Operand<i32> { let PrintMethod = "printU16ImmOperand"; } -def s16immX4 : Operand<i32> { // Multiply imm by 4 before printing. - let PrintMethod = "printS16X4ImmOperand"; -} def directbrtarget : Operand<OtherVT> { let PrintMethod = "printBranchOperand"; let EncoderMethod = "getDirectBrEncoding"; @@ -376,26 +394,37 @@ def crbitm: Operand<i8> { let EncoderMethod = "get_crbitm_encoding"; } // Address operands +// A version of ptr_rc which excludes R0 (or X0 in 64-bit mode). +def ptr_rc_nor0 : PointerLikeRegClass<1>; + +def dispRI : Operand<iPTR>; +def dispRIX : Operand<iPTR>; + def memri : Operand<iPTR> { let PrintMethod = "printMemRegImm"; - let MIOperandInfo = (ops symbolLo:$imm, ptr_rc:$reg); + let MIOperandInfo = (ops dispRI:$imm, ptr_rc_nor0:$reg); let EncoderMethod = "getMemRIEncoding"; } def memrr : Operand<iPTR> { let PrintMethod = "printMemRegReg"; - let MIOperandInfo = (ops ptr_rc:$offreg, ptr_rc:$ptrreg); + let MIOperandInfo = (ops ptr_rc_nor0:$ptrreg, ptr_rc:$offreg); } def memrix : Operand<iPTR> { // memri where the imm is shifted 2 bits. let PrintMethod = "printMemRegImmShifted"; - let MIOperandInfo = (ops symbolLo:$imm, ptr_rc:$reg); + let MIOperandInfo = (ops dispRIX:$imm, ptr_rc_nor0:$reg); let EncoderMethod = "getMemRIXEncoding"; } -// PowerPC Predicate operand. 20 = (0<<5)|20 = always, CR0 is a dummy reg -// that doesn't matter. -def pred : PredicateOperand<OtherVT, (ops imm, CRRC), - (ops (i32 20), (i32 zero_reg))> { +// A single-register address. This is used with the SjLj +// pseudo-instructions. +def memr : Operand<iPTR> { + let MIOperandInfo = (ops ptr_rc:$ptrreg); +} + +// PowerPC Predicate operand. +def pred : Operand<OtherVT> { let PrintMethod = "printPredicateOperand"; + let MIOperandInfo = (ops i32imm:$bibo, crrc:$reg); } // Define PowerPC specific addressing mode. 
@@ -404,9 +433,12 @@ def xaddr : ComplexPattern<iPTR, 2, "SelectAddrIdx", [], []>; def xoaddr : ComplexPattern<iPTR, 2, "SelectAddrIdxOnly",[], []>; def ixaddr : ComplexPattern<iPTR, 2, "SelectAddrImmShift", [], []>; // "std" +// The address in a single register. This is used with the SjLj +// pseudo-instructions. +def addr : ComplexPattern<iPTR, 1, "SelectAddr",[], []>; + /// This is just the offset part of iaddr, used for preinc. def iaddroff : ComplexPattern<iPTR, 1, "SelectAddrImmOffs", [], []>; -def xaddroff : ComplexPattern<iPTR, 1, "SelectAddrIdxOffs", [], []>; //===----------------------------------------------------------------------===// // PowerPC Instruction Predicate Definitions. @@ -415,6 +447,252 @@ def In64BitMode : Predicate<"PPCSubTarget.isPPC64()">; def IsBookE : Predicate<"PPCSubTarget.isBookE()">; //===----------------------------------------------------------------------===// +// PowerPC Multiclass Definitions. + +multiclass XForm_6r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : XForm_6<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : XForm_6<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XForm_6rc<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + let Defs = [CARRY] in + def NAME : XForm_6<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CARRY, CR0] in + def o : XForm_6<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XForm_10r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : XForm_10<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : XForm_10<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XForm_10rc<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + let Defs = [CARRY] in + def NAME : XForm_10<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CARRY, CR0] in + def o : XForm_10<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XForm_11r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : XForm_11<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : XForm_11<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". 
", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XOForm_1r<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : XOForm_1<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : XOForm_1<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XOForm_1rc<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + let Defs = [CARRY] in + def NAME : XOForm_1<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CARRY, CR0] in + def o : XOForm_1<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XOForm_3r<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : XOForm_3<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : XOForm_3<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XOForm_3rc<bits<6> opcode, bits<9> xo, bit oe, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + let Defs = [CARRY] in + def NAME : XOForm_3<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CARRY, CR0] in + def o : XOForm_3<opcode, xo, oe, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass MForm_2r<bits<6> opcode, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : MForm_2<opcode, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : MForm_2<opcode, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass MDForm_1r<bits<6> opcode, bits<3> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : MDForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : MDForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass MDSForm_1r<bits<6> opcode, bits<4> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : MDSForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR0] in + def o : MDSForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". 
", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XSForm_1rc<bits<6> opcode, bits<9> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + let Defs = [CARRY] in + def NAME : XSForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CARRY, CR0] in + def o : XSForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass XForm_26r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : XForm_26<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR1] in + def o : XForm_26<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass AForm_1r<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : AForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR1] in + def o : AForm_1<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass AForm_2r<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : AForm_2<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR1] in + def o : AForm_2<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +multiclass AForm_3r<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, + string asmbase, string asmstr, InstrItinClass itin, + list<dag> pattern> { + let BaseName = asmbase in { + def NAME : AForm_3<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(" ", asmstr)), itin, + pattern>, RecFormRel; + let Defs = [CR1] in + def o : AForm_3<opcode, xo, OOL, IOL, + !strconcat(asmbase, !strconcat(". ", asmstr)), itin, + []>, isDOT, RecFormRel; + } +} + +//===----------------------------------------------------------------------===// // PowerPC Instruction Definitions. // Pseudo-instructions: @@ -427,32 +705,37 @@ def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2), "#ADJCAL [(callseq_end timm:$amt1, timm:$amt2)]>; } -def UPDATE_VRSAVE : Pseudo<(outs GPRC:$rD), (ins GPRC:$rS), +def UPDATE_VRSAVE : Pseudo<(outs gprc:$rD), (ins gprc:$rS), "UPDATE_VRSAVE $rD, $rS", []>; } let Defs = [R1], Uses = [R1] in -def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "#DYNALLOC", - [(set GPRC:$result, - (PPCdynalloc GPRC:$negsize, iaddr:$fpsi))]>; +def DYNALLOC : Pseudo<(outs gprc:$result), (ins gprc:$negsize, memri:$fpsi), "#DYNALLOC", + [(set i32:$result, + (PPCdynalloc i32:$negsize, iaddr:$fpsi))]>; // SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after // instruction selection into a branch sequence. let usesCustomInserter = 1, // Expanded after instruction selection. 
PPC970_Single = 1 in { - def SELECT_CC_I4 : Pseudo<(outs GPRC:$dst), (ins CRRC:$cond, GPRC:$T, GPRC:$F, + // Note that SELECT_CC_I4 and SELECT_CC_I8 use the no-r0 register classes + // because either operand might become the first operand in an isel, and + // that operand cannot be r0. + def SELECT_CC_I4 : Pseudo<(outs gprc:$dst), (ins crrc:$cond, + gprc_nor0:$T, gprc_nor0:$F, i32imm:$BROPC), "#SELECT_CC_I4", []>; - def SELECT_CC_I8 : Pseudo<(outs G8RC:$dst), (ins CRRC:$cond, G8RC:$T, G8RC:$F, + def SELECT_CC_I8 : Pseudo<(outs g8rc:$dst), (ins crrc:$cond, + g8rc_nox0:$T, g8rc_nox0:$F, i32imm:$BROPC), "#SELECT_CC_I8", []>; - def SELECT_CC_F4 : Pseudo<(outs F4RC:$dst), (ins CRRC:$cond, F4RC:$T, F4RC:$F, + def SELECT_CC_F4 : Pseudo<(outs f4rc:$dst), (ins crrc:$cond, f4rc:$T, f4rc:$F, i32imm:$BROPC), "#SELECT_CC_F4", []>; - def SELECT_CC_F8 : Pseudo<(outs F8RC:$dst), (ins CRRC:$cond, F8RC:$T, F8RC:$F, + def SELECT_CC_F8 : Pseudo<(outs f8rc:$dst), (ins crrc:$cond, f8rc:$T, f8rc:$F, i32imm:$BROPC), "#SELECT_CC_F8", []>; - def SELECT_CC_VRRC: Pseudo<(outs VRRC:$dst), (ins CRRC:$cond, VRRC:$T, VRRC:$F, + def SELECT_CC_VRRC: Pseudo<(outs vrrc:$dst), (ins crrc:$cond, vrrc:$T, vrrc:$F, i32imm:$BROPC), "#SELECT_CC_VRRC", []>; } @@ -460,22 +743,26 @@ let usesCustomInserter = 1, // Expanded after instruction selection. // SPILL_CR - Indicate that we're dumping the CR register, so we'll need to // scavenge a register for it. let mayStore = 1 in -def SPILL_CR : Pseudo<(outs), (ins CRRC:$cond, memri:$F), +def SPILL_CR : Pseudo<(outs), (ins crrc:$cond, memri:$F), "#SPILL_CR", []>; // RESTORE_CR - Indicate that we're restoring the CR register (previously // spilled), so we'll need to scavenge a register for it. let mayLoad = 1 in -def RESTORE_CR : Pseudo<(outs CRRC:$cond), (ins memri:$F), +def RESTORE_CR : Pseudo<(outs crrc:$cond), (ins memri:$F), "#RESTORE_CR", []>; let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in { - let isCodeGenOnly = 1, isReturn = 1, Uses = [LR, RM] in - def BLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$p), - "b${p:cc}lr ${p:reg}", BrB, - [(retflag)]>; - let isBranch = 1, isIndirectBranch = 1, Uses = [CTR] in + let isReturn = 1, Uses = [LR, RM] in + def BLR : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", BrB, + [(retflag)]>; + let isBranch = 1, isIndirectBranch = 1, Uses = [CTR] in { def BCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>; + + let isCodeGenOnly = 1 in + def BCCTR : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond), + "b${cond:cc}ctr ${cond:reg}", BrB, []>; + } } let Defs = [LR] in @@ -492,10 +779,21 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { // BCC represents an arbitrary conditional branch on a predicate. // FIXME: should be able to write a pattern for PPCcondbranch, but can't use // a two-value operand where a dag node expects two operands. 
:( - let isCodeGenOnly = 1 in + let isCodeGenOnly = 1 in { def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst), "b${cond:cc} ${cond:reg}, $dst" - /*[(PPCcondbranch CRRC:$crS, imm:$opc, bb:$dst)]*/>; + /*[(PPCcondbranch crrc:$crS, imm:$opc, bb:$dst)]*/>; + let isReturn = 1, Uses = [LR, RM] in + def BCLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$cond), + "b${cond:cc}lr ${cond:reg}", BrB, []>; + + let isReturn = 1, Defs = [CTR], Uses = [CTR, LR, RM] in { + def BDZLR : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins), + "bdzlr", BrB, []>; + def BDNZLR : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins), + "bdnzlr", BrB, []>; + } + } let Defs = [CTR], Uses = [CTR] in { def BDZ : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst), @@ -505,46 +803,33 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { } } -// Darwin ABI Calls. -let isCall = 1, PPC970_Unit = 7, Defs = [LR] in { - // Convenient aliases for call instructions - let Uses = [RM] in { - def BL_Darwin : IForm<18, 0, 1, - (outs), (ins calltarget:$func), - "bl $func", BrB, []>; // See Pat patterns below. - def BLA_Darwin : IForm<18, 1, 1, - (outs), (ins aaddr:$func), - "bla $func", BrB, [(PPCcall_Darwin (i32 imm:$func))]>; - } - let Uses = [CTR, RM] in { - def BCTRL_Darwin : XLForm_2_ext<19, 528, 20, 0, 1, - (outs), (ins), - "bctrl", BrB, - [(PPCbctrl_Darwin)]>, Requires<[In32BitMode]>; +// The unconditional BCL used by the SjLj setjmp code. +let isCall = 1, hasCtrlDep = 1, isCodeGenOnly = 1, PPC970_Unit = 7 in { + let Defs = [LR], Uses = [RM] in { + def BCLalways : BForm_2<16, 20, 31, 0, 1, (outs), (ins condbrtarget:$dst), + "bcl 20, 31, $dst">; } } -// SVR4 ABI Calls. let isCall = 1, PPC970_Unit = 7, Defs = [LR] in { // Convenient aliases for call instructions let Uses = [RM] in { - def BL_SVR4 : IForm<18, 0, 1, - (outs), (ins calltarget:$func), - "bl $func", BrB, []>; // See Pat patterns below. - def BLA_SVR4 : IForm<18, 1, 1, - (outs), (ins aaddr:$func), - "bla $func", BrB, - [(PPCcall_SVR4 (i32 imm:$func))]>; + def BL : IForm<18, 0, 1, (outs), (ins calltarget:$func), + "bl $func", BrB, []>; // See Pat patterns below. 
+ def BLA : IForm<18, 1, 1, (outs), (ins aaddr:$func), + "bla $func", BrB, [(PPCcall (i32 imm:$func))]>; } let Uses = [CTR, RM] in { - def BCTRL_SVR4 : XLForm_2_ext<19, 528, 20, 0, 1, - (outs), (ins), - "bctrl", BrB, - [(PPCbctrl_SVR4)]>, Requires<[In32BitMode]>; + def BCTRL : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), + "bctrl", BrB, [(PPCbctrl)]>, + Requires<[In32BitMode]>; + + let isCodeGenOnly = 1 in + def BCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond), + "b${cond:cc}ctrl ${cond:reg}", BrB, []>; } } - let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in def TCRETURNdi :Pseudo< (outs), (ins calltarget:$dst, i32imm:$offset), @@ -563,6 +848,8 @@ def TCRETURNri : Pseudo<(outs), (ins CTRRC:$dst, i32imm:$offset), []>; +let isCodeGenOnly = 1 in { + let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1, isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR, RM] in def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>, @@ -576,6 +863,7 @@ def TAILB : IForm<18, 0, 0, (outs), (ins calltarget:$dst), "b $dst", BrB, []>; +} let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in @@ -583,6 +871,22 @@ def TAILBA : IForm<18, 0, 0, (outs), (ins aaddr:$dst), "ba $dst", BrB, []>; +let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in { + def EH_SjLj_SetJmp32 : Pseudo<(outs gprc:$dst), (ins memr:$buf), + "#EH_SJLJ_SETJMP32", + [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, + Requires<[In32BitMode]>; + let isTerminator = 1 in + def EH_SjLj_LongJmp32 : Pseudo<(outs), (ins memr:$buf), + "#EH_SJLJ_LONGJMP32", + [(PPCeh_sjlj_longjmp addr:$buf)]>, + Requires<[In32BitMode]>; +} + +let isBranch = 1, isTerminator = 1 in { + def EH_SjLj_Setup : Pseudo<(outs), (ins directbrtarget:$dst), + "#EH_SjLj_Setup\t$dst", []>; +} // DCB* instructions. 
def DCBA : DCB_Form<758, 0, (outs), (ins memrr:$dst), @@ -617,94 +921,91 @@ def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 1)), let usesCustomInserter = 1 in { let Defs = [CR0] in { def ATOMIC_LOAD_ADD_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I8", - [(set GPRC:$dst, (atomic_load_add_8 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I8", + [(set i32:$dst, (atomic_load_add_8 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_SUB_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I8", - [(set GPRC:$dst, (atomic_load_sub_8 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_SUB_I8", + [(set i32:$dst, (atomic_load_sub_8 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_AND_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I8", - [(set GPRC:$dst, (atomic_load_and_8 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_AND_I8", + [(set i32:$dst, (atomic_load_and_8 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_OR_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I8", - [(set GPRC:$dst, (atomic_load_or_8 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_OR_I8", + [(set i32:$dst, (atomic_load_or_8 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_XOR_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "ATOMIC_LOAD_XOR_I8", - [(set GPRC:$dst, (atomic_load_xor_8 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "ATOMIC_LOAD_XOR_I8", + [(set i32:$dst, (atomic_load_xor_8 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_NAND_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I8", - [(set GPRC:$dst, (atomic_load_nand_8 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I8", + [(set i32:$dst, (atomic_load_nand_8 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_ADD_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I16", - [(set GPRC:$dst, (atomic_load_add_16 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I16", + [(set i32:$dst, (atomic_load_add_16 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_SUB_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I16", - [(set GPRC:$dst, (atomic_load_sub_16 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_SUB_I16", + [(set i32:$dst, (atomic_load_sub_16 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_AND_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I16", - [(set GPRC:$dst, (atomic_load_and_16 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_AND_I16", + [(set i32:$dst, (atomic_load_and_16 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_OR_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I16", - [(set GPRC:$dst, (atomic_load_or_16 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_OR_I16", + [(set i32:$dst, (atomic_load_or_16 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_XOR_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_XOR_I16", - [(set GPRC:$dst, (atomic_load_xor_16 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), 
"#ATOMIC_LOAD_XOR_I16", + [(set i32:$dst, (atomic_load_xor_16 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_NAND_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I16", - [(set GPRC:$dst, (atomic_load_nand_16 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I16", + [(set i32:$dst, (atomic_load_nand_16 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_ADD_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I32", - [(set GPRC:$dst, (atomic_load_add_32 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I32", + [(set i32:$dst, (atomic_load_add_32 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_SUB_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I32", - [(set GPRC:$dst, (atomic_load_sub_32 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_SUB_I32", + [(set i32:$dst, (atomic_load_sub_32 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_AND_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I32", - [(set GPRC:$dst, (atomic_load_and_32 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_AND_I32", + [(set i32:$dst, (atomic_load_and_32 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_OR_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I32", - [(set GPRC:$dst, (atomic_load_or_32 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_OR_I32", + [(set i32:$dst, (atomic_load_or_32 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_XOR_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_XOR_I32", - [(set GPRC:$dst, (atomic_load_xor_32 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_XOR_I32", + [(set i32:$dst, (atomic_load_xor_32 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_LOAD_NAND_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I32", - [(set GPRC:$dst, (atomic_load_nand_32 xoaddr:$ptr, GPRC:$incr))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I32", + [(set i32:$dst, (atomic_load_nand_32 xoaddr:$ptr, i32:$incr))]>; def ATOMIC_CMP_SWAP_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I8", - [(set GPRC:$dst, - (atomic_cmp_swap_8 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$old, gprc:$new), "#ATOMIC_CMP_SWAP_I8", + [(set i32:$dst, (atomic_cmp_swap_8 xoaddr:$ptr, i32:$old, i32:$new))]>; def ATOMIC_CMP_SWAP_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I16 $dst $ptr $old $new", - [(set GPRC:$dst, - (atomic_cmp_swap_16 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$old, gprc:$new), "#ATOMIC_CMP_SWAP_I16 $dst $ptr $old $new", + [(set i32:$dst, (atomic_cmp_swap_16 xoaddr:$ptr, i32:$old, i32:$new))]>; def ATOMIC_CMP_SWAP_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I32 $dst $ptr $old $new", - [(set GPRC:$dst, - (atomic_cmp_swap_32 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$old, gprc:$new), "#ATOMIC_CMP_SWAP_I32 $dst $ptr $old $new", + [(set i32:$dst, (atomic_cmp_swap_32 xoaddr:$ptr, i32:$old, i32:$new))]>; def ATOMIC_SWAP_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_i8", - 
[(set GPRC:$dst, (atomic_swap_8 xoaddr:$ptr, GPRC:$new))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$new), "#ATOMIC_SWAP_i8", + [(set i32:$dst, (atomic_swap_8 xoaddr:$ptr, i32:$new))]>; def ATOMIC_SWAP_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_I16", - [(set GPRC:$dst, (atomic_swap_16 xoaddr:$ptr, GPRC:$new))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$new), "#ATOMIC_SWAP_I16", + [(set i32:$dst, (atomic_swap_16 xoaddr:$ptr, i32:$new))]>; def ATOMIC_SWAP_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_I32", - [(set GPRC:$dst, (atomic_swap_32 xoaddr:$ptr, GPRC:$new))]>; + (outs gprc:$dst), (ins memrr:$ptr, gprc:$new), "#ATOMIC_SWAP_I32", + [(set i32:$dst, (atomic_swap_32 xoaddr:$ptr, i32:$new))]>; } } // Instructions to support atomic operations -def LWARX : XForm_1<31, 20, (outs GPRC:$rD), (ins memrr:$src), +def LWARX : XForm_1<31, 20, (outs gprc:$rD), (ins memrr:$src), "lwarx $rD, $src", LdStLWARX, - [(set GPRC:$rD, (PPClarx xoaddr:$src))]>; + [(set i32:$rD, (PPClarx xoaddr:$src))]>; let Defs = [CR0] in -def STWCX : XForm_1<31, 150, (outs), (ins GPRC:$rS, memrr:$dst), +def STWCX : XForm_1<31, 150, (outs), (ins gprc:$rS, memrr:$dst), "stwcx. $rS, $dst", LdStSTWCX, - [(PPCstcx GPRC:$rS, xoaddr:$dst)]>, + [(PPCstcx i32:$rS, xoaddr:$dst)]>, isDOT; let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in @@ -716,96 +1017,96 @@ def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStLoad, [(trap)]>; // Unindexed (r+i) Loads. let canFoldAsLoad = 1, PPC970_Unit = 2 in { -def LBZ : DForm_1<34, (outs GPRC:$rD), (ins memri:$src), +def LBZ : DForm_1<34, (outs gprc:$rD), (ins memri:$src), "lbz $rD, $src", LdStLoad, - [(set GPRC:$rD, (zextloadi8 iaddr:$src))]>; -def LHA : DForm_1<42, (outs GPRC:$rD), (ins memri:$src), + [(set i32:$rD, (zextloadi8 iaddr:$src))]>; +def LHA : DForm_1<42, (outs gprc:$rD), (ins memri:$src), "lha $rD, $src", LdStLHA, - [(set GPRC:$rD, (sextloadi16 iaddr:$src))]>, + [(set i32:$rD, (sextloadi16 iaddr:$src))]>, PPC970_DGroup_Cracked; -def LHZ : DForm_1<40, (outs GPRC:$rD), (ins memri:$src), +def LHZ : DForm_1<40, (outs gprc:$rD), (ins memri:$src), "lhz $rD, $src", LdStLoad, - [(set GPRC:$rD, (zextloadi16 iaddr:$src))]>; -def LWZ : DForm_1<32, (outs GPRC:$rD), (ins memri:$src), + [(set i32:$rD, (zextloadi16 iaddr:$src))]>; +def LWZ : DForm_1<32, (outs gprc:$rD), (ins memri:$src), "lwz $rD, $src", LdStLoad, - [(set GPRC:$rD, (load iaddr:$src))]>; + [(set i32:$rD, (load iaddr:$src))]>; -def LFS : DForm_1<48, (outs F4RC:$rD), (ins memri:$src), +def LFS : DForm_1<48, (outs f4rc:$rD), (ins memri:$src), "lfs $rD, $src", LdStLFD, - [(set F4RC:$rD, (load iaddr:$src))]>; -def LFD : DForm_1<50, (outs F8RC:$rD), (ins memri:$src), + [(set f32:$rD, (load iaddr:$src))]>; +def LFD : DForm_1<50, (outs f8rc:$rD), (ins memri:$src), "lfd $rD, $src", LdStLFD, - [(set F8RC:$rD, (load iaddr:$src))]>; + [(set f64:$rD, (load iaddr:$src))]>; // Unindexed (r+i) Loads with Update (preinc). 
-let mayLoad = 1 in { -def LBZU : DForm_1<35, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +let mayLoad = 1, neverHasSideEffects = 1 in { +def LBZU : DForm_1<35, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lbzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LHAU : DForm_1<43, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +def LHAU : DForm_1<43, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lhau $rD, $addr", LdStLHAU, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LHZU : DForm_1<41, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +def LHZU : DForm_1<41, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lhzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LWZU : DForm_1<33, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +def LWZU : DForm_1<33, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lwzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LFSU : DForm_1<49, (outs F4RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +def LFSU : DForm_1<49, (outs f4rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lfsu $rD, $addr", LdStLFDU, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; -def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), +def LFDU : DForm_1<51, (outs f8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr), "lfdu $rD, $addr", LdStLFDU, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; // Indexed (r+r) Loads with Update (preinc). -def LBZUX : XForm_1<31, 119, (outs GPRC:$rD, ptr_rc:$ea_result), +def LBZUX : XForm_1<31, 119, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lbzux $rD, $addr", LdStLoadUpd, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LHAUX : XForm_1<31, 375, (outs GPRC:$rD, ptr_rc:$ea_result), +def LHAUX : XForm_1<31, 375, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lhaux $rD, $addr", LdStLHAU, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LHZUX : XForm_1<31, 311, (outs GPRC:$rD, ptr_rc:$ea_result), +def LHZUX : XForm_1<31, 311, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lhzux $rD, $addr", LdStLoadUpd, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LWZUX : XForm_1<31, 55, (outs GPRC:$rD, ptr_rc:$ea_result), +def LWZUX : XForm_1<31, 55, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lwzux $rD, $addr", LdStLoadUpd, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LFSUX : XForm_1<31, 567, (outs F4RC:$rD, ptr_rc:$ea_result), +def LFSUX : XForm_1<31, 567, (outs f4rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lfsux $rD, $addr", LdStLFDU, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, NoEncode<"$ea_result">; -def LFDUX : XForm_1<31, 631, (outs F8RC:$rD, ptr_rc:$ea_result), +def LFDUX : XForm_1<31, 631, (outs f8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrr:$addr), "lfdux $rD, $addr", LdStLFDU, - []>, RegConstraint<"$addr.offreg = $ea_result">, + []>, RegConstraint<"$addr.ptrreg = $ea_result">, 
NoEncode<"$ea_result">; } } @@ -813,34 +1114,41 @@ def LFDUX : XForm_1<31, 631, (outs F8RC:$rD, ptr_rc:$ea_result), // Indexed (r+r) Loads. // let canFoldAsLoad = 1, PPC970_Unit = 2 in { -def LBZX : XForm_1<31, 87, (outs GPRC:$rD), (ins memrr:$src), +def LBZX : XForm_1<31, 87, (outs gprc:$rD), (ins memrr:$src), "lbzx $rD, $src", LdStLoad, - [(set GPRC:$rD, (zextloadi8 xaddr:$src))]>; -def LHAX : XForm_1<31, 343, (outs GPRC:$rD), (ins memrr:$src), + [(set i32:$rD, (zextloadi8 xaddr:$src))]>; +def LHAX : XForm_1<31, 343, (outs gprc:$rD), (ins memrr:$src), "lhax $rD, $src", LdStLHA, - [(set GPRC:$rD, (sextloadi16 xaddr:$src))]>, + [(set i32:$rD, (sextloadi16 xaddr:$src))]>, PPC970_DGroup_Cracked; -def LHZX : XForm_1<31, 279, (outs GPRC:$rD), (ins memrr:$src), +def LHZX : XForm_1<31, 279, (outs gprc:$rD), (ins memrr:$src), "lhzx $rD, $src", LdStLoad, - [(set GPRC:$rD, (zextloadi16 xaddr:$src))]>; -def LWZX : XForm_1<31, 23, (outs GPRC:$rD), (ins memrr:$src), + [(set i32:$rD, (zextloadi16 xaddr:$src))]>; +def LWZX : XForm_1<31, 23, (outs gprc:$rD), (ins memrr:$src), "lwzx $rD, $src", LdStLoad, - [(set GPRC:$rD, (load xaddr:$src))]>; + [(set i32:$rD, (load xaddr:$src))]>; -def LHBRX : XForm_1<31, 790, (outs GPRC:$rD), (ins memrr:$src), +def LHBRX : XForm_1<31, 790, (outs gprc:$rD), (ins memrr:$src), "lhbrx $rD, $src", LdStLoad, - [(set GPRC:$rD, (PPClbrx xoaddr:$src, i16))]>; -def LWBRX : XForm_1<31, 534, (outs GPRC:$rD), (ins memrr:$src), + [(set i32:$rD, (PPClbrx xoaddr:$src, i16))]>; +def LWBRX : XForm_1<31, 534, (outs gprc:$rD), (ins memrr:$src), "lwbrx $rD, $src", LdStLoad, - [(set GPRC:$rD, (PPClbrx xoaddr:$src, i32))]>; + [(set i32:$rD, (PPClbrx xoaddr:$src, i32))]>; -def LFSX : XForm_25<31, 535, (outs F4RC:$frD), (ins memrr:$src), +def LFSX : XForm_25<31, 535, (outs f4rc:$frD), (ins memrr:$src), "lfsx $frD, $src", LdStLFD, - [(set F4RC:$frD, (load xaddr:$src))]>; -def LFDX : XForm_25<31, 599, (outs F8RC:$frD), (ins memrr:$src), + [(set f32:$frD, (load xaddr:$src))]>; +def LFDX : XForm_25<31, 599, (outs f8rc:$frD), (ins memrr:$src), "lfdx $frD, $src", LdStLFD, - [(set F8RC:$frD, (load xaddr:$src))]>; + [(set f64:$frD, (load xaddr:$src))]>; + +def LFIWAX : XForm_25<31, 855, (outs f8rc:$frD), (ins memrr:$src), + "lfiwax $frD, $src", LdStLFD, + [(set f64:$frD, (PPClfiwax xoaddr:$src))]>; +def LFIWZX : XForm_25<31, 887, (outs f8rc:$frD), (ins memrr:$src), + "lfiwzx $frD, $src", LdStLFD, + [(set f64:$frD, (PPClfiwzx xoaddr:$src))]>; } //===----------------------------------------------------------------------===// @@ -849,139 +1157,130 @@ def LFDX : XForm_25<31, 599, (outs F8RC:$frD), (ins memrr:$src), // Unindexed (r+i) Stores. 
let PPC970_Unit = 2 in { -def STB : DForm_1<38, (outs), (ins GPRC:$rS, memri:$src), +def STB : DForm_1<38, (outs), (ins gprc:$rS, memri:$src), "stb $rS, $src", LdStStore, - [(truncstorei8 GPRC:$rS, iaddr:$src)]>; -def STH : DForm_1<44, (outs), (ins GPRC:$rS, memri:$src), + [(truncstorei8 i32:$rS, iaddr:$src)]>; +def STH : DForm_1<44, (outs), (ins gprc:$rS, memri:$src), "sth $rS, $src", LdStStore, - [(truncstorei16 GPRC:$rS, iaddr:$src)]>; -def STW : DForm_1<36, (outs), (ins GPRC:$rS, memri:$src), + [(truncstorei16 i32:$rS, iaddr:$src)]>; +def STW : DForm_1<36, (outs), (ins gprc:$rS, memri:$src), "stw $rS, $src", LdStStore, - [(store GPRC:$rS, iaddr:$src)]>; -def STFS : DForm_1<52, (outs), (ins F4RC:$rS, memri:$dst), + [(store i32:$rS, iaddr:$src)]>; +def STFS : DForm_1<52, (outs), (ins f4rc:$rS, memri:$dst), "stfs $rS, $dst", LdStSTFD, - [(store F4RC:$rS, iaddr:$dst)]>; -def STFD : DForm_1<54, (outs), (ins F8RC:$rS, memri:$dst), + [(store f32:$rS, iaddr:$dst)]>; +def STFD : DForm_1<54, (outs), (ins f8rc:$rS, memri:$dst), "stfd $rS, $dst", LdStSTFD, - [(store F8RC:$rS, iaddr:$dst)]>; + [(store f64:$rS, iaddr:$dst)]>; } // Unindexed (r+i) Stores with Update (preinc). -let PPC970_Unit = 2 in { -def STBU : DForm_1a<39, (outs ptr_rc:$ea_res), (ins GPRC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "stbu $rS, $ptroff($ptrreg)", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti8 GPRC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; -def STHU : DForm_1a<45, (outs ptr_rc:$ea_res), (ins GPRC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "sthu $rS, $ptroff($ptrreg)", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti16 GPRC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; -def STWU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins GPRC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "stwu $rS, $ptroff($ptrreg)", LdStStoreUpd, - [(set ptr_rc:$ea_res, (pre_store GPRC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; -def STFSU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F4RC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "stfsu $rS, $ptroff($ptrreg)", LdStSTFDU, - [(set ptr_rc:$ea_res, (pre_store F4RC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; -def STFDU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F8RC:$rS, - symbolLo:$ptroff, ptr_rc:$ptrreg), - "stfdu $rS, $ptroff($ptrreg)", LdStSTFDU, - [(set ptr_rc:$ea_res, (pre_store F8RC:$rS, ptr_rc:$ptrreg, - iaddroff:$ptroff))]>, - RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; +let PPC970_Unit = 2, mayStore = 1 in { +def STBU : DForm_1<39, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst), + "stbu $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; +def STHU : DForm_1<45, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst), + "sthu $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; +def STWU : DForm_1<37, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst), + "stwu $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; +def STFSU : DForm_1<53, (outs ptr_rc_nor0:$ea_res), (ins f4rc:$rS, memri:$dst), + "stfsu $rS, $dst", LdStSTFDU, []>, + RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; +def STFDU : DForm_1<55, (outs ptr_rc_nor0:$ea_res), (ins f8rc:$rS, memri:$dst), + "stfdu $rS, $dst", LdStSTFDU, []>, + 
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">; } +// Patterns to match the pre-inc stores. We can't put the patterns on +// the instruction definitions directly as ISel wants the address base +// and offset to be separate operands, not a single complex operand. +def : Pat<(pre_truncsti8 i32:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STBU $rS, iaddroff:$ptroff, $ptrreg)>; +def : Pat<(pre_truncsti16 i32:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STHU $rS, iaddroff:$ptroff, $ptrreg)>; +def : Pat<(pre_store i32:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STWU $rS, iaddroff:$ptroff, $ptrreg)>; +def : Pat<(pre_store f32:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STFSU $rS, iaddroff:$ptroff, $ptrreg)>; +def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iaddroff:$ptroff), + (STFDU $rS, iaddroff:$ptroff, $ptrreg)>; // Indexed (r+r) Stores. -// let PPC970_Unit = 2 in { -def STBX : XForm_8<31, 215, (outs), (ins GPRC:$rS, memrr:$dst), +def STBX : XForm_8<31, 215, (outs), (ins gprc:$rS, memrr:$dst), "stbx $rS, $dst", LdStStore, - [(truncstorei8 GPRC:$rS, xaddr:$dst)]>, + [(truncstorei8 i32:$rS, xaddr:$dst)]>, PPC970_DGroup_Cracked; -def STHX : XForm_8<31, 407, (outs), (ins GPRC:$rS, memrr:$dst), +def STHX : XForm_8<31, 407, (outs), (ins gprc:$rS, memrr:$dst), "sthx $rS, $dst", LdStStore, - [(truncstorei16 GPRC:$rS, xaddr:$dst)]>, + [(truncstorei16 i32:$rS, xaddr:$dst)]>, PPC970_DGroup_Cracked; -def STWX : XForm_8<31, 151, (outs), (ins GPRC:$rS, memrr:$dst), +def STWX : XForm_8<31, 151, (outs), (ins gprc:$rS, memrr:$dst), "stwx $rS, $dst", LdStStore, - [(store GPRC:$rS, xaddr:$dst)]>, + [(store i32:$rS, xaddr:$dst)]>, PPC970_DGroup_Cracked; -def STBUX : XForm_8<31, 247, (outs ptr_rc:$ea_res), - (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stbux $rS, $ptroff, $ptrreg", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti8 GPRC:$rS, - ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, - PPC970_DGroup_Cracked; - -def STHUX : XForm_8<31, 439, (outs ptr_rc:$ea_res), - (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "sthux $rS, $ptroff, $ptrreg", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_truncsti16 GPRC:$rS, - ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, - PPC970_DGroup_Cracked; - -def STWUX : XForm_8<31, 183, (outs ptr_rc:$ea_res), - (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stwux $rS, $ptroff, $ptrreg", LdStStoreUpd, - [(set ptr_rc:$ea_res, - (pre_store GPRC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, - PPC970_DGroup_Cracked; - -def STFSUX : XForm_8<31, 695, (outs ptr_rc:$ea_res), - (ins F4RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stfsux $rS, $ptroff, $ptrreg", LdStSTFDU, - [(set ptr_rc:$ea_res, - (pre_store F4RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, - PPC970_DGroup_Cracked; - -def STFDUX : XForm_8<31, 759, (outs ptr_rc:$ea_res), - (ins F8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stfdux $rS, $ptroff, $ptrreg", LdStSTFDU, - [(set ptr_rc:$ea_res, - (pre_store F8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, - RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, - PPC970_DGroup_Cracked; - -def STHBRX: XForm_8<31, 918, (outs), (ins GPRC:$rS, memrr:$dst), +def STHBRX: XForm_8<31, 918, (outs), (ins gprc:$rS, memrr:$dst), "sthbrx $rS, $dst", LdStStore, - [(PPCstbrx GPRC:$rS, xoaddr:$dst, i16)]>, + [(PPCstbrx i32:$rS, xoaddr:$dst, i16)]>, PPC970_DGroup_Cracked; -def STWBRX: 
XForm_8<31, 662, (outs), (ins GPRC:$rS, memrr:$dst), +def STWBRX: XForm_8<31, 662, (outs), (ins gprc:$rS, memrr:$dst), "stwbrx $rS, $dst", LdStStore, - [(PPCstbrx GPRC:$rS, xoaddr:$dst, i32)]>, + [(PPCstbrx i32:$rS, xoaddr:$dst, i32)]>, PPC970_DGroup_Cracked; -def STFIWX: XForm_28<31, 983, (outs), (ins F8RC:$frS, memrr:$dst), +def STFIWX: XForm_28<31, 983, (outs), (ins f8rc:$frS, memrr:$dst), "stfiwx $frS, $dst", LdStSTFD, - [(PPCstfiwx F8RC:$frS, xoaddr:$dst)]>; + [(PPCstfiwx f64:$frS, xoaddr:$dst)]>; -def STFSX : XForm_28<31, 663, (outs), (ins F4RC:$frS, memrr:$dst), +def STFSX : XForm_28<31, 663, (outs), (ins f4rc:$frS, memrr:$dst), "stfsx $frS, $dst", LdStSTFD, - [(store F4RC:$frS, xaddr:$dst)]>; -def STFDX : XForm_28<31, 727, (outs), (ins F8RC:$frS, memrr:$dst), + [(store f32:$frS, xaddr:$dst)]>; +def STFDX : XForm_28<31, 727, (outs), (ins f8rc:$frS, memrr:$dst), "stfdx $frS, $dst", LdStSTFD, - [(store F8RC:$frS, xaddr:$dst)]>; + [(store f64:$frS, xaddr:$dst)]>; +} + +// Indexed (r+r) Stores with Update (preinc). +let PPC970_Unit = 2, mayStore = 1 in { +def STBUX : XForm_8<31, 247, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memrr:$dst), + "stbux $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, + PPC970_DGroup_Cracked; +def STHUX : XForm_8<31, 439, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memrr:$dst), + "sthux $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, + PPC970_DGroup_Cracked; +def STWUX : XForm_8<31, 183, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memrr:$dst), + "stwux $rS, $dst", LdStStoreUpd, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, + PPC970_DGroup_Cracked; +def STFSUX: XForm_8<31, 695, (outs ptr_rc_nor0:$ea_res), (ins f4rc:$rS, memrr:$dst), + "stfsux $rS, $dst", LdStSTFDU, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, + PPC970_DGroup_Cracked; +def STFDUX: XForm_8<31, 759, (outs ptr_rc_nor0:$ea_res), (ins f8rc:$rS, memrr:$dst), + "stfdux $rS, $dst", LdStSTFDU, []>, + RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">, + PPC970_DGroup_Cracked; } +// Patterns to match the pre-inc stores. We can't put the patterns on +// the instruction definitions directly as ISel wants the address base +// and offset to be separate operands, not a single complex operand. +def : Pat<(pre_truncsti8 i32:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STBUX $rS, $ptrreg, $ptroff)>; +def : Pat<(pre_truncsti16 i32:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STHUX $rS, $ptrreg, $ptroff)>; +def : Pat<(pre_store i32:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STWUX $rS, $ptrreg, $ptroff)>; +def : Pat<(pre_store f32:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STFSUX $rS, $ptrreg, $ptroff)>; +def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iPTR:$ptroff), + (STFDUX $rS, $ptrreg, $ptroff)>; + def SYNC : XForm_24_sync<31, 598, (outs), (ins), "sync", LdStSync, [(int_ppc_sync)]>; @@ -991,157 +1290,206 @@ def SYNC : XForm_24_sync<31, 598, (outs), (ins), // let PPC970_Unit = 1 in { // FXU Operations. 
-def ADDI : DForm_2<14, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm), - "addi $rD, $rA, $imm", IntSimple, - [(set GPRC:$rD, (add GPRC:$rA, immSExt16:$imm))]>; -def ADDIL : DForm_2<14, (outs GPRC:$rD), (ins GPRC:$rA, symbolLo:$imm), +def ADDI : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, symbolLo:$imm), "addi $rD, $rA, $imm", IntSimple, - [(set GPRC:$rD, (add GPRC:$rA, immSExt16:$imm))]>; -let Defs = [CARRY] in { -def ADDIC : DForm_2<12, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm), + [(set i32:$rD, (add i32:$rA, immSExt16:$imm))]>; +let BaseName = "addic" in { +let Defs = [CARRY] in +def ADDIC : DForm_2<12, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), "addic $rD, $rA, $imm", IntGeneral, - [(set GPRC:$rD, (addc GPRC:$rA, immSExt16:$imm))]>, - PPC970_DGroup_Cracked; -def ADDICo : DForm_2<13, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm), + [(set i32:$rD, (addc i32:$rA, immSExt16:$imm))]>, + RecFormRel, PPC970_DGroup_Cracked; +let Defs = [CARRY, CR0] in +def ADDICo : DForm_2<13, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), "addic. $rD, $rA, $imm", IntGeneral, - []>; + []>, isDOT, RecFormRel; } -def ADDIS : DForm_2<15, (outs GPRC:$rD), (ins GPRC:$rA, symbolHi:$imm), +def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, symbolHi:$imm), "addis $rD, $rA, $imm", IntSimple, - [(set GPRC:$rD, (add GPRC:$rA, imm16ShiftedSExt:$imm))]>; -def LA : DForm_2<14, (outs GPRC:$rD), (ins GPRC:$rA, symbolLo:$sym), + [(set i32:$rD, (add i32:$rA, imm16ShiftedSExt:$imm))]>; +let isCodeGenOnly = 1 in +def LA : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, symbolLo:$sym), "la $rD, $sym($rA)", IntGeneral, - [(set GPRC:$rD, (add GPRC:$rA, + [(set i32:$rD, (add i32:$rA, (PPClo tglobaladdr:$sym, 0)))]>; -def MULLI : DForm_2< 7, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm), +def MULLI : DForm_2< 7, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), "mulli $rD, $rA, $imm", IntMulLI, - [(set GPRC:$rD, (mul GPRC:$rA, immSExt16:$imm))]>; -let Defs = [CARRY] in { -def SUBFIC : DForm_2< 8, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm), + [(set i32:$rD, (mul i32:$rA, immSExt16:$imm))]>; +let Defs = [CARRY] in +def SUBFIC : DForm_2< 8, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), "subfic $rD, $rA, $imm", IntGeneral, - [(set GPRC:$rD, (subc immSExt16:$imm, GPRC:$rA))]>; -} + [(set i32:$rD, (subc immSExt16:$imm, i32:$rA))]>; let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { - def LI : DForm_2_r0<14, (outs GPRC:$rD), (ins symbolLo:$imm), + def LI : DForm_2_r0<14, (outs gprc:$rD), (ins symbolLo:$imm), "li $rD, $imm", IntSimple, - [(set GPRC:$rD, immSExt16:$imm)]>; - def LIS : DForm_2_r0<15, (outs GPRC:$rD), (ins symbolHi:$imm), + [(set i32:$rD, immSExt16:$imm)]>; + def LIS : DForm_2_r0<15, (outs gprc:$rD), (ins symbolHi:$imm), "lis $rD, $imm", IntSimple, - [(set GPRC:$rD, imm16ShiftedSExt:$imm)]>; + [(set i32:$rD, imm16ShiftedSExt:$imm)]>; } } let PPC970_Unit = 1 in { // FXU Operations. -def ANDIo : DForm_4<28, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2), +let Defs = [CR0] in { +def ANDIo : DForm_4<28, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "andi. $dst, $src1, $src2", IntGeneral, - [(set GPRC:$dst, (and GPRC:$src1, immZExt16:$src2))]>, + [(set i32:$dst, (and i32:$src1, immZExt16:$src2))]>, isDOT; -def ANDISo : DForm_4<29, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2), +def ANDISo : DForm_4<29, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "andis. 
$dst, $src1, $src2", IntGeneral, - [(set GPRC:$dst, (and GPRC:$src1,imm16ShiftedZExt:$src2))]>, + [(set i32:$dst, (and i32:$src1, imm16ShiftedZExt:$src2))]>, isDOT; -def ORI : DForm_4<24, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2), +} +def ORI : DForm_4<24, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "ori $dst, $src1, $src2", IntSimple, - [(set GPRC:$dst, (or GPRC:$src1, immZExt16:$src2))]>; -def ORIS : DForm_4<25, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2), + [(set i32:$dst, (or i32:$src1, immZExt16:$src2))]>; +def ORIS : DForm_4<25, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "oris $dst, $src1, $src2", IntSimple, - [(set GPRC:$dst, (or GPRC:$src1, imm16ShiftedZExt:$src2))]>; -def XORI : DForm_4<26, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2), + [(set i32:$dst, (or i32:$src1, imm16ShiftedZExt:$src2))]>; +def XORI : DForm_4<26, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "xori $dst, $src1, $src2", IntSimple, - [(set GPRC:$dst, (xor GPRC:$src1, immZExt16:$src2))]>; -def XORIS : DForm_4<27, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2), + [(set i32:$dst, (xor i32:$src1, immZExt16:$src2))]>; +def XORIS : DForm_4<27, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "xoris $dst, $src1, $src2", IntSimple, - [(set GPRC:$dst, (xor GPRC:$src1,imm16ShiftedZExt:$src2))]>; + [(set i32:$dst, (xor i32:$src1, imm16ShiftedZExt:$src2))]>; def NOP : DForm_4_zero<24, (outs), (ins), "nop", IntSimple, []>; -def CMPWI : DForm_5_ext<11, (outs CRRC:$crD), (ins GPRC:$rA, s16imm:$imm), - "cmpwi $crD, $rA, $imm", IntCompare>; -def CMPLWI : DForm_6_ext<10, (outs CRRC:$dst), (ins GPRC:$src1, u16imm:$src2), - "cmplwi $dst, $src1, $src2", IntCompare>; +let isCompare = 1, neverHasSideEffects = 1 in { + def CMPWI : DForm_5_ext<11, (outs crrc:$crD), (ins gprc:$rA, s16imm:$imm), + "cmpwi $crD, $rA, $imm", IntCompare>; + def CMPLWI : DForm_6_ext<10, (outs crrc:$dst), (ins gprc:$src1, u16imm:$src2), + "cmplwi $dst, $src1, $src2", IntCompare>; +} } +let PPC970_Unit = 1, neverHasSideEffects = 1 in { // FXU Operations. 
+defm NAND : XForm_6r<31, 476, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "nand", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (not (and i32:$rS, i32:$rB)))]>; +defm AND : XForm_6r<31, 28, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "and", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (and i32:$rS, i32:$rB))]>; +defm ANDC : XForm_6r<31, 60, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "andc", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (and i32:$rS, (not i32:$rB)))]>; +defm OR : XForm_6r<31, 444, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "or", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (or i32:$rS, i32:$rB))]>; +defm NOR : XForm_6r<31, 124, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "nor", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (not (or i32:$rS, i32:$rB)))]>; +defm ORC : XForm_6r<31, 412, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "orc", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (or i32:$rS, (not i32:$rB)))]>; +defm EQV : XForm_6r<31, 284, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "eqv", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (not (xor i32:$rS, i32:$rB)))]>; +defm XOR : XForm_6r<31, 316, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "xor", "$rA, $rS, $rB", IntSimple, + [(set i32:$rA, (xor i32:$rS, i32:$rB))]>; +defm SLW : XForm_6r<31, 24, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "slw", "$rA, $rS, $rB", IntGeneral, + [(set i32:$rA, (PPCshl i32:$rS, i32:$rB))]>; +defm SRW : XForm_6r<31, 536, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "srw", "$rA, $rS, $rB", IntGeneral, + [(set i32:$rA, (PPCsrl i32:$rS, i32:$rB))]>; +defm SRAW : XForm_6rc<31, 792, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB), + "sraw", "$rA, $rS, $rB", IntShift, + [(set i32:$rA, (PPCsra i32:$rS, i32:$rB))]>; +} let PPC970_Unit = 1 in { // FXU Operations. 
-def NAND : XForm_6<31, 476, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "nand $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (not (and GPRC:$rS, GPRC:$rB)))]>; -def AND : XForm_6<31, 28, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "and $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (and GPRC:$rS, GPRC:$rB))]>; -def ANDC : XForm_6<31, 60, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "andc $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (and GPRC:$rS, (not GPRC:$rB)))]>; -def OR : XForm_6<31, 444, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "or $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (or GPRC:$rS, GPRC:$rB))]>; -def NOR : XForm_6<31, 124, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "nor $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (not (or GPRC:$rS, GPRC:$rB)))]>; -def ORC : XForm_6<31, 412, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "orc $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (or GPRC:$rS, (not GPRC:$rB)))]>; -def EQV : XForm_6<31, 284, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "eqv $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (not (xor GPRC:$rS, GPRC:$rB)))]>; -def XOR : XForm_6<31, 316, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "xor $rA, $rS, $rB", IntSimple, - [(set GPRC:$rA, (xor GPRC:$rS, GPRC:$rB))]>; -def SLW : XForm_6<31, 24, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "slw $rA, $rS, $rB", IntGeneral, - [(set GPRC:$rA, (PPCshl GPRC:$rS, GPRC:$rB))]>; -def SRW : XForm_6<31, 536, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "srw $rA, $rS, $rB", IntGeneral, - [(set GPRC:$rA, (PPCsrl GPRC:$rS, GPRC:$rB))]>; -let Defs = [CARRY] in { -def SRAW : XForm_6<31, 792, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB), - "sraw $rA, $rS, $rB", IntShift, - [(set GPRC:$rA, (PPCsra GPRC:$rS, GPRC:$rB))]>; +let neverHasSideEffects = 1 in { +defm SRAWI : XForm_10rc<31, 824, (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH), + "srawi", "$rA, $rS, $SH", IntShift, + [(set i32:$rA, (sra i32:$rS, (i32 imm:$SH)))]>; +defm CNTLZW : XForm_11r<31, 26, (outs gprc:$rA), (ins gprc:$rS), + "cntlzw", "$rA, $rS", IntGeneral, + [(set i32:$rA, (ctlz i32:$rS))]>; +defm EXTSB : XForm_11r<31, 954, (outs gprc:$rA), (ins gprc:$rS), + "extsb", "$rA, $rS", IntSimple, + [(set i32:$rA, (sext_inreg i32:$rS, i8))]>; +defm EXTSH : XForm_11r<31, 922, (outs gprc:$rA), (ins gprc:$rS), + "extsh", "$rA, $rS", IntSimple, + [(set i32:$rA, (sext_inreg i32:$rS, i16))]>; } +let isCompare = 1, neverHasSideEffects = 1 in { + def CMPW : XForm_16_ext<31, 0, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB), + "cmpw $crD, $rA, $rB", IntCompare>; + def CMPLW : XForm_16_ext<31, 32, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB), + "cmplw $crD, $rA, $rB", IntCompare>; } - -let PPC970_Unit = 1 in { // FXU Operations. 
-let Defs = [CARRY] in { -def SRAWI : XForm_10<31, 824, (outs GPRC:$rA), (ins GPRC:$rS, u5imm:$SH), - "srawi $rA, $rS, $SH", IntShift, - [(set GPRC:$rA, (sra GPRC:$rS, (i32 imm:$SH)))]>; -} -def CNTLZW : XForm_11<31, 26, (outs GPRC:$rA), (ins GPRC:$rS), - "cntlzw $rA, $rS", IntGeneral, - [(set GPRC:$rA, (ctlz GPRC:$rS))]>; -def EXTSB : XForm_11<31, 954, (outs GPRC:$rA), (ins GPRC:$rS), - "extsb $rA, $rS", IntSimple, - [(set GPRC:$rA, (sext_inreg GPRC:$rS, i8))]>; -def EXTSH : XForm_11<31, 922, (outs GPRC:$rA), (ins GPRC:$rS), - "extsh $rA, $rS", IntSimple, - [(set GPRC:$rA, (sext_inreg GPRC:$rS, i16))]>; - -def CMPW : XForm_16_ext<31, 0, (outs CRRC:$crD), (ins GPRC:$rA, GPRC:$rB), - "cmpw $crD, $rA, $rB", IntCompare>; -def CMPLW : XForm_16_ext<31, 32, (outs CRRC:$crD), (ins GPRC:$rA, GPRC:$rB), - "cmplw $crD, $rA, $rB", IntCompare>; } let PPC970_Unit = 3 in { // FPU Operations. //def FCMPO : XForm_17<63, 32, (outs CRRC:$crD), (ins FPRC:$fA, FPRC:$fB), // "fcmpo $crD, $fA, $fB", FPCompare>; -def FCMPUS : XForm_17<63, 0, (outs CRRC:$crD), (ins F4RC:$fA, F4RC:$fB), - "fcmpu $crD, $fA, $fB", FPCompare>; -def FCMPUD : XForm_17<63, 0, (outs CRRC:$crD), (ins F8RC:$fA, F8RC:$fB), - "fcmpu $crD, $fA, $fB", FPCompare>; +let isCompare = 1, neverHasSideEffects = 1 in { + def FCMPUS : XForm_17<63, 0, (outs crrc:$crD), (ins f4rc:$fA, f4rc:$fB), + "fcmpu $crD, $fA, $fB", FPCompare>; + def FCMPUD : XForm_17<63, 0, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB), + "fcmpu $crD, $fA, $fB", FPCompare>; +} let Uses = [RM] in { - def FCTIWZ : XForm_26<63, 15, (outs F8RC:$frD), (ins F8RC:$frB), - "fctiwz $frD, $frB", FPGeneral, - [(set F8RC:$frD, (PPCfctiwz F8RC:$frB))]>; - def FRSP : XForm_26<63, 12, (outs F4RC:$frD), (ins F8RC:$frB), - "frsp $frD, $frB", FPGeneral, - [(set F4RC:$frD, (fround F8RC:$frB))]>; - def FSQRT : XForm_26<63, 22, (outs F8RC:$frD), (ins F8RC:$frB), - "fsqrt $frD, $frB", FPSqrt, - [(set F8RC:$frD, (fsqrt F8RC:$frB))]>; - def FSQRTS : XForm_26<59, 22, (outs F4RC:$frD), (ins F4RC:$frB), - "fsqrts $frD, $frB", FPSqrt, - [(set F4RC:$frD, (fsqrt F4RC:$frB))]>; + let neverHasSideEffects = 1 in { + defm FCTIWZ : XForm_26r<63, 15, (outs f8rc:$frD), (ins f8rc:$frB), + "fctiwz", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfctiwz f64:$frB))]>; + + defm FRSP : XForm_26r<63, 12, (outs f4rc:$frD), (ins f8rc:$frB), + "frsp", "$frD, $frB", FPGeneral, + [(set f32:$frD, (fround f64:$frB))]>; + + // The frin -> nearbyint mapping is valid only in fast-math mode. + let Interpretation64Bit = 1 in + defm FRIND : XForm_26r<63, 392, (outs f8rc:$frD), (ins f8rc:$frB), + "frin", "$frD, $frB", FPGeneral, + [(set f64:$frD, (fnearbyint f64:$frB))]>; + defm FRINS : XForm_26r<63, 392, (outs f4rc:$frD), (ins f4rc:$frB), + "frin", "$frD, $frB", FPGeneral, + [(set f32:$frD, (fnearbyint f32:$frB))]>; + } + + // These pseudos expand to rint but also set FE_INEXACT when the result does + // not equal the argument. + let usesCustomInserter = 1, Defs = [RM] in { // FIXME: Model FPSCR! 
+ def FRINDrint : Pseudo<(outs f8rc:$frD), (ins f8rc:$frB), + "#FRINDrint", [(set f64:$frD, (frint f64:$frB))]>; + def FRINSrint : Pseudo<(outs f4rc:$frD), (ins f4rc:$frB), + "#FRINSrint", [(set f32:$frD, (frint f32:$frB))]>; + } + + let neverHasSideEffects = 1 in { + let Interpretation64Bit = 1 in + defm FRIPD : XForm_26r<63, 456, (outs f8rc:$frD), (ins f8rc:$frB), + "frip", "$frD, $frB", FPGeneral, + [(set f64:$frD, (fceil f64:$frB))]>; + defm FRIPS : XForm_26r<63, 456, (outs f4rc:$frD), (ins f4rc:$frB), + "frip", "$frD, $frB", FPGeneral, + [(set f32:$frD, (fceil f32:$frB))]>; + let Interpretation64Bit = 1 in + defm FRIZD : XForm_26r<63, 424, (outs f8rc:$frD), (ins f8rc:$frB), + "friz", "$frD, $frB", FPGeneral, + [(set f64:$frD, (ftrunc f64:$frB))]>; + defm FRIZS : XForm_26r<63, 424, (outs f4rc:$frD), (ins f4rc:$frB), + "friz", "$frD, $frB", FPGeneral, + [(set f32:$frD, (ftrunc f32:$frB))]>; + let Interpretation64Bit = 1 in + defm FRIMD : XForm_26r<63, 488, (outs f8rc:$frD), (ins f8rc:$frB), + "frim", "$frD, $frB", FPGeneral, + [(set f64:$frD, (ffloor f64:$frB))]>; + defm FRIMS : XForm_26r<63, 488, (outs f4rc:$frD), (ins f4rc:$frB), + "frim", "$frD, $frB", FPGeneral, + [(set f32:$frD, (ffloor f32:$frB))]>; + + defm FSQRT : XForm_26r<63, 22, (outs f8rc:$frD), (ins f8rc:$frB), + "fsqrt", "$frD, $frB", FPSqrt, + [(set f64:$frD, (fsqrt f64:$frB))]>; + defm FSQRTS : XForm_26r<59, 22, (outs f4rc:$frD), (ins f4rc:$frB), + "fsqrts", "$frD, $frB", FPSqrt, + [(set f32:$frD, (fsqrt f32:$frB))]>; + } } } @@ -1149,55 +1497,74 @@ let Uses = [RM] in { /// often coalesced away and we don't want the dispatch group builder to think /// that they will fill slots (which could cause the load of a LSU reject to /// sneak into a d-group with a store). -def FMR : XForm_26<63, 72, (outs F4RC:$frD), (ins F4RC:$frB), - "fmr $frD, $frB", FPGeneral, - []>, // (set F4RC:$frD, F4RC:$frB) - PPC970_Unit_Pseudo; +let neverHasSideEffects = 1 in +defm FMR : XForm_26r<63, 72, (outs f4rc:$frD), (ins f4rc:$frB), + "fmr", "$frD, $frB", FPGeneral, + []>, // (set f32:$frD, f32:$frB) + PPC970_Unit_Pseudo; -let PPC970_Unit = 3 in { // FPU Operations. +let PPC970_Unit = 3, neverHasSideEffects = 1 in { // FPU Operations. // These are artificially split into two different forms, for 4/8 byte FP. 
-def FABSS : XForm_26<63, 264, (outs F4RC:$frD), (ins F4RC:$frB), - "fabs $frD, $frB", FPGeneral, - [(set F4RC:$frD, (fabs F4RC:$frB))]>; -def FABSD : XForm_26<63, 264, (outs F8RC:$frD), (ins F8RC:$frB), - "fabs $frD, $frB", FPGeneral, - [(set F8RC:$frD, (fabs F8RC:$frB))]>; -def FNABSS : XForm_26<63, 136, (outs F4RC:$frD), (ins F4RC:$frB), - "fnabs $frD, $frB", FPGeneral, - [(set F4RC:$frD, (fneg (fabs F4RC:$frB)))]>; -def FNABSD : XForm_26<63, 136, (outs F8RC:$frD), (ins F8RC:$frB), - "fnabs $frD, $frB", FPGeneral, - [(set F8RC:$frD, (fneg (fabs F8RC:$frB)))]>; -def FNEGS : XForm_26<63, 40, (outs F4RC:$frD), (ins F4RC:$frB), - "fneg $frD, $frB", FPGeneral, - [(set F4RC:$frD, (fneg F4RC:$frB))]>; -def FNEGD : XForm_26<63, 40, (outs F8RC:$frD), (ins F8RC:$frB), - "fneg $frD, $frB", FPGeneral, - [(set F8RC:$frD, (fneg F8RC:$frB))]>; -} - +defm FABSS : XForm_26r<63, 264, (outs f4rc:$frD), (ins f4rc:$frB), + "fabs", "$frD, $frB", FPGeneral, + [(set f32:$frD, (fabs f32:$frB))]>; +let Interpretation64Bit = 1 in +defm FABSD : XForm_26r<63, 264, (outs f8rc:$frD), (ins f8rc:$frB), + "fabs", "$frD, $frB", FPGeneral, + [(set f64:$frD, (fabs f64:$frB))]>; +defm FNABSS : XForm_26r<63, 136, (outs f4rc:$frD), (ins f4rc:$frB), + "fnabs", "$frD, $frB", FPGeneral, + [(set f32:$frD, (fneg (fabs f32:$frB)))]>; +let Interpretation64Bit = 1 in +defm FNABSD : XForm_26r<63, 136, (outs f8rc:$frD), (ins f8rc:$frB), + "fnabs", "$frD, $frB", FPGeneral, + [(set f64:$frD, (fneg (fabs f64:$frB)))]>; +defm FNEGS : XForm_26r<63, 40, (outs f4rc:$frD), (ins f4rc:$frB), + "fneg", "$frD, $frB", FPGeneral, + [(set f32:$frD, (fneg f32:$frB))]>; +let Interpretation64Bit = 1 in +defm FNEGD : XForm_26r<63, 40, (outs f8rc:$frD), (ins f8rc:$frB), + "fneg", "$frD, $frB", FPGeneral, + [(set f64:$frD, (fneg f64:$frB))]>; + +// Reciprocal estimates. +defm FRE : XForm_26r<63, 24, (outs f8rc:$frD), (ins f8rc:$frB), + "fre", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfre f64:$frB))]>; +defm FRES : XForm_26r<59, 24, (outs f4rc:$frD), (ins f4rc:$frB), + "fres", "$frD, $frB", FPGeneral, + [(set f32:$frD, (PPCfre f32:$frB))]>; +defm FRSQRTE : XForm_26r<63, 26, (outs f8rc:$frD), (ins f8rc:$frB), + "frsqrte", "$frD, $frB", FPGeneral, + [(set f64:$frD, (PPCfrsqrte f64:$frB))]>; +defm FRSQRTES : XForm_26r<59, 26, (outs f4rc:$frD), (ins f4rc:$frB), + "frsqrtes", "$frD, $frB", FPGeneral, + [(set f32:$frD, (PPCfrsqrte f32:$frB))]>; +} // XL-Form instructions. condition register logical ops. 
// -def MCRF : XLForm_3<19, 0, (outs CRRC:$BF), (ins CRRC:$BFA), +let neverHasSideEffects = 1 in +def MCRF : XLForm_3<19, 0, (outs crrc:$BF), (ins crrc:$BFA), "mcrf $BF, $BFA", BrMCR>, PPC970_DGroup_First, PPC970_Unit_CRU; -def CREQV : XLForm_1<19, 289, (outs CRBITRC:$CRD), - (ins CRBITRC:$CRA, CRBITRC:$CRB), +def CREQV : XLForm_1<19, 289, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), "creqv $CRD, $CRA, $CRB", BrCR, []>; -def CROR : XLForm_1<19, 449, (outs CRBITRC:$CRD), - (ins CRBITRC:$CRA, CRBITRC:$CRB), +def CROR : XLForm_1<19, 449, (outs crbitrc:$CRD), + (ins crbitrc:$CRA, crbitrc:$CRB), "cror $CRD, $CRA, $CRB", BrCR, []>; -def CRSET : XLForm_1_ext<19, 289, (outs CRBITRC:$dst), (ins), +let isCodeGenOnly = 1 in { +def CRSET : XLForm_1_ext<19, 289, (outs crbitrc:$dst), (ins), "creqv $dst, $dst, $dst", BrCR, []>; -def CRUNSET: XLForm_1_ext<19, 193, (outs CRBITRC:$dst), (ins), +def CRUNSET: XLForm_1_ext<19, 193, (outs crbitrc:$dst), (ins), "crxor $dst, $dst, $dst", BrCR, []>; @@ -1210,27 +1577,28 @@ def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins), "crxor 6, 6, 6", BrCR, [(PPCcr6unset)]>; } +} // XFX-Form instructions. Instructions that deal with SPRs. // let Uses = [CTR] in { -def MFCTR : XFXForm_1_ext<31, 339, 9, (outs GPRC:$rT), (ins), +def MFCTR : XFXForm_1_ext<31, 339, 9, (outs gprc:$rT), (ins), "mfctr $rT", SprMFSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } -let Defs = [CTR], Pattern = [(PPCmtctr GPRC:$rS)] in { -def MTCTR : XFXForm_7_ext<31, 467, 9, (outs), (ins GPRC:$rS), +let Defs = [CTR], Pattern = [(PPCmtctr i32:$rS)] in { +def MTCTR : XFXForm_7_ext<31, 467, 9, (outs), (ins gprc:$rS), "mtctr $rS", SprMTSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } let Defs = [LR] in { -def MTLR : XFXForm_7_ext<31, 467, 8, (outs), (ins GPRC:$rS), +def MTLR : XFXForm_7_ext<31, 467, 8, (outs), (ins gprc:$rS), "mtlr $rS", SprMTSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } let Uses = [LR] in { -def MFLR : XFXForm_1_ext<31, 339, 8, (outs GPRC:$rT), (ins), +def MFLR : XFXForm_1_ext<31, 339, 8, (outs gprc:$rT), (ins), "mflr $rT", SprMFSPR>, PPC970_DGroup_First, PPC970_Unit_FXU; } @@ -1238,14 +1606,38 @@ def MFLR : XFXForm_1_ext<31, 339, 8, (outs GPRC:$rT), (ins), // Move to/from VRSAVE: despite being a SPR, the VRSAVE register is renamed like // a GPR on the PPC970. As such, copies in and out have the same performance // characteristics as an OR instruction. -def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins GPRC:$rS), +def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins gprc:$rS), "mtspr 256, $rS", IntGeneral>, PPC970_DGroup_Single, PPC970_Unit_FXU; -def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs GPRC:$rT), (ins), +def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), (ins), "mfspr $rT, 256", IntGeneral>, PPC970_DGroup_First, PPC970_Unit_FXU; -def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins GPRC:$rS), +let isCodeGenOnly = 1 in { + def MTVRSAVEv : XFXForm_7_ext<31, 467, 256, + (outs VRSAVERC:$reg), (ins gprc:$rS), + "mtspr 256, $rS", IntGeneral>, + PPC970_DGroup_Single, PPC970_Unit_FXU; + def MFVRSAVEv : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), + (ins VRSAVERC:$reg), + "mfspr $rT, 256", IntGeneral>, + PPC970_DGroup_First, PPC970_Unit_FXU; +} + +// SPILL_VRSAVE - Indicate that we're dumping the VRSAVE register, +// so we'll need to scavenge a register for it. 
+let mayStore = 1 in +def SPILL_VRSAVE : Pseudo<(outs), (ins VRSAVERC:$vrsave, memri:$F), + "#SPILL_VRSAVE", []>; + +// RESTORE_VRSAVE - Indicate that we're restoring the VRSAVE register (previously +// spilled), so we'll need to scavenge a register for it. +let mayLoad = 1 in +def RESTORE_VRSAVE : Pseudo<(outs VRSAVERC:$vrsave), (ins memri:$F), + "#RESTORE_VRSAVE", []>; + +let neverHasSideEffects = 1 in { +def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins gprc:$rS), "mtcrf $FXM, $rS", BrMCRX>, PPC970_MicroCode, PPC970_Unit_CRU; @@ -1259,215 +1651,205 @@ def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins GPRC:$rS), // instruction to keep the register allocator from becoming confused. // // FIXME: Make this a real Pseudo instruction when the JIT switches to MC. -def MFCRpseud: XFXForm_3<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM), +let isCodeGenOnly = 1 in +def MFCRpseud: XFXForm_3<31, 19, (outs gprc:$rT), (ins crbitm:$FXM), "#MFCRpseud", SprMFCR>, PPC970_MicroCode, PPC970_Unit_CRU; - -def MFCR : XFXForm_3<31, 19, (outs GPRC:$rT), (ins), - "mfcr $rT", SprMFCR>, - PPC970_MicroCode, PPC970_Unit_CRU; -def MFOCRF: XFXForm_5a<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM), +def MFOCRF: XFXForm_5a<31, 19, (outs gprc:$rT), (ins crbitm:$FXM), "mfocrf $rT, $FXM", SprMFCR>, PPC970_DGroup_First, PPC970_Unit_CRU; +} // neverHasSideEffects = 1 -// Instructions to manipulate FPSCR. Only long double handling uses these. -// FPSCR is not modelled; we use the SDNode Flag to keep things in order. +let neverHasSideEffects = 1 in +def MFCR : XFXForm_3<31, 19, (outs gprc:$rT), (ins), + "mfcr $rT", SprMFCR>, + PPC970_MicroCode, PPC970_Unit_CRU; +// Pseudo instruction to perform FADD in round-to-zero mode. +let usesCustomInserter = 1, Uses = [RM] in { + def FADDrtz: Pseudo<(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB), "", + [(set f64:$FRT, (PPCfaddrtz f64:$FRA, f64:$FRB))]>; +} + +// The above pseudo gets expanded to make use of the following instructions +// to manipulate FPSCR. Note that FPSCR is not modeled at the DAG level. let Uses = [RM], Defs = [RM] in { def MTFSB0 : XForm_43<63, 70, (outs), (ins u5imm:$FM), - "mtfsb0 $FM", IntMTFSB0, - [(PPCmtfsb0 (i32 imm:$FM))]>, + "mtfsb0 $FM", IntMTFSB0, []>, PPC970_DGroup_Single, PPC970_Unit_FPU; def MTFSB1 : XForm_43<63, 38, (outs), (ins u5imm:$FM), - "mtfsb1 $FM", IntMTFSB0, - [(PPCmtfsb1 (i32 imm:$FM))]>, + "mtfsb1 $FM", IntMTFSB0, []>, PPC970_DGroup_Single, PPC970_Unit_FPU; - // MTFSF does not actually produce an FP result. We pretend it copies - // input reg B to the output. If we didn't do this it would look like the - // instruction had no outputs (because we aren't modelling the FPSCR) and - // it would be deleted. 
- def MTFSF : XFLForm<63, 711, (outs F8RC:$FRA), - (ins i32imm:$FM, F8RC:$rT, F8RC:$FRB), - "mtfsf $FM, $rT", "$FRB = $FRA", IntMTFSB0, - [(set F8RC:$FRA, (PPCmtfsf (i32 imm:$FM), - F8RC:$rT, F8RC:$FRB))]>, + def MTFSF : XFLForm<63, 711, (outs), (ins i32imm:$FM, f8rc:$rT), + "mtfsf $FM, $rT", IntMTFSB0, []>, PPC970_DGroup_Single, PPC970_Unit_FPU; } let Uses = [RM] in { - def MFFS : XForm_42<63, 583, (outs F8RC:$rT), (ins), + def MFFS : XForm_42<63, 583, (outs f8rc:$rT), (ins), "mffs $rT", IntMFFS, - [(set F8RC:$rT, (PPCmffs))]>, - PPC970_DGroup_Single, PPC970_Unit_FPU; - def FADDrtz: AForm_2<63, 21, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fadd $FRT, $FRA, $FRB", FPAddSub, - [(set F8RC:$FRT, (PPCfaddrtz F8RC:$FRA, F8RC:$FRB))]>, + [(set f64:$rT, (PPCmffs))]>, PPC970_DGroup_Single, PPC970_Unit_FPU; } -let PPC970_Unit = 1 in { // FXU Operations. - +let PPC970_Unit = 1, neverHasSideEffects = 1 in { // FXU Operations. // XO-Form instructions. Arithmetic instructions that can set overflow bit // -def ADD4 : XOForm_1<31, 266, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "add $rT, $rA, $rB", IntSimple, - [(set GPRC:$rT, (add GPRC:$rA, GPRC:$rB))]>; -let Defs = [CARRY] in { -def ADDC : XOForm_1<31, 10, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "addc $rT, $rA, $rB", IntGeneral, - [(set GPRC:$rT, (addc GPRC:$rA, GPRC:$rB))]>, - PPC970_DGroup_Cracked; -} -def DIVW : XOForm_1<31, 491, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "divw $rT, $rA, $rB", IntDivW, - [(set GPRC:$rT, (sdiv GPRC:$rA, GPRC:$rB))]>, - PPC970_DGroup_First, PPC970_DGroup_Cracked; -def DIVWU : XOForm_1<31, 459, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "divwu $rT, $rA, $rB", IntDivW, - [(set GPRC:$rT, (udiv GPRC:$rA, GPRC:$rB))]>, - PPC970_DGroup_First, PPC970_DGroup_Cracked; -def MULHW : XOForm_1<31, 75, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "mulhw $rT, $rA, $rB", IntMulHW, - [(set GPRC:$rT, (mulhs GPRC:$rA, GPRC:$rB))]>; -def MULHWU : XOForm_1<31, 11, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "mulhwu $rT, $rA, $rB", IntMulHWU, - [(set GPRC:$rT, (mulhu GPRC:$rA, GPRC:$rB))]>; -def MULLW : XOForm_1<31, 235, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "mullw $rT, $rA, $rB", IntMulHW, - [(set GPRC:$rT, (mul GPRC:$rA, GPRC:$rB))]>; -def SUBF : XOForm_1<31, 40, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "subf $rT, $rA, $rB", IntGeneral, - [(set GPRC:$rT, (sub GPRC:$rB, GPRC:$rA))]>; -let Defs = [CARRY] in { -def SUBFC : XOForm_1<31, 8, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "subfc $rT, $rA, $rB", IntGeneral, - [(set GPRC:$rT, (subc GPRC:$rB, GPRC:$rA))]>, - PPC970_DGroup_Cracked; -} -def NEG : XOForm_3<31, 104, 0, (outs GPRC:$rT), (ins GPRC:$rA), - "neg $rT, $rA", IntSimple, - [(set GPRC:$rT, (ineg GPRC:$rA))]>; -let Uses = [CARRY], Defs = [CARRY] in { -def ADDE : XOForm_1<31, 138, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "adde $rT, $rA, $rB", IntGeneral, - [(set GPRC:$rT, (adde GPRC:$rA, GPRC:$rB))]>; -def ADDME : XOForm_3<31, 234, 0, (outs GPRC:$rT), (ins GPRC:$rA), - "addme $rT, $rA", IntGeneral, - [(set GPRC:$rT, (adde GPRC:$rA, -1))]>; -def ADDZE : XOForm_3<31, 202, 0, (outs GPRC:$rT), (ins GPRC:$rA), - "addze $rT, $rA", IntGeneral, - [(set GPRC:$rT, (adde GPRC:$rA, 0))]>; -def SUBFE : XOForm_1<31, 136, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "subfe $rT, $rA, $rB", IntGeneral, - [(set GPRC:$rT, (sube GPRC:$rB, GPRC:$rA))]>; -def SUBFME : XOForm_3<31, 232, 0, (outs GPRC:$rT), (ins GPRC:$rA), - "subfme $rT, $rA", IntGeneral, - [(set GPRC:$rT, (sube -1, 
GPRC:$rA))]>; -def SUBFZE : XOForm_3<31, 200, 0, (outs GPRC:$rT), (ins GPRC:$rA), - "subfze $rT, $rA", IntGeneral, - [(set GPRC:$rT, (sube 0, GPRC:$rA))]>; +defm ADD4 : XOForm_1r<31, 266, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "add", "$rT, $rA, $rB", IntSimple, + [(set i32:$rT, (add i32:$rA, i32:$rB))]>; +defm ADDC : XOForm_1rc<31, 10, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "addc", "$rT, $rA, $rB", IntGeneral, + [(set i32:$rT, (addc i32:$rA, i32:$rB))]>, + PPC970_DGroup_Cracked; +defm DIVW : XOForm_1r<31, 491, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "divw", "$rT, $rA, $rB", IntDivW, + [(set i32:$rT, (sdiv i32:$rA, i32:$rB))]>, + PPC970_DGroup_First, PPC970_DGroup_Cracked; +defm DIVWU : XOForm_1r<31, 459, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "divwu", "$rT, $rA, $rB", IntDivW, + [(set i32:$rT, (udiv i32:$rA, i32:$rB))]>, + PPC970_DGroup_First, PPC970_DGroup_Cracked; +defm MULHW : XOForm_1r<31, 75, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "mulhw", "$rT, $rA, $rB", IntMulHW, + [(set i32:$rT, (mulhs i32:$rA, i32:$rB))]>; +defm MULHWU : XOForm_1r<31, 11, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "mulhwu", "$rT, $rA, $rB", IntMulHWU, + [(set i32:$rT, (mulhu i32:$rA, i32:$rB))]>; +defm MULLW : XOForm_1r<31, 235, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "mullw", "$rT, $rA, $rB", IntMulHW, + [(set i32:$rT, (mul i32:$rA, i32:$rB))]>; +defm SUBF : XOForm_1r<31, 40, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "subf", "$rT, $rA, $rB", IntGeneral, + [(set i32:$rT, (sub i32:$rB, i32:$rA))]>; +defm SUBFC : XOForm_1rc<31, 8, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "subfc", "$rT, $rA, $rB", IntGeneral, + [(set i32:$rT, (subc i32:$rB, i32:$rA))]>, + PPC970_DGroup_Cracked; +defm NEG : XOForm_3r<31, 104, 0, (outs gprc:$rT), (ins gprc:$rA), + "neg", "$rT, $rA", IntSimple, + [(set i32:$rT, (ineg i32:$rA))]>; +let Uses = [CARRY] in { +defm ADDE : XOForm_1rc<31, 138, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "adde", "$rT, $rA, $rB", IntGeneral, + [(set i32:$rT, (adde i32:$rA, i32:$rB))]>; +defm ADDME : XOForm_3rc<31, 234, 0, (outs gprc:$rT), (ins gprc:$rA), + "addme", "$rT, $rA", IntGeneral, + [(set i32:$rT, (adde i32:$rA, -1))]>; +defm ADDZE : XOForm_3rc<31, 202, 0, (outs gprc:$rT), (ins gprc:$rA), + "addze", "$rT, $rA", IntGeneral, + [(set i32:$rT, (adde i32:$rA, 0))]>; +defm SUBFE : XOForm_1rc<31, 136, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB), + "subfe", "$rT, $rA, $rB", IntGeneral, + [(set i32:$rT, (sube i32:$rB, i32:$rA))]>; +defm SUBFME : XOForm_3rc<31, 232, 0, (outs gprc:$rT), (ins gprc:$rA), + "subfme", "$rT, $rA", IntGeneral, + [(set i32:$rT, (sube -1, i32:$rA))]>; +defm SUBFZE : XOForm_3rc<31, 200, 0, (outs gprc:$rT), (ins gprc:$rA), + "subfze", "$rT, $rA", IntGeneral, + [(set i32:$rT, (sube 0, i32:$rA))]>; } } // A-Form instructions. Most of the instructions executed in the FPU are of // this type. // -let PPC970_Unit = 3 in { // FPU Operations. +let PPC970_Unit = 3, neverHasSideEffects = 1 in { // FPU Operations. 
let Uses = [RM] in { - def FMADD : AForm_1<63, 29, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB), - "fmadd $FRT, $FRA, $FRC, $FRB", FPFused, - [(set F8RC:$FRT, - (fma F8RC:$FRA, F8RC:$FRC, F8RC:$FRB))]>; - def FMADDS : AForm_1<59, 29, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB), - "fmadds $FRT, $FRA, $FRC, $FRB", FPGeneral, - [(set F4RC:$FRT, - (fma F4RC:$FRA, F4RC:$FRC, F4RC:$FRB))]>; - def FMSUB : AForm_1<63, 28, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB), - "fmsub $FRT, $FRA, $FRC, $FRB", FPFused, - [(set F8RC:$FRT, - (fma F8RC:$FRA, F8RC:$FRC, (fneg F8RC:$FRB)))]>; - def FMSUBS : AForm_1<59, 28, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB), - "fmsubs $FRT, $FRA, $FRC, $FRB", FPGeneral, - [(set F4RC:$FRT, - (fma F4RC:$FRA, F4RC:$FRC, (fneg F4RC:$FRB)))]>; - def FNMADD : AForm_1<63, 31, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB), - "fnmadd $FRT, $FRA, $FRC, $FRB", FPFused, - [(set F8RC:$FRT, - (fneg (fma F8RC:$FRA, F8RC:$FRC, F8RC:$FRB)))]>; - def FNMADDS : AForm_1<59, 31, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB), - "fnmadds $FRT, $FRA, $FRC, $FRB", FPGeneral, - [(set F4RC:$FRT, - (fneg (fma F4RC:$FRA, F4RC:$FRC, F4RC:$FRB)))]>; - def FNMSUB : AForm_1<63, 30, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB), - "fnmsub $FRT, $FRA, $FRC, $FRB", FPFused, - [(set F8RC:$FRT, (fneg (fma F8RC:$FRA, F8RC:$FRC, - (fneg F8RC:$FRB))))]>; - def FNMSUBS : AForm_1<59, 30, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB), - "fnmsubs $FRT, $FRA, $FRC, $FRB", FPGeneral, - [(set F4RC:$FRT, (fneg (fma F4RC:$FRA, F4RC:$FRC, - (fneg F4RC:$FRB))))]>; + defm FMADD : AForm_1r<63, 29, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB), + "fmadd", "$FRT, $FRA, $FRC, $FRB", FPFused, + [(set f64:$FRT, (fma f64:$FRA, f64:$FRC, f64:$FRB))]>; + defm FMADDS : AForm_1r<59, 29, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB), + "fmadds", "$FRT, $FRA, $FRC, $FRB", FPGeneral, + [(set f32:$FRT, (fma f32:$FRA, f32:$FRC, f32:$FRB))]>; + defm FMSUB : AForm_1r<63, 28, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB), + "fmsub", "$FRT, $FRA, $FRC, $FRB", FPFused, + [(set f64:$FRT, + (fma f64:$FRA, f64:$FRC, (fneg f64:$FRB)))]>; + defm FMSUBS : AForm_1r<59, 28, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB), + "fmsubs", "$FRT, $FRA, $FRC, $FRB", FPGeneral, + [(set f32:$FRT, + (fma f32:$FRA, f32:$FRC, (fneg f32:$FRB)))]>; + defm FNMADD : AForm_1r<63, 31, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB), + "fnmadd", "$FRT, $FRA, $FRC, $FRB", FPFused, + [(set f64:$FRT, + (fneg (fma f64:$FRA, f64:$FRC, f64:$FRB)))]>; + defm FNMADDS : AForm_1r<59, 31, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB), + "fnmadds", "$FRT, $FRA, $FRC, $FRB", FPGeneral, + [(set f32:$FRT, + (fneg (fma f32:$FRA, f32:$FRC, f32:$FRB)))]>; + defm FNMSUB : AForm_1r<63, 30, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB), + "fnmsub", "$FRT, $FRA, $FRC, $FRB", FPFused, + [(set f64:$FRT, (fneg (fma f64:$FRA, f64:$FRC, + (fneg f64:$FRB))))]>; + defm FNMSUBS : AForm_1r<59, 30, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB), + "fnmsubs", "$FRT, $FRA, $FRC, $FRB", FPGeneral, + [(set f32:$FRT, (fneg (fma f32:$FRA, f32:$FRC, + (fneg f32:$FRB))))]>; } // FSEL is artificially split into 4 and 8-byte forms for the result. 
To avoid // having 4 of these, force the comparison to always be an 8-byte double (code // should use an FMRSD if the input comparison value really wants to be a float) // and 4/8 byte forms for the result and operand type.. -def FSELD : AForm_1<63, 23, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB), - "fsel $FRT, $FRA, $FRC, $FRB", FPGeneral, - [(set F8RC:$FRT, (PPCfsel F8RC:$FRA,F8RC:$FRC,F8RC:$FRB))]>; -def FSELS : AForm_1<63, 23, - (outs F4RC:$FRT), (ins F8RC:$FRA, F4RC:$FRC, F4RC:$FRB), - "fsel $FRT, $FRA, $FRC, $FRB", FPGeneral, - [(set F4RC:$FRT, (PPCfsel F8RC:$FRA,F4RC:$FRC,F4RC:$FRB))]>; +let Interpretation64Bit = 1 in +defm FSELD : AForm_1r<63, 23, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB), + "fsel", "$FRT, $FRA, $FRC, $FRB", FPGeneral, + [(set f64:$FRT, (PPCfsel f64:$FRA, f64:$FRC, f64:$FRB))]>; +defm FSELS : AForm_1r<63, 23, + (outs f4rc:$FRT), (ins f8rc:$FRA, f4rc:$FRC, f4rc:$FRB), + "fsel", "$FRT, $FRA, $FRC, $FRB", FPGeneral, + [(set f32:$FRT, (PPCfsel f64:$FRA, f32:$FRC, f32:$FRB))]>; let Uses = [RM] in { - def FADD : AForm_2<63, 21, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fadd $FRT, $FRA, $FRB", FPAddSub, - [(set F8RC:$FRT, (fadd F8RC:$FRA, F8RC:$FRB))]>; - def FADDS : AForm_2<59, 21, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB), - "fadds $FRT, $FRA, $FRB", FPGeneral, - [(set F4RC:$FRT, (fadd F4RC:$FRA, F4RC:$FRB))]>; - def FDIV : AForm_2<63, 18, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fdiv $FRT, $FRA, $FRB", FPDivD, - [(set F8RC:$FRT, (fdiv F8RC:$FRA, F8RC:$FRB))]>; - def FDIVS : AForm_2<59, 18, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB), - "fdivs $FRT, $FRA, $FRB", FPDivS, - [(set F4RC:$FRT, (fdiv F4RC:$FRA, F4RC:$FRB))]>; - def FMUL : AForm_3<63, 25, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC), - "fmul $FRT, $FRA, $FRC", FPFused, - [(set F8RC:$FRT, (fmul F8RC:$FRA, F8RC:$FRC))]>; - def FMULS : AForm_3<59, 25, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC), - "fmuls $FRT, $FRA, $FRC", FPGeneral, - [(set F4RC:$FRT, (fmul F4RC:$FRA, F4RC:$FRC))]>; - def FSUB : AForm_2<63, 20, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fsub $FRT, $FRA, $FRB", FPAddSub, - [(set F8RC:$FRT, (fsub F8RC:$FRA, F8RC:$FRB))]>; - def FSUBS : AForm_2<59, 20, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB), - "fsubs $FRT, $FRA, $FRB", FPGeneral, - [(set F4RC:$FRT, (fsub F4RC:$FRA, F4RC:$FRB))]>; + defm FADD : AForm_2r<63, 21, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB), + "fadd", "$FRT, $FRA, $FRB", FPAddSub, + [(set f64:$FRT, (fadd f64:$FRA, f64:$FRB))]>; + defm FADDS : AForm_2r<59, 21, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB), + "fadds", "$FRT, $FRA, $FRB", FPGeneral, + [(set f32:$FRT, (fadd f32:$FRA, f32:$FRB))]>; + defm FDIV : AForm_2r<63, 18, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB), + "fdiv", "$FRT, $FRA, $FRB", FPDivD, + [(set f64:$FRT, (fdiv f64:$FRA, f64:$FRB))]>; + defm FDIVS : AForm_2r<59, 18, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB), + "fdivs", "$FRT, $FRA, $FRB", FPDivS, + [(set f32:$FRT, (fdiv f32:$FRA, f32:$FRB))]>; + defm FMUL : AForm_3r<63, 25, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC), + "fmul", "$FRT, $FRA, $FRC", FPFused, + [(set f64:$FRT, (fmul f64:$FRA, f64:$FRC))]>; + defm FMULS : AForm_3r<59, 25, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC), + "fmuls", "$FRT, $FRA, $FRC", FPGeneral, + [(set f32:$FRT, (fmul f32:$FRA, f32:$FRC))]>; + defm FSUB : AForm_2r<63, 20, + (outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB), + "fsub", "$FRT, $FRA, $FRB", FPAddSub, + [(set f64:$FRT, (fsub f64:$FRA, 
f64:$FRB))]>; + defm FSUBS : AForm_2r<59, 20, + (outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB), + "fsubs", "$FRT, $FRA, $FRB", FPGeneral, + [(set f32:$FRT, (fsub f32:$FRA, f32:$FRB))]>; } } +let neverHasSideEffects = 1 in { let PPC970_Unit = 1 in { // FXU Operations. + let isSelect = 1 in def ISEL : AForm_4<31, 15, - (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB, pred:$cond), + (outs gprc:$rT), (ins gprc_nor0:$rA, gprc:$rB, crbitrc:$cond), "isel $rT, $rA, $rB, $cond", IntGeneral, []>; } @@ -1477,26 +1859,29 @@ let PPC970_Unit = 1 in { // FXU Operations. // let isCommutable = 1 in { // RLWIMI can be commuted if the rotate amount is zero. -def RLWIMI : MForm_2<20, - (outs GPRC:$rA), (ins GPRC:$rSi, GPRC:$rS, u5imm:$SH, u5imm:$MB, - u5imm:$ME), "rlwimi $rA, $rS, $SH, $MB, $ME", IntRotate, - []>, PPC970_DGroup_Cracked, RegConstraint<"$rSi = $rA">, - NoEncode<"$rSi">; +defm RLWIMI : MForm_2r<20, (outs gprc:$rA), + (ins gprc:$rSi, gprc:$rS, u5imm:$SH, u5imm:$MB, + u5imm:$ME), "rlwimi", "$rA, $rS, $SH, $MB, $ME", IntRotate, + []>, PPC970_DGroup_Cracked, RegConstraint<"$rSi = $rA">, + NoEncode<"$rSi">; } +let BaseName = "rlwinm" in { def RLWINM : MForm_2<21, - (outs GPRC:$rA), (ins GPRC:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME), + (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME), "rlwinm $rA, $rS, $SH, $MB, $ME", IntGeneral, - []>; + []>, RecFormRel; +let Defs = [CR0] in def RLWINMo : MForm_2<21, - (outs GPRC:$rA), (ins GPRC:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME), - "rlwinm. $rA, $rS, $SH, $MB, $ME", IntGeneral, - []>, isDOT, PPC970_DGroup_Cracked; -def RLWNM : MForm_2<23, - (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB, u5imm:$MB, u5imm:$ME), - "rlwnm $rA, $rS, $rB, $MB, $ME", IntGeneral, - []>; + (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME), + "rlwinm. $rA, $rS, $SH, $MB, $ME", IntGeneral, + []>, isDOT, RecFormRel, PPC970_DGroup_Cracked; } - +defm RLWNM : MForm_2r<23, (outs gprc:$rA), + (ins gprc:$rS, gprc:$rB, u5imm:$MB, u5imm:$ME), + "rlwnm", "$rA, $rS, $rB, $MB, $ME", IntGeneral, + []>; +} +} // neverHasSideEffects = 1 //===----------------------------------------------------------------------===// // PowerPC Instruction Patterns @@ -1507,47 +1892,43 @@ def : Pat<(i32 imm:$imm), (ORI (LIS (HI16 imm:$imm)), (LO16 imm:$imm))>; // Implement the 'not' operation with the NOR instruction. -def NOT : Pat<(not GPRC:$in), - (NOR GPRC:$in, GPRC:$in)>; +def NOT : Pat<(not i32:$in), + (NOR $in, $in)>; // ADD an arbitrary immediate. -def : Pat<(add GPRC:$in, imm:$imm), - (ADDIS (ADDI GPRC:$in, (LO16 imm:$imm)), (HA16 imm:$imm))>; +def : Pat<(add i32:$in, imm:$imm), + (ADDIS (ADDI $in, (LO16 imm:$imm)), (HA16 imm:$imm))>; // OR an arbitrary immediate. -def : Pat<(or GPRC:$in, imm:$imm), - (ORIS (ORI GPRC:$in, (LO16 imm:$imm)), (HI16 imm:$imm))>; +def : Pat<(or i32:$in, imm:$imm), + (ORIS (ORI $in, (LO16 imm:$imm)), (HI16 imm:$imm))>; // XOR an arbitrary immediate. 
-def : Pat<(xor GPRC:$in, imm:$imm), - (XORIS (XORI GPRC:$in, (LO16 imm:$imm)), (HI16 imm:$imm))>; +def : Pat<(xor i32:$in, imm:$imm), + (XORIS (XORI $in, (LO16 imm:$imm)), (HI16 imm:$imm))>; // SUBFIC -def : Pat<(sub immSExt16:$imm, GPRC:$in), - (SUBFIC GPRC:$in, imm:$imm)>; +def : Pat<(sub immSExt16:$imm, i32:$in), + (SUBFIC $in, imm:$imm)>; // SHL/SRL -def : Pat<(shl GPRC:$in, (i32 imm:$imm)), - (RLWINM GPRC:$in, imm:$imm, 0, (SHL32 imm:$imm))>; -def : Pat<(srl GPRC:$in, (i32 imm:$imm)), - (RLWINM GPRC:$in, (SRL32 imm:$imm), imm:$imm, 31)>; +def : Pat<(shl i32:$in, (i32 imm:$imm)), + (RLWINM $in, imm:$imm, 0, (SHL32 imm:$imm))>; +def : Pat<(srl i32:$in, (i32 imm:$imm)), + (RLWINM $in, (SRL32 imm:$imm), imm:$imm, 31)>; // ROTL -def : Pat<(rotl GPRC:$in, GPRC:$sh), - (RLWNM GPRC:$in, GPRC:$sh, 0, 31)>; -def : Pat<(rotl GPRC:$in, (i32 imm:$imm)), - (RLWINM GPRC:$in, imm:$imm, 0, 31)>; +def : Pat<(rotl i32:$in, i32:$sh), + (RLWNM $in, $sh, 0, 31)>; +def : Pat<(rotl i32:$in, (i32 imm:$imm)), + (RLWINM $in, imm:$imm, 0, 31)>; // RLWNM -def : Pat<(and (rotl GPRC:$in, GPRC:$sh), maskimm32:$imm), - (RLWNM GPRC:$in, GPRC:$sh, (MB maskimm32:$imm), (ME maskimm32:$imm))>; +def : Pat<(and (rotl i32:$in, i32:$sh), maskimm32:$imm), + (RLWNM $in, $sh, (MB maskimm32:$imm), (ME maskimm32:$imm))>; // Calls -def : Pat<(PPCcall_Darwin (i32 tglobaladdr:$dst)), - (BL_Darwin tglobaladdr:$dst)>; -def : Pat<(PPCcall_Darwin (i32 texternalsym:$dst)), - (BL_Darwin texternalsym:$dst)>; -def : Pat<(PPCcall_SVR4 (i32 tglobaladdr:$dst)), - (BL_SVR4 tglobaladdr:$dst)>; -def : Pat<(PPCcall_SVR4 (i32 texternalsym:$dst)), - (BL_SVR4 texternalsym:$dst)>; +def : Pat<(PPCcall (i32 tglobaladdr:$dst)), + (BL tglobaladdr:$dst)>; +def : Pat<(PPCcall (i32 texternalsym:$dst)), + (BL texternalsym:$dst)>; def : Pat<(PPCtc_return (i32 tglobaladdr:$dst), imm:$imm), @@ -1570,28 +1951,28 @@ def : Pat<(PPChi tjumptable:$in, 0), (LIS tjumptable:$in)>; def : Pat<(PPClo tjumptable:$in, 0), (LI tjumptable:$in)>; def : Pat<(PPChi tblockaddress:$in, 0), (LIS tblockaddress:$in)>; def : Pat<(PPClo tblockaddress:$in, 0), (LI tblockaddress:$in)>; -def : Pat<(PPChi tglobaltlsaddr:$g, GPRC:$in), - (ADDIS GPRC:$in, tglobaltlsaddr:$g)>; -def : Pat<(PPClo tglobaltlsaddr:$g, GPRC:$in), - (ADDIL GPRC:$in, tglobaltlsaddr:$g)>; -def : Pat<(add GPRC:$in, (PPChi tglobaladdr:$g, 0)), - (ADDIS GPRC:$in, tglobaladdr:$g)>; -def : Pat<(add GPRC:$in, (PPChi tconstpool:$g, 0)), - (ADDIS GPRC:$in, tconstpool:$g)>; -def : Pat<(add GPRC:$in, (PPChi tjumptable:$g, 0)), - (ADDIS GPRC:$in, tjumptable:$g)>; -def : Pat<(add GPRC:$in, (PPChi tblockaddress:$g, 0)), - (ADDIS GPRC:$in, tblockaddress:$g)>; +def : Pat<(PPChi tglobaltlsaddr:$g, i32:$in), + (ADDIS $in, tglobaltlsaddr:$g)>; +def : Pat<(PPClo tglobaltlsaddr:$g, i32:$in), + (ADDI $in, tglobaltlsaddr:$g)>; +def : Pat<(add i32:$in, (PPChi tglobaladdr:$g, 0)), + (ADDIS $in, tglobaladdr:$g)>; +def : Pat<(add i32:$in, (PPChi tconstpool:$g, 0)), + (ADDIS $in, tconstpool:$g)>; +def : Pat<(add i32:$in, (PPChi tjumptable:$g, 0)), + (ADDIS $in, tjumptable:$g)>; +def : Pat<(add i32:$in, (PPChi tblockaddress:$g, 0)), + (ADDIS $in, tblockaddress:$g)>; // Standard shifts. These are represented separately from the real shifts above // so that we can distinguish between shifts that allow 5-bit and 6-bit shift // amounts. 
-def : Pat<(sra GPRC:$rS, GPRC:$rB), - (SRAW GPRC:$rS, GPRC:$rB)>; -def : Pat<(srl GPRC:$rS, GPRC:$rB), - (SRW GPRC:$rS, GPRC:$rB)>; -def : Pat<(shl GPRC:$rS, GPRC:$rB), - (SLW GPRC:$rS, GPRC:$rB)>; +def : Pat<(sra i32:$rS, i32:$rB), + (SRAW $rS, $rB)>; +def : Pat<(srl i32:$rS, i32:$rB), + (SRW $rS, $rB)>; +def : Pat<(shl i32:$rS, i32:$rB), + (SLW $rS, $rB)>; def : Pat<(zextloadi1 iaddr:$src), (LBZ iaddr:$src)>; @@ -1614,18 +1995,20 @@ def : Pat<(f64 (extloadf32 iaddr:$src)), def : Pat<(f64 (extloadf32 xaddr:$src)), (COPY_TO_REGCLASS (LFSX xaddr:$src), F8RC)>; -def : Pat<(f64 (fextend F4RC:$src)), - (COPY_TO_REGCLASS F4RC:$src, F8RC)>; - -// Memory barriers -def : Pat<(membarrier (i32 imm /*ll*/), - (i32 imm /*ls*/), - (i32 imm /*sl*/), - (i32 imm /*ss*/), - (i32 imm /*device*/)), - (SYNC)>; +def : Pat<(f64 (fextend f32:$src)), + (COPY_TO_REGCLASS $src, F8RC)>; def : Pat<(atomic_fence (imm), (imm)), (SYNC)>; +// Additional FNMSUB patterns: -a*c + b == -(a*c - b) +def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B), + (FNMSUB $A, $C, $B)>; +def : Pat<(fma f64:$A, (fneg f64:$C), f64:$B), + (FNMSUB $A, $C, $B)>; +def : Pat<(fma (fneg f32:$A), f32:$C, f32:$B), + (FNMSUBS $A, $C, $B)>; +def : Pat<(fma f32:$A, (fneg f32:$C), f32:$B), + (FNMSUBS $A, $C, $B)>; + include "PPCInstrAltivec.td" include "PPCInstr64Bit.td" diff --git a/lib/Target/PowerPC/PPCMCInstLower.cpp b/lib/Target/PowerPC/PPCMCInstLower.cpp index 9b0df3e..f8cf3a5 100644 --- a/lib/Target/PowerPC/PPCMCInstLower.cpp +++ b/lib/Target/PowerPC/PPCMCInstLower.cpp @@ -14,6 +14,7 @@ #include "PPC.h" #include "llvm/ADT/SmallString.h" +#include "llvm/ADT/Twine.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineModuleInfoImpls.h" @@ -51,7 +52,14 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){ // before we return the symbol. if (MO.getTargetFlags() == PPCII::MO_DARWIN_STUB) { Name += "$stub"; - MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str()); + const char *PGP = AP.MAI->getPrivateGlobalPrefix(); + const char *Prefix = ""; + if (!Name.startswith(PGP)) { + // http://llvm.org/bugs/show_bug.cgi?id=15763 + // all stubs and lazy_ptrs should be local symbols, which need leading 'L' + Prefix = PGP; + } + MCSymbol *Sym = Ctx.GetOrCreateSymbol(Twine(Prefix) + Twine(Name)); MachineModuleInfoImpl::StubValueTy &StubSym = getMachOMMI(AP).getFnStubEntry(Sym); if (StubSym.getPointer()) diff --git a/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/lib/Target/PowerPC/PPCMachineFunctionInfo.h index b1636a2..40d1f3a 100644 --- a/lib/Target/PowerPC/PPCMachineFunctionInfo.h +++ b/lib/Target/PowerPC/PPCMachineFunctionInfo.h @@ -47,6 +47,9 @@ class PPCFunctionInfo : public MachineFunctionInfo { /// SpillsCR - Indicates whether CR is spilled in the current function. bool SpillsCR; + /// Indicates whether VRSAVE is spilled in the current function. + bool SpillsVRSAVE; + /// LRStoreRequired - The bool indicates whether there is some explicit use of /// the LR/LR8 stack slot that is not obvious from scanning the code. This /// requires that the code generator produce a store of LR to the stack on @@ -81,6 +84,11 @@ class PPCFunctionInfo : public MachineFunctionInfo { /// CRSpillFrameIndex - FrameIndex for CR spill slot for 32-bit SVR4. int CRSpillFrameIndex; + /// If any of CR[2-4] need to be saved in the prologue and restored in the + /// epilogue then they are added to this array. This is used for the + /// 64-bit SVR4 ABI. 
+ SmallVector<unsigned, 3> MustSaveCRs; + public: explicit PPCFunctionInfo(MachineFunction &MF) : FramePointerSaveIndex(0), @@ -88,6 +96,7 @@ public: HasSpills(false), HasNonRISpills(false), SpillsCR(false), + SpillsVRSAVE(false), LRStoreRequired(false), MinReservedArea(0), TailCallSPDelta(0), @@ -127,6 +136,9 @@ public: void setSpillsCR() { SpillsCR = true; } bool isCRSpilled() const { return SpillsCR; } + void setSpillsVRSAVE() { SpillsVRSAVE = true; } + bool isVRSAVESpilled() const { return SpillsVRSAVE; } + void setLRStoreRequired() { LRStoreRequired = true; } bool isLRStoreRequired() const { return LRStoreRequired; } @@ -147,6 +159,10 @@ public: int getCRSpillFrameIndex() const { return CRSpillFrameIndex; } void setCRSpillFrameIndex(int idx) { CRSpillFrameIndex = idx; } + + const SmallVector<unsigned, 3> & + getMustSaveCRs() const { return MustSaveCRs; } + void addMustSaveCR(unsigned Reg) { MustSaveCRs.push_back(Reg); } }; } // end of namespace llvm diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index e2c7221..2be6324 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -68,7 +68,7 @@ PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST, ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8; ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8; ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX; - ImmToIdxMap[PPC::ADDI8] = PPC::ADD8; ImmToIdxMap[PPC::STD_32] = PPC::STDX_32; + ImmToIdxMap[PPC::ADDI8] = PPC::ADD8; } /// getPointerRegClass - Return the register class to use to hold pointers. @@ -76,6 +76,14 @@ PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST, const TargetRegisterClass * PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind) const { + // Note that PPCInstrInfo::FoldImmediate also directly uses this Kind value + // when it checks for ZERO folding. + if (Kind == 1) { + if (Subtarget.isPPC64()) + return &PPC::G8RC_NOX0RegClass; + return &PPC::GPRC_NOR0RegClass; + } + if (Subtarget.isPPC64()) return &PPC::G8RCRegClass; return &PPC::GPRCRegClass; @@ -99,12 +107,35 @@ PPCRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { return Subtarget.isPPC64() ? CSR_SVR464_RegMask : CSR_SVR432_RegMask; } +const uint32_t* +PPCRegisterInfo::getNoPreservedMask() const { + // The naming here is inverted: The CSR_NoRegs_Altivec has the + // Altivec registers masked so that they're not saved and restored around + // instructions with this preserved mask. + + if (!Subtarget.hasAltivec()) + return CSR_NoRegs_Altivec_RegMask; + + if (Subtarget.isDarwin()) + return CSR_NoRegs_Darwin_RegMask; + return CSR_NoRegs_RegMask; +} + BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { BitVector Reserved(getNumRegs()); const PPCFrameLowering *PPCFI = static_cast<const PPCFrameLowering*>(MF.getTarget().getFrameLowering()); - Reserved.set(PPC::R0); + // The ZERO register is not really a register, but the representation of r0 + // when used in instructions that treat r0 as the constant 0. + Reserved.set(PPC::ZERO); + Reserved.set(PPC::ZERO8); + + // The FP register is also not really a register, but is the representation + // of the frame pointer register used by ISD::FRAMEADDR. 
+ Reserved.set(PPC::FP); + Reserved.set(PPC::FP8); + Reserved.set(PPC::R1); Reserved.set(PPC::LR); Reserved.set(PPC::LR8); @@ -117,16 +148,14 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { } // On PPC64, r13 is the thread pointer. Never allocate this register. - // Note that this is over conservative, as it also prevents allocation of R31 - // when the FP is not needed. if (Subtarget.isPPC64()) { Reserved.set(PPC::R13); - Reserved.set(PPC::R31); - Reserved.set(PPC::X0); Reserved.set(PPC::X1); Reserved.set(PPC::X13); - Reserved.set(PPC::X31); + + if (PPCFI->needsFP(MF)) + Reserved.set(PPC::X31); // The 64-bit SVR4 ABI reserves r2 for the TOC pointer. if (Subtarget.isSVR4ABI()) { @@ -149,6 +178,8 @@ PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, switch (RC->getID()) { default: return 0; + case PPC::G8RC_NOX0RegClassID: + case PPC::GPRC_NOR0RegClassID: case PPC::G8RCRegClassID: case PPC::GPRCRegClassID: { unsigned FP = TFI->hasFP(MF) ? 1 : 0; @@ -174,8 +205,7 @@ PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, /// stwxu R0, SP, Rnegsize ; add and update the SP with the negated size /// addi Rnew, SP, \#maxCalFrameSize ; get the top of the allocation /// -void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II, - int SPAdj, RegScavenger *RS) const { +void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const { // Get the instruction. MachineInstr &MI = *II; // Get the instruction's basic block. @@ -271,22 +301,19 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II, /// stw rA, FI ; Store rA to the frame. /// void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II, - unsigned FrameIndex, int SPAdj, - RegScavenger *RS) const { + unsigned FrameIndex) const { // Get the instruction. MachineInstr &MI = *II; // ; SPILL_CR <SrcReg>, <offset> // Get the instruction's basic block. MachineBasicBlock &MBB = *MI.getParent(); + MachineFunction &MF = *MBB.getParent(); DebugLoc dl = MI.getDebugLoc(); - // FIXME: Once LLVM supports creating virtual registers here, or the register - // scavenger can return multiple registers, stop using reserved registers - // here. - (void) SPAdj; - (void) RS; - bool LP64 = Subtarget.isPPC64(); - unsigned Reg = LP64 ? PPC::X0 : PPC::R0; + const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; + const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; + + unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); unsigned SrcReg = MI.getOperand(0).getReg(); // We need to store the CR in the low 4-bits of the saved value. First, issue @@ -296,16 +323,20 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II, // If the saved register wasn't CR0, shift the bits left so that they are in // CR0's slot. - if (SrcReg != PPC::CR0) + if (SrcReg != PPC::CR0) { + unsigned Reg1 = Reg; + Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); + // rlwinm rA, rA, ShiftBits, 0, 31. BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg) - .addReg(Reg, RegState::Kill) - .addImm(getPPCRegisterNumbering(SrcReg) * 4) + .addReg(Reg1, RegState::Kill) + .addImm(getEncodingValue(SrcReg) * 4) .addImm(0) .addImm(31); + } addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW)) - .addReg(Reg, getKillRegState(MI.getOperand(1).getImm())), + .addReg(Reg, RegState::Kill), FrameIndex); // Discard the pseudo instruction. 
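The rlwinm shift amounts in the SPILL_CR lowering above, and in the RESTORE_CR lowering that follows, both come from the CR field's encoding value: field n sits in the nth nibble from the top of the word that mfcr produces, so the spill path rotates left by 4*n to park the field in CR0's slot before the store, and the restore path rotates left by 32 - 4*n to move it back before the mtcrf. That is all that getEncodingValue(SrcReg) * 4 and 32 - ShiftBits compute. A minimal standalone check of that arithmetic, in plain C++ with no LLVM dependencies (the rotl32 helper and the loop over encodings are illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// Rotate left within a 32-bit word: the effect of "rlwinm rA, rS, sh, 0, 31",
// i.e. a rotate with a full 0..31 mask.
static uint32_t rotl32(uint32_t v, unsigned sh) {
  sh &= 31;
  return sh ? (v << sh) | (v >> (32 - sh)) : v;
}

int main() {
  for (unsigned enc = 0; enc < 8; ++enc) {       // encoding values of CR0..CR7
    uint32_t field    = 0xFu << (28 - 4 * enc);  // CR field 'enc' in mfcr's result
    uint32_t spilled  = rotl32(field, 4 * enc);         // spill: rotate into CR0's slot
    uint32_t restored = rotl32(spilled, 32 - 4 * enc);  // restore: rotate back
    assert((spilled >> 28) == 0xFu && restored == field);
  }
  return 0;
}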
@@ -313,22 +344,19 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II, } void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II, - unsigned FrameIndex, int SPAdj, - RegScavenger *RS) const { + unsigned FrameIndex) const { // Get the instruction. MachineInstr &MI = *II; // ; <DestReg> = RESTORE_CR <offset> // Get the instruction's basic block. MachineBasicBlock &MBB = *MI.getParent(); + MachineFunction &MF = *MBB.getParent(); DebugLoc dl = MI.getDebugLoc(); - // FIXME: Once LLVM supports creating virtual registers here, or the register - // scavenger can return multiple registers, stop using reserved registers - // here. - (void) SPAdj; - (void) RS; - bool LP64 = Subtarget.isPPC64(); - unsigned Reg = LP64 ? PPC::X0 : PPC::R0; + const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; + const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; + + unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); unsigned DestReg = MI.getOperand(0).getReg(); assert(MI.definesRegister(DestReg) && "RESTORE_CR does not define its destination"); @@ -339,15 +367,67 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II, // If the reloaded register isn't CR0, shift the bits right so that they are // in the right CR's slot. if (DestReg != PPC::CR0) { - unsigned ShiftBits = getPPCRegisterNumbering(DestReg)*4; + unsigned Reg1 = Reg; + Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); + + unsigned ShiftBits = getEncodingValue(DestReg)*4; // rlwinm r11, r11, 32-ShiftBits, 0, 31. BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg) - .addReg(Reg).addImm(32-ShiftBits).addImm(0) + .addReg(Reg1, RegState::Kill).addImm(32-ShiftBits).addImm(0) .addImm(31); } BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTCRF8 : PPC::MTCRF), DestReg) - .addReg(Reg); + .addReg(Reg, RegState::Kill); + + // Discard the pseudo instruction. + MBB.erase(II); +} + +void PPCRegisterInfo::lowerVRSAVESpilling(MachineBasicBlock::iterator II, + unsigned FrameIndex) const { + // Get the instruction. + MachineInstr &MI = *II; // ; SPILL_VRSAVE <SrcReg>, <offset> + // Get the instruction's basic block. + MachineBasicBlock &MBB = *MI.getParent(); + MachineFunction &MF = *MBB.getParent(); + DebugLoc dl = MI.getDebugLoc(); + + const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; + unsigned Reg = MF.getRegInfo().createVirtualRegister(GPRC); + unsigned SrcReg = MI.getOperand(0).getReg(); + + BuildMI(MBB, II, dl, TII.get(PPC::MFVRSAVEv), Reg) + .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); + + addFrameReference(BuildMI(MBB, II, dl, TII.get(PPC::STW)) + .addReg(Reg, RegState::Kill), + FrameIndex); + + // Discard the pseudo instruction. + MBB.erase(II); +} + +void PPCRegisterInfo::lowerVRSAVERestore(MachineBasicBlock::iterator II, + unsigned FrameIndex) const { + // Get the instruction. + MachineInstr &MI = *II; // ; <DestReg> = RESTORE_VRSAVE <offset> + // Get the instruction's basic block. 
+ MachineBasicBlock &MBB = *MI.getParent(); + MachineFunction &MF = *MBB.getParent(); + DebugLoc dl = MI.getDebugLoc(); + + const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; + unsigned Reg = MF.getRegInfo().createVirtualRegister(GPRC); + unsigned DestReg = MI.getOperand(0).getReg(); + assert(MI.definesRegister(DestReg) && + "RESTORE_VRSAVE does not define its destination"); + + addFrameReference(BuildMI(MBB, II, dl, TII.get(PPC::LWZ), + Reg), FrameIndex); + + BuildMI(MBB, II, dl, TII.get(PPC::MTVRSAVEv), DestReg) + .addReg(Reg, RegState::Kill); // Discard the pseudo instruction. MBB.erase(II); @@ -374,6 +454,33 @@ PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF, return false; } +// Figure out if the offset in the instruction is shifted right two bits. This +// is true for instructions like "STD", which the machine implicitly adds two +// low zeros to. +static bool usesIXAddr(const MachineInstr &MI) { + unsigned OpC = MI.getOpcode(); + + switch (OpC) { + default: + return false; + case PPC::LWA: + case PPC::LD: + case PPC::STD: + return true; + } +} + +// Return the OffsetOperandNo given the FIOperandNum (and the instruction). +static unsigned getOffsetONFromFION(const MachineInstr &MI, + unsigned FIOperandNum) { + // Take into account whether it's an add or mem instruction + unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2; + if (MI.isInlineAsm()) + OffsetOperandNo = FIOperandNum-1; + + return OffsetOperandNo; +} + void PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, @@ -391,10 +498,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); DebugLoc dl = MI.getDebugLoc(); - // Take into account whether it's an add or mem instruction - unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2; - if (MI.isInlineAsm()) - OffsetOperandNo = FIOperandNum-1; + unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum); // Get the frame index. int FrameIndex = MI.getOperand(FIOperandNum).getIndex(); @@ -409,16 +513,22 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // Special case for dynamic alloca. if (FPSI && FrameIndex == FPSI && (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) { - lowerDynamicAlloc(II, SPAdj, RS); + lowerDynamicAlloc(II); return; } - // Special case for pseudo-ops SPILL_CR and RESTORE_CR. + // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc. if (OpC == PPC::SPILL_CR) { - lowerCRSpilling(II, FrameIndex, SPAdj, RS); + lowerCRSpilling(II, FrameIndex); return; } else if (OpC == PPC::RESTORE_CR) { - lowerCRRestore(II, FrameIndex, SPAdj, RS); + lowerCRRestore(II, FrameIndex); + return; + } else if (OpC == PPC::SPILL_VRSAVE) { + lowerVRSAVESpilling(II, FrameIndex); + return; + } else if (OpC == PPC::RESTORE_VRSAVE) { + lowerVRSAVERestore(II, FrameIndex); return; } @@ -430,36 +540,12 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, (is64Bit ? PPC::X1 : PPC::R1), false); - // Figure out if the offset in the instruction is shifted right two bits. This - // is true for instructions like "STD", which the machine implicitly adds two - // low zeros to. - bool isIXAddr = false; - switch (OpC) { - case PPC::LWA: - case PPC::LD: - case PPC::STD: - case PPC::STD_32: - isIXAddr = true; - break; - } + // Figure out if the offset in the instruction is shifted right two bits. 
+ bool isIXAddr = usesIXAddr(MI); - bool noImmForm = false; - switch (OpC) { - case PPC::LVEBX: - case PPC::LVEHX: - case PPC::LVEWX: - case PPC::LVX: - case PPC::LVXL: - case PPC::LVSL: - case PPC::LVSR: - case PPC::STVEBX: - case PPC::STVEHX: - case PPC::STVEWX: - case PPC::STVX: - case PPC::STVXL: - noImmForm = true; - break; - } + // If the instruction is not present in ImmToIdxMap, then it has no immediate + // form (and must be r+r). + bool noImmForm = !MI.isInlineAsm() && !ImmToIdxMap.count(OpC); // Now add the frame object offset to the offset from r1. int Offset = MFI->getObjectOffset(FrameIndex); @@ -497,13 +583,15 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned SReg = MF.getRegInfo().createVirtualRegister(is64Bit ? G8RC : GPRC); + const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC; + unsigned SRegHi = MF.getRegInfo().createVirtualRegister(RC), + SReg = MF.getRegInfo().createVirtualRegister(RC); // Insert a set of rA with the full offset value before the ld, st, or add - BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SReg) + BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi) .addImm(Offset >> 16); BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg) - .addReg(SReg, RegState::Kill) + .addReg(SRegHi, RegState::Kill) .addImm(Offset); // Convert into indexed form of the instruction: @@ -545,3 +633,124 @@ unsigned PPCRegisterInfo::getEHExceptionRegister() const { unsigned PPCRegisterInfo::getEHHandlerRegister() const { return !Subtarget.isPPC64() ? PPC::R4 : PPC::X4; } + +/// Returns true if the instruction's frame index +/// reference would be better served by a base register other than FP +/// or SP. Used by LocalStackFrameAllocation to determine which frame index +/// references it should create new base registers for. +bool PPCRegisterInfo:: +needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const { + assert(Offset < 0 && "Local offset must be negative"); + + unsigned FIOperandNum = 0; + while (!MI->getOperand(FIOperandNum).isFI()) { + ++FIOperandNum; + assert(FIOperandNum < MI->getNumOperands() && + "Instr doesn't have FrameIndex operand!"); + } + + unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum); + + if (!usesIXAddr(*MI)) + Offset += MI->getOperand(OffsetOperandNo).getImm(); + else + Offset += MI->getOperand(OffsetOperandNo).getImm() << 2; + + // It's the load/store FI references that cause issues, as it can be difficult + // to materialize the offset if it won't fit in the literal field. Estimate + // based on the size of the local frame and some conservative assumptions + // about the rest of the stack frame (note, this is pre-regalloc, so + // we don't know everything for certain yet) whether this offset is likely + // to be out of range of the immediate. Return true if so. + + // We only generate virtual base registers for loads and stores that have + // an r+i form. Return false for everything else. + unsigned OpC = MI->getOpcode(); + if (!ImmToIdxMap.count(OpC)) + return false; + + // Don't generate a new virtual base register just to add zero to it. 
+  if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
+      MI->getOperand(2).getImm() == 0)
+    return false;
+
+  MachineBasicBlock &MBB = *MI->getParent();
+  MachineFunction &MF = *MBB.getParent();
+
+  const PPCFrameLowering *PPCFI =
+    static_cast<const PPCFrameLowering*>(MF.getTarget().getFrameLowering());
+  unsigned StackEst =
+    PPCFI->determineFrameLayout(MF, false, true);
+
+  // If we likely don't need a stack frame, then we probably don't need a
+  // virtual base register either.
+  if (!StackEst)
+    return false;
+
+  // Estimate an offset from the stack pointer.
+  // The incoming offset is relative to the SP at the start of the function,
+  // but when we access the local it'll be relative to the SP after local
+  // allocation, so adjust our SP-relative offset by that allocation size.
+  Offset += StackEst;
+
+  // The frame pointer will point to the end of the stack, so estimate the
+  // offset as the difference between the object offset and the FP location.
+  return !isFrameOffsetLegal(MI, Offset);
+}
+
+/// Insert defining instruction(s) for BaseReg to
+/// be a pointer to FrameIdx at the beginning of the basic block.
+void PPCRegisterInfo::
+materializeFrameBaseRegister(MachineBasicBlock *MBB,
+                             unsigned BaseReg, int FrameIdx,
+                             int64_t Offset) const {
+  unsigned ADDriOpc = Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI;
+
+  MachineBasicBlock::iterator Ins = MBB->begin();
+  DebugLoc DL;                  // Defaults to "unknown"
+  if (Ins != MBB->end())
+    DL = Ins->getDebugLoc();
+
+  const MCInstrDesc &MCID = TII.get(ADDriOpc);
+  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+  const MachineFunction &MF = *MBB->getParent();
+  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
+
+  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
+    .addFrameIndex(FrameIdx).addImm(Offset);
+}
+
+void
+PPCRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
+                                   unsigned BaseReg, int64_t Offset) const {
+  MachineInstr &MI = *I;
+
+  unsigned FIOperandNum = 0;
+  while (!MI.getOperand(FIOperandNum).isFI()) {
+    ++FIOperandNum;
+    assert(FIOperandNum < MI.getNumOperands() &&
+           "Instr doesn't have FrameIndex operand!");
+  }
+
+  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
+  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
+
+  bool isIXAddr = usesIXAddr(MI);
+  if (!isIXAddr)
+    Offset += MI.getOperand(OffsetOperandNo).getImm();
+  else
+    Offset += MI.getOperand(OffsetOperandNo).getImm() << 2;
+
+  // Figure out if the offset in the instruction is shifted right two bits.
+  if (isIXAddr)
+    Offset >>= 2;    // The actual encoded value has the low two bits zero.
+ + MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset); +} + +bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, + int64_t Offset) const { + return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm + (isInt<16>(Offset) && (!usesIXAddr(*MI) || (Offset & 3) == 0)); +} + diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h index 5f89f63..7a48b4b 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.h +++ b/lib/Target/PowerPC/PPCRegisterInfo.h @@ -15,8 +15,8 @@ #ifndef POWERPC32_REGISTERINFO_H #define POWERPC32_REGISTERINFO_H +#include "llvm/ADT/DenseMap.h" #include "PPC.h" -#include <map> #define GET_REGINFO_HEADER #include "PPCGenRegisterInfo.inc" @@ -27,7 +27,7 @@ class TargetInstrInfo; class Type; class PPCRegisterInfo : public PPCGenRegisterInfo { - std::map<unsigned, unsigned> ImmToIdxMap; + DenseMap<unsigned, unsigned> ImmToIdxMap; const PPCSubtarget &Subtarget; const TargetInstrInfo &TII; public: @@ -44,6 +44,7 @@ public: /// Code Generation virtual methods... const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const; const uint32_t *getCallPreservedMask(CallingConv::ID CC) const; + const uint32_t *getNoPreservedMask() const; BitVector getReservedRegs(const MachineFunction &MF) const; @@ -60,18 +61,35 @@ public: return true; } - void lowerDynamicAlloc(MachineBasicBlock::iterator II, - int SPAdj, RegScavenger *RS) const; - void lowerCRSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex, - int SPAdj, RegScavenger *RS) const; - void lowerCRRestore(MachineBasicBlock::iterator II, unsigned FrameIndex, - int SPAdj, RegScavenger *RS) const; + virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const { + return true; + } + + void lowerDynamicAlloc(MachineBasicBlock::iterator II) const; + void lowerCRSpilling(MachineBasicBlock::iterator II, + unsigned FrameIndex) const; + void lowerCRRestore(MachineBasicBlock::iterator II, + unsigned FrameIndex) const; + void lowerVRSAVESpilling(MachineBasicBlock::iterator II, + unsigned FrameIndex) const; + void lowerVRSAVERestore(MachineBasicBlock::iterator II, + unsigned FrameIndex) const; + bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg, int &FrameIdx) const; void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS = NULL) const; + // Support for virtual base registers. + bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const; + void materializeFrameBaseRegister(MachineBasicBlock *MBB, + unsigned BaseReg, int FrameIdx, + int64_t Offset) const; + void resolveFrameIndex(MachineBasicBlock::iterator I, + unsigned BaseReg, int64_t Offset) const; + bool isFrameOffsetLegal(const MachineInstr *MI, int64_t Offset) const; + // Debug information queries. 
  unsigned getFrameRegister(const MachineFunction &MF) const;
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.td b/lib/Target/PowerPC/PPCRegisterInfo.td
index 8ee9b1e..57a25f5 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -27,40 +27,40 @@ class PPCReg<string n> : Register<n> {
 // GPR - One of the 32 32-bit general-purpose registers
 class GPR<bits<5> num, string n> : PPCReg<n> {
-  field bits<5> Num = num;
+  let HWEncoding{4-0} = num;
 }
 
 // GP8 - One of the 32 64-bit general-purpose registers
 class GP8<GPR SubReg, string n> : PPCReg<n> {
-  field bits<5> Num = SubReg.Num;
+  let HWEncoding = SubReg.HWEncoding;
   let SubRegs = [SubReg];
   let SubRegIndices = [sub_32];
 }
 
 // SPR - One of the 32-bit special-purpose registers
 class SPR<bits<10> num, string n> : PPCReg<n> {
-  field bits<10> Num = num;
+  let HWEncoding{9-0} = num;
 }
 
 // FPR - One of the 32 64-bit floating-point registers
 class FPR<bits<5> num, string n> : PPCReg<n> {
-  field bits<5> Num = num;
+  let HWEncoding{4-0} = num;
 }
 
 // VR - One of the 32 128-bit vector registers
 class VR<bits<5> num, string n> : PPCReg<n> {
-  field bits<5> Num = num;
+  let HWEncoding{4-0} = num;
 }
 
 // CR - One of the 8 4-bit condition registers
 class CR<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
-  field bits<3> Num = num;
+  let HWEncoding{2-0} = num;
   let SubRegs = subregs;
 }
 
 // CRBIT - One of the 32 1-bit condition register fields
 class CRBIT<bits<5> num, string n> : PPCReg<n> {
-  field bits<5> Num = num;
+  let HWEncoding{4-0} = num;
 }
 
 // General-purpose registers
@@ -86,6 +86,14 @@ foreach Index = 0-31 in {
           DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
 }
 
+// The representation of r0 when treated as the constant 0.
+def ZERO  : GPR<0, "0">;
+def ZERO8 : GP8<ZERO, "0">;
+
+// Representations of the frame pointer used by ISD::FRAMEADDR.
+def FP   : GPR<0 /* arbitrary */, "**FRAME POINTER**">;
+def FP8  : GP8<FP, "**FRAME POINTER**">;
+
 // Condition register bits
 def CR0LT : CRBIT< 0, "0">;
 def CR0GT : CRBIT< 1, "1">;
@@ -164,11 +172,17 @@ def RM: SPR<512, "**ROUNDING MODE**">;
 // then nonvolatiles in reverse order since stmw/lmw save from rN to r31
 def GPRC : RegisterClass<"PPC", [i32], 32, (add (sequence "R%u", 2, 12),
                                                 (sequence "R%u", 30, 13),
-                                                R31, R0, R1, LR)>;
+                                                R31, R0, R1, FP)>;
 
 def G8RC : RegisterClass<"PPC", [i64], 64, (add (sequence "X%u", 2, 12),
                                                 (sequence "X%u", 30, 14),
-                                                X31, X13, X0, X1, LR8)>;
+                                                X31, X13, X0, X1, FP8)>;
+
+// For some instructions r0 is special (representing the value 0 instead of
+// the value in the r0 register), and we use these register subclasses to
+// prevent r0 from being allocated for use by those instructions.
+def GPRC_NOR0 : RegisterClass<"PPC", [i32], 32, (add (sub GPRC, R0), ZERO)>;
+def G8RC_NOX0 : RegisterClass<"PPC", [i64], 64, (add (sub G8RC, X0), ZERO8)>;
 
 // Allocate volatiles first, then non-volatiles in reverse order. With the SVR4
 // ABI the size of the Floating-point register save area is determined by the
diff --git a/lib/Target/PowerPC/PPCScheduleA2.td b/lib/Target/PowerPC/PPCScheduleA2.td
index ba63b5c..8d5838e 100644
--- a/lib/Target/PowerPC/PPCScheduleA2.td
+++ b/lib/Target/PowerPC/PPCScheduleA2.td
@@ -749,3 +749,18 @@ def PPCA2Itineraries : ProcessorItineraries<
                     [15, 7],
                     [FPR_Bypass, FPR_Bypass]>
 ]>;
+
+// ===---------------------------------------------------------------------===//
+// A2 machine model for scheduling and other instruction cost heuristics.
+ +def PPCA2Model : SchedMachineModel { + let IssueWidth = 1; // 2 micro-ops are dispatched per cycle. + let MinLatency = -1; // OperandCycles are interpreted as MinLatency. + let LoadLatency = 6; // Optimistic load latency assuming bypass. + // This is overriden by OperandCycles if the + // Itineraries are queried instead. + let MispredictPenalty = 13; + + let Itineraries = PPCA2Itineraries; +} + diff --git a/lib/Target/PowerPC/PPCScheduleG5.td b/lib/Target/PowerPC/PPCScheduleG5.td index 7c02ea0..c64998d 100644 --- a/lib/Target/PowerPC/PPCScheduleG5.td +++ b/lib/Target/PowerPC/PPCScheduleG5.td @@ -92,3 +92,18 @@ def G5Itineraries : ProcessorItineraries< InstrItinData<VecVSL , [InstrStage<2, [VIU1]>]>, InstrItinData<VecVSR , [InstrStage<3, [VPU]>]> ]>; + +// ===---------------------------------------------------------------------===// +// e5500 machine model for scheduling and other instruction cost heuristics. + +def G5Model : SchedMachineModel { + let IssueWidth = 4; // 4 (non-branch) instructions are dispatched per cycle. + let MinLatency = 0; // Out-of-order dispatch. + let LoadLatency = 3; // Optimistic load latency assuming bypass. + // This is overriden by OperandCycles if the + // Itineraries are queried instead. + let MispredictPenalty = 16; + + let Itineraries = G5Itineraries; +} + diff --git a/lib/Target/PowerPC/PPCSubtarget.cpp b/lib/Target/PowerPC/PPCSubtarget.cpp index 18e4c07..a8f2b3f 100644 --- a/lib/Target/PowerPC/PPCSubtarget.cpp +++ b/lib/Target/PowerPC/PPCSubtarget.cpp @@ -38,8 +38,18 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU, , HasAltivec(false) , HasQPX(false) , HasFSQRT(false) + , HasFRE(false) + , HasFRES(false) + , HasFRSQRTE(false) + , HasFRSQRTES(false) + , HasRecipPrec(false) , HasSTFIWX(false) + , HasLFIWAX(false) + , HasFPRND(false) + , HasFPCVT(false) , HasISEL(false) + , HasPOPCNTD(false) + , HasLDBRX(false) , IsBookE(false) , HasLazyResolverStubs(false) , IsJITCodeModel(false) diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h index 15885bd..65b4d21 100644 --- a/lib/Target/PowerPC/PPCSubtarget.h +++ b/lib/Target/PowerPC/PPCSubtarget.h @@ -77,8 +77,15 @@ protected: bool HasAltivec; bool HasQPX; bool HasFSQRT; + bool HasFRE, HasFRES, HasFRSQRTE, HasFRSQRTES; + bool HasRecipPrec; bool HasSTFIWX; + bool HasLFIWAX; + bool HasFPRND; + bool HasFPCVT; bool HasISEL; + bool HasPOPCNTD; + bool HasLDBRX; bool IsBookE; bool HasLazyResolverStubs; bool IsJITCodeModel; @@ -154,11 +161,21 @@ public: // Specific obvious features. 
bool hasFSQRT() const { return HasFSQRT; } + bool hasFRE() const { return HasFRE; } + bool hasFRES() const { return HasFRES; } + bool hasFRSQRTE() const { return HasFRSQRTE; } + bool hasFRSQRTES() const { return HasFRSQRTES; } + bool hasRecipPrec() const { return HasRecipPrec; } bool hasSTFIWX() const { return HasSTFIWX; } + bool hasLFIWAX() const { return HasLFIWAX; } + bool hasFPRND() const { return HasFPRND; } + bool hasFPCVT() const { return HasFPCVT; } bool hasAltivec() const { return HasAltivec; } bool hasQPX() const { return HasQPX; } bool hasMFOCRF() const { return HasMFOCRF; } bool hasISEL() const { return HasISEL; } + bool hasPOPCNTD() const { return HasPOPCNTD; } + bool hasLDBRX() const { return HasLDBRX; } bool isBookE() const { return IsBookE; } const Triple &getTargetTriple() const { return TargetTriple; } diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp index fe851c1..14dc794 100644 --- a/lib/Target/PowerPC/PPCTargetMachine.cpp +++ b/lib/Target/PowerPC/PPCTargetMachine.cpp @@ -86,8 +86,14 @@ public: return getTM<PPCTargetMachine>(); } + const PPCSubtarget &getPPCSubtarget() const { + return *getPPCTargetMachine().getSubtargetImpl(); + } + virtual bool addPreRegAlloc(); + virtual bool addILPOpts(); virtual bool addInstSelector(); + virtual bool addPreSched2(); virtual bool addPreEmitPass(); }; } // namespace @@ -103,13 +109,31 @@ bool PPCPassConfig::addPreRegAlloc() { return false; } +bool PPCPassConfig::addILPOpts() { + if (getPPCSubtarget().hasISEL()) { + addPass(&EarlyIfConverterID); + return true; + } + + return false; +} + bool PPCPassConfig::addInstSelector() { // Install an instruction selector. addPass(createPPCISelDag(getPPCTargetMachine())); return false; } +bool PPCPassConfig::addPreSched2() { + if (getOptLevel() != CodeGenOpt::None) + addPass(&IfConverterID); + + return true; +} + bool PPCPassConfig::addPreEmitPass() { + if (getOptLevel() != CodeGenOpt::None) + addPass(createPPCEarlyReturnPass()); // Must run branch selection immediately preceding the asm printer. addPass(createPPCBranchSelectionPass()); return false; diff --git a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp index 5e9ad34..2504ba7 100644 --- a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp +++ b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp @@ -86,7 +86,9 @@ public: virtual unsigned getNumberOfRegisters(bool Vector) const; virtual unsigned getRegisterBitWidth(bool Vector) const; virtual unsigned getMaximumUnrollFactor() const; - virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const; + virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, + OperandValueKind, + OperandValueKind) const; virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index, Type *SubTp) const; virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, @@ -122,9 +124,8 @@ llvm::createPPCTargetTransformInfoPass(const PPCTargetMachine *TM) { PPCTTI::PopcntSupportKind PPCTTI::getPopcntSupport(unsigned TyWidth) const { assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2"); - // FIXME: PPC currently does not have custom popcnt lowering even though - // there is hardware support. Once this is fixed, update this function - // to reflect the real capabilities of the hardware. 
+ if (ST->hasPOPCNTD() && TyWidth <= 64) + return PSK_FastHardware; return PSK_Software; } @@ -167,11 +168,14 @@ unsigned PPCTTI::getMaximumUnrollFactor() const { return 2; } -unsigned PPCTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const { +unsigned PPCTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, + OperandValueKind Op1Info, + OperandValueKind Op2Info) const { assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode"); // Fallback to the default implementation. - return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty); + return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info, + Op2Info); } unsigned PPCTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index, diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt index b6763aa..514f840 100644 --- a/lib/Target/PowerPC/README.txt +++ b/lib/Target/PowerPC/README.txt @@ -1,7 +1,6 @@ //===- README.txt - Notes for improving PowerPC-specific code gen ---------===// TODO: -* gpr0 allocation * lmw/stmw pass a la arm load store optimizer for prolog/epilog ===-------------------------------------------------------------------------=== @@ -127,25 +126,6 @@ produced this with bdnz, the loop would be a single dispatch group. ===-------------------------------------------------------------------------=== -Compile: - -void foo(int *P) { - if (P) *P = 0; -} - -into: - -_foo: - cmpwi cr0,r3,0 - beqlr cr0 - li r0,0 - stw r0,0(r3) - blr - -This is effectively a simple form of predication. - -===-------------------------------------------------------------------------=== - Lump the constant pool for each function into ONE pic object, and reference pieces of it as offsets from the start. For functions like this (contrived to have lots of constants obviously): @@ -204,12 +184,6 @@ http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html ===-------------------------------------------------------------------------=== -Implement Newton-Rhapson method for improving estimate instructions to the -correct accuracy, and implementing divide as multiply by reciprocal when it has -more than one use. Itanium would want this too. - -===-------------------------------------------------------------------------=== - Compile offsets from allocas: int *%test() { @@ -536,20 +510,6 @@ void func(unsigned int *ret, float dx, float dy, float dz, float dw) { ===-------------------------------------------------------------------------=== -Complete the signed i32 to FP conversion code using 64-bit registers -transformation, good for PI. See PPCISelLowering.cpp, this comment: - - // FIXME: disable this lowered code. This generates 64-bit register values, - // and we don't model the fact that the top part is clobbered by calls. We - // need to flag these together so that the value isn't live across a call. - //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - -Also, if the registers are spilled to the stack, we have to ensure that all -64-bits of them are save/restored, otherwise we will miscompile the code. It -sounds like we need to get the 64-bit register classes going. 
- -===-------------------------------------------------------------------------=== - %struct.B = type { i8, [3 x i8] } define void @bar(%struct.B* %b) { diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h index e099a9f..9792bd8 100644 --- a/lib/Target/R600/AMDGPU.h +++ b/lib/Target/R600/AMDGPU.h @@ -23,6 +23,9 @@ class AMDGPUTargetMachine; // R600 Passes FunctionPass* createR600KernelParametersPass(const DataLayout *TD); FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm); +FunctionPass *createR600EmitClauseMarkers(TargetMachine &tm); +FunctionPass *createR600Packetizer(TargetMachine &tm); +FunctionPass *createR600ControlFlowFinalizer(TargetMachine &tm); // SI Passes FunctionPass *createSIAnnotateControlFlowPass(); diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp index f600144..c915f50 100644 --- a/lib/Target/R600/AMDGPUAsmPrinter.cpp +++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp @@ -19,9 +19,15 @@ #include "AMDGPUAsmPrinter.h" #include "AMDGPU.h" +#include "SIDefines.h" #include "SIMachineFunctionInfo.h" #include "SIRegisterInfo.h" +#include "R600MachineFunctionInfo.h" +#include "R600RegisterInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCStreamer.h" +#include "llvm/Support/ELF.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/TargetLoweringObjectFile.h" @@ -50,15 +56,57 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { if (OutStreamer.hasRawTextSupport()) { OutStreamer.EmitRawText("@" + MF.getName() + ":"); } - OutStreamer.SwitchSection(getObjFileLowering().getTextSection()); + + const MCSectionELF *ConfigSection = getObjFileLowering().getContext() + .getELFSection(".AMDGPU.config", + ELF::SHT_PROGBITS, 0, + SectionKind::getReadOnly()); + OutStreamer.SwitchSection(ConfigSection); if (STM.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) { - EmitProgramInfo(MF); + EmitProgramInfoSI(MF); + } else { + EmitProgramInfoR600(MF); } + OutStreamer.SwitchSection(getObjFileLowering().getTextSection()); EmitFunctionBody(); return false; } -void AMDGPUAsmPrinter::EmitProgramInfo(MachineFunction &MF) { +void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) { + unsigned MaxGPR = 0; + bool killPixel = false; + const R600RegisterInfo * RI = + static_cast<const R600RegisterInfo*>(TM.getRegisterInfo()); + R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); + + for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end(); + BB != BB_E; ++BB) { + MachineBasicBlock &MBB = *BB; + for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); + I != E; ++I) { + MachineInstr &MI = *I; + if (MI.getOpcode() == AMDGPU::KILLGT) + killPixel = true; + unsigned numOperands = MI.getNumOperands(); + for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) { + MachineOperand & MO = MI.getOperand(op_idx); + if (!MO.isReg()) + continue; + unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff; + + // Register with value > 127 aren't GPR + if (HWReg > 127) + continue; + MaxGPR = std::max(MaxGPR, HWReg); + } + } + } + OutStreamer.EmitIntValue(MaxGPR + 1, 4); + OutStreamer.EmitIntValue(MFI->StackSize, 4); + OutStreamer.EmitIntValue(killPixel, 4); +} + +void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) { unsigned MaxSGPR = 0; unsigned MaxVGPR = 0; bool VCCUsed = false; @@ -107,6 +155,9 @@ void AMDGPUAsmPrinter::EmitProgramInfo(MachineFunction &MF) { } else if (AMDGPU::VReg_64RegClass.contains(reg)) { isSGPR = false; width = 2; + } 
else if (AMDGPU::VReg_96RegClass.contains(reg)) { + isSGPR = false; + width = 3; } else if (AMDGPU::SReg_128RegClass.contains(reg)) { isSGPR = true; width = 4; @@ -139,7 +190,19 @@ void AMDGPUAsmPrinter::EmitProgramInfo(MachineFunction &MF) { MaxSGPR += 2; } SIMachineFunctionInfo * MFI = MF.getInfo<SIMachineFunctionInfo>(); - OutStreamer.EmitIntValue(MaxSGPR + 1, 4); - OutStreamer.EmitIntValue(MaxVGPR + 1, 4); - OutStreamer.EmitIntValue(MFI->PSInputAddr, 4); + unsigned RsrcReg; + switch (MFI->ShaderType) { + default: // Fall through + case ShaderType::COMPUTE: RsrcReg = R_00B848_COMPUTE_PGM_RSRC1; break; + case ShaderType::GEOMETRY: RsrcReg = R_00B228_SPI_SHADER_PGM_RSRC1_GS; break; + case ShaderType::PIXEL: RsrcReg = R_00B028_SPI_SHADER_PGM_RSRC1_PS; break; + case ShaderType::VERTEX: RsrcReg = R_00B128_SPI_SHADER_PGM_RSRC1_VS; break; + } + + OutStreamer.EmitIntValue(RsrcReg, 4); + OutStreamer.EmitIntValue(S_00B028_VGPRS(MaxVGPR / 4) | S_00B028_SGPRS(MaxSGPR / 8), 4); + if (MFI->ShaderType == ShaderType::PIXEL) { + OutStreamer.EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4); + OutStreamer.EmitIntValue(MFI->PSInputAddr, 4); + } } diff --git a/lib/Target/R600/AMDGPUAsmPrinter.h b/lib/Target/R600/AMDGPUAsmPrinter.h index 3812282..f425ef4 100644 --- a/lib/Target/R600/AMDGPUAsmPrinter.h +++ b/lib/Target/R600/AMDGPUAsmPrinter.h @@ -33,7 +33,8 @@ public: /// \brief Emit register usage information so that the GPU driver /// can correctly setup the GPU state. - void EmitProgramInfo(MachineFunction &MF); + void EmitProgramInfoR600(MachineFunction &MF); + void EmitProgramInfoSI(MachineFunction &MF); /// Implemented in AMDGPUMCInstLower.cpp virtual void EmitInstruction(const MachineInstr *MI); diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td index 45ae37e..9c30515 100644 --- a/lib/Target/R600/AMDGPUCallingConv.td +++ b/lib/Target/R600/AMDGPUCallingConv.td @@ -32,8 +32,14 @@ def CC_SI : CallingConv<[ VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15, VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23, VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31 - ]>>> + ]>>>, + // This is the default for i64 values. + // XXX: We should change this once clang understands the CC_AMDGPU. 
+ CCIfType<[i64], CCAssignToRegWithShadow< + [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ], + [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ] + >> ]>; def CC_AMDGPU : CallingConv<[ diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp index 5995b6f..a266df5 100644 --- a/lib/Target/R600/AMDGPUISelLowering.cpp +++ b/lib/Target/R600/AMDGPUISelLowering.cpp @@ -60,6 +60,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) : setOperationAction(ISD::LOAD, MVT::v4f32, Promote); AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32); + setOperationAction(ISD::MUL, MVT::i64, Expand); + setOperationAction(ISD::UDIV, MVT::i32, Expand); setOperationAction(ISD::UDIVREM, MVT::i32, Custom); setOperationAction(ISD::UREM, MVT::i32, Expand); diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h index f31b646..c2a79ea 100644 --- a/lib/Target/R600/AMDGPUISelLowering.h +++ b/lib/Target/R600/AMDGPUISelLowering.h @@ -116,6 +116,7 @@ enum { BRANCH_COND, // End AMDIL ISD Opcodes BITALIGN, + BUFFER_STORE, DWORDADDR, FRACT, FMAX, diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td index e740348..83e1359 100644 --- a/lib/Target/R600/AMDGPUInstructions.td +++ b/lib/Target/R600/AMDGPUInstructions.td @@ -94,6 +94,7 @@ class Constants { int TWO_PI = 0x40c90fdb; int PI = 0x40490fdb; int TWO_PI_INV = 0x3e22f983; +int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding } def CONST : Constants; @@ -115,21 +116,21 @@ class CLAMP <RegisterClass rc> : AMDGPUShaderInst < (outs rc:$dst), (ins rc:$src0), "CLAMP $dst, $src0", - [(set rc:$dst, (int_AMDIL_clamp rc:$src0, (f32 FP_ZERO), (f32 FP_ONE)))] + [(set f32:$dst, (int_AMDIL_clamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))] >; class FABS <RegisterClass rc> : AMDGPUShaderInst < (outs rc:$dst), (ins rc:$src0), "FABS $dst, $src0", - [(set rc:$dst, (fabs rc:$src0))] + [(set f32:$dst, (fabs f32:$src0))] >; class FNEG <RegisterClass rc> : AMDGPUShaderInst < (outs rc:$dst), (ins rc:$src0), "FNEG $dst, $src0", - [(set rc:$dst, (fneg rc:$src0))] + [(set f32:$dst, (fneg f32:$src0))] >; } // usesCustomInserter = 1 @@ -140,8 +141,7 @@ multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass, (outs dstClass:$dst), (ins addrClass:$addr, i32imm:$chan), "RegisterLoad $dst, $addr", - [(set (i32 dstClass:$dst), (AMDGPUregister_load addrPat:$addr, - (i32 timm:$chan)))] + [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))] > { let isRegisterLoad = 1; } @@ -150,7 +150,7 @@ multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass, (outs), (ins dstClass:$val, addrClass:$addr, i32imm:$chan), "RegisterStore $val, $addr", - [(AMDGPUregister_store (i32 dstClass:$val), addrPat:$addr, (i32 timm:$chan))] + [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))] > { let isRegisterStore = 1; } @@ -161,105 +161,121 @@ multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass, /* Generic helper patterns for intrinsics */ /* -------------------------------------- */ -class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul, - RegisterClass rc> : Pat < - (fpow rc:$src0, rc:$src1), - (exp_ieee (mul rc:$src1, (log_ieee rc:$src0))) +class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul> + : Pat < + (fpow f32:$src0, f32:$src1), + (exp_ieee (mul f32:$src1, (log_ieee f32:$src0))) >; /* Other helper patterns */ /* --------------------- */ /* 
Extract element pattern */ -class Extract_Element <ValueType sub_type, ValueType vec_type, - RegisterClass vec_class, int sub_idx, - SubRegIndex sub_reg>: Pat< - (sub_type (vector_extract (vec_type vec_class:$src), sub_idx)), - (EXTRACT_SUBREG vec_class:$src, sub_reg) +class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx, + SubRegIndex sub_reg> + : Pat< + (sub_type (vector_extract vec_type:$src, sub_idx)), + (EXTRACT_SUBREG $src, sub_reg) >; /* Insert element pattern */ class Insert_Element <ValueType elem_type, ValueType vec_type, - RegisterClass elem_class, RegisterClass vec_class, - int sub_idx, SubRegIndex sub_reg> : Pat < - - (vec_type (vector_insert (vec_type vec_class:$vec), - (elem_type elem_class:$elem), sub_idx)), - (INSERT_SUBREG vec_class:$vec, elem_class:$elem, sub_reg) + int sub_idx, SubRegIndex sub_reg> + : Pat < + (vector_insert vec_type:$vec, elem_type:$elem, sub_idx), + (INSERT_SUBREG $vec, $elem, sub_reg) >; // Vector Build pattern -class Vector1_Build <ValueType vecType, RegisterClass vectorClass, - ValueType elemType, RegisterClass elemClass> : Pat < - (vecType (build_vector (elemType elemClass:$src))), - (vecType elemClass:$src) +class Vector1_Build <ValueType vecType, ValueType elemType, + RegisterClass rc> : Pat < + (vecType (build_vector elemType:$src)), + (vecType (COPY_TO_REGCLASS $src, rc)) >; -class Vector2_Build <ValueType vecType, RegisterClass vectorClass, - ValueType elemType, RegisterClass elemClass> : Pat < - (vecType (build_vector (elemType elemClass:$sub0), (elemType elemClass:$sub1))), +class Vector2_Build <ValueType vecType, ValueType elemType> : Pat < + (vecType (build_vector elemType:$sub0, elemType:$sub1)), (INSERT_SUBREG (INSERT_SUBREG - (vecType (IMPLICIT_DEF)), elemClass:$sub0, sub0), elemClass:$sub1, sub1) + (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1) >; -class Vector4_Build <ValueType vecType, RegisterClass vectorClass, - ValueType elemType, RegisterClass elemClass> : Pat < - (vecType (build_vector (elemType elemClass:$x), (elemType elemClass:$y), - (elemType elemClass:$z), (elemType elemClass:$w))), +class Vector4_Build <ValueType vecType, ValueType elemType> : Pat < + (vecType (build_vector elemType:$x, elemType:$y, elemType:$z, elemType:$w)), (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG - (vecType (IMPLICIT_DEF)), elemClass:$x, sub0), elemClass:$y, sub1), - elemClass:$z, sub2), elemClass:$w, sub3) + (vecType (IMPLICIT_DEF)), $x, sub0), $y, sub1), $z, sub2), $w, sub3) >; -class Vector8_Build <ValueType vecType, RegisterClass vectorClass, - ValueType elemType, RegisterClass elemClass> : Pat < - (vecType (build_vector (elemType elemClass:$sub0), (elemType elemClass:$sub1), - (elemType elemClass:$sub2), (elemType elemClass:$sub3), - (elemType elemClass:$sub4), (elemType elemClass:$sub5), - (elemType elemClass:$sub6), (elemType elemClass:$sub7))), - (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG +class Vector8_Build <ValueType vecType, ValueType elemType> : Pat < + (vecType (build_vector elemType:$sub0, elemType:$sub1, + elemType:$sub2, elemType:$sub3, + elemType:$sub4, elemType:$sub5, + elemType:$sub6, elemType:$sub7)), (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG - (vecType (IMPLICIT_DEF)), elemClass:$sub0, sub0), elemClass:$sub1, sub1), - elemClass:$sub2, sub2), elemClass:$sub3, sub3), - elemClass:$sub4, sub4), elemClass:$sub5, sub5), - elemClass:$sub6, sub6), elemClass:$sub7, sub7) + (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG + (vecType 
(IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1), + $sub2, sub2), $sub3, sub3), + $sub4, sub4), $sub5, sub5), + $sub6, sub6), $sub7, sub7) >; -class Vector16_Build <ValueType vecType, RegisterClass vectorClass, - ValueType elemType, RegisterClass elemClass> : Pat < - (vecType (build_vector (elemType elemClass:$sub0), (elemType elemClass:$sub1), - (elemType elemClass:$sub2), (elemType elemClass:$sub3), - (elemType elemClass:$sub4), (elemType elemClass:$sub5), - (elemType elemClass:$sub6), (elemType elemClass:$sub7), - (elemType elemClass:$sub8), (elemType elemClass:$sub9), - (elemType elemClass:$sub10), (elemType elemClass:$sub11), - (elemType elemClass:$sub12), (elemType elemClass:$sub13), - (elemType elemClass:$sub14), (elemType elemClass:$sub15))), - (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG +class Vector16_Build <ValueType vecType, ValueType elemType> : Pat < + (vecType (build_vector elemType:$sub0, elemType:$sub1, + elemType:$sub2, elemType:$sub3, + elemType:$sub4, elemType:$sub5, + elemType:$sub6, elemType:$sub7, + elemType:$sub8, elemType:$sub9, + elemType:$sub10, elemType:$sub11, + elemType:$sub12, elemType:$sub13, + elemType:$sub14, elemType:$sub15)), (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG - (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG - (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG - (vecType (IMPLICIT_DEF)), elemClass:$sub0, sub0), elemClass:$sub1, sub1), - elemClass:$sub2, sub2), elemClass:$sub3, sub3), - elemClass:$sub4, sub4), elemClass:$sub5, sub5), - elemClass:$sub6, sub6), elemClass:$sub7, sub7), - elemClass:$sub8, sub8), elemClass:$sub9, sub9), - elemClass:$sub10, sub10), elemClass:$sub11, sub11), - elemClass:$sub12, sub12), elemClass:$sub13, sub13), - elemClass:$sub14, sub14), elemClass:$sub15, sub15) + (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG + (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG + (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG + (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1), + $sub2, sub2), $sub3, sub3), + $sub4, sub4), $sub5, sub5), + $sub6, sub6), $sub7, sub7), + $sub8, sub8), $sub9, sub9), + $sub10, sub10), $sub11, sub11), + $sub12, sub12), $sub13, sub13), + $sub14, sub14), $sub15, sub15) >; +// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer +// can handle COPY instructions. // bitconvert pattern class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat < (dt (bitconvert (st rc:$src0))), (dt rc:$src0) >; +// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer +// can handle COPY instructions. 
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat < (vt (AMDGPUdwordaddr (vt rc:$addr))), (vt rc:$addr) >; +// BFI_INT patterns + +multiclass BFIPatterns <Instruction BFI_INT> { + + // Definition from ISA doc: + // (y & x) | (z & ~x) + def : Pat < + (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))), + (BFI_INT $x, $y, $z) + >; + + // SHA-256 Ch function + // z ^ (x & (y ^ z)) + def : Pat < + (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))), + (BFI_INT $x, $y, $z) + >; + +} + include "R600Instructions.td" include "SIInstrInfo.td" diff --git a/lib/Target/R600/AMDGPUMachineFunction.cpp b/lib/Target/R600/AMDGPUMachineFunction.cpp new file mode 100644 index 0000000..0461025 --- /dev/null +++ b/lib/Target/R600/AMDGPUMachineFunction.cpp @@ -0,0 +1,24 @@ +#include "AMDGPUMachineFunction.h" +#include "AMDGPU.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/Function.h" + +namespace llvm { + +const char *AMDGPUMachineFunction::ShaderTypeAttribute = "ShaderType"; + +AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) : + MachineFunctionInfo() { + ShaderType = ShaderType::COMPUTE; + AttributeSet Set = MF.getFunction()->getAttributes(); + Attribute A = Set.getAttribute(AttributeSet::FunctionIndex, + ShaderTypeAttribute); + + if (A.isStringAttribute()) { + StringRef Str = A.getValueAsString(); + if (Str.getAsInteger(0, ShaderType)) + llvm_unreachable("Can't parse shader type!"); + } +} + +} diff --git a/lib/Target/R600/AMDGPUMachineFunction.h b/lib/Target/R600/AMDGPUMachineFunction.h new file mode 100644 index 0000000..21c8c51 --- /dev/null +++ b/lib/Target/R600/AMDGPUMachineFunction.h @@ -0,0 +1,29 @@ +//===-- R600MachineFunctionInfo.h - R600 Machine Function Info ----*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file +//===----------------------------------------------------------------------===// + +#ifndef AMDGPUMACHINEFUNCTION_H +#define AMDGPUMACHINEFUNCTION_H + +#include "llvm/CodeGen/MachineFunction.h" + +namespace llvm { + +class AMDGPUMachineFunction : public MachineFunctionInfo { +private: + static const char *ShaderTypeAttribute; +public: + AMDGPUMachineFunction(const MachineFunction &MF); + unsigned ShaderType; +}; + +} +#endif // AMDGPUMACHINEFUNCTION_H diff --git a/lib/Target/R600/AMDGPUStructurizeCFG.cpp b/lib/Target/R600/AMDGPUStructurizeCFG.cpp index b723433..dea43b8 100644 --- a/lib/Target/R600/AMDGPUStructurizeCFG.cpp +++ b/lib/Target/R600/AMDGPUStructurizeCFG.cpp @@ -17,6 +17,7 @@ #include "AMDGPU.h" #include "llvm/ADT/SCCIterator.h" +#include "llvm/ADT/MapVector.h" #include "llvm/Analysis/RegionInfo.h" #include "llvm/Analysis/RegionIterator.h" #include "llvm/Analysis/RegionPass.h" @@ -40,13 +41,14 @@ typedef SmallVector<BBValuePair, 2> BBValueVector; typedef SmallPtrSet<BasicBlock *, 8> BBSet; -typedef DenseMap<PHINode *, BBValueVector> PhiMap; +typedef MapVector<PHINode *, BBValueVector> PhiMap; +typedef MapVector<BasicBlock *, BBVector> BB2BBVecMap; + typedef DenseMap<DomTreeNode *, unsigned> DTN2UnsignedMap; typedef DenseMap<BasicBlock *, PhiMap> BBPhiMap; typedef DenseMap<BasicBlock *, Value *> BBPredicates; typedef DenseMap<BasicBlock *, BBPredicates> PredMap; typedef DenseMap<BasicBlock *, BasicBlock*> BB2BBMap; -typedef DenseMap<BasicBlock *, BBVector> BB2BBVecMap; // The name for newly created blocks. 
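The AMDGPUStructurizeCFG hunk above swaps DenseMap for MapVector in the PhiMap and BB2BBVecMap typedefs, presumably so that the pass walks PHI nodes and blocks in insertion order instead of pointer-hash order and therefore produces the same output on every run. Below is a minimal standalone sketch of that behavioral difference; it is not part of the patch, and the IterationOrderDemo function with its integer keys is made up purely for illustration.

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>

// Illustrative only: keys and values are arbitrary.
static void IterationOrderDemo() {
  llvm::MapVector<int, const char *> Ordered;
  Ordered.insert(std::make_pair(3, "third"));
  Ordered.insert(std::make_pair(1, "first"));
  Ordered.insert(std::make_pair(2, "second"));

  // MapVector remembers insertion order, so this visits 3, 1, 2 on every run.
  for (llvm::MapVector<int, const char *>::iterator I = Ordered.begin(),
       E = Ordered.end(); I != E; ++I)
    llvm::outs() << I->first << " -> " << I->second << "\n";

  llvm::DenseMap<int, const char *> Hashed;
  Hashed.insert(std::make_pair(3, "third"));
  Hashed.insert(std::make_pair(1, "first"));
  Hashed.insert(std::make_pair(2, "second"));

  // DenseMap iteration order is unspecified; with pointer keys (as in the
  // real PhiMap) it can differ between runs, which is what the switch to
  // MapVector avoids.
  for (llvm::DenseMap<int, const char *>::iterator I = Hashed.begin(),
       E = Hashed.end(); I != E; ++I)
    llvm::outs() << I->first << " -> " << I->second << "\n";
}
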
diff --git a/lib/Target/R600/AMDGPUSubtarget.cpp b/lib/Target/R600/AMDGPUSubtarget.cpp index 0f356a1..a7e1d7b 100644 --- a/lib/Target/R600/AMDGPUSubtarget.cpp +++ b/lib/Target/R600/AMDGPUSubtarget.cpp @@ -33,6 +33,7 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) : DefaultSize[0] = 64; DefaultSize[1] = 1; DefaultSize[2] = 1; + HasVertexCache = false; ParseSubtargetFeatures(GPU, FS); DevName = GPU; Device = AMDGPUDeviceInfo::getDeviceFromName(DevName, this, Is64bit); @@ -53,6 +54,10 @@ AMDGPUSubtarget::is64bit() const { return Is64bit; } bool +AMDGPUSubtarget::hasVertexCache() const { + return HasVertexCache; +} +bool AMDGPUSubtarget::isTargetELF() const { return false; } diff --git a/lib/Target/R600/AMDGPUSubtarget.h b/lib/Target/R600/AMDGPUSubtarget.h index 1973fc6..b6501a4 100644 --- a/lib/Target/R600/AMDGPUSubtarget.h +++ b/lib/Target/R600/AMDGPUSubtarget.h @@ -36,6 +36,7 @@ private: bool Is32on64bit; bool DumpCode; bool R600ALUInst; + bool HasVertexCache; InstrItineraryData InstrItins; @@ -48,6 +49,7 @@ public: bool isOverride(AMDGPUDeviceInfo::Caps) const; bool is64bit() const; + bool hasVertexCache() const; // Helper functions to simplify if statements bool isTargetELF() const; diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp index 0185747..0ec67ce 100644 --- a/lib/Target/R600/AMDGPUTargetMachine.cpp +++ b/lib/Target/R600/AMDGPUTargetMachine.cpp @@ -151,8 +151,11 @@ bool AMDGPUPassConfig::addPreEmitPass() { if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { addPass(createAMDGPUCFGPreparationPass(*TM)); addPass(createAMDGPUCFGStructurizerPass(*TM)); + addPass(createR600EmitClauseMarkers(*TM)); addPass(createR600ExpandSpecialInstrsPass(*TM)); addPass(&FinalizeMachineBundlesID); + addPass(createR600Packetizer(*TM)); + addPass(createR600ControlFlowFinalizer(*TM)); } else { addPass(createSILowerControlFlowPass(*TM)); } diff --git a/lib/Target/R600/AMDILBase.td b/lib/Target/R600/AMDILBase.td index c12cedc..e221110 100644 --- a/lib/Target/R600/AMDILBase.td +++ b/lib/Target/R600/AMDILBase.td @@ -74,6 +74,10 @@ def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst", "false", "Older version of ALU instructions encoding.">; +def FeatureVertexCache : SubtargetFeature<"HasVertexCache", + "HasVertexCache", + "true", + "Specify use of dedicated vertex cache.">; //===----------------------------------------------------------------------===// // Register File, Calling Conv, Instruction Descriptions diff --git a/lib/Target/R600/AMDILDeviceInfo.cpp b/lib/Target/R600/AMDILDeviceInfo.cpp index 9605fbe..1787959 100644 --- a/lib/Target/R600/AMDILDeviceInfo.cpp +++ b/lib/Target/R600/AMDILDeviceInfo.cpp @@ -44,7 +44,7 @@ AMDGPUDevice* getDeviceFromName(const std::string &deviceName, " on 32bit pointers!"); #endif return new AMDGPUEvergreenDevice(ptr); - } else if (deviceName == "redwood") { + } else if (deviceName == "redwood" || deviceName == "sumo") { #if DEBUG assert(!is64bit && "This device does not support 64bit pointers!"); assert(!is64on32bit && "This device does not support 64bit" @@ -79,7 +79,9 @@ AMDGPUDevice* getDeviceFromName(const std::string &deviceName, " on 32bit pointers!"); #endif return new AMDGPUNIDevice(ptr); - } else if (deviceName == "SI") { + } else if (deviceName == "SI" || + deviceName == "tahiti" || deviceName == "pitcairn" || + deviceName == "verde" || deviceName == "oland") { return new AMDGPUSIDevice(ptr); } else { #if DEBUG diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp 
b/lib/Target/R600/AMDILISelDAGToDAG.cpp index fa8f62d..ba75a44 100644 --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp +++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp @@ -191,6 +191,29 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs, 2 * N->getNumOperands() + 1); } + case ISD::BUILD_PAIR: { + SDValue RC, SubReg0, SubReg1; + const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); + if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { + break; + } + if (N->getValueType(0) == MVT::i128) { + RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32); + SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32); + SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32); + } else if (N->getValueType(0) == MVT::i64) { + RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32); + SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32); + SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32); + } else { + llvm_unreachable("Unhandled value type for BUILD_PAIR"); + } + const SDValue Ops[] = { RC, N->getOperand(0), SubReg0, + N->getOperand(1), SubReg1 }; + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, + N->getDebugLoc(), N->getValueType(0), Ops); + } + case ISD::ConstantFP: case ISD::Constant: { const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt index 63c59e1..2ad2047 100644 --- a/lib/Target/R600/CMakeLists.txt +++ b/lib/Target/R600/CMakeLists.txt @@ -27,6 +27,7 @@ add_llvm_target(R600CodeGen AMDGPUFrameLowering.cpp AMDGPUIndirectAddressing.cpp AMDGPUMCInstLower.cpp + AMDGPUMachineFunction.cpp AMDGPUSubtarget.cpp AMDGPUStructurizeCFG.cpp AMDGPUTargetMachine.cpp @@ -34,11 +35,14 @@ add_llvm_target(R600CodeGen AMDGPUConvertToISA.cpp AMDGPUInstrInfo.cpp AMDGPURegisterInfo.cpp + R600ControlFlowFinalizer.cpp + R600EmitClauseMarkers.cpp R600ExpandSpecialInstrs.cpp R600InstrInfo.cpp R600ISelLowering.cpp R600MachineFunctionInfo.cpp R600MachineScheduler.cpp + R600Packetizer.cpp R600RegisterInfo.cpp SIAnnotateControlFlow.cpp SIInsertWaits.cpp diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp index 98fca43..a3397f3 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp @@ -44,7 +44,6 @@ public: AMDGPUAsmBackend(const Target &T) : MCAsmBackend() {} - virtual AMDGPUMCObjectWriter *createObjectWriter(raw_ostream &OS) const; virtual unsigned getNumFixupKinds() const { return 0; }; virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, uint64_t Value) const; @@ -71,16 +70,6 @@ void AMDGPUMCObjectWriter::WriteObject(MCAssembler &Asm, } } -MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T, StringRef TT, - StringRef CPU) { - return new AMDGPUAsmBackend(T); -} - -AMDGPUMCObjectWriter * AMDGPUAsmBackend::createObjectWriter( - raw_ostream &OS) const { - return new AMDGPUMCObjectWriter(OS); -} - void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, uint64_t Value) const { @@ -88,3 +77,21 @@ void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, assert(Fixup.getKind() == FK_PCRel_4); *Dst = (Value - 4) / 4; } + +//===----------------------------------------------------------------------===// +// ELFAMDGPUAsmBackend class 
+//===----------------------------------------------------------------------===// + +class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend { +public: + ELFAMDGPUAsmBackend(const Target &T) : AMDGPUAsmBackend(T) { } + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return createAMDGPUELFObjectWriter(OS); + } +}; + +MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T, StringRef TT, + StringRef CPU) { + return new ELFAMDGPUAsmBackend(T); +} diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp new file mode 100644 index 0000000..48fac9f --- /dev/null +++ b/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp @@ -0,0 +1,39 @@ +//===-- AMDGPUELFObjectWriter.cpp - AMDGPU ELF Writer ----------------------==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +/// \file +//===----------------------------------------------------------------------===// + +#include "AMDGPUMCTargetDesc.h" +#include "llvm/MC/MCELFObjectWriter.h" + +using namespace llvm; + +namespace { + +class AMDGPUELFObjectWriter : public MCELFObjectTargetWriter { +public: + AMDGPUELFObjectWriter(); +protected: + virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, + bool IsPCRel, bool IsRelocWithSymbol, + int64_t Addend) const { + llvm_unreachable("Not implemented"); + } + +}; + + +} // End anonymous namespace + +AMDGPUELFObjectWriter::AMDGPUELFObjectWriter() + : MCELFObjectTargetWriter(false, 0, 0, false) { } + +MCObjectWriter *llvm::createAMDGPUELFObjectWriter(raw_ostream &OS) { + MCELFObjectTargetWriter *MOTW = new AMDGPUELFObjectWriter(); + return createELFObjectWriter(MOTW, OS, true); +} diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp index 4d3d3e7..2aae26a 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp @@ -68,10 +68,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() { //===--- Dwarf Emission Directives -----------------------------------===// HasLEB128 = true; SupportsDebugInformation = true; - ExceptionsType = ExceptionHandling::None; - DwarfUsesInlineInfoSection = false; - DwarfSectionOffsetDirective = ".offset"; - } const char* diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp index 072ee49..45d009c 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp @@ -88,7 +88,7 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT, MCCodeEmitter *_Emitter, bool RelaxAll, bool NoExecStack) { - return createPureStreamer(Ctx, MAB, _OS, _Emitter); + return createELFStreamer(Ctx, MAB, _OS, _Emitter, false, false); } extern "C" void LLVMInitializeR600TargetMC() { diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h index 363a4af..09d0d5b 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h @@ -23,9 +23,11 @@ class MCAsmBackend; class MCCodeEmitter; class MCContext; class MCInstrInfo; +class MCObjectWriter; class MCRegisterInfo; class MCSubtargetInfo; class Target; +class raw_ostream; extern Target TheAMDGPUTarget; @@ -41,6 +43,8 @@ MCCodeEmitter *createSIMCCodeEmitter(const 
MCInstrInfo &MCII, MCAsmBackend *createAMDGPUAsmBackend(const Target &T, StringRef TT, StringRef CPU); + +MCObjectWriter *createAMDGPUELFObjectWriter(raw_ostream &OS); } // End llvm namespace #define GET_REGINFO_ENUM diff --git a/lib/Target/R600/MCTargetDesc/CMakeLists.txt b/lib/Target/R600/MCTargetDesc/CMakeLists.txt index 37e714c..3ccdf42 100644 --- a/lib/Target/R600/MCTargetDesc/CMakeLists.txt +++ b/lib/Target/R600/MCTargetDesc/CMakeLists.txt @@ -1,6 +1,7 @@ add_llvm_library(LLVMR600Desc AMDGPUAsmBackend.cpp + AMDGPUELFObjectWriter.cpp AMDGPUMCTargetDesc.cpp AMDGPUMCAsmInfo.cpp R600MCCodeEmitter.cpp diff --git a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp index d207160..7c83d86 100644 --- a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp +++ b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp @@ -66,8 +66,6 @@ private: void EmitSrcISA(const MCInst &MI, unsigned RegOpIdx, unsigned SelOpIdx, raw_ostream &OS) const; void EmitDst(const MCInst &MI, raw_ostream &OS) const; - void EmitTexInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups, - raw_ostream &OS) const; void EmitFCInstr(const MCInst &MI, raw_ostream &OS) const; void EmitNullBytes(unsigned int byteCount, raw_ostream &OS) const; @@ -103,7 +101,8 @@ enum InstrTypes { INSTR_FC, INSTR_NATIVE, INSTR_VTX, - INSTR_EXPORT + INSTR_EXPORT, + INSTR_CFALU }; enum FCInstr { @@ -140,11 +139,11 @@ MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII, void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const { - if (isTexOp(MI.getOpcode())) { - EmitTexInstr(MI, Fixups, OS); - } else if (isFCOp(MI.getOpcode())){ + if (isFCOp(MI.getOpcode())){ EmitFCInstr(MI, OS); } else if (MI.getOpcode() == AMDGPU::RETURN || + MI.getOpcode() == AMDGPU::FETCH_CLAUSE || + MI.getOpcode() == AMDGPU::ALU_CLAUSE || MI.getOpcode() == AMDGPU::BUNDLE || MI.getOpcode() == AMDGPU::KILL) { return; @@ -169,24 +168,136 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS, case AMDGPU::TEX_VTX_TEXBUF : { uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups); uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset + InstWord2 |= 1 << 19; - EmitByte(INSTR_VTX, OS); + EmitByte(INSTR_NATIVE, OS); Emit(InstWord01, OS); + EmitByte(INSTR_NATIVE, OS); Emit(InstWord2, OS); + Emit((u_int32_t) 0, OS); + break; + } + case AMDGPU::TEX_LD: + case AMDGPU::TEX_GET_TEXTURE_RESINFO: + case AMDGPU::TEX_SAMPLE: + case AMDGPU::TEX_SAMPLE_C: + case AMDGPU::TEX_SAMPLE_L: + case AMDGPU::TEX_SAMPLE_C_L: + case AMDGPU::TEX_SAMPLE_LB: + case AMDGPU::TEX_SAMPLE_C_LB: + case AMDGPU::TEX_SAMPLE_G: + case AMDGPU::TEX_SAMPLE_C_G: + case AMDGPU::TEX_GET_GRADIENTS_H: + case AMDGPU::TEX_GET_GRADIENTS_V: + case AMDGPU::TEX_SET_GRADIENTS_H: + case AMDGPU::TEX_SET_GRADIENTS_V: { + unsigned Opcode = MI.getOpcode(); + bool HasOffsets = (Opcode == AMDGPU::TEX_LD); + unsigned OpOffset = HasOffsets ? 
3 : 0; + int64_t Sampler = MI.getOperand(OpOffset + 3).getImm(); + int64_t TextureType = MI.getOperand(OpOffset + 4).getImm(); + + uint32_t SrcSelect[4] = {0, 1, 2, 3}; + uint32_t Offsets[3] = {0, 0, 0}; + uint64_t CoordType[4] = {1, 1, 1, 1}; + + if (HasOffsets) + for (unsigned i = 0; i < 3; i++) { + int SignedOffset = MI.getOperand(i + 2).getImm(); + Offsets[i] = (SignedOffset & 0x1F); + } + + + if (TextureType == TEXTURE_RECT || + TextureType == TEXTURE_SHADOWRECT) { + CoordType[ELEMENT_X] = 0; + CoordType[ELEMENT_Y] = 0; + } + + if (TextureType == TEXTURE_1D_ARRAY || + TextureType == TEXTURE_SHADOW1D_ARRAY) { + if (Opcode == AMDGPU::TEX_SAMPLE_C_L || + Opcode == AMDGPU::TEX_SAMPLE_C_LB) { + CoordType[ELEMENT_Y] = 0; + } else { + CoordType[ELEMENT_Z] = 0; + SrcSelect[ELEMENT_Z] = ELEMENT_Y; + } + } else if (TextureType == TEXTURE_2D_ARRAY || + TextureType == TEXTURE_SHADOW2D_ARRAY) { + CoordType[ELEMENT_Z] = 0; + } + + + if ((TextureType == TEXTURE_SHADOW1D || + TextureType == TEXTURE_SHADOW2D || + TextureType == TEXTURE_SHADOWRECT || + TextureType == TEXTURE_SHADOW1D_ARRAY) && + Opcode != AMDGPU::TEX_SAMPLE_C_L && + Opcode != AMDGPU::TEX_SAMPLE_C_LB) { + SrcSelect[ELEMENT_W] = ELEMENT_Z; + } + + uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups) | + CoordType[ELEMENT_X] << 60 | CoordType[ELEMENT_Y] << 61 | + CoordType[ELEMENT_Z] << 62 | CoordType[ELEMENT_W] << 63; + uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 | + SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 | + SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 | + Offsets[2] << 10; + + EmitByte(INSTR_NATIVE, OS); + Emit(Word01, OS); + EmitByte(INSTR_NATIVE, OS); + Emit(Word2, OS); + Emit((u_int32_t) 0, OS); break; } + case AMDGPU::CF_ALU: + case AMDGPU::CF_ALU_PUSH_BEFORE: { + uint64_t Inst = getBinaryCodeForInstr(MI, Fixups); + EmitByte(INSTR_NATIVE, OS); + Emit(Inst, OS); + break; + } + case AMDGPU::CF_CALL_FS_EG: + case AMDGPU::CF_CALL_FS_R600: + return; + case AMDGPU::CF_TC_EG: + case AMDGPU::CF_VC_EG: + case AMDGPU::CF_TC_R600: + case AMDGPU::CF_VC_R600: + case AMDGPU::WHILE_LOOP_EG: + case AMDGPU::END_LOOP_EG: + case AMDGPU::LOOP_BREAK_EG: + case AMDGPU::CF_CONTINUE_EG: + case AMDGPU::CF_JUMP_EG: + case AMDGPU::CF_ELSE_EG: + case AMDGPU::POP_EG: + case AMDGPU::WHILE_LOOP_R600: + case AMDGPU::END_LOOP_R600: + case AMDGPU::LOOP_BREAK_R600: + case AMDGPU::CF_CONTINUE_R600: + case AMDGPU::CF_JUMP_R600: + case AMDGPU::CF_ELSE_R600: + case AMDGPU::POP_R600: case AMDGPU::EG_ExportSwz: case AMDGPU::R600_ExportSwz: case AMDGPU::EG_ExportBuf: - case AMDGPU::R600_ExportBuf: { + case AMDGPU::R600_ExportBuf: + case AMDGPU::PAD: + case AMDGPU::CF_END_R600: + case AMDGPU::CF_END_EG: + case AMDGPU::CF_END_CM: { uint64_t Inst = getBinaryCodeForInstr(MI, Fixups); - EmitByte(INSTR_EXPORT, OS); + EmitByte(INSTR_NATIVE, OS); Emit(Inst, OS); break; } - default: - EmitALUInstr(MI, Fixups, OS); + uint64_t Inst = getBinaryCodeForInstr(MI, Fixups); + EmitByte(INSTR_NATIVE, OS); + Emit(Inst, OS); break; } } @@ -320,7 +431,7 @@ void R600MCCodeEmitter::EmitSrcISA(const MCInst &MI, unsigned RegOpIdx, } if (Reg == AMDGPU::ALU_LITERAL_X) { - unsigned ImmOpIndex = MI.getNumOperands() - 1; + unsigned ImmOpIndex = MI.getNumOperands() - 2; MCOperand ImmOp = MI.getOperand(ImmOpIndex); if (ImmOp.isFPImm()) { InlineConstant.f = ImmOp.getFPImm(); @@ -334,99 +445,6 @@ void R600MCCodeEmitter::EmitSrcISA(const MCInst &MI, unsigned RegOpIdx, Emit(InlineConstant.i, OS); } -void R600MCCodeEmitter::EmitTexInstr(const MCInst &MI, - 
SmallVectorImpl<MCFixup> &Fixups, - raw_ostream &OS) const { - - unsigned Opcode = MI.getOpcode(); - bool hasOffsets = (Opcode == AMDGPU::TEX_LD); - unsigned OpOffset = hasOffsets ? 3 : 0; - int64_t Resource = MI.getOperand(OpOffset + 2).getImm(); - int64_t Sampler = MI.getOperand(OpOffset + 3).getImm(); - int64_t TextureType = MI.getOperand(OpOffset + 4).getImm(); - unsigned srcSelect[4] = {0, 1, 2, 3}; - - // Emit instruction type - EmitByte(1, OS); - - // Emit instruction - EmitByte(getBinaryCodeForInstr(MI, Fixups), OS); - - // Emit resource id - EmitByte(Resource, OS); - - // Emit source register - EmitByte(getHWReg(MI.getOperand(1).getReg()), OS); - - // XXX: Emit src isRelativeAddress - EmitByte(0, OS); - - // Emit destination register - EmitByte(getHWReg(MI.getOperand(0).getReg()), OS); - - // XXX: Emit dst isRealtiveAddress - EmitByte(0, OS); - - // XXX: Emit dst select - EmitByte(0, OS); // X - EmitByte(1, OS); // Y - EmitByte(2, OS); // Z - EmitByte(3, OS); // W - - // XXX: Emit lod bias - EmitByte(0, OS); - - // XXX: Emit coord types - unsigned coordType[4] = {1, 1, 1, 1}; - - if (TextureType == TEXTURE_RECT - || TextureType == TEXTURE_SHADOWRECT) { - coordType[ELEMENT_X] = 0; - coordType[ELEMENT_Y] = 0; - } - - if (TextureType == TEXTURE_1D_ARRAY - || TextureType == TEXTURE_SHADOW1D_ARRAY) { - if (Opcode == AMDGPU::TEX_SAMPLE_C_L || Opcode == AMDGPU::TEX_SAMPLE_C_LB) { - coordType[ELEMENT_Y] = 0; - } else { - coordType[ELEMENT_Z] = 0; - srcSelect[ELEMENT_Z] = ELEMENT_Y; - } - } else if (TextureType == TEXTURE_2D_ARRAY - || TextureType == TEXTURE_SHADOW2D_ARRAY) { - coordType[ELEMENT_Z] = 0; - } - - for (unsigned i = 0; i < 4; i++) { - EmitByte(coordType[i], OS); - } - - // XXX: Emit offsets - if (hasOffsets) - for (unsigned i = 2; i < 5; i++) - EmitByte(MI.getOperand(i).getImm()<<1, OS); - else - EmitNullBytes(3, OS); - - // Emit sampler id - EmitByte(Sampler, OS); - - // XXX:Emit source select - if ((TextureType == TEXTURE_SHADOW1D - || TextureType == TEXTURE_SHADOW2D - || TextureType == TEXTURE_SHADOWRECT - || TextureType == TEXTURE_SHADOW1D_ARRAY) - && Opcode != AMDGPU::TEX_SAMPLE_C_L - && Opcode != AMDGPU::TEX_SAMPLE_C_LB) { - srcSelect[ELEMENT_W] = ELEMENT_Z; - } - - for (unsigned i = 0; i < 4; i++) { - EmitByte(srcSelect[i], OS); - } -} - void R600MCCodeEmitter::EmitFCInstr(const MCInst &MI, raw_ostream &OS) const { // Emit instruction type diff --git a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp index e27abcc..5af8320 100644 --- a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp +++ b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp @@ -39,8 +39,6 @@ class SIMCCodeEmitter : public AMDGPUMCCodeEmitter { void operator=(const SIMCCodeEmitter &) LLVM_DELETED_FUNCTION; const MCInstrInfo &MCII; const MCRegisterInfo &MRI; - const MCSubtargetInfo &STI; - MCContext &Ctx; /// \brief Can this operand also contain immediate values? 
bool isSrcOperand(const MCInstrDesc &Desc, unsigned OpNo) const; @@ -51,7 +49,7 @@ class SIMCCodeEmitter : public AMDGPUMCCodeEmitter { public: SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri, const MCSubtargetInfo &sti, MCContext &ctx) - : MCII(mcii), MRI(mri), STI(sti), Ctx(ctx) { } + : MCII(mcii), MRI(mri) { } ~SIMCCodeEmitter() { } diff --git a/lib/Target/R600/Processors.td b/lib/Target/R600/Processors.td index 868810c..e024e66 100644 --- a/lib/Target/R600/Processors.td +++ b/lib/Target/R600/Processors.td @@ -13,18 +13,39 @@ class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Features> : Processor<Name, itin, Features>; -def : Proc<"", R600_EG_Itin, [FeatureR600ALUInst]>; -def : Proc<"r600", R600_EG_Itin, [FeatureR600ALUInst]>; -def : Proc<"rv710", R600_EG_Itin, []>; -def : Proc<"rv730", R600_EG_Itin, []>; -def : Proc<"rv770", R600_EG_Itin, [FeatureFP64]>; -def : Proc<"cedar", R600_EG_Itin, [FeatureByteAddress, FeatureImages]>; -def : Proc<"redwood", R600_EG_Itin, [FeatureByteAddress, FeatureImages]>; -def : Proc<"juniper", R600_EG_Itin, [FeatureByteAddress, FeatureImages]>; -def : Proc<"cypress", R600_EG_Itin, [FeatureByteAddress, FeatureImages, FeatureFP64]>; -def : Proc<"barts", R600_EG_Itin, [FeatureByteAddress, FeatureImages]>; -def : Proc<"turks", R600_EG_Itin, [FeatureByteAddress, FeatureImages]>; -def : Proc<"caicos", R600_EG_Itin, [FeatureByteAddress, FeatureImages]>; -def : Proc<"cayman", R600_EG_Itin, [FeatureByteAddress, FeatureImages, FeatureFP64]>; -def : Proc<"SI", SI_Itin, [Feature64BitPtr]>; - +def : Proc<"", R600_VLIW5_Itin, + [FeatureR600ALUInst, FeatureVertexCache]>; +def : Proc<"r600", R600_VLIW5_Itin, + [FeatureR600ALUInst , FeatureVertexCache]>; +def : Proc<"rs880", R600_VLIW5_Itin, + [FeatureR600ALUInst]>; +def : Proc<"rv670", R600_VLIW5_Itin, + [FeatureR600ALUInst, FeatureFP64, FeatureVertexCache]>; +def : Proc<"rv710", R600_VLIW5_Itin, + [FeatureVertexCache]>; +def : Proc<"rv730", R600_VLIW5_Itin, + [FeatureVertexCache]>; +def : Proc<"rv770", R600_VLIW5_Itin, + [FeatureFP64, FeatureVertexCache]>; +def : Proc<"cedar", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; +def : Proc<"redwood", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; +def : Proc<"sumo", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages]>; +def : Proc<"juniper", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; +def : Proc<"cypress", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages, FeatureFP64, FeatureVertexCache]>; +def : Proc<"barts", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; +def : Proc<"turks", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; +def : Proc<"caicos", R600_VLIW5_Itin, + [FeatureByteAddress, FeatureImages]>; +def : Proc<"cayman", R600_VLIW4_Itin, + [FeatureByteAddress, FeatureImages, FeatureFP64]>;def : Proc<"SI", SI_Itin, [Feature64BitPtr, FeatureFP64]>; +def : Proc<"tahiti", SI_Itin, [Feature64BitPtr, FeatureFP64]>; +def : Proc<"pitcairn", SI_Itin, [Feature64BitPtr, FeatureFP64]>; +def : Proc<"verde", SI_Itin, [Feature64BitPtr, FeatureFP64]>; +def : Proc<"oland", SI_Itin, [Feature64BitPtr, FeatureFP64]>; diff --git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/R600/R600ControlFlowFinalizer.cpp new file mode 100644 index 0000000..0995795 --- /dev/null +++ b/lib/Target/R600/R600ControlFlowFinalizer.cpp @@ -0,0 +1,496 @@ +//===-- R600ControlFlowFinalizer.cpp - Finalize 
Control Flow Inst----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This pass compute turns all control flow pseudo instructions into native one +/// computing their address on the fly ; it also sets STACK_SIZE info. +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "r600cf" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +#include "AMDGPU.h" +#include "R600Defines.h" +#include "R600InstrInfo.h" +#include "R600MachineFunctionInfo.h" +#include "R600RegisterInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" + +namespace llvm { + +class R600ControlFlowFinalizer : public MachineFunctionPass { + +private: + typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile; + + enum ControlFlowInstruction { + CF_TC, + CF_VC, + CF_CALL_FS, + CF_WHILE_LOOP, + CF_END_LOOP, + CF_LOOP_BREAK, + CF_LOOP_CONTINUE, + CF_JUMP, + CF_ELSE, + CF_POP, + CF_END + }; + + static char ID; + const R600InstrInfo *TII; + const R600RegisterInfo &TRI; + unsigned MaxFetchInst; + const AMDGPUSubtarget &ST; + + bool IsTrivialInst(MachineInstr *MI) const { + switch (MI->getOpcode()) { + case AMDGPU::KILL: + case AMDGPU::RETURN: + return true; + default: + return false; + } + } + + const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const { + unsigned Opcode = 0; + bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX); + switch (CFI) { + case CF_TC: + Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600; + break; + case CF_VC: + Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600; + break; + case CF_CALL_FS: + Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600; + break; + case CF_WHILE_LOOP: + Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600; + break; + case CF_END_LOOP: + Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600; + break; + case CF_LOOP_BREAK: + Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600; + break; + case CF_LOOP_CONTINUE: + Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600; + break; + case CF_JUMP: + Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600; + break; + case CF_ELSE: + Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600; + break; + case CF_POP: + Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600; + break; + case CF_END: + if (ST.device()->getDeviceFlag() == OCL_DEVICE_CAYMAN) { + Opcode = AMDGPU::CF_END_CM; + break; + } + Opcode = isEg ? 
AMDGPU::CF_END_EG : AMDGPU::CF_END_R600; + break; + } + assert (Opcode && "No opcode selected"); + return TII->get(Opcode); + } + + bool isCompatibleWithClause(const MachineInstr *MI, + std::set<unsigned> &DstRegs, std::set<unsigned> &SrcRegs) const { + unsigned DstMI, SrcMI; + for (MachineInstr::const_mop_iterator I = MI->operands_begin(), + E = MI->operands_end(); I != E; ++I) { + const MachineOperand &MO = *I; + if (!MO.isReg()) + continue; + if (MO.isDef()) + DstMI = MO.getReg(); + if (MO.isUse()) { + unsigned Reg = MO.getReg(); + if (AMDGPU::R600_Reg128RegClass.contains(Reg)) + SrcMI = Reg; + else + SrcMI = TRI.getMatchingSuperReg(Reg, + TRI.getSubRegFromChannel(TRI.getHWRegChan(Reg)), + &AMDGPU::R600_Reg128RegClass); + } + } + if ((DstRegs.find(SrcMI) == DstRegs.end()) && + (SrcRegs.find(DstMI) == SrcRegs.end())) { + SrcRegs.insert(SrcMI); + DstRegs.insert(DstMI); + return true; + } else + return false; + } + + ClauseFile + MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I) + const { + MachineBasicBlock::iterator ClauseHead = I; + std::vector<MachineInstr *> ClauseContent; + unsigned AluInstCount = 0; + bool IsTex = TII->usesTextureCache(ClauseHead); + std::set<unsigned> DstRegs, SrcRegs; + for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) { + if (IsTrivialInst(I)) + continue; + if (AluInstCount > MaxFetchInst) + break; + if ((IsTex && !TII->usesTextureCache(I)) || + (!IsTex && !TII->usesVertexCache(I))) + break; + if (!isCompatibleWithClause(I, DstRegs, SrcRegs)) + break; + AluInstCount ++; + ClauseContent.push_back(I); + } + MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), + getHWInstrDesc(IsTex?CF_TC:CF_VC)) + .addImm(0) // ADDR + .addImm(AluInstCount - 1); // COUNT + return ClauseFile(MIb, ClauseContent); + } + + void getLiteral(MachineInstr *MI, std::vector<unsigned> &Lits) const { + unsigned LiteralRegs[] = { + AMDGPU::ALU_LITERAL_X, + AMDGPU::ALU_LITERAL_Y, + AMDGPU::ALU_LITERAL_Z, + AMDGPU::ALU_LITERAL_W + }; + for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg()) + continue; + if (MO.getReg() != AMDGPU::ALU_LITERAL_X) + continue; + unsigned ImmIdx = TII->getOperandIdx(MI->getOpcode(), R600Operands::IMM); + int64_t Imm = MI->getOperand(ImmIdx).getImm(); + std::vector<unsigned>::iterator It = + std::find(Lits.begin(), Lits.end(), Imm); + if (It != Lits.end()) { + unsigned Index = It - Lits.begin(); + MO.setReg(LiteralRegs[Index]); + } else { + assert(Lits.size() < 4 && "Too many literals in Instruction Group"); + MO.setReg(LiteralRegs[Lits.size()]); + Lits.push_back(Imm); + } + } + } + + MachineBasicBlock::iterator insertLiterals( + MachineBasicBlock::iterator InsertPos, + const std::vector<unsigned> &Literals) const { + MachineBasicBlock *MBB = InsertPos->getParent(); + for (unsigned i = 0, e = Literals.size(); i < e; i+=2) { + unsigned LiteralPair0 = Literals[i]; + unsigned LiteralPair1 = (i + 1 < e)?Literals[i + 1]:0; + InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(), + TII->get(AMDGPU::LITERALS)) + .addImm(LiteralPair0) + .addImm(LiteralPair1); + } + return InsertPos; + } + + ClauseFile + MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I) + const { + MachineBasicBlock::iterator ClauseHead = I; + std::vector<MachineInstr *> ClauseContent; + I++; + for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) { + if (IsTrivialInst(I)) { + ++I; + continue; + } + if (!I->isBundle() && !TII->isALUInstr(I->getOpcode())) + 
break; + std::vector<unsigned> Literals; + if (I->isBundle()) { + MachineInstr *DeleteMI = I; + MachineBasicBlock::instr_iterator BI = I.getInstrIterator(); + while (++BI != E && BI->isBundledWithPred()) { + BI->unbundleFromPred(); + for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = BI->getOperand(i); + if (MO.isReg() && MO.isInternalRead()) + MO.setIsInternalRead(false); + } + getLiteral(BI, Literals); + ClauseContent.push_back(BI); + } + I = BI; + DeleteMI->eraseFromParent(); + } else { + getLiteral(I, Literals); + ClauseContent.push_back(I); + I++; + } + for (unsigned i = 0, e = Literals.size(); i < e; i+=2) { + unsigned literal0 = Literals[i]; + unsigned literal2 = (i + 1 < e)?Literals[i + 1]:0; + MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(), + TII->get(AMDGPU::LITERALS)) + .addImm(literal0) + .addImm(literal2); + ClauseContent.push_back(MILit); + } + } + ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1); + return ClauseFile(ClauseHead, ClauseContent); + } + + void + EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause, + unsigned &CfCount) { + CounterPropagateAddr(Clause.first, CfCount); + MachineBasicBlock *BB = Clause.first->getParent(); + BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE)) + .addImm(CfCount); + for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) { + BB->splice(InsertPos, BB, Clause.second[i]); + } + CfCount += 2 * Clause.second.size(); + } + + void + EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause, + unsigned &CfCount) { + CounterPropagateAddr(Clause.first, CfCount); + MachineBasicBlock *BB = Clause.first->getParent(); + BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE)) + .addImm(CfCount); + for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) { + BB->splice(InsertPos, BB, Clause.second[i]); + } + CfCount += Clause.second.size(); + } + + void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const { + MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm()); + } + void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr) + const { + for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end(); + It != E; ++It) { + MachineInstr *MI = *It; + CounterPropagateAddr(MI, Addr); + } + } + + unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const { + switch (ST.device()->getGeneration()) { + case AMDGPUDeviceInfo::HD4XXX: + if (hasPush) + StackSubEntry += 2; + break; + case AMDGPUDeviceInfo::HD5XXX: + if (hasPush) + StackSubEntry ++; + case AMDGPUDeviceInfo::HD6XXX: + StackSubEntry += 2; + break; + } + return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4 + } + +public: + R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID), + TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())), + TRI(TII->getRegisterInfo()), + ST(tm.getSubtarget<AMDGPUSubtarget>()) { + const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>(); + if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX) + MaxFetchInst = 8; + else + MaxFetchInst = 16; + } + + virtual bool runOnMachineFunction(MachineFunction &MF) { + unsigned MaxStack = 0; + unsigned CurrentStack = 0; + bool hasPush; + for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME; + ++MB) { + MachineBasicBlock &MBB = *MB; + unsigned CfCount = 0; + std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack; + std::vector<MachineInstr * > IfThenElseStack; + R600MachineFunctionInfo *MFI = 
MF.getInfo<R600MachineFunctionInfo>(); + if (MFI->ShaderType == 1) { + BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()), + getHWInstrDesc(CF_CALL_FS)); + CfCount++; + } + std::vector<ClauseFile> FetchClauses, AluClauses; + for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); + I != E;) { + if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) { + DEBUG(dbgs() << CfCount << ":"; I->dump();); + FetchClauses.push_back(MakeFetchClause(MBB, I)); + CfCount++; + continue; + } + + MachineBasicBlock::iterator MI = I; + I++; + switch (MI->getOpcode()) { + case AMDGPU::CF_ALU_PUSH_BEFORE: + CurrentStack++; + MaxStack = std::max(MaxStack, CurrentStack); + hasPush = true; + case AMDGPU::CF_ALU: + I = MI; + AluClauses.push_back(MakeALUClause(MBB, I)); + case AMDGPU::EG_ExportBuf: + case AMDGPU::EG_ExportSwz: + case AMDGPU::R600_ExportBuf: + case AMDGPU::R600_ExportSwz: + case AMDGPU::RAT_WRITE_CACHELESS_32_eg: + case AMDGPU::RAT_WRITE_CACHELESS_128_eg: + DEBUG(dbgs() << CfCount << ":"; MI->dump();); + CfCount++; + break; + case AMDGPU::WHILELOOP: { + CurrentStack+=4; + MaxStack = std::max(MaxStack, CurrentStack); + MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), + getHWInstrDesc(CF_WHILE_LOOP)) + .addImm(1); + std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount, + std::set<MachineInstr *>()); + Pair.second.insert(MIb); + LoopStack.push_back(Pair); + MI->eraseFromParent(); + CfCount++; + break; + } + case AMDGPU::ENDLOOP: { + CurrentStack-=4; + std::pair<unsigned, std::set<MachineInstr *> > Pair = + LoopStack.back(); + LoopStack.pop_back(); + CounterPropagateAddr(Pair.second, CfCount); + BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP)) + .addImm(Pair.first + 1); + MI->eraseFromParent(); + CfCount++; + break; + } + case AMDGPU::IF_PREDICATE_SET: { + MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), + getHWInstrDesc(CF_JUMP)) + .addImm(0) + .addImm(0); + IfThenElseStack.push_back(MIb); + DEBUG(dbgs() << CfCount << ":"; MIb->dump();); + MI->eraseFromParent(); + CfCount++; + break; + } + case AMDGPU::ELSE: { + MachineInstr * JumpInst = IfThenElseStack.back(); + IfThenElseStack.pop_back(); + CounterPropagateAddr(JumpInst, CfCount); + MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), + getHWInstrDesc(CF_ELSE)) + .addImm(0) + .addImm(1); + DEBUG(dbgs() << CfCount << ":"; MIb->dump();); + IfThenElseStack.push_back(MIb); + MI->eraseFromParent(); + CfCount++; + break; + } + case AMDGPU::ENDIF: { + CurrentStack--; + MachineInstr *IfOrElseInst = IfThenElseStack.back(); + IfThenElseStack.pop_back(); + CounterPropagateAddr(IfOrElseInst, CfCount + 1); + MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), + getHWInstrDesc(CF_POP)) + .addImm(CfCount + 1) + .addImm(1); + (void)MIb; + DEBUG(dbgs() << CfCount << ":"; MIb->dump();); + MI->eraseFromParent(); + CfCount++; + break; + } + case AMDGPU::PREDICATED_BREAK: { + CurrentStack--; + CfCount += 3; + BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP)) + .addImm(CfCount) + .addImm(1); + MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), + getHWInstrDesc(CF_LOOP_BREAK)) + .addImm(0); + BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP)) + .addImm(CfCount) + .addImm(1); + LoopStack.back().second.insert(MIb); + MI->eraseFromParent(); + break; + } + case AMDGPU::CONTINUE: { + MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), + getHWInstrDesc(CF_LOOP_CONTINUE)) + .addImm(0); + LoopStack.back().second.insert(MIb); + MI->eraseFromParent(); + CfCount++; + 
break; + } + case AMDGPU::RETURN: { + BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END)); + CfCount++; + MI->eraseFromParent(); + if (CfCount % 2) { + BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD)); + CfCount++; + } + for (unsigned i = 0, e = FetchClauses.size(); i < e; i++) + EmitFetchClause(I, FetchClauses[i], CfCount); + for (unsigned i = 0, e = AluClauses.size(); i < e; i++) + EmitALUClause(I, AluClauses[i], CfCount); + } + default: + break; + } + } + MFI->StackSize = getHWStackSize(MaxStack, hasPush); + } + + return false; + } + + const char *getPassName() const { + return "R600 Control Flow Finalizer Pass"; + } +}; + +char R600ControlFlowFinalizer::ID = 0; + +} + + +llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) { + return new R600ControlFlowFinalizer(TM); +} diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h index 16cfcf5..303ca73 100644 --- a/lib/Target/R600/R600Defines.h +++ b/lib/Target/R600/R600Defines.h @@ -39,7 +39,9 @@ namespace R600_InstFlag { //FlagOperand bits 7, 8 NATIVE_OPERANDS = (1 << 9), OP1 = (1 << 10), - OP2 = (1 << 11) + OP2 = (1 << 11), + VTX_INST = (1 << 12), + TEX_INST = (1 << 13) }; } @@ -78,6 +80,7 @@ namespace R600Operands { LAST, PRED_SEL, IMM, + BANK_SWIZZLE, COUNT }; @@ -85,11 +88,11 @@ namespace R600Operands { // W C S S S S S S S S S S S // R O D L S R R R R S R R R R S R R R L P // D U I M R A R C C C C R C C C C R C C C A R I -// S E U T O E M C 0 0 0 0 C 1 1 1 1 C 2 2 2 S E M -// T M P E D L P 0 N R A S 1 N R A S 2 N R S T D M - {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,11,12}, - {0, 1, 2, 3, 4 ,5 ,6 ,7, 8, 9,10,11,12,13,14,15,16,-1,-1,-1,-1,17,18,19}, - {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17} +// S E U T O E M C 0 0 0 0 C 1 1 1 1 C 2 2 2 S E M B +// T M P E D L P 0 N R A S 1 N R A S 2 N R S T D M S + {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,11,12,13}, + {0, 1, 2, 3, 4 ,5 ,6 ,7, 8, 9,10,11,12,13,14,15,16,-1,-1,-1,-1,17,18,19,20}, + {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17,18} }; } diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp new file mode 100644 index 0000000..3fdc678 --- /dev/null +++ b/lib/Target/R600/R600EmitClauseMarkers.cpp @@ -0,0 +1,255 @@ +//===-- R600EmitClauseMarkers.cpp - Emit CF_ALU ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file +/// Add CF_ALU. R600 Alu instructions are grouped in clause which can hold +/// 128 Alu instructions ; these instructions can access up to 4 prefetched +/// 4 lines of 16 registers from constant buffers. Such ALU clauses are +/// initiated by CF_ALU instructions. 
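As a rough illustration of the clause budget described in this comment (a sketch, not part of the patch): slot accounting follows the OccupiedDwords() helper defined just below, and the pass stops filling a clause at the limit returned by R600InstrInfo::getMaxAlusPerClause() (115 in this patch), which keeps it under the 128-slot capacity mentioned above. The instruction mix in the snippet is made up.

#include <cassert>

// Illustrative slot accounting for one ALU clause, mirroring OccupiedDwords().
static unsigned exampleClauseSlots() {
  unsigned Slots = 0;
  Slots += 4;      // a DOT4/CUBE/INTERP-style op occupies a full instruction group
  Slots += 1 + 2;  // a scalar ALU op that reads two ALU_LITERAL_X operands
  Slots += 1;      // a plain scalar ALU op with no literal operands
  assert(Slots <= 115 && "would exceed getMaxAlusPerClause()");
  return Slots;
}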
+//===----------------------------------------------------------------------===// + +#include "AMDGPU.h" +#include "R600Defines.h" +#include "R600InstrInfo.h" +#include "R600MachineFunctionInfo.h" +#include "R600RegisterInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" + +namespace llvm { + +class R600EmitClauseMarkersPass : public MachineFunctionPass { + +private: + static char ID; + const R600InstrInfo *TII; + + unsigned OccupiedDwords(MachineInstr *MI) const { + switch (MI->getOpcode()) { + case AMDGPU::INTERP_PAIR_XY: + case AMDGPU::INTERP_PAIR_ZW: + case AMDGPU::INTERP_VEC_LOAD: + case AMDGPU::DOT4_eg_pseudo: + case AMDGPU::DOT4_r600_pseudo: + return 4; + case AMDGPU::KILL: + return 0; + default: + break; + } + + if(TII->isVector(*MI) || + TII->isCubeOp(MI->getOpcode()) || + TII->isReductionOp(MI->getOpcode())) + return 4; + + unsigned NumLiteral = 0; + for (MachineInstr::mop_iterator It = MI->operands_begin(), + E = MI->operands_end(); It != E; ++It) { + MachineOperand &MO = *It; + if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X) + ++NumLiteral; + } + return 1 + NumLiteral; + } + + bool isALU(const MachineInstr *MI) const { + if (TII->isALUInstr(MI->getOpcode())) + return true; + if (TII->isVector(*MI) || TII->isCubeOp(MI->getOpcode())) + return true; + switch (MI->getOpcode()) { + case AMDGPU::PRED_X: + case AMDGPU::INTERP_PAIR_XY: + case AMDGPU::INTERP_PAIR_ZW: + case AMDGPU::INTERP_VEC_LOAD: + case AMDGPU::COPY: + case AMDGPU::DOT4_eg_pseudo: + case AMDGPU::DOT4_r600_pseudo: + return true; + default: + return false; + } + } + + bool IsTrivialInst(MachineInstr *MI) const { + switch (MI->getOpcode()) { + case AMDGPU::KILL: + case AMDGPU::RETURN: + return true; + default: + return false; + } + } + + // Register Idx, then Const value + std::vector<std::pair<unsigned, unsigned> > ExtractConstRead(MachineInstr *MI) + const { + const R600Operands::Ops OpTable[3][2] = { + {R600Operands::SRC0, R600Operands::SRC0_SEL}, + {R600Operands::SRC1, R600Operands::SRC1_SEL}, + {R600Operands::SRC2, R600Operands::SRC2_SEL}, + }; + std::vector<std::pair<unsigned, unsigned> > Result; + + if (!TII->isALUInstr(MI->getOpcode())) + return Result; + for (unsigned j = 0; j < 3; j++) { + int SrcIdx = TII->getOperandIdx(MI->getOpcode(), OpTable[j][0]); + if (SrcIdx < 0) + break; + if (MI->getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST) { + unsigned Const = MI->getOperand( + TII->getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm(); + Result.push_back(std::pair<unsigned, unsigned>(SrcIdx, Const)); + } + } + return Result; + } + + std::pair<unsigned, unsigned> getAccessedBankLine(unsigned Sel) const { + // Sel is (512 + (kc_bank << 12) + ConstIndex) << 2 + // (See also R600ISelLowering.cpp) + // ConstIndex value is in [0, 4095]; + return std::pair<unsigned, unsigned>( + ((Sel >> 2) - 512) >> 12, // KC_BANK + // Line Number of ConstIndex + // A line contains 16 constant registers however KCX bank can lock + // two line at the same time ; thus we want to get an even line number. + // Line number can be retrieved with (>>4), using (>>5) <<1 generates + // an even number. 
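      // Worked example (annotation, not part of the patch): kc_bank = 1, ConstIndex = 40
      //   Sel              = (512 + (1 << 12) + 40) << 2 = 18592
      //   (Sel >> 2) - 512 = 4136  ->  KC_BANK = 4136 >> 12 = 1
      //   line             = ((4136 & 4095) >> 5) << 1 = 2
      //   (register 40 sits in line 40 >> 4 = 2; the locked pair of lines is {2, 3})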
+ ((((Sel >> 2) - 512) & 4095) >> 5) << 1); + } + + bool SubstituteKCacheBank(MachineInstr *MI, + std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const { + std::vector<std::pair<unsigned, unsigned> > UsedKCache; + std::vector<std::pair<unsigned, unsigned> > Consts = ExtractConstRead(MI); + assert(TII->isALUInstr(MI->getOpcode()) && "Can't assign Const"); + for (unsigned i = 0, n = Consts.size(); i < n; ++i) { + unsigned Sel = Consts[i].second; + unsigned Chan = Sel & 3, Index = ((Sel >> 2) - 512) & 31; + unsigned KCacheIndex = Index * 4 + Chan; + const std::pair<unsigned, unsigned> &BankLine = getAccessedBankLine(Sel); + if (CachedConsts.empty()) { + CachedConsts.push_back(BankLine); + UsedKCache.push_back(std::pair<unsigned, unsigned>(0, KCacheIndex)); + continue; + } + if (CachedConsts[0] == BankLine) { + UsedKCache.push_back(std::pair<unsigned, unsigned>(0, KCacheIndex)); + continue; + } + if (CachedConsts.size() == 1) { + CachedConsts.push_back(BankLine); + UsedKCache.push_back(std::pair<unsigned, unsigned>(1, KCacheIndex)); + continue; + } + if (CachedConsts[1] == BankLine) { + UsedKCache.push_back(std::pair<unsigned, unsigned>(1, KCacheIndex)); + continue; + } + return false; + } + + for (unsigned i = 0, n = Consts.size(); i < n; ++i) { + switch(UsedKCache[i].first) { + case 0: + MI->getOperand(Consts[i].first).setReg( + AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[i].second)); + break; + case 1: + MI->getOperand(Consts[i].first).setReg( + AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[i].second)); + break; + default: + llvm_unreachable("Wrong Cache Line"); + } + } + return true; + } + + MachineBasicBlock::iterator + MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const { + MachineBasicBlock::iterator ClauseHead = I; + std::vector<std::pair<unsigned, unsigned> > KCacheBanks; + bool PushBeforeModifier = false; + unsigned AluInstCount = 0; + for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) { + if (IsTrivialInst(I)) + continue; + if (!isALU(I)) + break; + if (AluInstCount > TII->getMaxAlusPerClause()) + break; + if (I->getOpcode() == AMDGPU::PRED_X) { + if (TII->getFlagOp(I).getImm() & MO_FLAG_PUSH) + PushBeforeModifier = true; + AluInstCount ++; + continue; + } + if (I->getOpcode() == AMDGPU::KILLGT) { + I++; + break; + } + if (TII->isALUInstr(I->getOpcode()) && + !SubstituteKCacheBank(I, KCacheBanks)) + break; + AluInstCount += OccupiedDwords(I); + } + unsigned Opcode = PushBeforeModifier ? 
+ AMDGPU::CF_ALU_PUSH_BEFORE : AMDGPU::CF_ALU; + BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), TII->get(Opcode)) + .addImm(0) // ADDR + .addImm(KCacheBanks.empty()?0:KCacheBanks[0].first) // KB0 + .addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].first) // KB1 + .addImm(KCacheBanks.empty()?0:2) // KM0 + .addImm((KCacheBanks.size() < 2)?0:2) // KM1 + .addImm(KCacheBanks.empty()?0:KCacheBanks[0].second) // KLINE0 + .addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].second) // KLINE1 + .addImm(AluInstCount); // COUNT + return I; + } + +public: + R600EmitClauseMarkersPass(TargetMachine &tm) : MachineFunctionPass(ID), + TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { } + + virtual bool runOnMachineFunction(MachineFunction &MF) { + for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end(); + BB != BB_E; ++BB) { + MachineBasicBlock &MBB = *BB; + MachineBasicBlock::iterator I = MBB.begin(); + if (I->getOpcode() == AMDGPU::CF_ALU) + continue; // BB was already parsed + for (MachineBasicBlock::iterator E = MBB.end(); I != E;) { + if (isALU(I)) + I = MakeALUClause(MBB, I); + else + ++I; + } + } + return false; + } + + const char *getPassName() const { + return "R600 Emit Clause Markers Pass"; + } +}; + +char R600EmitClauseMarkersPass::ID = 0; + +} + + +llvm::FunctionPass *llvm::createR600EmitClauseMarkers(TargetMachine &TM) { + return new R600EmitClauseMarkersPass(TM); +} + diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp index a73691d..a66baca 100644 --- a/lib/Target/R600/R600ISelLowering.cpp +++ b/lib/Target/R600/R600ISelLowering.cpp @@ -28,7 +28,6 @@ using namespace llvm; R600TargetLowering::R600TargetLowering(TargetMachine &TM) : AMDGPUTargetLowering(TM), TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo())) { - setOperationAction(ISD::MUL, MVT::i64, Expand); addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass); addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass); addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass); @@ -58,7 +57,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom); - setOperationAction(ISD::FPOW, MVT::f32, Custom); setOperationAction(ISD::ROTL, MVT::i32, Custom); @@ -95,6 +93,7 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : setTargetDAGCombine(ISD::SELECT_CC); setBooleanContents(ZeroOrNegativeOneBooleanContent); + setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); setSchedulingPreference(Sched::VLIW); } @@ -316,7 +315,6 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const case ISD::SELECT: return LowerSELECT(Op, DAG); case ISD::STORE: return LowerSTORE(Op, DAG); case ISD::LOAD: return LowerLOAD(Op, DAG); - case ISD::FPOW: return LowerFPOW(Op, DAG); case ISD::FrameIndex: return LowerFrameIndex(Op, DAG); case ISD::INTRINSIC_VOID: { SDValue Chain = Op.getOperand(0); @@ -918,15 +916,6 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const return DAG.getMergeValues(Ops, 2, DL); } -SDValue R600TargetLowering::LowerFPOW(SDValue Op, - SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); - EVT VT = Op.getValueType(); - SDValue LogBase = DAG.getNode(ISD::FLOG2, DL, VT, Op.getOperand(0)); - SDValue MulLogBase = DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), LogBase); - return DAG.getNode(ISD::FEXP2, DL, VT, MulLogBase); -} 
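With the custom FPOW lowering deleted here, pow is instead covered by the POW_Common pattern instantiations added to R600Instructions.td later in this patch (LOG_IEEE/EXP_IEEE/MUL for r600, Evergreen and Cayman). The expansion is the same identity the removed C++ used; the sketch below is only a scalar restatement, not backend code.

#include <cmath>

// pow(x, y) as exp2(y * log2(x)), matching the removed LowerFPOW and the
// new POW_Common<LOG_IEEE, EXP_IEEE, MUL> patterns.
static float powViaExp2Log2(float X, float Y) {
  return std::exp2(Y * std::log2(X));
}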
- /// XXX Only kernel functions are supported, so we can assume for now that /// every function is a kernel function, but in the future we should use /// separate calling conventions for kernel and non-kernel functions. diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h index 5cb4b91..2c09acb 100644 --- a/lib/Target/R600/R600ISelLowering.h +++ b/lib/Target/R600/R600ISelLowering.h @@ -59,7 +59,6 @@ private: SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerFPOW(SDValue Op, SelectionDAG &DAG) const; SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp index 0865098..8fd8385 100644 --- a/lib/Target/R600/R600InstrInfo.cpp +++ b/lib/Target/R600/R600InstrInfo.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "R600InstrInfo.h" +#include "AMDGPU.h" #include "AMDGPUSubtarget.h" #include "AMDGPUTargetMachine.h" #include "R600Defines.h" @@ -29,7 +30,8 @@ using namespace llvm; R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm) : AMDGPUInstrInfo(tm), - RI(tm, *this) + RI(tm, *this), + ST(tm.getSubtarget<AMDGPUSubtarget>()) { } const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const { @@ -139,6 +141,34 @@ bool R600InstrInfo::isALUInstr(unsigned Opcode) const { (TargetFlags & R600_InstFlag::OP3)); } +bool R600InstrInfo::isTransOnly(unsigned Opcode) const { + return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY); +} + +bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const { + return isTransOnly(MI->getOpcode()); +} + +bool R600InstrInfo::usesVertexCache(unsigned Opcode) const { + return ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST; +} + +bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const { + const R600MachineFunctionInfo *MFI = MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>(); + return MFI->ShaderType != ShaderType::COMPUTE && usesVertexCache(MI->getOpcode()); +} + +bool R600InstrInfo::usesTextureCache(unsigned Opcode) const { + return (!ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST) || + (get(Opcode).TSFlags & R600_InstFlag::TEX_INST); +} + +bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const { + const R600MachineFunctionInfo *MFI = MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>(); + return (MFI->ShaderType == ShaderType::COMPUTE && usesVertexCache(MI->getOpcode())) || + usesTextureCache(MI->getOpcode()); +} + bool R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts) const { @@ -183,10 +213,19 @@ R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const { int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]); if (SrcIdx < 0) break; - if (MI->getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST) { + unsigned Reg = MI->getOperand(SrcIdx).getReg(); + if (Reg == AMDGPU::ALU_CONST) { unsigned Const = MI->getOperand( getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm(); Consts.push_back(Const); + continue; + } + if (AMDGPU::R600_KC0RegClass.contains(Reg) || + AMDGPU::R600_KC1RegClass.contains(Reg)) { + unsigned Index = RI.getEncodingValue(Reg) & 0xff; + unsigned Chan = RI.getHWRegChan(Reg); + Consts.push_back((Index << 2) | Chan); + continue; } } } @@ -645,6 +684,9 
@@ const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const { return &AMDGPU::IndirectRegRegClass; } +unsigned R600InstrInfo::getMaxAlusPerClause() const { + return 115; +} MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, @@ -681,7 +723,8 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB //scheduling to the backend, we can change the default to 0. MIB.addImm(1) // $last .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel - .addImm(0); // $literal + .addImm(0) // $literal + .addImm(0); // $bank_swizzle return MIB; } diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h index bf9569e..babe4b8 100644 --- a/lib/Target/R600/R600InstrInfo.h +++ b/lib/Target/R600/R600InstrInfo.h @@ -33,6 +33,7 @@ namespace llvm { class R600InstrInfo : public AMDGPUInstrInfo { private: const R600RegisterInfo RI; + const AMDGPUSubtarget &ST; int getBranchInstr(const MachineOperand &op) const; @@ -53,6 +54,14 @@ namespace llvm { /// \returns true if this \p Opcode represents an ALU instruction. bool isALUInstr(unsigned Opcode) const; + bool isTransOnly(unsigned Opcode) const; + bool isTransOnly(const MachineInstr *MI) const; + + bool usesVertexCache(unsigned Opcode) const; + bool usesVertexCache(const MachineInstr *MI) const; + bool usesTextureCache(unsigned Opcode) const; + bool usesTextureCache(const MachineInstr *MI) const; + bool fitsConstReadLimitations(const std::vector<unsigned>&) const; bool canBundle(const std::vector<MachineInstr *> &) const; @@ -145,6 +154,7 @@ namespace llvm { virtual const TargetRegisterClass *getSuperIndirectRegClass() const; + unsigned getMaxAlusPerClause() const; ///buildDefaultInstruction - This function returns a MachineInstr with /// all the instruction modifiers initialized to their default values. 
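The usesVertexCache/usesTextureCache queries declared above combine the new VTX_INST/TEX_INST flags with the subtarget and shader type. A simplified, free-standing restatement of that decision (illustration only; the parameters are plain bools, not the real API):

// Which fetch path an instruction takes, per the logic added to R600InstrInfo.cpp.
static bool goesThroughTextureCache(bool IsTexInst, bool IsVtxInst,
                                    bool HasVertexCache, bool IsComputeShader) {
  if (IsTexInst)
    return true;                 // texture fetches always use the texture cache
  if (IsVtxInst && !HasVertexCache)
    return true;                 // no vertex cache on this subtarget
  if (IsVtxInst && IsComputeShader)
    return true;                 // compute shaders fetch through the texture cache
  return false;                  // remaining VTX fetches use the vertex cache
}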
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td index 8c50d54..1060b0a 100644 --- a/lib/Target/R600/R600Instructions.td +++ b/lib/Target/R600/R600Instructions.td @@ -13,11 +13,12 @@ include "R600Intrinsics.td" -class InstR600 <bits<11> inst, dag outs, dag ins, string asm, list<dag> pattern, +class InstR600 <dag outs, dag ins, string asm, list<dag> pattern, InstrItinClass itin> : AMDGPUInst <outs, ins, asm, pattern> { field bits<64> Inst; + bit TransOnly = 0; bit Trig = 0; bit Op3 = 0; bit isVector = 0; @@ -25,9 +26,9 @@ class InstR600 <bits<11> inst, dag outs, dag ins, string asm, list<dag> pattern, bit Op1 = 0; bit Op2 = 0; bit HasNativeOperands = 0; + bit VTXInst = 0; + bit TEXInst = 0; - bits<11> op_code = inst; - //let Inst = inst; let Namespace = "AMDGPU"; let OutOperandList = outs; let InOperandList = ins; @@ -35,6 +36,7 @@ class InstR600 <bits<11> inst, dag outs, dag ins, string asm, list<dag> pattern, let Pattern = pattern; let Itinerary = itin; + let TSFlags{0} = TransOnly; let TSFlags{4} = Trig; let TSFlags{5} = Op3; @@ -45,11 +47,12 @@ class InstR600 <bits<11> inst, dag outs, dag ins, string asm, list<dag> pattern, let TSFlags{9} = HasNativeOperands; let TSFlags{10} = Op1; let TSFlags{11} = Op2; + let TSFlags{12} = VTXInst; + let TSFlags{13} = TEXInst; } class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> : - AMDGPUInst <outs, ins, asm, pattern> { - field bits<64> Inst; + InstR600 <outs, ins, asm, pattern, NullALU> { let Namespace = "AMDGPU"; } @@ -74,6 +77,9 @@ class InstFlag<string PM = "printOperand", int Default = 0> def SEL : OperandWithDefaultOps <i32, (ops (i32 -1))> { let PrintMethod = "printSel"; } +def BANK_SWIZZLE : OperandWithDefaultOps <i32, (ops (i32 0))> { + let PrintMethod = "printSel"; +} def LITERAL : InstFlag<"printLiteral">; @@ -137,7 +143,7 @@ class R600ALU_Word1 { field bits<32> Word1; bits<11> dst; - bits<3> bank_swizzle = 0; + bits<3> bank_swizzle; bits<1> dst_rel; bits<1> clamp; @@ -234,6 +240,80 @@ class VTX_WORD1_GPR { let Word1{31} = SRF_MODE_ALL; } +class TEX_WORD0 { + field bits<32> Word0; + + bits<5> TEX_INST; + bits<2> INST_MOD; + bits<1> FETCH_WHOLE_QUAD; + bits<8> RESOURCE_ID; + bits<7> SRC_GPR; + bits<1> SRC_REL; + bits<1> ALT_CONST; + bits<2> RESOURCE_INDEX_MODE; + bits<2> SAMPLER_INDEX_MODE; + + let Word0{4-0} = TEX_INST; + let Word0{6-5} = INST_MOD; + let Word0{7} = FETCH_WHOLE_QUAD; + let Word0{15-8} = RESOURCE_ID; + let Word0{22-16} = SRC_GPR; + let Word0{23} = SRC_REL; + let Word0{24} = ALT_CONST; + let Word0{26-25} = RESOURCE_INDEX_MODE; + let Word0{28-27} = SAMPLER_INDEX_MODE; +} + +class TEX_WORD1 { + field bits<32> Word1; + + bits<7> DST_GPR; + bits<1> DST_REL; + bits<3> DST_SEL_X; + bits<3> DST_SEL_Y; + bits<3> DST_SEL_Z; + bits<3> DST_SEL_W; + bits<7> LOD_BIAS; + bits<1> COORD_TYPE_X; + bits<1> COORD_TYPE_Y; + bits<1> COORD_TYPE_Z; + bits<1> COORD_TYPE_W; + + let Word1{6-0} = DST_GPR; + let Word1{7} = DST_REL; + let Word1{11-9} = DST_SEL_X; + let Word1{14-12} = DST_SEL_Y; + let Word1{17-15} = DST_SEL_Z; + let Word1{20-18} = DST_SEL_W; + let Word1{27-21} = LOD_BIAS; + let Word1{28} = COORD_TYPE_X; + let Word1{29} = COORD_TYPE_Y; + let Word1{30} = COORD_TYPE_Z; + let Word1{31} = COORD_TYPE_W; +} + +class TEX_WORD2 { + field bits<32> Word2; + + bits<5> OFFSET_X; + bits<5> OFFSET_Y; + bits<5> OFFSET_Z; + bits<5> SAMPLER_ID; + bits<3> SRC_SEL_X; + bits<3> SRC_SEL_Y; + bits<3> SRC_SEL_Z; + bits<3> SRC_SEL_W; + + let Word2{4-0} = OFFSET_X; + let Word2{9-5} = OFFSET_Y; + let 
Word2{14-10} = OFFSET_Z; + let Word2{19-15} = SAMPLER_ID; + let Word2{22-20} = SRC_SEL_X; + let Word2{25-23} = SRC_SEL_Y; + let Word2{28-26} = SRC_SEL_Z; + let Word2{31-29} = SRC_SEL_W; +} + /* XXX: R600 subtarget uses a slightly different encoding than the other subtargets. We currently handle this in R600MCCodeEmitter, but we may @@ -272,14 +352,14 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { // and R600InstrInfo::getOperandIdx(). class R600_1OP <bits<11> inst, string opName, list<dag> pattern, InstrItinClass itin = AnyALU> : - InstR600 <0, - (outs R600_Reg32:$dst), + InstR600 <(outs R600_Reg32:$dst), (ins WRITE:$write, OMOD:$omod, REL:$dst_rel, CLAMP:$clamp, R600_Reg32:$src0, NEG:$src0_neg, REL:$src0_rel, ABS:$src0_abs, SEL:$src0_sel, - LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal), - !strconcat(opName, + LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal, + BANK_SWIZZLE:$bank_swizzle), + !strconcat(" ", opName, "$clamp $dst$write$dst_rel$omod, " - "$src0_neg$src0_abs$src0$src0_sel$src0_abs$src0_rel, " + "$src0_neg$src0_abs$src0$src0_abs$src0_rel, " "$literal $pred_sel$last"), pattern, itin>, @@ -311,17 +391,17 @@ class R600_1OP_Helper <bits<11> inst, string opName, SDPatternOperator node, // R600InstrInfo::buildDefaultInstruction(), and R600InstrInfo::getOperandIdx(). class R600_2OP <bits<11> inst, string opName, list<dag> pattern, InstrItinClass itin = AnyALU> : - InstR600 <inst, - (outs R600_Reg32:$dst), + InstR600 <(outs R600_Reg32:$dst), (ins UEM:$update_exec_mask, UP:$update_pred, WRITE:$write, OMOD:$omod, REL:$dst_rel, CLAMP:$clamp, R600_Reg32:$src0, NEG:$src0_neg, REL:$src0_rel, ABS:$src0_abs, SEL:$src0_sel, R600_Reg32:$src1, NEG:$src1_neg, REL:$src1_rel, ABS:$src1_abs, SEL:$src1_sel, - LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal), - !strconcat(opName, + LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal, + BANK_SWIZZLE:$bank_swizzle), + !strconcat(" ", opName, "$clamp $update_exec_mask$update_pred$dst$write$dst_rel$omod, " - "$src0_neg$src0_abs$src0$src0_sel$src0_abs$src0_rel, " - "$src1_neg$src1_abs$src1$src1_sel$src1_abs$src1_rel, " + "$src0_neg$src0_abs$src0$src0_abs$src0_rel, " + "$src1_neg$src1_abs$src1$src1_abs$src1_rel, " "$literal $pred_sel$last"), pattern, itin>, @@ -349,17 +429,17 @@ class R600_2OP_Helper <bits<11> inst, string opName, SDPatternOperator node, // R600InstrInfo::getOperandIdx(). 
class R600_3OP <bits<5> inst, string opName, list<dag> pattern, InstrItinClass itin = AnyALU> : - InstR600 <0, - (outs R600_Reg32:$dst), + InstR600 <(outs R600_Reg32:$dst), (ins REL:$dst_rel, CLAMP:$clamp, R600_Reg32:$src0, NEG:$src0_neg, REL:$src0_rel, SEL:$src0_sel, R600_Reg32:$src1, NEG:$src1_neg, REL:$src1_rel, SEL:$src1_sel, R600_Reg32:$src2, NEG:$src2_neg, REL:$src2_rel, SEL:$src2_sel, - LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal), - !strconcat(opName, "$clamp $dst$dst_rel, " - "$src0_neg$src0$src0_sel$src0_rel, " - "$src1_neg$src1$src1_sel$src1_rel, " - "$src2_neg$src2$src2_sel$src2_rel, " + LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal, + BANK_SWIZZLE:$bank_swizzle), + !strconcat(" ", opName, "$clamp $dst$dst_rel, " + "$src0_neg$src0$src0_rel, " + "$src1_neg$src1$src1_rel, " + "$src2_neg$src2$src2_rel, " "$literal $pred_sel$last"), pattern, itin>, @@ -376,8 +456,7 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern, class R600_REDUCTION <bits<11> inst, dag ins, string asm, list<dag> pattern, InstrItinClass itin = VecALU> : - InstR600 <inst, - (outs R600_Reg32:$dst), + InstR600 <(outs R600_Reg32:$dst), ins, asm, pattern, @@ -385,13 +464,35 @@ class R600_REDUCTION <bits<11> inst, dag ins, string asm, list<dag> pattern, class R600_TEX <bits<11> inst, string opName, list<dag> pattern, InstrItinClass itin = AnyALU> : - InstR600 <inst, - (outs R600_Reg128:$dst), - (ins R600_Reg128:$src0, i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget), - !strconcat(opName, "$dst, $src0, $resourceId, $samplerId, $textureTarget"), + InstR600 <(outs R600_Reg128:$DST_GPR), + (ins R600_Reg128:$SRC_GPR, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID, i32imm:$textureTarget), + !strconcat(opName, "$DST_GPR, $SRC_GPR, $RESOURCE_ID, $SAMPLER_ID, $textureTarget"), pattern, - itin>{ - let Inst {10-0} = inst; + itin>, TEX_WORD0, TEX_WORD1, TEX_WORD2 { + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; + + let TEX_INST = inst{4-0}; + let SRC_REL = 0; + let DST_REL = 0; + let DST_SEL_X = 0; + let DST_SEL_Y = 1; + let DST_SEL_Z = 2; + let DST_SEL_W = 3; + let LOD_BIAS = 0; + + let INST_MOD = 0; + let FETCH_WHOLE_QUAD = 0; + let ALT_CONST = 0; + let SAMPLER_INDEX_MODE = 0; + let RESOURCE_INDEX_MODE = 0; + + let COORD_TYPE_X = 0; + let COORD_TYPE_Y = 0; + let COORD_TYPE_Z = 0; + let COORD_TYPE_W = 0; + + let TEXInst = 1; } } // End mayLoad = 1, mayStore = 0, hasSideEffects = 0 @@ -644,7 +745,9 @@ multiclass SteamOutputExportPattern<Instruction ExportInst, 4095, imm:$mask, buf3inst, 0)>; } -let usesCustomInserter = 1 in { +// Export Instructions should not be duplicated by TailDuplication pass +// (which assumes that duplicable instruction are affected by exec mask) +let usesCustomInserter = 1, isNotDuplicable = 1 in { class ExportSwzInst : InstR600ISA<( outs), @@ -671,6 +774,197 @@ class ExportBufInst : InstR600ISA<( let Inst{63-32} = Word1; } +//===----------------------------------------------------------------------===// +// Control Flow Instructions +//===----------------------------------------------------------------------===// + +class CF_ALU_WORD0 { + field bits<32> Word0; + + bits<22> ADDR; + bits<4> KCACHE_BANK0; + bits<4> KCACHE_BANK1; + bits<2> KCACHE_MODE0; + + let Word0{21-0} = ADDR; + let Word0{25-22} = KCACHE_BANK0; + let Word0{29-26} = KCACHE_BANK1; + let Word0{31-30} = KCACHE_MODE0; +} + +class CF_ALU_WORD1 { + field bits<32> Word1; + + bits<2> KCACHE_MODE1; + bits<8> KCACHE_ADDR0; + bits<8> KCACHE_ADDR1; + bits<7> COUNT; + bits<1> ALT_CONST; + bits<4> CF_INST; + bits<1> 
WHOLE_QUAD_MODE; + bits<1> BARRIER; + + let Word1{1-0} = KCACHE_MODE1; + let Word1{9-2} = KCACHE_ADDR0; + let Word1{17-10} = KCACHE_ADDR1; + let Word1{24-18} = COUNT; + let Word1{25} = ALT_CONST; + let Word1{29-26} = CF_INST; + let Word1{30} = WHOLE_QUAD_MODE; + let Word1{31} = BARRIER; +} + +class ALU_CLAUSE<bits<4> inst, string OpName> : AMDGPUInst <(outs), +(ins i32imm:$ADDR, i32imm:$KCACHE_BANK0, i32imm:$KCACHE_BANK1, i32imm:$KCACHE_MODE0, i32imm:$KCACHE_MODE1, +i32imm:$KCACHE_ADDR0, i32imm:$KCACHE_ADDR1, i32imm:$COUNT), +!strconcat(OpName, " $COUNT, @$ADDR, " +"KC0[CB$KCACHE_BANK0:$KCACHE_ADDR0-$KCACHE_ADDR0+32]" +", KC1[CB$KCACHE_BANK1:$KCACHE_ADDR1-$KCACHE_ADDR1+32]"), +[] >, CF_ALU_WORD0, CF_ALU_WORD1 { + field bits<64> Inst; + + let CF_INST = inst; + let ALT_CONST = 0; + let WHOLE_QUAD_MODE = 0; + let BARRIER = 1; + + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; +} + +class CF_WORD0_R600 { + field bits<32> Word0; + + bits<32> ADDR; + + let Word0 = ADDR; +} + +class CF_WORD1_R600 { + field bits<32> Word1; + + bits<3> POP_COUNT; + bits<5> CF_CONST; + bits<2> COND; + bits<3> COUNT; + bits<6> CALL_COUNT; + bits<1> COUNT_3; + bits<1> END_OF_PROGRAM; + bits<1> VALID_PIXEL_MODE; + bits<7> CF_INST; + bits<1> WHOLE_QUAD_MODE; + bits<1> BARRIER; + + let Word1{2-0} = POP_COUNT; + let Word1{7-3} = CF_CONST; + let Word1{9-8} = COND; + let Word1{12-10} = COUNT; + let Word1{18-13} = CALL_COUNT; + let Word1{19} = COUNT_3; + let Word1{21} = END_OF_PROGRAM; + let Word1{22} = VALID_PIXEL_MODE; + let Word1{29-23} = CF_INST; + let Word1{30} = WHOLE_QUAD_MODE; + let Word1{31} = BARRIER; +} + +class CF_CLAUSE_R600 <bits<7> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs), +ins, AsmPrint, [] >, CF_WORD0_R600, CF_WORD1_R600 { + field bits<64> Inst; + + let CF_INST = inst; + let BARRIER = 1; + let CF_CONST = 0; + let VALID_PIXEL_MODE = 0; + let COND = 0; + let CALL_COUNT = 0; + let COUNT_3 = 0; + let END_OF_PROGRAM = 0; + let WHOLE_QUAD_MODE = 0; + + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; +} + +class CF_WORD0_EG { + field bits<32> Word0; + + bits<24> ADDR; + bits<3> JUMPTABLE_SEL; + + let Word0{23-0} = ADDR; + let Word0{26-24} = JUMPTABLE_SEL; +} + +class CF_WORD1_EG { + field bits<32> Word1; + + bits<3> POP_COUNT; + bits<5> CF_CONST; + bits<2> COND; + bits<6> COUNT; + bits<1> VALID_PIXEL_MODE; + bits<1> END_OF_PROGRAM; + bits<8> CF_INST; + bits<1> BARRIER; + + let Word1{2-0} = POP_COUNT; + let Word1{7-3} = CF_CONST; + let Word1{9-8} = COND; + let Word1{15-10} = COUNT; + let Word1{20} = VALID_PIXEL_MODE; + let Word1{21} = END_OF_PROGRAM; + let Word1{29-22} = CF_INST; + let Word1{31} = BARRIER; +} + +class CF_CLAUSE_EG <bits<8> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs), +ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG { + field bits<64> Inst; + + let CF_INST = inst; + let BARRIER = 1; + let JUMPTABLE_SEL = 0; + let CF_CONST = 0; + let VALID_PIXEL_MODE = 0; + let COND = 0; + let END_OF_PROGRAM = 0; + + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; +} + +def CF_ALU : ALU_CLAUSE<8, "ALU">; +def CF_ALU_PUSH_BEFORE : ALU_CLAUSE<9, "ALU_PUSH_BEFORE">; + +def FETCH_CLAUSE : AMDGPUInst <(outs), +(ins i32imm:$addr), "Fetch clause starting at $addr:", [] > { + field bits<8> Inst; + bits<8> num; + let Inst = num; +} + +def ALU_CLAUSE : AMDGPUInst <(outs), +(ins i32imm:$addr), "ALU clause starting at $addr:", [] > { + field bits<8> Inst; + bits<8> num; + let Inst = num; +} + +def LITERALS : AMDGPUInst <(outs), +(ins LITERAL:$literal1, LITERAL:$literal2), "$literal1, $literal2", [] > 
{ + field bits<64> Inst; + bits<32> literal1; + bits<32> literal2; + + let Inst{31-0} = literal1; + let Inst{63-32} = literal2; +} + +def PAD : AMDGPUInst <(outs), (ins), "PAD", [] > { + field bits<64> Inst; +} + let Predicates = [isR600toCayman] in { //===----------------------------------------------------------------------===// @@ -689,58 +983,42 @@ def MIN : R600_2OP_Helper <0x4, "MIN", AMDGPUfmin>; // XXX: Use the defs in TargetSelectionDAG.td instead of intrinsics. def SETE : R600_2OP < 0x08, "SETE", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, - COND_EQ))] + [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_EQ))] >; def SGT : R600_2OP < 0x09, "SETGT", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, - COND_GT))] + [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_GT))] >; def SGE : R600_2OP < 0xA, "SETGE", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, - COND_GE))] + [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_GE))] >; def SNE : R600_2OP < 0xB, "SETNE", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, - COND_NE))] + [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_NE))] >; def SETE_DX10 : R600_2OP < 0xC, "SETE_DX10", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, (i32 -1), (i32 0), - COND_EQ))] + [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_EQ))] >; def SETGT_DX10 : R600_2OP < 0xD, "SETGT_DX10", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, (i32 -1), (i32 0), - COND_GT))] + [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_GT))] >; def SETGE_DX10 : R600_2OP < 0xE, "SETGE_DX10", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, (i32 -1), (i32 0), - COND_GE))] + [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_GE))] >; def SETNE_DX10 : R600_2OP < 0xF, "SETNE_DX10", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, (i32 -1), (i32 0), - COND_NE))] + [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_NE))] >; def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>; @@ -798,38 +1076,32 @@ def MIN_UINT : R600_2OP_Helper <0x39, "MIN_UINT", AMDGPUumin>; def SETE_INT : R600_2OP < 0x3A, "SETE_INT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETEQ))] + [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETEQ))] >; def SETGT_INT : R600_2OP < 0x3B, "SETGT_INT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGT))] + [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETGT))] >; def SETGE_INT : R600_2OP < 0x3C, "SETGE_INT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGE))] + [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETGE))] >; def SETNE_INT : R600_2OP < 0x3D, "SETNE_INT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETNE))] + [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETNE))] >; def SETGT_UINT : R600_2OP < 0x3E, "SETGT_UINT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGT))] + [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETUGT))] >; def SETGE_UINT : R600_2OP < 0x3F, 
"SETGE_UINT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGE))] + [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETUGE))] >; def PRED_SETE_INT : R600_2OP <0x42, "PRED_SETE_INT", []>; @@ -839,26 +1111,17 @@ def PRED_SETNE_INT : R600_2OP <0x45, "PRED_SETNE_INT", []>; def CNDE_INT : R600_3OP < 0x1C, "CNDE_INT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), 0, - (i32 R600_Reg32:$src1), (i32 R600_Reg32:$src2), - COND_EQ))] + [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_EQ))] >; def CNDGE_INT : R600_3OP < 0x1E, "CNDGE_INT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), 0, - (i32 R600_Reg32:$src1), (i32 R600_Reg32:$src2), - COND_GE))] + [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_GE))] >; def CNDGT_INT : R600_3OP < 0x1D, "CNDGT_INT", - [(set (i32 R600_Reg32:$dst), - (selectcc (i32 R600_Reg32:$src0), 0, - (i32 R600_Reg32:$src1), (i32 R600_Reg32:$src2), - COND_GT))] + [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_GT))] >; //===----------------------------------------------------------------------===// @@ -867,25 +1130,33 @@ def CNDGT_INT : R600_3OP < def TEX_LD : R600_TEX < 0x03, "TEX_LD", - [(set R600_Reg128:$dst, (int_AMDGPU_txf R600_Reg128:$src0, imm:$src1, imm:$src2, imm:$src3, imm:$resourceId, imm:$samplerId, imm:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_txf v4f32:$SRC_GPR, + imm:$OFFSET_X, imm:$OFFSET_Y, imm:$OFFSET_Z, imm:$RESOURCE_ID, + imm:$SAMPLER_ID, imm:$textureTarget))] > { -let AsmString = "TEX_LD $dst, $src0, $src1, $src2, $src3, $resourceId, $samplerId, $textureTarget"; -let InOperandList = (ins R600_Reg128:$src0, i32imm:$src1, i32imm:$src2, i32imm:$src3, i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget); +let AsmString = "TEX_LD $DST_GPR, $SRC_GPR, $OFFSET_X, $OFFSET_Y, $OFFSET_Z," + "$RESOURCE_ID, $SAMPLER_ID, $textureTarget"; +let InOperandList = (ins R600_Reg128:$SRC_GPR, i32imm:$OFFSET_X, + i32imm:$OFFSET_Y, i32imm:$OFFSET_Z, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID, + i32imm:$textureTarget); } def TEX_GET_TEXTURE_RESINFO : R600_TEX < 0x04, "TEX_GET_TEXTURE_RESINFO", - [(set R600_Reg128:$dst, (int_AMDGPU_txq R600_Reg128:$src0, imm:$resourceId, imm:$samplerId, imm:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_txq v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] >; def TEX_GET_GRADIENTS_H : R600_TEX < 0x07, "TEX_GET_GRADIENTS_H", - [(set R600_Reg128:$dst, (int_AMDGPU_ddx R600_Reg128:$src0, imm:$resourceId, imm:$samplerId, imm:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_ddx v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] >; def TEX_GET_GRADIENTS_V : R600_TEX < 0x08, "TEX_GET_GRADIENTS_V", - [(set R600_Reg128:$dst, (int_AMDGPU_ddy R600_Reg128:$src0, imm:$resourceId, imm:$samplerId, imm:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_ddy v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] >; def TEX_SET_GRADIENTS_H : R600_TEX < @@ -900,32 +1171,38 @@ def TEX_SET_GRADIENTS_V : R600_TEX < def TEX_SAMPLE : R600_TEX < 0x10, "TEX_SAMPLE", - [(set R600_Reg128:$dst, (int_AMDGPU_tex R600_Reg128:$src0, imm:$resourceId, imm:$samplerId, imm:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] >; def TEX_SAMPLE_C : R600_TEX < 0x18, "TEX_SAMPLE_C", - [(set R600_Reg128:$dst, (int_AMDGPU_tex R600_Reg128:$src0, imm:$resourceId, 
imm:$samplerId, TEX_SHADOW:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))] >; def TEX_SAMPLE_L : R600_TEX < 0x11, "TEX_SAMPLE_L", - [(set R600_Reg128:$dst, (int_AMDGPU_txl R600_Reg128:$src0, imm:$resourceId, imm:$samplerId, imm:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] >; def TEX_SAMPLE_C_L : R600_TEX < 0x19, "TEX_SAMPLE_C_L", - [(set R600_Reg128:$dst, (int_AMDGPU_txl R600_Reg128:$src0, imm:$resourceId, imm:$samplerId, TEX_SHADOW:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))] >; def TEX_SAMPLE_LB : R600_TEX < 0x12, "TEX_SAMPLE_LB", - [(set R600_Reg128:$dst, (int_AMDGPU_txb R600_Reg128:$src0,imm:$resourceId, imm:$samplerId, imm:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] >; def TEX_SAMPLE_C_LB : R600_TEX < 0x1A, "TEX_SAMPLE_C_LB", - [(set R600_Reg128:$dst, (int_AMDGPU_txb R600_Reg128:$src0, imm:$resourceId, imm:$samplerId, TEX_SHADOW:$textureTarget))] + [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))] >; def TEX_SAMPLE_G : R600_TEX < @@ -954,32 +1231,22 @@ class MULADD_Common <bits<5> inst> : R600_3OP < class MULADD_IEEE_Common <bits<5> inst> : R600_3OP < inst, "MULADD_IEEE", - [(set (f32 R600_Reg32:$dst), - (fadd (fmul R600_Reg32:$src0, R600_Reg32:$src1), R600_Reg32:$src2))] + [(set f32:$dst, (fadd (fmul f32:$src0, f32:$src1), f32:$src2))] >; class CNDE_Common <bits<5> inst> : R600_3OP < inst, "CNDE", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), FP_ZERO, - (f32 R600_Reg32:$src1), (f32 R600_Reg32:$src2), - COND_EQ))] + [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_EQ))] >; class CNDGT_Common <bits<5> inst> : R600_3OP < inst, "CNDGT", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), FP_ZERO, - (f32 R600_Reg32:$src1), (f32 R600_Reg32:$src2), - COND_GT))] + [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_GT))] >; class CNDGE_Common <bits<5> inst> : R600_3OP < inst, "CNDGE", - [(set R600_Reg32:$dst, - (selectcc (f32 R600_Reg32:$src0), FP_ZERO, - (f32 R600_Reg32:$src1), (f32 R600_Reg32:$src2), - COND_GE))] + [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_GE))] >; multiclass DOT4_Common <bits<11> inst> { @@ -987,7 +1254,7 @@ multiclass DOT4_Common <bits<11> inst> { def _pseudo : R600_REDUCTION <inst, (ins R600_Reg128:$src0, R600_Reg128:$src1), "DOT4 $dst $src0, $src1", - [(set R600_Reg32:$dst, (int_AMDGPU_dp4 R600_Reg128:$src0, R600_Reg128:$src1))] + [(set f32:$dst, (int_AMDGPU_dp4 v4f32:$src0, v4f32:$src1))] >; def _real : R600_2OP <inst, "DOT4", []>; @@ -997,11 +1264,10 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { multiclass CUBE_Common <bits<11> inst> { def _pseudo : InstR600 < - inst, (outs R600_Reg128:$dst), (ins R600_Reg128:$src), "CUBE $dst $src", - [(set R600_Reg128:$dst, (int_AMDGPU_cube R600_Reg128:$src))], + [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src))], VecALU > { let isPseudo = 1; @@ -1013,23 +1279,38 @@ multiclass CUBE_Common <bits<11> inst> { class EXP_IEEE_Common <bits<11> inst> : R600_1OP_Helper < inst, "EXP_IEEE", fexp2 ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class FLT_TO_INT_Common <bits<11> inst> : 
R600_1OP_Helper < inst, "FLT_TO_INT", fp_to_sint ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class INT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper < inst, "INT_TO_FLT", sint_to_fp ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class FLT_TO_UINT_Common <bits<11> inst> : R600_1OP_Helper < inst, "FLT_TO_UINT", fp_to_uint ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class UINT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper < inst, "UINT_TO_FLT", uint_to_fp ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class LOG_CLAMPED_Common <bits<11> inst> : R600_1OP < inst, "LOG_CLAMPED", [] @@ -1037,50 +1318,84 @@ class LOG_CLAMPED_Common <bits<11> inst> : R600_1OP < class LOG_IEEE_Common <bits<11> inst> : R600_1OP_Helper < inst, "LOG_IEEE", flog2 ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class LSHL_Common <bits<11> inst> : R600_2OP_Helper <inst, "LSHL", shl>; class LSHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "LSHR", srl>; class ASHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "ASHR", sra>; class MULHI_INT_Common <bits<11> inst> : R600_2OP_Helper < inst, "MULHI_INT", mulhs ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class MULHI_UINT_Common <bits<11> inst> : R600_2OP_Helper < inst, "MULHI", mulhu ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class MULLO_INT_Common <bits<11> inst> : R600_2OP_Helper < inst, "MULLO_INT", mul ->; -class MULLO_UINT_Common <bits<11> inst> : R600_2OP <inst, "MULLO_UINT", []>; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} +class MULLO_UINT_Common <bits<11> inst> : R600_2OP <inst, "MULLO_UINT", []> { + let TransOnly = 1; + let Itinerary = TransALU; +} class RECIP_CLAMPED_Common <bits<11> inst> : R600_1OP < inst, "RECIP_CLAMPED", [] ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class RECIP_IEEE_Common <bits<11> inst> : R600_1OP < - inst, "RECIP_IEEE", [(set R600_Reg32:$dst, (fdiv FP_ONE, R600_Reg32:$src0))] ->; + inst, "RECIP_IEEE", [(set f32:$dst, (fdiv FP_ONE, f32:$src0))] +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class RECIP_UINT_Common <bits<11> inst> : R600_1OP_Helper < inst, "RECIP_UINT", AMDGPUurecip ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class RECIPSQRT_CLAMPED_Common <bits<11> inst> : R600_1OP_Helper < inst, "RECIPSQRT_CLAMPED", int_AMDGPU_rsq ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP < inst, "RECIPSQRT_IEEE", [] ->; +> { + let TransOnly = 1; + let Itinerary = TransALU; +} class SIN_Common <bits<11> inst> : R600_1OP < inst, "SIN", []>{ let Trig = 1; + let TransOnly = 1; + let Itinerary = TransALU; } class COS_Common <bits<11> inst> : R600_1OP < inst, "COS", []> { let Trig = 1; + let TransOnly = 1; + let Itinerary = TransALU; } //===----------------------------------------------------------------------===// @@ -1089,19 +1404,20 @@ class COS_Common <bits<11> inst> : R600_1OP < multiclass DIV_Common <InstR600 recip_ieee> { def : Pat< - (int_AMDGPU_div R600_Reg32:$src0, R600_Reg32:$src1), - (MUL_IEEE R600_Reg32:$src0, (recip_ieee R600_Reg32:$src1)) + (int_AMDGPU_div f32:$src0, f32:$src1), + (MUL_IEEE $src0, (recip_ieee $src1)) >; def : Pat< - (fdiv R600_Reg32:$src0, R600_Reg32:$src1), - (MUL_IEEE R600_Reg32:$src0, (recip_ieee R600_Reg32:$src1)) + (fdiv f32:$src0, f32:$src1), + (MUL_IEEE $src0, (recip_ieee $src1)) >; } -class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ieee> : Pat < - 
(int_TGSI_lit_z R600_Reg32:$src_x, R600_Reg32:$src_y, R600_Reg32:$src_w), - (exp_ieee (mul_lit (log_clamped (MAX R600_Reg32:$src_y, (f32 ZERO))), R600_Reg32:$src_w, R600_Reg32:$src_x)) +class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ieee> + : Pat < + (int_TGSI_lit_z f32:$src_x, f32:$src_y, f32:$src_w), + (exp_ieee (mul_lit (log_clamped (MAX $src_y, (f32 ZERO))), $src_w, $src_x)) >; //===----------------------------------------------------------------------===// @@ -1141,13 +1457,13 @@ let Predicates = [isR600] in { def RECIP_UINT_r600 : RECIP_UINT_Common <0x78>; defm DIV_r600 : DIV_Common<RECIP_IEEE_r600>; + def : POW_Common <LOG_IEEE_r600, EXP_IEEE_r600, MUL>; def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>; - def : Pat<(fsqrt R600_Reg32:$src), - (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_r600 R600_Reg32:$src))>; + def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_r600 $src))>; def R600_ExportSwz : ExportSwzInst { - let Word1{20-17} = 1; // BURST_COUNT + let Word1{20-17} = 0; // BURST_COUNT let Word1{21} = eop; let Word1{22} = 1; // VALID_PIXEL_MODE let Word1{30-23} = inst; @@ -1156,25 +1472,77 @@ let Predicates = [isR600] in { defm : ExportPattern<R600_ExportSwz, 39>; def R600_ExportBuf : ExportBufInst { - let Word1{20-17} = 1; // BURST_COUNT + let Word1{20-17} = 0; // BURST_COUNT let Word1{21} = eop; let Word1{22} = 1; // VALID_PIXEL_MODE let Word1{30-23} = inst; let Word1{31} = 1; // BARRIER } defm : SteamOutputExportPattern<R600_ExportBuf, 0x20, 0x21, 0x22, 0x23>; + + def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$COUNT), + "TEX $COUNT @$ADDR"> { + let POP_COUNT = 0; + } + def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$COUNT), + "VTX $COUNT @$ADDR"> { + let POP_COUNT = 0; + } + def WHILE_LOOP_R600 : CF_CLAUSE_R600<6, (ins i32imm:$ADDR), + "LOOP_START_DX10 @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def END_LOOP_R600 : CF_CLAUSE_R600<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def LOOP_BREAK_R600 : CF_CLAUSE_R600<9, (ins i32imm:$ADDR), + "LOOP_BREAK @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def CF_CONTINUE_R600 : CF_CLAUSE_R600<8, (ins i32imm:$ADDR), + "CONTINUE @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def CF_JUMP_R600 : CF_CLAUSE_R600<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT), + "JUMP @$ADDR POP:$POP_COUNT"> { + let COUNT = 0; + } + def CF_ELSE_R600 : CF_CLAUSE_R600<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT), + "ELSE @$ADDR POP:$POP_COUNT"> { + let COUNT = 0; + } + def CF_CALL_FS_R600 : CF_CLAUSE_R600<19, (ins), "CALL_FS"> { + let ADDR = 0; + let COUNT = 0; + let POP_COUNT = 0; + } + def POP_R600 : CF_CLAUSE_R600<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT), + "POP @$ADDR POP:$POP_COUNT"> { + let COUNT = 0; + } + def CF_END_R600 : CF_CLAUSE_R600<0, (ins), "CF_END"> { + let COUNT = 0; + let POP_COUNT = 0; + let ADDR = 0; + let END_OF_PROGRAM = 1; + } + } // Helper pattern for normalizing inputs to triginomic instructions for R700+ // cards. 
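The COS_PAT/SIN_PAT classes that follow implement this normalization by multiplying the radian operand by CONST.TWO_PI_INV before the hardware SIN/COS op. A scalar model of the rewrite (hw_sin is a stand-in for the SIN instruction; the exact input range expected by the hardware is assumed, not taken from this patch):

// Scalar sketch of SIN_PAT: fsin(x) -> SIN(MUL_IEEE(TWO_PI_INV, x)).
static float hw_sin(float);  // stand-in for the R700+ SIN ALU instruction

static float loweredSin(float Radians) {
  const float TwoPiInv = 0.15915494f;  // CONST.TWO_PI_INV, roughly 1 / (2*pi)
  return hw_sin(Radians * TwoPiInv);
}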
class COS_PAT <InstR600 trig> : Pat< - (fcos R600_Reg32:$src), - (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), R600_Reg32:$src)) + (fcos f32:$src), + (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src)) >; class SIN_PAT <InstR600 trig> : Pat< - (fsin R600_Reg32:$src), - (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), R600_Reg32:$src)) + (fsin f32:$src), + (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src)) >; //===----------------------------------------------------------------------===// @@ -1212,10 +1580,10 @@ def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>; def SIN_eg : SIN_Common<0x8D>; def COS_eg : COS_Common<0x8E>; +def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>; def : SIN_PAT <SIN_eg>; def : COS_PAT <COS_eg>; -def : Pat<(fsqrt R600_Reg32:$src), - (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_eg R600_Reg32:$src))>; +def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>; } // End Predicates = [isEG] //===----------------------------------------------------------------------===// @@ -1239,15 +1607,16 @@ let Predicates = [isEGorCayman] in { // (16,8) = (Input << 8) >> 24 = (Input & 0xffffff) >> 16 // (24,8) = (Input << 0) >> 24 = (Input & 0xffffffff) >> 24 def BFE_UINT_eg : R600_3OP <0x4, "BFE_UINT", - [(set R600_Reg32:$dst, (int_AMDIL_bit_extract_u32 R600_Reg32:$src0, - R600_Reg32:$src1, - R600_Reg32:$src2))], + [(set i32:$dst, (int_AMDIL_bit_extract_u32 i32:$src0, i32:$src1, + i32:$src2))], VecALU >; + def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", []>; + defm : BFIPatterns <BFI_INT_eg>; + def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", - [(set R600_Reg32:$dst, (AMDGPUbitalign R600_Reg32:$src0, R600_Reg32:$src1, - R600_Reg32:$src2))], + [(set i32:$dst, (AMDGPUbitalign i32:$src0, i32:$src1, i32:$src2))], VecALU >; @@ -1292,14 +1661,12 @@ let hasSideEffects = 1 in { // XXX: Lowering SELECT_CC will sometimes generate fp_to_[su]int nodes, // which do not need to be truncated since the fp values are 0.0f or 1.0f. // We should look into handling these cases separately. 
- def : Pat<(fp_to_sint R600_Reg32:$src0), - (FLT_TO_INT_eg (TRUNC R600_Reg32:$src0))>; + def : Pat<(fp_to_sint f32:$src0), (FLT_TO_INT_eg (TRUNC $src0))>; - def : Pat<(fp_to_uint R600_Reg32:$src0), - (FLT_TO_UINT_eg (TRUNC R600_Reg32:$src0))>; + def : Pat<(fp_to_uint f32:$src0), (FLT_TO_UINT_eg (TRUNC $src0))>; def EG_ExportSwz : ExportSwzInst { - let Word1{19-16} = 1; // BURST_COUNT + let Word1{19-16} = 0; // BURST_COUNT let Word1{20} = 1; // VALID_PIXEL_MODE let Word1{21} = eop; let Word1{29-22} = inst; @@ -1309,7 +1676,7 @@ let hasSideEffects = 1 in { defm : ExportPattern<EG_ExportSwz, 83>; def EG_ExportBuf : ExportBufInst { - let Word1{19-16} = 1; // BURST_COUNT + let Word1{19-16} = 0; // BURST_COUNT let Word1{20} = 1; // VALID_PIXEL_MODE let Word1{21} = eop; let Word1{29-22} = inst; @@ -1318,6 +1685,57 @@ let hasSideEffects = 1 in { } defm : SteamOutputExportPattern<EG_ExportBuf, 0x40, 0x41, 0x42, 0x43>; + def CF_TC_EG : CF_CLAUSE_EG<1, (ins i32imm:$ADDR, i32imm:$COUNT), + "TEX $COUNT @$ADDR"> { + let POP_COUNT = 0; + } + def CF_VC_EG : CF_CLAUSE_EG<2, (ins i32imm:$ADDR, i32imm:$COUNT), + "VTX $COUNT @$ADDR"> { + let POP_COUNT = 0; + } + def WHILE_LOOP_EG : CF_CLAUSE_EG<6, (ins i32imm:$ADDR), + "LOOP_START_DX10 @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def END_LOOP_EG : CF_CLAUSE_EG<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def LOOP_BREAK_EG : CF_CLAUSE_EG<9, (ins i32imm:$ADDR), + "LOOP_BREAK @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def CF_CONTINUE_EG : CF_CLAUSE_EG<8, (ins i32imm:$ADDR), + "CONTINUE @$ADDR"> { + let POP_COUNT = 0; + let COUNT = 0; + } + def CF_JUMP_EG : CF_CLAUSE_EG<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT), + "JUMP @$ADDR POP:$POP_COUNT"> { + let COUNT = 0; + } + def CF_ELSE_EG : CF_CLAUSE_EG<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT), + "ELSE @$ADDR POP:$POP_COUNT"> { + let COUNT = 0; + } + def CF_CALL_FS_EG : CF_CLAUSE_EG<19, (ins), "CALL_FS"> { + let ADDR = 0; + let COUNT = 0; + let POP_COUNT = 0; + } + def POP_EG : CF_CLAUSE_EG<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT), + "POP @$ADDR POP:$POP_COUNT"> { + let COUNT = 0; + } + def CF_END_EG : CF_CLAUSE_EG<0, (ins), "CF_END"> { + let COUNT = 0; + let POP_COUNT = 0; + let ADDR = 0; + let END_OF_PROGRAM = 1; + } + //===----------------------------------------------------------------------===// // Memory read/write instructions //===----------------------------------------------------------------------===// @@ -1347,14 +1765,14 @@ class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> comp_mask, string name, def RAT_WRITE_CACHELESS_32_eg : RAT_WRITE_CACHELESS_eg < (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), 0x1, "RAT_WRITE_CACHELESS_32_eg", - [(global_store (i32 R600_TReg32_X:$rw_gpr), R600_TReg32_X:$index_gpr)] + [(global_store i32:$rw_gpr, i32:$index_gpr)] >; //128-bit store def RAT_WRITE_CACHELESS_128_eg : RAT_WRITE_CACHELESS_eg < (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), 0xf, "RAT_WRITE_CACHELESS_128", - [(global_store (v4i32 R600_Reg128:$rw_gpr), R600_TReg32_X:$index_gpr)] + [(global_store v4i32:$rw_gpr, i32:$index_gpr)] >; class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern> @@ -1408,6 +1826,8 @@ class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern> // VTX_WORD3 (Padding) // // Inst{127-96} = 0; + + let VTXInst = 1; } class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern> @@ -1477,19 +1897,19 @@ class VTX_READ_128_eg <bits<8> buffer_id, 
list<dag> pattern> //===----------------------------------------------------------------------===// def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0, - [(set (i32 R600_TReg32_X:$dst), (load_param_zexti8 ADDRVTX_READ:$ptr))] + [(set i32:$dst, (load_param_zexti8 ADDRVTX_READ:$ptr))] >; def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0, - [(set (i32 R600_TReg32_X:$dst), (load_param_zexti16 ADDRVTX_READ:$ptr))] + [(set i32:$dst, (load_param_zexti16 ADDRVTX_READ:$ptr))] >; def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0, - [(set (i32 R600_TReg32_X:$dst), (load_param ADDRVTX_READ:$ptr))] + [(set i32:$dst, (load_param ADDRVTX_READ:$ptr))] >; def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0, - [(set (v4i32 R600_Reg128:$dst), (load_param ADDRVTX_READ:$ptr))] + [(set v4i32:$dst, (load_param ADDRVTX_READ:$ptr))] >; //===----------------------------------------------------------------------===// @@ -1498,17 +1918,17 @@ def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0, // 8-bit reads def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1, - [(set (i32 R600_TReg32_X:$dst), (zextloadi8_global ADDRVTX_READ:$ptr))] + [(set i32:$dst, (zextloadi8_global ADDRVTX_READ:$ptr))] >; // 32-bit reads def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1, - [(set (i32 R600_TReg32_X:$dst), (global_load ADDRVTX_READ:$ptr))] + [(set i32:$dst, (global_load ADDRVTX_READ:$ptr))] >; // 128-bit reads def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1, - [(set (v4i32 R600_Reg128:$dst), (global_load ADDRVTX_READ:$ptr))] + [(set v4i32:$dst, (global_load ADDRVTX_READ:$ptr))] >; //===----------------------------------------------------------------------===// @@ -1517,7 +1937,7 @@ def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1, //===----------------------------------------------------------------------===// def CONSTANT_LOAD_eg : VTX_READ_32_eg <1, - [(set (i32 R600_TReg32_X:$dst), (constant_load ADDRVTX_READ:$ptr))] + [(set i32:$dst, (constant_load ADDRVTX_READ:$ptr))] >; } @@ -1540,28 +1960,34 @@ def MULLO_UINT_cm : MULLO_UINT_Common<0x91>; def MULHI_UINT_cm : MULHI_UINT_Common<0x92>; def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>; def EXP_IEEE_cm : EXP_IEEE_Common<0x81>; -def LOG_IEEE_ : LOG_IEEE_Common<0x83>; +def LOG_IEEE_cm : LOG_IEEE_Common<0x83>; def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>; def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>; def SIN_cm : SIN_Common<0x8D>; def COS_cm : COS_Common<0x8E>; } // End isVector = 1 +def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>; def : SIN_PAT <SIN_cm>; def : COS_PAT <COS_cm>; defm DIV_cm : DIV_Common<RECIP_IEEE_cm>; // RECIP_UINT emulation for Cayman +// The multiplication scales from [0,1] to the unsigned integer range def : Pat < - (AMDGPUurecip R600_Reg32:$src0), - (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg R600_Reg32:$src0)), - (MOV_IMM_I32 0x4f800000))) + (AMDGPUurecip i32:$src0), + (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)), + (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1))) >; + def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> { + let ADDR = 0; + let POP_COUNT = 0; + let COUNT = 0; + } -def : Pat<(fsqrt R600_Reg32:$src), - (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm R600_Reg32:$src))>; +def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>; } // End isCayman @@ -1583,21 +2009,21 @@ def PREDICATED_BREAK : ILFormat<(outs), (ins GPRI32:$src), let isPseudo = 1 in { def PRED_X : InstR600 < - 0, (outs R600_Predicate_Bit:$dst), + (outs R600_Predicate_Bit:$dst), (ins R600_Reg32:$src0, i32imm:$src1, i32imm:$flags), "", [], NullALU> { let 
FlagOperandIdx = 3; } let isTerminator = 1, isBranch = 1 in { -def JUMP_COND : InstR600 <0x10, +def JUMP_COND : InstR600 < (outs), (ins brtarget:$target, R600_Predicate_Bit:$p), "JUMP $target ($p)", [], AnyALU >; -def JUMP : InstR600 <0x10, +def JUMP : InstR600 < (outs), (ins brtarget:$target), "JUMP $target", @@ -1624,20 +2050,28 @@ def MASK_WRITE : AMDGPUShaderInst < } // End mayLoad = 0, mayStore = 0, hasSideEffects = 1 -def TXD: AMDGPUShaderInst < +def TXD: InstR600 < (outs R600_Reg128:$dst), - (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget), + (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, + i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget), "TXD $dst, $src0, $src1, $src2, $resourceId, $samplerId, $textureTarget", - [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$resourceId, imm:$samplerId, imm:$textureTarget))] ->; + [(set v4f32:$dst, (int_AMDGPU_txd v4f32:$src0, v4f32:$src1, v4f32:$src2, + imm:$resourceId, imm:$samplerId, imm:$textureTarget))], + NullALU > { + let TEXInst = 1; +} -def TXD_SHADOW: AMDGPUShaderInst < +def TXD_SHADOW: InstR600 < (outs R600_Reg128:$dst), - (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget), + (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, + i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget), "TXD_SHADOW $dst, $src0, $src1, $src2, $resourceId, $samplerId, $textureTarget", - [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$resourceId, imm:$samplerId, TEX_SHADOW:$textureTarget))] ->; - + [(set v4f32:$dst, (int_AMDGPU_txd v4f32:$src0, v4f32:$src1, v4f32:$src2, + imm:$resourceId, imm:$samplerId, TEX_SHADOW:$textureTarget))], + NullALU +> { + let TEXInst = 1; +} } // End isPseudo = 1 } // End usesCustomInserter = 1 @@ -1674,7 +2108,7 @@ def CONST_COPY : Instruction { def TEX_VTX_CONSTBUF : InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "VTX_READ_eg $dst, $ptr", - [(set R600_Reg128:$dst, (CONST_ADDRESS ADDRGA_VAR_OFFSET:$ptr, (i32 imm:$BUFFER_ID)))]>, + [(set v4i32:$dst, (CONST_ADDRESS ADDRGA_VAR_OFFSET:$ptr, (i32 imm:$BUFFER_ID)))]>, VTX_WORD1_GPR, VTX_WORD0 { let VC_INST = 0; @@ -1723,11 +2157,12 @@ def TEX_VTX_CONSTBUF : // VTX_WORD3 (Padding) // // Inst{127-96} = 0; + let VTXInst = 1; } def TEX_VTX_TEXBUF: InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "TEX_VTX_EXPLICIT_READ $dst, $ptr", - [(set R600_Reg128:$dst, (int_R600_load_texbuf ADDRGA_VAR_OFFSET:$ptr, imm:$BUFFER_ID))]>, + [(set v4f32:$dst, (int_R600_load_texbuf ADDRGA_VAR_OFFSET:$ptr, imm:$BUFFER_ID))]>, VTX_WORD1_GPR, VTX_WORD0 { let VC_INST = 0; @@ -1776,6 +2211,7 @@ let Inst{63-32} = Word1; // VTX_WORD3 (Padding) // // Inst{127-96} = 0; + let VTXInst = 1; } @@ -1852,9 +2288,8 @@ let isTerminator=1 in { // CND*_INT Pattterns for f32 True / False values class CND_INT_f32 <InstR600 cnd, CondCode cc> : Pat < - (selectcc (i32 R600_Reg32:$src0), 0, (f32 R600_Reg32:$src1), - R600_Reg32:$src2, cc), - (cnd R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2) + (selectcc i32:$src0, 0, f32:$src1, f32:$src2, cc), + (cnd $src0, $src1, $src2) >; def : CND_INT_f32 <CNDE_INT, SETEQ>; @@ -1863,9 +2298,8 @@ def : CND_INT_f32 <CNDGE_INT, SETGE>; //CNDGE_INT extra pattern def : Pat < - (selectcc (i32 R600_Reg32:$src0), -1, (i32 R600_Reg32:$src1), - (i32 
R600_Reg32:$src2), COND_GT), - (CNDGE_INT R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2) + (selectcc i32:$src0, -1, i32:$src1, i32:$src2, COND_GT), + (CNDGE_INT $src0, $src1, $src2) >; // KIL Patterns @@ -1875,56 +2309,56 @@ def KILP : Pat < >; def KIL : Pat < - (int_AMDGPU_kill R600_Reg32:$src0), - (MASK_WRITE (KILLGT (f32 ZERO), (f32 R600_Reg32:$src0))) + (int_AMDGPU_kill f32:$src0), + (MASK_WRITE (KILLGT (f32 ZERO), $src0)) >; // SGT Reverse args def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LT), - (SGT R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_LT), + (SGT $src1, $src0) >; // SGE Reverse args def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LE), - (SGE R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_LE), + (SGE $src1, $src0) >; // SETGT_DX10 reverse args def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, COND_LT), - (SETGT_DX10 R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc f32:$src0, f32:$src1, -1, 0, COND_LT), + (SETGT_DX10 $src1, $src0) >; // SETGE_DX10 reverse args def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, COND_LE), - (SETGE_DX10 R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc f32:$src0, f32:$src1, -1, 0, COND_LE), + (SETGE_DX10 $src1, $src0) >; // SETGT_INT reverse args def : Pat < - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLT), - (SETGT_INT R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc i32:$src0, i32:$src1, -1, 0, SETLT), + (SETGT_INT $src1, $src0) >; // SETGE_INT reverse args def : Pat < - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLE), - (SETGE_INT R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc i32:$src0, i32:$src1, -1, 0, SETLE), + (SETGE_INT $src1, $src0) >; // SETGT_UINT reverse args def : Pat < - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULT), - (SETGT_UINT R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc i32:$src0, i32:$src1, -1, 0, SETULT), + (SETGT_UINT $src1, $src0) >; // SETGE_UINT reverse args def : Pat < - (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULE), - (SETGE_UINT R600_Reg32:$src1, R600_Reg32:$src0) + (selectcc i32:$src0, i32:$src1, -1, 0, SETULE), + (SETGE_UINT $src1, $src0) >; // The next two patterns are special cases for handling 'true if ordered' and @@ -1937,50 +2371,50 @@ def : Pat < //SETE - 'true if ordered' def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETO), - (SETE R600_Reg32:$src0, R600_Reg32:$src1) + (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, SETO), + (SETE $src0, $src1) >; //SETE_DX10 - 'true if ordered' def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETO), - (SETE_DX10 R600_Reg32:$src0, R600_Reg32:$src1) + (selectcc f32:$src0, f32:$src1, -1, 0, SETO), + (SETE_DX10 $src0, $src1) >; //SNE - 'true if unordered' def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETUO), - (SNE R600_Reg32:$src0, R600_Reg32:$src1) + (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, SETUO), + (SNE $src0, $src1) >; //SETNE_DX10 - 'true if ordered' def : Pat < - (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUO), - (SETNE_DX10 R600_Reg32:$src0, R600_Reg32:$src1) + (selectcc f32:$src0, f32:$src1, -1, 0, SETUO), + (SETNE_DX10 $src0, $src1) >; -def : Extract_Element <f32, v4f32, R600_Reg128, 0, sub0>; -def : Extract_Element <f32, v4f32, R600_Reg128, 1, 
sub1>; -def : Extract_Element <f32, v4f32, R600_Reg128, 2, sub2>; -def : Extract_Element <f32, v4f32, R600_Reg128, 3, sub3>; +def : Extract_Element <f32, v4f32, 0, sub0>; +def : Extract_Element <f32, v4f32, 1, sub1>; +def : Extract_Element <f32, v4f32, 2, sub2>; +def : Extract_Element <f32, v4f32, 3, sub3>; -def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 0, sub0>; -def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 1, sub1>; -def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 2, sub2>; -def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 3, sub3>; +def : Insert_Element <f32, v4f32, 0, sub0>; +def : Insert_Element <f32, v4f32, 1, sub1>; +def : Insert_Element <f32, v4f32, 2, sub2>; +def : Insert_Element <f32, v4f32, 3, sub3>; -def : Extract_Element <i32, v4i32, R600_Reg128, 0, sub0>; -def : Extract_Element <i32, v4i32, R600_Reg128, 1, sub1>; -def : Extract_Element <i32, v4i32, R600_Reg128, 2, sub2>; -def : Extract_Element <i32, v4i32, R600_Reg128, 3, sub3>; +def : Extract_Element <i32, v4i32, 0, sub0>; +def : Extract_Element <i32, v4i32, 1, sub1>; +def : Extract_Element <i32, v4i32, 2, sub2>; +def : Extract_Element <i32, v4i32, 3, sub3>; -def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 0, sub0>; -def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 1, sub1>; -def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 2, sub2>; -def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 3, sub3>; +def : Insert_Element <i32, v4i32, 0, sub0>; +def : Insert_Element <i32, v4i32, 1, sub1>; +def : Insert_Element <i32, v4i32, 2, sub2>; +def : Insert_Element <i32, v4i32, 3, sub3>; -def : Vector4_Build <v4f32, R600_Reg128, f32, R600_Reg32>; -def : Vector4_Build <v4i32, R600_Reg128, i32, R600_Reg32>; +def : Vector4_Build <v4f32, f32>; +def : Vector4_Build <v4i32, i32>; // bitconvert patterns diff --git a/lib/Target/R600/R600MachineFunctionInfo.cpp b/lib/Target/R600/R600MachineFunctionInfo.cpp index b07a585..018b403 100644 --- a/lib/Target/R600/R600MachineFunctionInfo.cpp +++ b/lib/Target/R600/R600MachineFunctionInfo.cpp @@ -13,5 +13,6 @@ using namespace llvm; R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF) - : MachineFunctionInfo() { - } + : AMDGPUMachineFunction(MF) { } + + diff --git a/lib/Target/R600/R600MachineFunctionInfo.h b/lib/Target/R600/R600MachineFunctionInfo.h index 13a46b8..70fddbb 100644 --- a/lib/Target/R600/R600MachineFunctionInfo.h +++ b/lib/Target/R600/R600MachineFunctionInfo.h @@ -14,18 +14,18 @@ #define R600MACHINEFUNCTIONINFO_H #include "llvm/ADT/BitVector.h" -#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/SelectionDAG.h" +#include "AMDGPUMachineFunction.h" #include <vector> namespace llvm { -class R600MachineFunctionInfo : public MachineFunctionInfo { - +class R600MachineFunctionInfo : public AMDGPUMachineFunction { public: R600MachineFunctionInfo(const MachineFunction &MF); SmallVector<unsigned, 4> LiveOuts; std::vector<unsigned> IndirectRegs; + unsigned StackSize; }; } // End llvm namespace diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp index 9074364..a777142 100644 --- a/lib/Target/R600/R600MachineScheduler.cpp +++ b/lib/Target/R600/R600MachineScheduler.cpp @@ -37,7 +37,7 @@ void R600SchedStrategy::initialize(ScheduleDAGMI *dag) { CurInstKind = IDOther; CurEmitted = 0; OccupedSlotsMask = 15; - InstKindLimit[IDAlu] = 120; // 120 minus 8 for security + InstKindLimit[IDAlu] = TII->getMaxAlusPerClause(); const AMDGPUSubtarget &ST = 
DAG->TM.getSubtarget<AMDGPUSubtarget>(); diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp new file mode 100644 index 0000000..05e96f1 --- /dev/null +++ b/lib/Target/R600/R600Packetizer.cpp @@ -0,0 +1,446 @@ +//===----- R600Packetizer.cpp - VLIW packetizer ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This pass implements instructions packetization for R600. It unsets isLast +/// bit of instructions inside a bundle and substitutes src register with +/// PreviousVector when applicable. +// +//===----------------------------------------------------------------------===// + +#ifndef R600PACKETIZER_CPP +#define R600PACKETIZER_CPP + +#define DEBUG_TYPE "packets" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/CodeGen/DFAPacketizer.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/ScheduleDAG.h" +#include "AMDGPU.h" +#include "R600InstrInfo.h" + +namespace llvm { + +class R600Packetizer : public MachineFunctionPass { + +public: + static char ID; + R600Packetizer(const TargetMachine &TM) : MachineFunctionPass(ID) {} + + void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesCFG(); + AU.addRequired<MachineDominatorTree>(); + AU.addPreserved<MachineDominatorTree>(); + AU.addRequired<MachineLoopInfo>(); + AU.addPreserved<MachineLoopInfo>(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + const char *getPassName() const { + return "R600 Packetizer"; + } + + bool runOnMachineFunction(MachineFunction &Fn); +}; +char R600Packetizer::ID = 0; + +class R600PacketizerList : public VLIWPacketizerList { + +private: + const R600InstrInfo *TII; + const R600RegisterInfo &TRI; + + enum BankSwizzle { + ALU_VEC_012 = 0, + ALU_VEC_021, + ALU_VEC_120, + ALU_VEC_102, + ALU_VEC_201, + ALU_VEC_210 + }; + + unsigned getSlot(const MachineInstr *MI) const { + return TRI.getHWRegChan(MI->getOperand(0).getReg()); + } + + std::vector<unsigned> getPreviousVector(MachineBasicBlock::iterator I) const { + std::vector<unsigned> Result; + I--; + if (!TII->isALUInstr(I->getOpcode()) && !I->isBundle()) + return Result; + MachineBasicBlock::instr_iterator BI = I.getInstrIterator(); + if (I->isBundle()) + BI++; + while (BI->isBundledWithPred() && !TII->isPredicated(BI)) { + int OperandIdx = TII->getOperandIdx(BI->getOpcode(), R600Operands::WRITE); + if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm()) + Result.push_back(BI->getOperand(0).getReg()); + BI++; + } + return Result; + } + + void substitutePV(MachineInstr *MI, const std::vector<unsigned> &PV) const { + R600Operands::Ops Ops[] = { + R600Operands::SRC0, + R600Operands::SRC1, + R600Operands::SRC2 + }; + for (unsigned i = 0; i < 3; i++) { + int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]); + if (OperandIdx < 0) + continue; + unsigned Src = MI->getOperand(OperandIdx).getReg(); + for (unsigned j = 0, e = PV.size(); j < e; j++) { + if (Src == PV[j]) { + unsigned Chan = TRI.getHWRegChan(Src); + unsigned PVReg; + switch (Chan) { + case 0: + PVReg = AMDGPU::PV_X; + break; + case 1: + PVReg = AMDGPU::PV_Y; + break; + case 2: + PVReg = AMDGPU::PV_Z; + break; + case 3: + PVReg = 
AMDGPU::PV_W; + break; + default: + llvm_unreachable("Invalid Chan"); + } + MI->getOperand(OperandIdx).setReg(PVReg); + break; + } + } + } + } +public: + // Ctor. + R600PacketizerList(MachineFunction &MF, MachineLoopInfo &MLI, + MachineDominatorTree &MDT) + : VLIWPacketizerList(MF, MLI, MDT, true), + TII (static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo())), + TRI(TII->getRegisterInfo()) { } + + // initPacketizerState - initialize some internal flags. + void initPacketizerState() { } + + // ignorePseudoInstruction - Ignore bundling of pseudo instructions. + bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB) { + return false; + } + + // isSoloInstruction - return true if instruction MI can not be packetized + // with any other instruction, which means that MI itself is a packet. + bool isSoloInstruction(MachineInstr *MI) { + if (TII->isVector(*MI)) + return true; + if (!TII->isALUInstr(MI->getOpcode())) + return true; + if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TRANS_ONLY) + return true; + if (TII->isTransOnly(MI)) + return true; + return false; + } + + // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ + // together. + bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { + MachineInstr *MII = SUI->getInstr(), *MIJ = SUJ->getInstr(); + if (getSlot(MII) <= getSlot(MIJ)) + return false; + // Does MII and MIJ share the same pred_sel ? + int OpI = TII->getOperandIdx(MII->getOpcode(), R600Operands::PRED_SEL), + OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600Operands::PRED_SEL); + unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0, + PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0; + if (PredI != PredJ) + return false; + if (SUJ->isSucc(SUI)) { + for (unsigned i = 0, e = SUJ->Succs.size(); i < e; ++i) { + const SDep &Dep = SUJ->Succs[i]; + if (Dep.getSUnit() != SUI) + continue; + if (Dep.getKind() == SDep::Anti) + continue; + if (Dep.getKind() == SDep::Output) + if (MII->getOperand(0).getReg() != MIJ->getOperand(0).getReg()) + continue; + return false; + } + } + return true; + } + + // isLegalToPruneDependencies - Is it legal to prune dependece between SUI + // and SUJ. 
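Note: as context for the legality check above, isLegalToPacketizeTogether only accepts a candidate that occupies a strictly higher channel slot than each instruction already in the packet and that shares its pred_sel operand. A stripped-down restatement of those two guards (hypothetical helper, illustrative only, not part of this patch):

// The candidate must land in a strictly higher slot than every instruction
// already packetized, and both must use the same predicate select register.
static bool mayPacketize(unsigned CandSlot, unsigned PackedSlot,
                         unsigned CandPredSel, unsigned PackedPredSel) {
  if (CandSlot <= PackedSlot)
    return false;                       // mirrors getSlot(MII) <= getSlot(MIJ)
  return CandPredSel == PackedPredSel;  // mirrors the PRED_SEL comparison
}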
+ bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {return false;} + + void setIsLastBit(MachineInstr *MI, unsigned Bit) const { + unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), R600Operands::LAST); + MI->getOperand(LastOp).setImm(Bit); + } + + MachineBasicBlock::iterator addToPacket(MachineInstr *MI) { + CurrentPacketMIs.push_back(MI); + bool FitsConstLimits = TII->canBundle(CurrentPacketMIs); + DEBUG( + if (!FitsConstLimits) { + dbgs() << "Couldn't pack :\n"; + MI->dump(); + dbgs() << "with the following packets :\n"; + for (unsigned i = 0, e = CurrentPacketMIs.size() - 1; i < e; i++) { + CurrentPacketMIs[i]->dump(); + dbgs() << "\n"; + } + dbgs() << "because of Consts read limitations\n"; + }); + const std::vector<unsigned> &PV = getPreviousVector(MI); + bool FitsReadPortLimits = fitsReadPortLimitation(CurrentPacketMIs, PV); + DEBUG( + if (!FitsReadPortLimits) { + dbgs() << "Couldn't pack :\n"; + MI->dump(); + dbgs() << "with the following packets :\n"; + for (unsigned i = 0, e = CurrentPacketMIs.size() - 1; i < e; i++) { + CurrentPacketMIs[i]->dump(); + dbgs() << "\n"; + } + dbgs() << "because of Read port limitations\n"; + }); + bool isBundlable = FitsConstLimits && FitsReadPortLimits; + CurrentPacketMIs.pop_back(); + if (!isBundlable) { + endPacket(MI->getParent(), MI); + substitutePV(MI, getPreviousVector(MI)); + return VLIWPacketizerList::addToPacket(MI); + } + if (!CurrentPacketMIs.empty()) + setIsLastBit(CurrentPacketMIs.back(), 0); + substitutePV(MI, PV); + return VLIWPacketizerList::addToPacket(MI); + } +private: + std::vector<std::pair<int, unsigned> > + ExtractSrcs(const MachineInstr *MI, const std::vector<unsigned> &PV) const { + R600Operands::Ops Ops[] = { + R600Operands::SRC0, + R600Operands::SRC1, + R600Operands::SRC2 + }; + std::vector<std::pair<int, unsigned> > Result; + for (unsigned i = 0; i < 3; i++) { + int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]); + if (OperandIdx < 0){ + Result.push_back(std::pair<int, unsigned>(-1,0)); + continue; + } + unsigned Src = MI->getOperand(OperandIdx).getReg(); + if (std::find(PV.begin(), PV.end(), Src) != PV.end()) { + Result.push_back(std::pair<int, unsigned>(-1,0)); + continue; + } + unsigned Reg = TRI.getEncodingValue(Src) & 0xff; + if (Reg > 127) { + Result.push_back(std::pair<int, unsigned>(-1,0)); + continue; + } + unsigned Chan = TRI.getHWRegChan(Src); + Result.push_back(std::pair<int, unsigned>(Reg, Chan)); + } + return Result; + } + + std::vector<std::pair<int, unsigned> > + Swizzle(std::vector<std::pair<int, unsigned> > Src, + BankSwizzle Swz) const { + switch (Swz) { + case ALU_VEC_012: + break; + case ALU_VEC_021: + std::swap(Src[1], Src[2]); + break; + case ALU_VEC_102: + std::swap(Src[0], Src[1]); + break; + case ALU_VEC_120: + std::swap(Src[0], Src[1]); + std::swap(Src[0], Src[2]); + break; + case ALU_VEC_201: + std::swap(Src[0], Src[2]); + std::swap(Src[0], Src[1]); + break; + case ALU_VEC_210: + std::swap(Src[0], Src[2]); + break; + } + return Src; + } + + bool isLegal(const std::vector<MachineInstr *> &IG, + const std::vector<BankSwizzle> &Swz, + const std::vector<unsigned> &PV) const { + assert (Swz.size() == IG.size()); + int Vector[4][3]; + memset(Vector, -1, sizeof(Vector)); + for (unsigned i = 0, e = IG.size(); i < e; i++) { + const std::vector<std::pair<int, unsigned> > &Srcs = + Swizzle(ExtractSrcs(IG[i], PV), Swz[i]); + for (unsigned j = 0; j < 3; j++) { + const std::pair<int, unsigned> &Src = Srcs[j]; + if (Src.first < 0) + continue; + if (Vector[Src.second][j] < 0) + 
Vector[Src.second][j] = Src.first; + if (Vector[Src.second][j] != Src.first) + return false; + } + } + return true; + } + + bool recursiveFitsFPLimitation( + std::vector<MachineInstr *> IG, + const std::vector<unsigned> &PV, + std::vector<BankSwizzle> &SwzCandidate, + std::vector<MachineInstr *> CurrentlyChecked) + const { + if (!isLegal(CurrentlyChecked, SwzCandidate, PV)) + return false; + if (IG.size() == CurrentlyChecked.size()) { + return true; + } + BankSwizzle AvailableSwizzle[] = { + ALU_VEC_012, + ALU_VEC_021, + ALU_VEC_120, + ALU_VEC_102, + ALU_VEC_201, + ALU_VEC_210 + }; + CurrentlyChecked.push_back(IG[CurrentlyChecked.size()]); + for (unsigned i = 0; i < 6; i++) { + SwzCandidate.push_back(AvailableSwizzle[i]); + if (recursiveFitsFPLimitation(IG, PV, SwzCandidate, CurrentlyChecked)) + return true; + SwzCandidate.pop_back(); + } + return false; + } + + bool fitsReadPortLimitation( + std::vector<MachineInstr *> IG, + const std::vector<unsigned> &PV) + const { + //Todo : support shared src0 - src1 operand + std::vector<BankSwizzle> SwzCandidate; + bool Result = recursiveFitsFPLimitation(IG, PV, SwzCandidate, + std::vector<MachineInstr *>()); + if (!Result) + return false; + for (unsigned i = 0, e = IG.size(); i < e; i++) { + MachineInstr *MI = IG[i]; + unsigned Op = TII->getOperandIdx(MI->getOpcode(), + R600Operands::BANK_SWIZZLE); + MI->getOperand(Op).setImm(SwzCandidate[i]); + } + return true; + } +}; + +bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) { + const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo(); + MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>(); + MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>(); + + // Instantiate the packetizer. + R600PacketizerList Packetizer(Fn, MLI, MDT); + + // DFA state table should not be empty. + assert(Packetizer.getResourceTracker() && "Empty DFA table!"); + + // + // Loop over all basic blocks and remove KILL pseudo-instructions + // These instructions confuse the dependence analysis. Consider: + // D0 = ... (Insn 0) + // R0 = KILL R0, D0 (Insn 1) + // R0 = ... (Insn 2) + // Here, Insn 1 will result in the dependence graph not emitting an output + // dependence between Insn 0 and Insn 2. This can lead to incorrect + // packetization + // + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); + MBB != MBBe; ++MBB) { + MachineBasicBlock::iterator End = MBB->end(); + MachineBasicBlock::iterator MI = MBB->begin(); + while (MI != End) { + if (MI->isKill()) { + MachineBasicBlock::iterator DeleteMI = MI; + ++MI; + MBB->erase(DeleteMI); + End = MBB->end(); + continue; + } + ++MI; + } + } + + // Loop over all of the basic blocks. + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); + MBB != MBBe; ++MBB) { + // Find scheduling regions and schedule / packetize each region. + unsigned RemainingCount = MBB->size(); + for(MachineBasicBlock::iterator RegionEnd = MBB->end(); + RegionEnd != MBB->begin();) { + // The next region starts above the previous region. Look backward in the + // instruction stream until we find the nearest boundary. + MachineBasicBlock::iterator I = RegionEnd; + for(;I != MBB->begin(); --I, --RemainingCount) { + if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn)) + break; + } + I = MBB->begin(); + + // Skip empty scheduling regions. + if (I == RegionEnd) { + RegionEnd = llvm::prior(RegionEnd); + --RemainingCount; + continue; + } + // Skip regions with one instruction. 
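Note: the read-port check driven by fitsReadPortLimitation above reduces to the table update sketched below; recursiveFitsFPLimitation then backtracks over the six ALU_VEC_* swizzles per instruction until one assignment keeps this check satisfied (hypothetical helper, using the same 4x3 indexing convention as the pass):

// Vector[chan][srcIdx] remembers which register a given (channel, read port)
// pair sees; a packing is rejected as soon as two instructions disagree.
static bool addRead(int Vector[4][3], unsigned Chan, unsigned SrcIdx, int Reg) {
  if (Vector[Chan][SrcIdx] < 0)
    Vector[Chan][SrcIdx] = Reg;        // first reader of this (chan, port)
  return Vector[Chan][SrcIdx] == Reg;  // any later reader must match
}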
+ if (I == llvm::prior(RegionEnd)) { + RegionEnd = llvm::prior(RegionEnd); + continue; + } + + Packetizer.PacketizeMIs(MBB, I, RegionEnd); + RegionEnd = I; + } + } + + return true; + +} + +} + +llvm::FunctionPass *llvm::createR600Packetizer(TargetMachine &tm) { + return new R600Packetizer(tm); +} + +#endif // R600PACKETIZER_CPP diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/R600/R600RegisterInfo.td index ce5994c..5a2e65c 100644 --- a/lib/Target/R600/R600RegisterInfo.td +++ b/lib/Target/R600/R600RegisterInfo.td @@ -43,6 +43,37 @@ foreach Index = 0-127 in { Index>; } +// KCACHE_BANK0 +foreach Index = 159-128 in { + foreach Chan = [ "X", "Y", "Z", "W" ] in { + // 32-bit Temporary Registers + def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#Index#"-128]."#Chan, Index, Chan>; + } + // 128-bit Temporary Registers + def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#Index#"-128].XYZW", + [!cast<Register>("KC0_"#Index#"_X"), + !cast<Register>("KC0_"#Index#"_Y"), + !cast<Register>("KC0_"#Index#"_Z"), + !cast<Register>("KC0_"#Index#"_W")], + Index>; +} + +// KCACHE_BANK1 +foreach Index = 191-160 in { + foreach Chan = [ "X", "Y", "Z", "W" ] in { + // 32-bit Temporary Registers + def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#Index#"-160]."#Chan, Index, Chan>; + } + // 128-bit Temporary Registers + def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#Index#"-160].XYZW", + [!cast<Register>("KC1_"#Index#"_X"), + !cast<Register>("KC1_"#Index#"_Y"), + !cast<Register>("KC1_"#Index#"_Z"), + !cast<Register>("KC1_"#Index#"_W")], + Index>; +} + + // Array Base Register holding input in FS foreach Index = 448-480 in { def ArrayBase#Index : R600Reg<"ARRAY_BASE", Index>; @@ -57,8 +88,14 @@ def NEG_ONE : R600Reg<"-1.0", 249>; def ONE_INT : R600Reg<"1", 250>; def HALF : R600Reg<"0.5", 252>; def NEG_HALF : R600Reg<"-0.5", 252>; -def ALU_LITERAL_X : R600Reg<"literal.x", 253>; -def PV_X : R600Reg<"pv.x", 254>; +def ALU_LITERAL_X : R600RegWithChan<"literal.x", 253, "X">; +def ALU_LITERAL_Y : R600RegWithChan<"literal.x", 253, "Y">; +def ALU_LITERAL_Z : R600RegWithChan<"literal.x", 253, "Z">; +def ALU_LITERAL_W : R600RegWithChan<"literal.x", 253, "W">; +def PV_X : R600RegWithChan<"PV.x", 254, "X">; +def PV_Y : R600RegWithChan<"PV.y", 254, "Y">; +def PV_Z : R600RegWithChan<"PV.z", 254, "Z">; +def PV_W : R600RegWithChan<"PV.w", 254, "W">; def PREDICATE_BIT : R600Reg<"PredicateBit", 0>; def PRED_SEL_OFF: R600Reg<"Pred_sel_off", 0>; def PRED_SEL_ZERO : R600Reg<"Pred_sel_zero", 2>; @@ -80,6 +117,38 @@ def R600_Addr : RegisterClass <"AMDGPU", [i32], 127, (add (sequence "Addr%u_X", } // End isAllocatable = 0 +def R600_KC0_X : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC0_%u_X", 128, 159))>; + +def R600_KC0_Y : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC0_%u_Y", 128, 159))>; + +def R600_KC0_Z : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC0_%u_Z", 128, 159))>; + +def R600_KC0_W : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC0_%u_W", 128, 159))>; + +def R600_KC0 : RegisterClass <"AMDGPU", [f32, i32], 32, + (interleave R600_KC0_X, R600_KC0_Y, + R600_KC0_Z, R600_KC0_W)>; + +def R600_KC1_X : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC1_%u_X", 160, 191))>; + +def R600_KC1_Y : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC1_%u_Y", 160, 191))>; + +def R600_KC1_Z : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC1_%u_Z", 160, 191))>; + +def R600_KC1_W : RegisterClass <"AMDGPU", [f32, i32], 32, + (add (sequence "KC1_%u_W", 160, 
191))>; + +def R600_KC1 : RegisterClass <"AMDGPU", [f32, i32], 32, + (interleave R600_KC1_X, R600_KC1_Y, + R600_KC1_Z, R600_KC1_W)>; + def R600_TReg32_X : RegisterClass <"AMDGPU", [f32, i32], 32, (add (sequence "T%u_X", 0, 127), AR_X)>; diff --git a/lib/Target/R600/R600Schedule.td b/lib/Target/R600/R600Schedule.td index 7ede181..78a460a 100644 --- a/lib/Target/R600/R600Schedule.td +++ b/lib/Target/R600/R600Schedule.td @@ -24,7 +24,7 @@ def AnyALU : InstrItinClass; def VecALU : InstrItinClass; def TransALU : InstrItinClass; -def R600_EG_Itin : ProcessorItineraries < +def R600_VLIW5_Itin : ProcessorItineraries < [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS, ALU_NULL], [], [ @@ -34,3 +34,14 @@ def R600_EG_Itin : ProcessorItineraries < InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]> ] >; + +def R600_VLIW4_Itin : ProcessorItineraries < + [ALU_X, ALU_Y, ALU_Z, ALU_W, ALU_NULL], + [], + [ + InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>, + InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>, + InstrItinData<TransALU, [InstrStage<1, [ALU_NULL]>]>, + InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]> + ] +>; diff --git a/lib/Target/R600/SIDefines.h b/lib/Target/R600/SIDefines.h new file mode 100644 index 0000000..716b093 --- /dev/null +++ b/lib/Target/R600/SIDefines.h @@ -0,0 +1,22 @@ +//===-- SIDefines.h - SI Helper Macros ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +/// \file +//===----------------------------------------------------------------------===// + +#ifndef SIDEFINES_H_ +#define SIDEFINES_H_ + +#define R_00B028_SPI_SHADER_PGM_RSRC1_PS 0x00B028 +#define R_00B128_SPI_SHADER_PGM_RSRC1_VS 0x00B128 +#define R_00B228_SPI_SHADER_PGM_RSRC1_GS 0x00B228 +#define R_00B848_COMPUTE_PGM_RSRC1 0x00B848 +#define S_00B028_VGPRS(x) (((x) & 0x3F) << 0) +#define S_00B028_SGPRS(x) (((x) & 0x0F) << 6) +#define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC + +#endif // SIDEFINES_H_ diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp index 93f8c38..1a07aff 100644 --- a/lib/Target/R600/SIISelLowering.cpp +++ b/lib/Target/R600/SIISelLowering.cpp @@ -49,6 +49,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) : addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass); addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); + addRegisterClass(MVT::i128, &AMDGPU::SReg_128RegClass); addRegisterClass(MVT::v8i32, &AMDGPU::VReg_256RegClass); addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); @@ -70,11 +71,15 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) : setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + setOperationAction(ISD::STORE, MVT::i32, Custom); + setOperationAction(ISD::STORE, MVT::i64, Custom); + setTargetDAGCombine(ISD::SELECT_CC); setTargetDAGCombine(ISD::SETCC); - setSchedulingPreference(Sched::Source); + setSchedulingPreference(Sched::RegPressure); } SDValue SITargetLowering::LowerFormalArguments( @@ -208,28 +213,15 @@ SDValue SITargetLowering::LowerFormalArguments( MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter( MachineInstr * MI, MachineBasicBlock * BB) const { - MachineRegisterInfo & MRI = BB->getParent()->getRegInfo(); - MachineBasicBlock::iterator I = MI; switch (MI->getOpcode()) { default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); case AMDGPU::BRANCH: return 
BB; - case AMDGPU::SI_WQM: - LowerSI_WQM(MI, *BB, I, MRI); - break; } return BB; } -void SITargetLowering::LowerSI_WQM(MachineInstr *MI, MachineBasicBlock &BB, - MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const { - BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WQM_B64), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC); - - MI->eraseFromParent(); -} - EVT SITargetLowering::getSetCCResultType(EVT VT) const { return MVT::i1; } @@ -247,6 +239,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::STORE: return LowerSTORE(Op, DAG); } return SDValue(); } @@ -345,6 +338,32 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, return Chain; } +#define RSRC_DATA_FORMAT 0xf00000000000 + +SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { + StoreSDNode *StoreNode = cast<StoreSDNode>(Op); + SDValue Chain = Op.getOperand(0); + SDValue Value = Op.getOperand(1); + SDValue VirtualAddress = Op.getOperand(2); + DebugLoc DL = Op.getDebugLoc(); + + if (StoreNode->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) { + return SDValue(); + } + + SDValue SrcSrc = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, + DAG.getConstant(0, MVT::i64), + DAG.getConstant(RSRC_DATA_FORMAT, MVT::i64)); + + SDValue Ops[2]; + Ops[0] = DAG.getNode(AMDGPUISD::BUFFER_STORE, DL, MVT::Other, Chain, + Value, SrcSrc, VirtualAddress); + Ops[1] = Chain; + + return DAG.getMergeValues(Ops, 2, DL); + +} + SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); @@ -437,9 +456,12 @@ int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const { float F; } Imm; - if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) + if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) { + if (Node->getZExtValue() >> 32) { + return -1; + } Imm.I = Node->getSExtValue(); - else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) + } else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) Imm.F = Node->getValueAPF().convertToFloat(); else return -1; // It isn't an immediate @@ -497,22 +519,23 @@ bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, SDValue &Op, MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); SDNode *Node = Op.getNode(); - int OpClass; + const TargetRegisterClass *OpClass; if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) { const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode()); - OpClass = Desc.OpInfo[Op.getResNo()].RegClass; + int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass; + if (OpClassID == -1) + OpClass = getRegClassFor(Op.getSimpleValueType()); + else + OpClass = TRI->getRegClass(OpClassID); } else if (Node->getOpcode() == ISD::CopyFromReg) { RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode()); - OpClass = MRI.getRegClass(Reg->getReg())->getID(); + OpClass = MRI.getRegClass(Reg->getReg()); } else return false; - if (OpClass == -1) - return false; - - return TRI->getRegClass(RegClass)->hasSubClassEq(TRI->getRegClass(OpClass)); + return TRI->getRegClass(RegClass)->hasSubClassEq(OpClass); } /// \brief Make sure that we don't exeed the number of allowed scalars @@ -546,8 +569,9 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, Operand = SDValue(Node, 0); } -SDNode 
*SITargetLowering::PostISelFolding(MachineSDNode *Node, - SelectionDAG &DAG) const { +/// \brief Try to fold the Nodes operands into the Node +SDNode *SITargetLowering::foldOperands(MachineSDNode *Node, + SelectionDAG &DAG) const { // Original encoding (either e32 or e64) int Opcode = Node->getMachineOpcode(); @@ -556,6 +580,13 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, unsigned NumDefs = Desc->getNumDefs(); unsigned NumOps = Desc->getNumOperands(); + // Commuted opcode if available + int OpcodeRev = Desc->isCommutable() ? TII->commuteOpcode(Opcode) : -1; + const MCInstrDesc *DescRev = OpcodeRev == -1 ? 0 : &TII->get(OpcodeRev); + + assert(!DescRev || DescRev->getNumDefs() == NumDefs); + assert(!DescRev || DescRev->getNumOperands() == NumOps); + // e64 version if available, -1 otherwise int OpcodeE64 = AMDGPU::getVOPe64(Opcode); const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? 0 : &TII->get(OpcodeE64); @@ -608,41 +639,54 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, // Is this a VSrc or SSrc operand ? unsigned RegClass = Desc->OpInfo[Op].RegClass; - if (!isVSrc(RegClass) && !isSSrc(RegClass)) { + if (isVSrc(RegClass) || isSSrc(RegClass)) { + // Try to fold the immediates + if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) { + // Folding didn't worked, make sure we don't hit the SReg limit + ensureSRegLimit(DAG, Ops[i], RegClass, ScalarSlotUsed); + } + continue; + } + + if (i == 1 && DescRev && fitsRegClass(DAG, Ops[0], RegClass)) { - if (i == 1 && Desc->isCommutable() && - fitsRegClass(DAG, Ops[0], RegClass) && - foldImm(Ops[1], Immediate, ScalarSlotUsed)) { + unsigned OtherRegClass = Desc->OpInfo[NumDefs].RegClass; + assert(isVSrc(OtherRegClass) || isSSrc(OtherRegClass)); - assert(isVSrc(Desc->OpInfo[NumDefs].RegClass) || - isSSrc(Desc->OpInfo[NumDefs].RegClass)); + // Test if it makes sense to swap operands + if (foldImm(Ops[1], Immediate, ScalarSlotUsed) || + (!fitsRegClass(DAG, Ops[1], RegClass) && + fitsRegClass(DAG, Ops[1], OtherRegClass))) { // Swap commutable operands SDValue Tmp = Ops[1]; Ops[1] = Ops[0]; Ops[0] = Tmp; - } else if (DescE64 && !Immediate) { - // Test if it makes sense to switch to e64 encoding - - RegClass = DescE64->OpInfo[Op].RegClass; - int32_t TmpImm = -1; - if ((isVSrc(RegClass) || isSSrc(RegClass)) && - foldImm(Ops[i], TmpImm, ScalarSlotUsed)) { - - Immediate = -1; - Promote2e64 = true; - Desc = DescE64; - DescE64 = 0; - } + Desc = DescRev; + DescRev = 0; + continue; } - continue; } - // Try to fold the immediates - if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) { - // Folding didn't worked, make sure we don't hit the SReg limit - ensureSRegLimit(DAG, Ops[i], RegClass, ScalarSlotUsed); + if (DescE64 && !Immediate) { + + // Test if it makes sense to switch to e64 encoding + unsigned OtherRegClass = DescE64->OpInfo[Op].RegClass; + if (!isVSrc(OtherRegClass) && !isSSrc(OtherRegClass)) + continue; + + int32_t TmpImm = -1; + if (foldImm(Ops[i], TmpImm, ScalarSlotUsed) || + (!fitsRegClass(DAG, Ops[i], RegClass) && + fitsRegClass(DAG, Ops[1], OtherRegClass))) { + + // Switch to e64 encoding + Immediate = -1; + Promote2e64 = true; + Desc = DescE64; + DescE64 = 0; + } } } @@ -656,10 +700,118 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i) Ops.push_back(Node->getOperand(i)); - // Either create a complete new or update the current instruction - if (Promote2e64) - return DAG.getMachineNode(OpcodeE64, Node->getDebugLoc(), - 
Node->getVTList(), Ops.data(), Ops.size()); - else - return DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size()); + // Create a complete new instruction + return DAG.getMachineNode(Desc->Opcode, Node->getDebugLoc(), + Node->getVTList(), Ops); +} + +/// \brief Helper function for adjustWritemask +unsigned SubIdx2Lane(unsigned Idx) { + switch (Idx) { + default: return 0; + case AMDGPU::sub0: return 0; + case AMDGPU::sub1: return 1; + case AMDGPU::sub2: return 2; + case AMDGPU::sub3: return 3; + } +} + +/// \brief Adjust the writemask of MIMG instructions +void SITargetLowering::adjustWritemask(MachineSDNode *&Node, + SelectionDAG &DAG) const { + SDNode *Users[4] = { }; + unsigned Writemask = 0, Lane = 0; + + // Try to figure out the used register components + for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); + I != E; ++I) { + + // Abort if we can't understand the usage + if (!I->isMachineOpcode() || + I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) + return; + + Lane = SubIdx2Lane(I->getConstantOperandVal(1)); + + // Abort if we have more than one user per component + if (Users[Lane]) + return; + + Users[Lane] = *I; + Writemask |= 1 << Lane; + } + + // Abort if all components are used + if (Writemask == 0xf) + return; + + // Adjust the writemask in the node + std::vector<SDValue> Ops; + Ops.push_back(DAG.getTargetConstant(Writemask, MVT::i32)); + for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) + Ops.push_back(Node->getOperand(i)); + Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size()); + + // If we only got one lane, replace it with a copy + if (Writemask == (1U << Lane)) { + SDValue RC = DAG.getTargetConstant(AMDGPU::VReg_32RegClassID, MVT::i32); + SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, + DebugLoc(), MVT::f32, + SDValue(Node, 0), RC); + DAG.ReplaceAllUsesWith(Users[Lane], Copy); + return; + } + + // Update the users of the node with the new indices + for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) { + + SDNode *User = Users[i]; + if (!User) + continue; + + SDValue Op = DAG.getTargetConstant(Idx, MVT::i32); + DAG.UpdateNodeOperands(User, User->getOperand(0), Op); + + switch (Idx) { + default: break; + case AMDGPU::sub0: Idx = AMDGPU::sub1; break; + case AMDGPU::sub1: Idx = AMDGPU::sub2; break; + case AMDGPU::sub2: Idx = AMDGPU::sub3; break; + } + } +} + +/// \brief Fold the instructions after slecting them +SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, + SelectionDAG &DAG) const { + + if (AMDGPU::isMIMG(Node->getMachineOpcode()) != -1) + adjustWritemask(Node, DAG); + + return foldOperands(Node, DAG); +} + +/// \brief Assign the register class depending on the number of +/// bits set in the writemask +void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, + SDNode *Node) const { + if (AMDGPU::isMIMG(MI->getOpcode()) == -1) + return; + + unsigned VReg = MI->getOperand(0).getReg(); + unsigned Writemask = MI->getOperand(1).getImm(); + unsigned BitsSet = 0; + for (unsigned i = 0; i < 4; ++i) + BitsSet += Writemask & (1 << i) ? 
1 : 0; + + const TargetRegisterClass *RC; + switch (BitsSet) { + default: return; + case 1: RC = &AMDGPU::VReg_32RegClass; break; + case 2: RC = &AMDGPU::VReg_64RegClass; break; + case 3: RC = &AMDGPU::VReg_96RegClass; break; + } + + MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); + MRI.setRegClass(VReg, RC); } diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h index d656225..de637be 100644 --- a/lib/Target/R600/SIISelLowering.h +++ b/lib/Target/R600/SIISelLowering.h @@ -24,9 +24,7 @@ class SITargetLowering : public AMDGPUTargetLowering { const SIInstrInfo * TII; const TargetRegisterInfo * TRI; - void LowerSI_WQM(MachineInstr *MI, MachineBasicBlock &BB, - MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const; - + SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; @@ -36,6 +34,9 @@ class SITargetLowering : public AMDGPUTargetLowering { void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, unsigned RegClass, bool &ScalarSlotUsed) const; + SDNode *foldOperands(MachineSDNode *N, SelectionDAG &DAG) const; + void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const; + public: SITargetLowering(TargetMachine &tm); @@ -52,6 +53,8 @@ public: virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const; + virtual void AdjustInstrPostInstrSelection(MachineInstr *MI, + SDNode *Node) const; int32_t analyzeImmediate(const SDNode *N) const; }; diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td index 3891ddb..f737ddd 100644 --- a/lib/Target/R600/SIInstrFormats.td +++ b/lib/Target/R600/SIInstrFormats.td @@ -284,33 +284,33 @@ let Uses = [EXEC] in { class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> : Enc64<outs, ins, asm, pattern> { - bits<8> VDATA; - bits<12> OFFSET; - bits<1> OFFEN; - bits<1> IDXEN; - bits<1> GLC; - bits<1> ADDR64; - bits<1> LDS; - bits<8> VADDR; - bits<7> SRSRC; - bits<1> SLC; - bits<1> TFE; - bits<8> SOFFSET; - - let Inst{11-0} = OFFSET; - let Inst{12} = OFFEN; - let Inst{13} = IDXEN; - let Inst{14} = GLC; - let Inst{15} = ADDR64; - let Inst{16} = LDS; + bits<12> offset; + bits<1> offen; + bits<1> idxen; + bits<1> glc; + bits<1> addr64; + bits<1> lds; + bits<8> vaddr; + bits<8> vdata; + bits<7> srsrc; + bits<1> slc; + bits<1> tfe; + bits<8> soffset; + + let Inst{11-0} = offset; + let Inst{12} = offen; + let Inst{13} = idxen; + let Inst{14} = glc; + let Inst{15} = addr64; + let Inst{16} = lds; let Inst{24-18} = op; let Inst{31-26} = 0x38; //encoding - let Inst{39-32} = VADDR; - let Inst{47-40} = VDATA; - let Inst{52-48} = SRSRC{6-2}; - let Inst{54} = SLC; - let Inst{55} = TFE; - let Inst{63-56} = SOFFSET; + let Inst{39-32} = vaddr; + let Inst{47-40} = vdata; + let Inst{52-48} = srsrc{6-2}; + let Inst{54} = slc; + let Inst{55} = tfe; + let Inst{63-56} = soffset; let VM_CNT = 1; let EXP_CNT = 1; diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp index de2373b..9a04c60 100644 --- a/lib/Target/R600/SIInstrInfo.cpp +++ b/lib/Target/R600/SIInstrInfo.cpp @@ -58,6 +58,10 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0 }; + const int16_t Sub0_2[] = { + AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0 + }; + 
const int16_t Sub0_1[] = { AMDGPU::sub0, AMDGPU::sub1, 0 }; @@ -65,6 +69,26 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, unsigned Opcode; const int16_t *SubIndices; + if (AMDGPU::M0 == DestReg) { + // Check if M0 isn't already set to this value + for (MachineBasicBlock::reverse_iterator E = MBB.rend(), + I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) { + + if (!I->definesRegister(AMDGPU::M0)) + continue; + + unsigned Opc = I->getOpcode(); + if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32) + break; + + if (!I->readsRegister(SrcReg)) + break; + + // The copy isn't necessary + return; + } + } + if (AMDGPU::SReg_32RegClass.contains(DestReg)) { assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) @@ -105,6 +129,11 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, Opcode = AMDGPU::V_MOV_B32_e32; SubIndices = Sub0_1; + } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) { + assert(AMDGPU::VReg_96RegClass.contains(SrcReg)); + Opcode = AMDGPU::V_MOV_B32_e32; + SubIndices = Sub0_2; + } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) { assert(AMDGPU::VReg_128RegClass.contains(SrcReg) || AMDGPU::SReg_128RegClass.contains(SrcReg)); @@ -138,6 +167,21 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, } } +unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const { + + int NewOpc; + + // Try to map original to commuted opcode + if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1) + return NewOpc; + + // Try to map commuted to original opcode + if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1) + return NewOpc; + + return Opcode; +} + MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { @@ -145,7 +189,12 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI, !MI->getOperand(2).isReg()) return 0; - return TargetInstrInfo::commuteInstruction(MI, NewMI); + MI = TargetInstrInfo::commuteInstruction(MI, NewMI); + + if (MI) + MI->setDesc(get(commuteOpcode(MI->getOpcode()))); + + return MI; } MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg, diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h index 5789af5..87eff4d 100644 --- a/lib/Target/R600/SIInstrInfo.h +++ b/lib/Target/R600/SIInstrInfo.h @@ -35,6 +35,8 @@ public: unsigned DestReg, unsigned SrcReg, bool KillSrc) const; + unsigned commuteOpcode(unsigned Opcode) const; + virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI=false) const; @@ -76,6 +78,9 @@ public: namespace AMDGPU { int getVOPe64(uint16_t Opcode); + int getCommuteRev(uint16_t Opcode); + int getCommuteOrig(uint16_t Opcode); + int isMIMG(uint16_t Opcode); } // End namespace AMDGPU diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td index 2f10c38..aafc331 100644 --- a/lib/Target/R600/SIInstrInfo.td +++ b/lib/Target/R600/SIInstrInfo.td @@ -26,6 +26,10 @@ def HI32 : SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(N->getZExtValue() >> 32, MVT::i32); }]>; +def SIbuffer_store : SDNode<"AMDGPUISD::BUFFER_STORE", + SDTypeProfile<0, 3, [SDTCisPtrTy<1>, SDTCisInt<2>]>, + [SDNPHasChain, SDNPMayStore]>; + def IMM8bitDWORD : ImmLeaf < i32, [{ return (Imm & ~0x3FC) == 0; @@ -138,6 +142,11 @@ class VOP <string opName> { string OpName = opName; } +class VOP2_REV <string revOp, bit isOrig> { + string RevOp = revOp; + bit IsOrig = isOrig; +} + multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src, string opName, list<dag> pattern> { @@ -166,11 +175,11 @@ multiclass 
VOP1_64 <bits<8> op, string opName, list<dag> pattern> : VOP1_Helper <op, VReg_64, VSrc_64, opName, pattern>; multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc, - string opName, list<dag> pattern> { + string opName, list<dag> pattern, string revOp> { def _e32 : VOP2 < op, (outs vrc:$dst), (ins arc:$src0, vrc:$src1), opName#"_e32 $dst, $src0, $src1", pattern - >, VOP <opName>; + >, VOP <opName>, VOP2_REV<revOp#"_e32", !eq(revOp, opName)>; def _e64 : VOP3 < {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, @@ -179,23 +188,26 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc, i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg), opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", [] - >, VOP <opName> { + >, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> { let SRC2 = SIOperand.ZERO; } } -multiclass VOP2_32 <bits<6> op, string opName, list<dag> pattern> - : VOP2_Helper <op, VReg_32, VSrc_32, opName, pattern>; +multiclass VOP2_32 <bits<6> op, string opName, list<dag> pattern, + string revOp = opName> + : VOP2_Helper <op, VReg_32, VSrc_32, opName, pattern, revOp>; -multiclass VOP2_64 <bits<6> op, string opName, list<dag> pattern> - : VOP2_Helper <op, VReg_64, VSrc_64, opName, pattern>; +multiclass VOP2_64 <bits<6> op, string opName, list<dag> pattern, + string revOp = opName> + : VOP2_Helper <op, VReg_64, VSrc_64, opName, pattern, revOp>; -multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern> { +multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern, + string revOp = opName> { def _e32 : VOP2 < op, (outs VReg_32:$dst), (ins VSrc_32:$src0, VReg_32:$src1), opName#"_e32 $dst, $src0, $src1", pattern - >, VOP <opName>; + >, VOP <opName>, VOP2_REV<revOp#"_e32", !eq(revOp, opName)>; def _e64 : VOP3b < {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, @@ -204,7 +216,7 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern> { i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg), opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", [] - >, VOP <opName> { + >, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> { let SRC2 = SIOperand.ZERO; /* the VOP2 variant puts the carry out into VCC, the VOP3 variant can write it into any SGPR. 
We currently don't use the carry out, @@ -247,14 +259,14 @@ multiclass VOPC_64 <bits<8> op, string opName, class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 < op, (outs VReg_32:$dst), (ins VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2, - i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg), + InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg), opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern >, VOP <opName>; class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 < op, (outs VReg_64:$dst), (ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2, - i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg), + InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg), opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern >, VOP <opName>; @@ -277,17 +289,39 @@ class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBU class MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> : MUBUF < op, - (outs regClass:$dst), + (outs regClass:$vdata), (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64, i1imm:$lds, VReg_32:$vaddr, SReg_128:$srsrc, i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset), - asm#" $dst, $offset, $offen, $idxen, $glc, $addr64, " + asm#" $vdata, $offset, $offen, $idxen, $glc, $addr64, " #"$lds, $vaddr, $srsrc, $slc, $tfe, $soffset", []> { let mayLoad = 1; let mayStore = 0; } +class MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass, + ValueType VT> : + MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr), + name#" $vdata, $srsrc + $vaddr", + [(SIbuffer_store (VT vdataClass:$vdata), (i128 SReg_128:$srsrc), + (i64 VReg_64:$vaddr))]> { + + let mayLoad = 0; + let mayStore = 1; + + // Encoding + let offset = 0; + let offen = 0; + let idxen = 0; + let glc = 0; + let addr64 = 1; + let lds = 0; + let slc = 0; + let tfe = 0; + let soffset = 128; // ZERO +} + class MTBUF_Load_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF < op, (outs regClass:$dst), @@ -305,13 +339,14 @@ class MIMG_Load_Helper <bits<7> op, string asm> : MIMG < op, (outs VReg_128:$vdata), (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128, - i1imm:$tfe, i1imm:$lwe, i1imm:$slc, VReg_32:$vaddr, + i1imm:$tfe, i1imm:$lwe, i1imm:$slc, unknown:$vaddr, SReg_256:$srsrc, SReg_128:$ssamp), asm#" $vdata, $dmask, $unorm, $glc, $da, $r128," #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp", []> { let mayLoad = 1; let mayStore = 0; + let hasPostISelHook = 1; } //===----------------------------------------------------------------------===// @@ -327,4 +362,31 @@ def getVOPe64 : InstrMapping { let ValueCols = [["8"]]; } +// Maps an original opcode to its commuted version +def getCommuteRev : InstrMapping { + let FilterClass = "VOP2_REV"; + let RowFields = ["RevOp"]; + let ColFields = ["IsOrig"]; + let KeyCol = ["1"]; + let ValueCols = [["0"]]; +} + +// Maps an commuted opcode to its original version +def getCommuteOrig : InstrMapping { + let FilterClass = "VOP2_REV"; + let RowFields = ["RevOp"]; + let ColFields = ["IsOrig"]; + let KeyCol = ["0"]; + let ValueCols = [["1"]]; +} + +// Test if the supplied opcode is an MIMG instruction +def isMIMG : InstrMapping { + let FilterClass = "MIMG_Load_Helper"; + let RowFields = ["Inst"]; + let ColFields = ["Size"]; + let KeyCol = ["8"]; + let ValueCols = [["8"]]; +} + include "SIInstructions.td" diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td index 05b04a9..3ff4548 100644 
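The three InstrMapping records above only describe tables; TableGen turns them into the lookup functions whose declarations this patch adds to SIInstrInfo.h (getCommuteRev, getCommuteOrig, isMIMG), each returning the mapped opcode or -1. A minimal usage sketch, assuming those generated functions; the helper names isImageOpcode and toggleCommutedForm are made up for illustration:

    // Assumes the AMDGPU::getCommute*/isMIMG declarations added to SIInstrInfo.h above.
    bool isImageOpcode(unsigned Opcode) {
      return AMDGPU::isMIMG(Opcode) != -1;        // a row exists in the MIMG_Load_Helper table
    }

    unsigned toggleCommutedForm(unsigned Opcode) {
      int Rev = AMDGPU::getCommuteRev(Opcode);    // e.g. V_SUB_F32_e32 -> V_SUBREV_F32_e32
      if (Rev != -1)
        return Rev;
      int Orig = AMDGPU::getCommuteOrig(Opcode);  // e.g. V_SUBREV_F32_e32 -> V_SUB_F32_e32
      if (Orig != -1)
        return Orig;
      return Opcode;                              // not part of a VOP2_REV pair
    }

This is the same round trip SIInstrInfo::commuteOpcode performs after TargetInstrInfo::commuteInstruction has swapped the operands.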
--- a/lib/Target/R600/SIInstructions.td +++ b/lib/Target/R600/SIInstructions.td @@ -108,7 +108,7 @@ VGPR0 = V_CNDMASK VCC, VGPR0, VGPR1 def S_CMPK_EQ_I32 : SOPK < 0x00000003, (outs SCCReg:$dst), (ins SReg_32:$src0, i32imm:$src1), "S_CMPK_EQ_I32", - [(set SCCReg:$dst, (setcc SReg_32:$src0, imm:$src1, SETEQ))] + [(set i1:$dst, (setcc i32:$src0, imm:$src1, SETEQ))] >; */ @@ -408,8 +408,14 @@ def BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", def BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>; //def BUFFER_STORE_BYTE : MUBUF_ <0x00000018, "BUFFER_STORE_BYTE", []>; //def BUFFER_STORE_SHORT : MUBUF_ <0x0000001a, "BUFFER_STORE_SHORT", []>; -//def BUFFER_STORE_DWORD : MUBUF_ <0x0000001c, "BUFFER_STORE_DWORD", []>; -//def BUFFER_STORE_DWORDX2 : MUBUF_DWORDX2 <0x0000001d, "BUFFER_STORE_DWORDX2", []>; + +def BUFFER_STORE_DWORD : MUBUF_Store_Helper < + 0x0000001c, "BUFFER_STORE_DWORD", VReg_32, i32 +>; + +def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper < + 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64, i64 +>; //def BUFFER_STORE_DWORDX4 : MUBUF_DWORDX4 <0x0000001e, "BUFFER_STORE_DWORDX4", []>; //def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>; //def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>; @@ -594,12 +600,12 @@ defm V_READFIRSTLANE_B32 : VOP1_32 <0x00000002, "V_READFIRSTLANE_B32", []>; //defm V_CVT_I32_F64 : VOP1_32 <0x00000003, "V_CVT_I32_F64", []>; //defm V_CVT_F64_I32 : VOP1_64 <0x00000004, "V_CVT_F64_I32", []>; defm V_CVT_F32_I32 : VOP1_32 <0x00000005, "V_CVT_F32_I32", - [(set VReg_32:$dst, (sint_to_fp VSrc_32:$src0))] + [(set f32:$dst, (sint_to_fp i32:$src0))] >; -//defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32", []>; -//defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32", []>; +defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32", []>; +defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32", []>; defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32", - [(set (i32 VReg_32:$dst), (fp_to_sint VSrc_32:$src0))] + [(set i32:$dst, (fp_to_sint f32:$src0))] >; defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>; ////def V_CVT_F16_F32 : VOP1_F16 <0x0000000a, "V_CVT_F16_F32", []>; @@ -616,35 +622,35 @@ defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>; //defm V_CVT_U32_F64 : VOP1_32 <0x00000015, "V_CVT_U32_F64", []>; //defm V_CVT_F64_U32 : VOP1_64 <0x00000016, "V_CVT_F64_U32", []>; defm V_FRACT_F32 : VOP1_32 <0x00000020, "V_FRACT_F32", - [(set VReg_32:$dst, (AMDGPUfract VSrc_32:$src0))] + [(set f32:$dst, (AMDGPUfract f32:$src0))] >; defm V_TRUNC_F32 : VOP1_32 <0x00000021, "V_TRUNC_F32", []>; defm V_CEIL_F32 : VOP1_32 <0x00000022, "V_CEIL_F32", - [(set VReg_32:$dst, (fceil VSrc_32:$src0))] + [(set f32:$dst, (fceil f32:$src0))] >; defm V_RNDNE_F32 : VOP1_32 <0x00000023, "V_RNDNE_F32", - [(set VReg_32:$dst, (frint VSrc_32:$src0))] + [(set f32:$dst, (frint f32:$src0))] >; defm V_FLOOR_F32 : VOP1_32 <0x00000024, "V_FLOOR_F32", - [(set VReg_32:$dst, (ffloor VSrc_32:$src0))] + [(set f32:$dst, (ffloor f32:$src0))] >; defm V_EXP_F32 : VOP1_32 <0x00000025, "V_EXP_F32", - [(set VReg_32:$dst, (fexp2 VSrc_32:$src0))] + [(set f32:$dst, (fexp2 f32:$src0))] >; defm V_LOG_CLAMP_F32 : VOP1_32 <0x00000026, "V_LOG_CLAMP_F32", []>; defm V_LOG_F32 : VOP1_32 <0x00000027, "V_LOG_F32", - [(set VReg_32:$dst, (flog2 VSrc_32:$src0))] + [(set f32:$dst, (flog2 f32:$src0))] >; defm V_RCP_CLAMP_F32 : VOP1_32 <0x00000028, "V_RCP_CLAMP_F32", []>; defm V_RCP_LEGACY_F32 : 
VOP1_32 <0x00000029, "V_RCP_LEGACY_F32", []>; defm V_RCP_F32 : VOP1_32 <0x0000002a, "V_RCP_F32", - [(set VReg_32:$dst, (fdiv FP_ONE, VSrc_32:$src0))] + [(set f32:$dst, (fdiv FP_ONE, f32:$src0))] >; defm V_RCP_IFLAG_F32 : VOP1_32 <0x0000002b, "V_RCP_IFLAG_F32", []>; defm V_RSQ_CLAMP_F32 : VOP1_32 <0x0000002c, "V_RSQ_CLAMP_F32", []>; defm V_RSQ_LEGACY_F32 : VOP1_32 < 0x0000002d, "V_RSQ_LEGACY_F32", - [(set VReg_32:$dst, (int_AMDGPU_rsq VSrc_32:$src0))] + [(set f32:$dst, (int_AMDGPU_rsq f32:$src0))] >; defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32", []>; defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64", []>; @@ -787,14 +793,13 @@ def V_CNDMASK_B32_e64 : VOP3 <0x00000100, (outs VReg_32:$dst), (ins VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2, InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg), "V_CNDMASK_B32_e64 $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", - [(set (i32 VReg_32:$dst), (select (i1 SSrc_64:$src2), - VSrc_32:$src1, VSrc_32:$src0))] + [(set i32:$dst, (select i1:$src2, i32:$src1, i32:$src0))] >; //f32 pattern for V_CNDMASK_B32_e64 def : Pat < - (f32 (select (i1 SSrc_64:$src2), VSrc_32:$src1, VSrc_32:$src0)), - (V_CNDMASK_B32_e64 VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2) + (f32 (select i1:$src2, f32:$src1, f32:$src0)), + (V_CNDMASK_B32_e64 $src0, $src1, $src2) >; defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>; @@ -802,26 +807,26 @@ defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>; let isCommutable = 1 in { defm V_ADD_F32 : VOP2_32 <0x00000003, "V_ADD_F32", - [(set VReg_32:$dst, (fadd VSrc_32:$src0, VReg_32:$src1))] + [(set f32:$dst, (fadd f32:$src0, f32:$src1))] >; -} // End isCommutable = 1 defm V_SUB_F32 : VOP2_32 <0x00000004, "V_SUB_F32", - [(set VReg_32:$dst, (fsub VSrc_32:$src0, VReg_32:$src1))] + [(set f32:$dst, (fsub f32:$src0, f32:$src1))] >; +defm V_SUBREV_F32 : VOP2_32 <0x00000005, "V_SUBREV_F32", [], "V_SUB_F32">; +} // End isCommutable = 1 -defm V_SUBREV_F32 : VOP2_32 <0x00000005, "V_SUBREV_F32", []>; defm V_MAC_LEGACY_F32 : VOP2_32 <0x00000006, "V_MAC_LEGACY_F32", []>; let isCommutable = 1 in { defm V_MUL_LEGACY_F32 : VOP2_32 < 0x00000007, "V_MUL_LEGACY_F32", - [(set VReg_32:$dst, (int_AMDGPU_mul VSrc_32:$src0, VReg_32:$src1))] + [(set f32:$dst, (int_AMDGPU_mul f32:$src0, f32:$src1))] >; defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32", - [(set VReg_32:$dst, (fmul VSrc_32:$src0, VReg_32:$src1))] + [(set f32:$dst, (fmul f32:$src0, f32:$src1))] >; } // End isCommutable = 1 @@ -834,11 +839,11 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32", let isCommutable = 1 in { defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32", - [(set VReg_32:$dst, (AMDGPUfmin VSrc_32:$src0, VReg_32:$src1))] + [(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))] >; defm V_MAX_LEGACY_F32 : VOP2_32 <0x0000000e, "V_MAX_LEGACY_F32", - [(set VReg_32:$dst, (AMDGPUfmax VSrc_32:$src0, VReg_32:$src1))] + [(set f32:$dst, (AMDGPUfmax f32:$src0, f32:$src1))] >; defm V_MIN_F32 : VOP2_32 <0x0000000f, "V_MIN_F32", []>; @@ -848,27 +853,29 @@ defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32", []>; defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32", []>; defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32", []>; -} // End isCommutable = 1 +defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32", + [(set i32:$dst, (srl i32:$src0, i32:$src1))] +>; +defm V_LSHRREV_B32 : VOP2_32 <0x00000016, "V_LSHRREV_B32", [], "V_LSHR_B32">; -defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32", []>; -defm V_LSHRREV_B32 : VOP2_32 <0x00000016, "V_LSHRREV_B32", []>; 
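The new revOp parameter ties each "_REV" VOP2 to its plain counterpart so that the commuteInstruction override added earlier in this patch can switch opcodes after swapping operands. A small sketch of why that is necessary, assuming the usual GCN meaning of the REV encodings (source operands taken in the opposite order); the patch itself does not restate these semantics:

    #include <cstdint>
    // V_SUB_F32  dst, s0, s1 computes s0 - s1;  V_SUBREV_F32  computes s1 - s0.
    // V_LSHR_B32 dst, s0, s1 computes s0 >> s1; V_LSHRREV_B32 computes s1 >> s0.
    float    v_sub_f32    (float a, float b)       { return a - b; }
    float    v_subrev_f32 (float a, float b)       { return b - a; }
    uint32_t v_lshr_b32   (uint32_t a, uint32_t b) { return a >> (b & 31); }
    uint32_t v_lshrrev_b32(uint32_t a, uint32_t b) { return b >> (a & 31); }
    // v_sub_f32(x, y) == v_subrev_f32(y, x), so swapping the operands and
    // flipping to the paired opcode leaves the result unchanged.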
-defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32", []>; -defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", []>; -defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32", - [(set VReg_32:$dst, (shl VSrc_32:$src0, (i32 VReg_32:$src1)))] +defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32", + [(set i32:$dst, (sra i32:$src0, i32:$src1))] >; -defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", []>; +defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", [], "V_ASHR_I32">; -let isCommutable = 1 in { +defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32", + [(set i32:$dst, (shl i32:$src0, i32:$src1))] +>; +defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", [], "V_LSHL_B32">; defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32", - [(set VReg_32:$dst, (and VSrc_32:$src0, VReg_32:$src1))] + [(set i32:$dst, (and i32:$src0, i32:$src1))] >; defm V_OR_B32 : VOP2_32 <0x0000001c, "V_OR_B32", - [(set VReg_32:$dst, (or VSrc_32:$src0, VReg_32:$src1))] + [(set i32:$dst, (or i32:$src0, i32:$src1))] >; defm V_XOR_B32 : VOP2_32 <0x0000001d, "V_XOR_B32", - [(set VReg_32:$dst, (xor VSrc_32:$src0, VReg_32:$src1))] + [(set i32:$dst, (xor i32:$src0, i32:$src1))] >; } // End isCommutable = 1 @@ -880,31 +887,30 @@ defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>; //defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>; //defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>; //defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>; -let Defs = [VCC] in { // Carry-out goes to VCC -let isCommutable = 1 in { +let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32", - [(set VReg_32:$dst, (add (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))] + [(set i32:$dst, (add (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))] >; -} // End isCommutable = 1 defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32", - [(set VReg_32:$dst, (sub (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))] + [(set i32:$dst, (sub i32:$src0, i32:$src1))] >; +defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", [], "V_SUB_I32">; -defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", []>; let Uses = [VCC] in { // Carry-out comes from VCC defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32", []>; defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32", []>; -defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", []>; +defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", [], "V_SUBB_U32">; } // End Uses = [VCC] -} // End Defs = [VCC] +} // End isCommutable = 1, Defs = [VCC] + defm V_LDEXP_F32 : VOP2_32 <0x0000002b, "V_LDEXP_F32", []>; ////def V_CVT_PKACCUM_U8_F32 : VOP2_U8 <0x0000002c, "V_CVT_PKACCUM_U8_F32", []>; ////def V_CVT_PKNORM_I16_F32 : VOP2_I16 <0x0000002d, "V_CVT_PKNORM_I16_F32", []>; ////def V_CVT_PKNORM_U16_F32 : VOP2_U16 <0x0000002e, "V_CVT_PKNORM_U16_F32", []>; defm V_CVT_PKRTZ_F16_F32 : VOP2_32 <0x0000002f, "V_CVT_PKRTZ_F16_F32", - [(set VReg_32:$dst, (int_SI_packf16 VSrc_32:$src0, VReg_32:$src1))] + [(set i32:$dst, (int_SI_packf16 f32:$src0, f32:$src1))] >; ////def V_CVT_PK_U16_U32 : VOP2_U16 <0x00000030, "V_CVT_PK_U16_U32", []>; ////def V_CVT_PK_I16_I32 : VOP2_I16 <0x00000031, "V_CVT_PK_I16_I32", []>; @@ -941,6 +947,7 @@ def V_CUBEMA_F32 : VOP3_32 <0x00000147, "V_CUBEMA_F32", []>; def V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32", []>; def V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32", []>; def V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32", []>; +defm : BFIPatterns <V_BFI_B32>; def V_FMA_F32 : VOP3_32 <0x0000014b, 
"V_FMA_F32", []>; def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64", []>; //def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>; @@ -971,14 +978,31 @@ def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>; def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>; def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>; def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>; + +let isCommutable = 1 in { + def V_MUL_LO_U32 : VOP3_32 <0x00000169, "V_MUL_LO_U32", []>; def V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>; def V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>; +def V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>; + +} // isCommutable = 1 + def : Pat < - (mul VSrc_32:$src0, VReg_32:$src1), - (V_MUL_LO_I32 VSrc_32:$src0, VReg_32:$src1, (i32 0), 0, 0, 0, 0) + (mul i32:$src0, i32:$src1), + (V_MUL_LO_I32 $src0, $src1, (i32 0)) >; -def V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>; + +def : Pat < + (mulhu i32:$src0, i32:$src1), + (V_MUL_HI_U32 $src0, $src1, (i32 0)) +>; + +def : Pat < + (mulhs i32:$src0, i32:$src1), + (V_MUL_HI_I32 $src0, $src1, (i32 0)) +>; + def V_DIV_SCALE_F32 : VOP3_32 <0x0000016d, "V_DIV_SCALE_F32", []>; def V_DIV_SCALE_F64 : VOP3_64 <0x0000016e, "V_DIV_SCALE_F64", []>; def V_DIV_FMAS_F32 : VOP3_32 <0x0000016f, "V_DIV_FMAS_F32", []>; @@ -1001,34 +1025,27 @@ def S_MAX_U32 : SOP2_32 <0x00000009, "S_MAX_U32", []>; def S_CSELECT_B32 : SOP2 < 0x0000000a, (outs SReg_32:$dst), (ins SReg_32:$src0, SReg_32:$src1, SCCReg:$scc), "S_CSELECT_B32", - [(set (i32 SReg_32:$dst), (select (i1 SCCReg:$scc), - SReg_32:$src0, SReg_32:$src1))] + [] >; def S_CSELECT_B64 : SOP2_64 <0x0000000b, "S_CSELECT_B64", []>; -// f32 pattern for S_CSELECT_B32 -def : Pat < - (f32 (select (i1 SCCReg:$scc), SReg_32:$src0, SReg_32:$src1)), - (S_CSELECT_B32 SReg_32:$src0, SReg_32:$src1, SCCReg:$scc) ->; - def S_AND_B32 : SOP2_32 <0x0000000e, "S_AND_B32", []>; def S_AND_B64 : SOP2_64 <0x0000000f, "S_AND_B64", - [(set SReg_64:$dst, (i64 (and SSrc_64:$src0, SSrc_64:$src1)))] + [(set i64:$dst, (and i64:$src0, i64:$src1))] >; def : Pat < - (i1 (and SSrc_64:$src0, SSrc_64:$src1)), - (S_AND_B64 SSrc_64:$src0, SSrc_64:$src1) + (i1 (and i1:$src0, i1:$src1)), + (S_AND_B64 $src0, $src1) >; def S_OR_B32 : SOP2_32 <0x00000010, "S_OR_B32", []>; def S_OR_B64 : SOP2_64 <0x00000011, "S_OR_B64", []>; def : Pat < - (i1 (or SSrc_64:$src0, SSrc_64:$src1)), - (S_OR_B64 SSrc_64:$src0, SSrc_64:$src1) + (i1 (or i1:$src0, i1:$src1)), + (S_OR_B64 $src0, $src1) >; def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32", []>; def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64", []>; @@ -1067,17 +1084,6 @@ def LOAD_CONST : AMDGPUShaderInst < [(set GPRF32:$dst, (int_AMDGPU_load_const imm:$src))] >; -let usesCustomInserter = 1 in { - -def SI_WQM : InstSI < - (outs), - (ins), - "SI_WQM", - [(int_SI_wqm)] ->; - -} // end usesCustomInserter - // SI Psuedo instructions. These are used by the CFG structurizer pass // and should be lowered to ISA instructions prior to codegen. 
@@ -1090,14 +1096,14 @@ def SI_IF : InstSI < (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target), "SI_IF $dst, $vcc, $target", - [(set SReg_64:$dst, (int_SI_if SReg_64:$vcc, bb:$target))] + [(set i64:$dst, (int_SI_if i1:$vcc, bb:$target))] >; def SI_ELSE : InstSI < (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target), "SI_ELSE $dst, $src, $target", - [(set SReg_64:$dst, (int_SI_else SReg_64:$src, bb:$target))]> { + [(set i64:$dst, (int_SI_else i64:$src, bb:$target))]> { let Constraints = "$src = $dst"; } @@ -1106,7 +1112,7 @@ def SI_LOOP : InstSI < (outs), (ins SReg_64:$saved, brtarget:$target), "SI_LOOP $saved, $target", - [(int_SI_loop SReg_64:$saved, bb:$target)] + [(int_SI_loop i64:$saved, bb:$target)] >; } // end isBranch = 1, isTerminator = 1 @@ -1115,35 +1121,35 @@ def SI_BREAK : InstSI < (outs SReg_64:$dst), (ins SReg_64:$src), "SI_ELSE $dst, $src", - [(set SReg_64:$dst, (int_SI_break SReg_64:$src))] + [(set i64:$dst, (int_SI_break i64:$src))] >; def SI_IF_BREAK : InstSI < (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src), "SI_IF_BREAK $dst, $vcc, $src", - [(set SReg_64:$dst, (int_SI_if_break SReg_64:$vcc, SReg_64:$src))] + [(set i64:$dst, (int_SI_if_break i1:$vcc, i64:$src))] >; def SI_ELSE_BREAK : InstSI < (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1), "SI_ELSE_BREAK $dst, $src0, $src1", - [(set SReg_64:$dst, (int_SI_else_break SReg_64:$src0, SReg_64:$src1))] + [(set i64:$dst, (int_SI_else_break i64:$src0, i64:$src1))] >; def SI_END_CF : InstSI < (outs), (ins SReg_64:$saved), "SI_END_CF $saved", - [(int_SI_end_cf SReg_64:$saved)] + [(int_SI_end_cf i64:$saved)] >; def SI_KILL : InstSI < (outs), (ins VReg_32:$src), "SI_KIL $src", - [(int_AMDGPU_kill VReg_32:$src)] + [(int_AMDGPU_kill f32:$src)] >; } // end mayLoad = 1, mayStore = 1, hasSideEffects = 1 @@ -1177,8 +1183,8 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>; } // end IsCodeGenOnly, isPseudo def : Pat< - (int_AMDGPU_cndlt VReg_32:$src0, VReg_32:$src1, VReg_32:$src2), - (V_CNDMASK_B32_e64 VReg_32:$src2, VReg_32:$src1, (V_CMP_GT_F32_e64 0, VReg_32:$src0)) + (int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2), + (V_CNDMASK_B32_e64 $src2, $src1, (V_CMP_GT_F32_e64 0, $src0)) >; def : Pat < @@ -1188,99 +1194,80 @@ def : Pat < /* int_SI_vs_load_input */ def : Pat< - (int_SI_vs_load_input SReg_128:$tlst, IMM12bit:$attr_offset, - VReg_32:$buf_idx_vgpr), + (int_SI_vs_load_input v16i8:$tlst, IMM12bit:$attr_offset, + i32:$buf_idx_vgpr), (BUFFER_LOAD_FORMAT_XYZW imm:$attr_offset, 0, 1, 0, 0, 0, - VReg_32:$buf_idx_vgpr, SReg_128:$tlst, - 0, 0, 0) + $buf_idx_vgpr, $tlst, 0, 0, 0) >; /* int_SI_export */ def : Pat < (int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr, - VReg_32:$src0,VReg_32:$src1, VReg_32:$src2, VReg_32:$src3), + f32:$src0, f32:$src1, f32:$src2, f32:$src3), (EXP imm:$en, imm:$tgt, imm:$compr, imm:$done, imm:$vm, - VReg_32:$src0, VReg_32:$src1, VReg_32:$src2, VReg_32:$src3) + $src0, $src1, $src2, $src3) >; +/********** ======================= **********/ +/********** Image sampling patterns **********/ +/********** ======================= **********/ /* int_SI_sample for simple 1D texture lookup */ def : Pat < - (int_SI_sample imm:$writemask, (v1i32 VReg_32:$addr), - SReg_256:$rsrc, SReg_128:$sampler, imm), - (IMAGE_SAMPLE imm:$writemask, 0, 0, 0, 0, 0, 0, 0, - (i32 (COPY_TO_REGCLASS VReg_32:$addr, VReg_32)), - SReg_256:$rsrc, SReg_128:$sampler) + (int_SI_sample v1i32:$addr, v32i8:$rsrc, v16i8:$sampler, imm), + (IMAGE_SAMPLE 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) >; 
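The sampling patterns above now hard-code the first MIMG operand, the dmask, to 0xf rather than threading a writemask immediate through the intrinsic. A brief sketch of the assumed dmask semantics (one enable bit per channel of the v4f32 result); the helper name is hypothetical:

    // dmask bit c enables channel c of the returned vector (0 = x ... 3 = w),
    // so 0xf requests all four components.
    bool dmaskWritesChannel(unsigned DMask, unsigned Chan) {
      return (DMask >> Chan) & 1;
    }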
-class SamplePattern<Intrinsic name, MIMG opcode, RegisterClass addr_class, - ValueType addr_type> : Pat < - (name imm:$writemask, (addr_type addr_class:$addr), - SReg_256:$rsrc, SReg_128:$sampler, imm), - (opcode imm:$writemask, 0, 0, 0, 0, 0, 0, 0, - (EXTRACT_SUBREG addr_class:$addr, sub0), - SReg_256:$rsrc, SReg_128:$sampler) +class SamplePattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat < + (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, imm), + (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) >; -class SampleRectPattern<Intrinsic name, MIMG opcode, RegisterClass addr_class, - ValueType addr_type> : Pat < - (name imm:$writemask, (addr_type addr_class:$addr), - SReg_256:$rsrc, SReg_128:$sampler, TEX_RECT), - (opcode imm:$writemask, 1, 0, 0, 0, 0, 0, 0, - (EXTRACT_SUBREG addr_class:$addr, sub0), - SReg_256:$rsrc, SReg_128:$sampler) +class SampleRectPattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat < + (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_RECT), + (opcode 0xf, 1, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) >; -class SampleArrayPattern<Intrinsic name, MIMG opcode, RegisterClass addr_class, - ValueType addr_type> : Pat < - (name imm:$writemask, (addr_type addr_class:$addr), - SReg_256:$rsrc, SReg_128:$sampler, TEX_ARRAY), - (opcode imm:$writemask, 0, 0, 1, 0, 0, 0, 0, - (EXTRACT_SUBREG addr_class:$addr, sub0), - SReg_256:$rsrc, SReg_128:$sampler) +class SampleArrayPattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat < + (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_ARRAY), + (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler) >; class SampleShadowPattern<Intrinsic name, MIMG opcode, - RegisterClass addr_class, ValueType addr_type> : Pat < - (name imm:$writemask, (addr_type addr_class:$addr), - SReg_256:$rsrc, SReg_128:$sampler, TEX_SHADOW), - (opcode imm:$writemask, 0, 0, 0, 0, 0, 0, 0, - (EXTRACT_SUBREG addr_class:$addr, sub0), - SReg_256:$rsrc, SReg_128:$sampler) + ValueType vt> : Pat < + (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_SHADOW), + (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler) >; class SampleShadowArrayPattern<Intrinsic name, MIMG opcode, - RegisterClass addr_class, ValueType addr_type> : Pat < - (name imm:$writemask, (addr_type addr_class:$addr), - SReg_256:$rsrc, SReg_128:$sampler, TEX_SHADOW_ARRAY), - (opcode imm:$writemask, 0, 0, 1, 0, 0, 0, 0, - (EXTRACT_SUBREG addr_class:$addr, sub0), - SReg_256:$rsrc, SReg_128:$sampler) + ValueType vt> : Pat < + (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_SHADOW_ARRAY), + (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler) >; /* int_SI_sample* for texture lookups consuming more address parameters */ -multiclass SamplePatterns<RegisterClass addr_class, ValueType addr_type> { - def : SamplePattern <int_SI_sample, IMAGE_SAMPLE, addr_class, addr_type>; - def : SampleRectPattern <int_SI_sample, IMAGE_SAMPLE, addr_class, addr_type>; - def : SampleArrayPattern <int_SI_sample, IMAGE_SAMPLE, addr_class, addr_type>; - def : SampleShadowPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_class, addr_type>; - def : SampleShadowArrayPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_class, addr_type>; - - def : SamplePattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_class, addr_type>; - def : SampleArrayPattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_class, addr_type>; - def : SampleShadowPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_class, addr_type>; - def : SampleShadowArrayPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_class, addr_type>; - - def : SamplePattern <int_SI_sampleb, 
IMAGE_SAMPLE_B, addr_class, addr_type>; - def : SampleArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_class, addr_type>; - def : SampleShadowPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_class, addr_type>; - def : SampleShadowArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_class, addr_type>; +multiclass SamplePatterns<ValueType addr_type> { + def : SamplePattern <int_SI_sample, IMAGE_SAMPLE, addr_type>; + def : SampleRectPattern <int_SI_sample, IMAGE_SAMPLE, addr_type>; + def : SampleArrayPattern <int_SI_sample, IMAGE_SAMPLE, addr_type>; + def : SampleShadowPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_type>; + def : SampleShadowArrayPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_type>; + + def : SamplePattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_type>; + def : SampleArrayPattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_type>; + def : SampleShadowPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_type>; + def : SampleShadowArrayPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_type>; + + def : SamplePattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>; + def : SampleArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>; + def : SampleShadowPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>; + def : SampleShadowArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>; } -defm : SamplePatterns<VReg_64, v2i32>; -defm : SamplePatterns<VReg_128, v4i32>; -defm : SamplePatterns<VReg_256, v8i32>; -defm : SamplePatterns<VReg_512, v16i32>; +defm : SamplePatterns<v2i32>; +defm : SamplePatterns<v4i32>; +defm : SamplePatterns<v8i32>; +defm : SamplePatterns<v16i32>; /********** ============================================ **********/ /********** Extraction, Insertion, Building and Casting **********/ @@ -1288,77 +1275,77 @@ defm : SamplePatterns<VReg_512, v16i32>; foreach Index = 0-2 in { def Extract_Element_v2i32_#Index : Extract_Element < - i32, v2i32, VReg_64, Index, !cast<SubRegIndex>(sub#Index) + i32, v2i32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v2i32_#Index : Insert_Element < - i32, v2i32, VReg_32, VReg_64, Index, !cast<SubRegIndex>(sub#Index) + i32, v2i32, Index, !cast<SubRegIndex>(sub#Index) >; def Extract_Element_v2f32_#Index : Extract_Element < - f32, v2f32, VReg_64, Index, !cast<SubRegIndex>(sub#Index) + f32, v2f32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v2f32_#Index : Insert_Element < - f32, v2f32, VReg_32, VReg_64, Index, !cast<SubRegIndex>(sub#Index) + f32, v2f32, Index, !cast<SubRegIndex>(sub#Index) >; } foreach Index = 0-3 in { def Extract_Element_v4i32_#Index : Extract_Element < - i32, v4i32, VReg_128, Index, !cast<SubRegIndex>(sub#Index) + i32, v4i32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v4i32_#Index : Insert_Element < - i32, v4i32, VReg_32, VReg_128, Index, !cast<SubRegIndex>(sub#Index) + i32, v4i32, Index, !cast<SubRegIndex>(sub#Index) >; def Extract_Element_v4f32_#Index : Extract_Element < - f32, v4f32, VReg_128, Index, !cast<SubRegIndex>(sub#Index) + f32, v4f32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v4f32_#Index : Insert_Element < - f32, v4f32, VReg_32, VReg_128, Index, !cast<SubRegIndex>(sub#Index) + f32, v4f32, Index, !cast<SubRegIndex>(sub#Index) >; } foreach Index = 0-7 in { def Extract_Element_v8i32_#Index : Extract_Element < - i32, v8i32, VReg_256, Index, !cast<SubRegIndex>(sub#Index) + i32, v8i32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v8i32_#Index : Insert_Element < - i32, v8i32, VReg_32, VReg_256, Index, !cast<SubRegIndex>(sub#Index) + i32, v8i32, Index, 
!cast<SubRegIndex>(sub#Index) >; def Extract_Element_v8f32_#Index : Extract_Element < - f32, v8f32, VReg_256, Index, !cast<SubRegIndex>(sub#Index) + f32, v8f32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v8f32_#Index : Insert_Element < - f32, v8f32, VReg_32, VReg_256, Index, !cast<SubRegIndex>(sub#Index) + f32, v8f32, Index, !cast<SubRegIndex>(sub#Index) >; } foreach Index = 0-15 in { def Extract_Element_v16i32_#Index : Extract_Element < - i32, v16i32, VReg_512, Index, !cast<SubRegIndex>(sub#Index) + i32, v16i32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v16i32_#Index : Insert_Element < - i32, v16i32, VReg_32, VReg_512, Index, !cast<SubRegIndex>(sub#Index) + i32, v16i32, Index, !cast<SubRegIndex>(sub#Index) >; def Extract_Element_v16f32_#Index : Extract_Element < - f32, v16f32, VReg_512, Index, !cast<SubRegIndex>(sub#Index) + f32, v16f32, Index, !cast<SubRegIndex>(sub#Index) >; def Insert_Element_v16f32_#Index : Insert_Element < - f32, v16f32, VReg_32, VReg_512, Index, !cast<SubRegIndex>(sub#Index) + f32, v16f32, Index, !cast<SubRegIndex>(sub#Index) >; } -def : Vector1_Build <v1i32, VReg_32, i32, VReg_32>; -def : Vector2_Build <v2i32, VReg_64, i32, VReg_32>; -def : Vector2_Build <v2f32, VReg_64, f32, VReg_32>; -def : Vector4_Build <v4i32, VReg_128, i32, VReg_32>; -def : Vector4_Build <v4f32, VReg_128, f32, VReg_32>; -def : Vector8_Build <v8i32, VReg_256, i32, VReg_32>; -def : Vector8_Build <v8f32, VReg_256, f32, VReg_32>; -def : Vector16_Build <v16i32, VReg_512, i32, VReg_32>; -def : Vector16_Build <v16f32, VReg_512, f32, VReg_32>; +def : Vector1_Build <v1i32, i32, VReg_32>; +def : Vector2_Build <v2i32, i32>; +def : Vector2_Build <v2f32, f32>; +def : Vector4_Build <v4i32, i32>; +def : Vector4_Build <v4f32, f32>; +def : Vector8_Build <v8i32, i32>; +def : Vector8_Build <v8f32, f32>; +def : Vector16_Build <v16i32, i32>; +def : Vector16_Build <v16f32, f32>; def : BitConvert <i32, f32, SReg_32>; def : BitConvert <i32, f32, VReg_32>; @@ -1371,20 +1358,20 @@ def : BitConvert <f32, i32, VReg_32>; /********** =================== **********/ def : Pat < - (int_AMDIL_clamp VReg_32:$src, (f32 FP_ZERO), (f32 FP_ONE)), - (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */), + (int_AMDIL_clamp f32:$src, (f32 FP_ZERO), (f32 FP_ONE)), + (V_ADD_F32_e64 $src, (i32 0 /* SRC1 */), 0 /* ABS */, 1 /* CLAMP */, 0 /* OMOD */, 0 /* NEG */) >; def : Pat < - (fabs VReg_32:$src), - (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */), + (fabs f32:$src), + (V_ADD_F32_e64 $src, (i32 0 /* SRC1 */), 1 /* ABS */, 0 /* CLAMP */, 0 /* OMOD */, 0 /* NEG */) >; def : Pat < - (fneg VReg_32:$src), - (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */), + (fneg f32:$src), + (V_ADD_F32_e64 $src, (i32 0 /* SRC1 */), 0 /* ABS */, 0 /* CLAMP */, 0 /* OMOD */, 1 /* NEG */) >; @@ -1425,16 +1412,16 @@ def : Pat < /********** ===================== **********/ def : Pat < - (int_SI_fs_constant imm:$attr_chan, imm:$attr, M0Reg:$params), - (V_INTERP_MOV_F32 INTERP.P0, imm:$attr_chan, imm:$attr, M0Reg:$params) + (int_SI_fs_constant imm:$attr_chan, imm:$attr, i32:$params), + (V_INTERP_MOV_F32 INTERP.P0, imm:$attr_chan, imm:$attr, $params) >; def : Pat < - (int_SI_fs_interp imm:$attr_chan, imm:$attr, M0Reg:$params, VReg_64:$ij), - (V_INTERP_P2_F32 (V_INTERP_P1_F32 (EXTRACT_SUBREG VReg_64:$ij, sub0), - imm:$attr_chan, imm:$attr, M0Reg:$params), - (EXTRACT_SUBREG VReg_64:$ij, sub1), - imm:$attr_chan, imm:$attr, M0Reg:$params) + (int_SI_fs_interp imm:$attr_chan, imm:$attr, M0Reg:$params, v2i32:$ij), + (V_INTERP_P2_F32 
(V_INTERP_P1_F32 (EXTRACT_SUBREG v2i32:$ij, sub0), + imm:$attr_chan, imm:$attr, i32:$params), + (EXTRACT_SUBREG $ij, sub1), + imm:$attr_chan, imm:$attr, $params) >; /********** ================== **********/ @@ -1442,102 +1429,111 @@ def : Pat < /********** ================== **********/ /* llvm.AMDGPU.pow */ -/* XXX: We are using IEEE MUL, not the 0 * anything = 0 MUL, is this correct? */ -def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_F32_e32, VReg_32>; +def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>; def : Pat < - (int_AMDGPU_div VSrc_32:$src0, VSrc_32:$src1), - (V_MUL_LEGACY_F32_e32 VSrc_32:$src0, (V_RCP_LEGACY_F32_e32 VSrc_32:$src1)) + (int_AMDGPU_div f32:$src0, f32:$src1), + (V_MUL_LEGACY_F32_e32 $src0, (V_RCP_LEGACY_F32_e32 $src1)) >; def : Pat< - (fdiv VSrc_32:$src0, VSrc_32:$src1), - (V_MUL_F32_e32 VSrc_32:$src0, (V_RCP_F32_e32 VSrc_32:$src1)) + (fdiv f32:$src0, f32:$src1), + (V_MUL_F32_e32 $src0, (V_RCP_F32_e32 $src1)) >; def : Pat < - (fcos VSrc_32:$src0), - (V_COS_F32_e32 (V_MUL_F32_e32 VSrc_32:$src0, (V_MOV_B32_e32 CONST.TWO_PI_INV))) + (fcos f32:$src0), + (V_COS_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV))) >; def : Pat < - (fsin VSrc_32:$src0), - (V_SIN_F32_e32 (V_MUL_F32_e32 VSrc_32:$src0, (V_MOV_B32_e32 CONST.TWO_PI_INV))) + (fsin f32:$src0), + (V_SIN_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV))) >; def : Pat < - (int_AMDGPU_cube VReg_128:$src), + (int_AMDGPU_cube v4f32:$src), (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), - (V_CUBETC_F32 (EXTRACT_SUBREG VReg_128:$src, sub0), - (EXTRACT_SUBREG VReg_128:$src, sub1), - (EXTRACT_SUBREG VReg_128:$src, sub2), - 0, 0, 0, 0), sub0), - (V_CUBESC_F32 (EXTRACT_SUBREG VReg_128:$src, sub0), - (EXTRACT_SUBREG VReg_128:$src, sub1), - (EXTRACT_SUBREG VReg_128:$src, sub2), - 0, 0, 0, 0), sub1), - (V_CUBEMA_F32 (EXTRACT_SUBREG VReg_128:$src, sub0), - (EXTRACT_SUBREG VReg_128:$src, sub1), - (EXTRACT_SUBREG VReg_128:$src, sub2), - 0, 0, 0, 0), sub2), - (V_CUBEID_F32 (EXTRACT_SUBREG VReg_128:$src, sub0), - (EXTRACT_SUBREG VReg_128:$src, sub1), - (EXTRACT_SUBREG VReg_128:$src, sub2), - 0, 0, 0, 0), sub3) + (V_CUBETC_F32 (EXTRACT_SUBREG $src, sub0), + (EXTRACT_SUBREG $src, sub1), + (EXTRACT_SUBREG $src, sub2)), + sub0), + (V_CUBESC_F32 (EXTRACT_SUBREG $src, sub0), + (EXTRACT_SUBREG $src, sub1), + (EXTRACT_SUBREG $src, sub2)), + sub1), + (V_CUBEMA_F32 (EXTRACT_SUBREG $src, sub0), + (EXTRACT_SUBREG $src, sub1), + (EXTRACT_SUBREG $src, sub2)), + sub2), + (V_CUBEID_F32 (EXTRACT_SUBREG $src, sub0), + (EXTRACT_SUBREG $src, sub1), + (EXTRACT_SUBREG $src, sub2)), + sub3) >; def : Pat < - (i32 (sext (i1 SReg_64:$src0))), - (V_CNDMASK_B32_e64 (i32 0), (i32 -1), SReg_64:$src0) + (i32 (sext i1:$src0)), + (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0) >; // 1. Offset as 8bit DWORD immediate def : Pat < - (int_SI_load_const SReg_128:$sbase, IMM8bitDWORD:$offset), - (S_BUFFER_LOAD_DWORD_IMM SReg_128:$sbase, IMM8bitDWORD:$offset) + (int_SI_load_const v16i8:$sbase, IMM8bitDWORD:$offset), + (S_BUFFER_LOAD_DWORD_IMM $sbase, IMM8bitDWORD:$offset) >; // 2. Offset loaded in an 32bit SGPR def : Pat < - (int_SI_load_const SReg_128:$sbase, imm:$offset), - (S_BUFFER_LOAD_DWORD_SGPR SReg_128:$sbase, (S_MOV_B32 imm:$offset)) + (int_SI_load_const v16i8:$sbase, imm:$offset), + (S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset)) >; // 3. 
Offset in an 32Bit VGPR def : Pat < - (int_SI_load_const SReg_128:$sbase, VReg_32:$voff), - (BUFFER_LOAD_DWORD 0, 1, 0, 0, 0, 0, VReg_32:$voff, SReg_128:$sbase, 0, 0, 0) + (int_SI_load_const v16i8:$sbase, i32:$voff), + (BUFFER_LOAD_DWORD 0, 1, 0, 0, 0, 0, $voff, $sbase, 0, 0, 0) +>; + +// The multiplication scales from [0,1] to the unsigned integer range +def : Pat < + (AMDGPUurecip i32:$src0), + (V_CVT_U32_F32_e32 + (V_MUL_F32_e32 CONST.FP_UINT_MAX_PLUS_1, + (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0)))) >; /********** ================== **********/ /********** VOP3 Patterns **********/ /********** ================== **********/ -def : Pat <(f32 (fadd (fmul VSrc_32:$src0, VSrc_32:$src1), VSrc_32:$src2)), - (V_MAD_F32 VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2, - 0, 0, 0, 0)>; +def : Pat < + (f32 (fadd (fmul f32:$src0, f32:$src1), f32:$src2)), + (V_MAD_F32 $src0, $src1, $src2) +>; /********** ================== **********/ /********** SMRD Patterns **********/ /********** ================== **********/ multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> { + // 1. Offset as 8bit DWORD immediate def : Pat < - (constant_load (SIadd64bit32bit SReg_64:$sbase, IMM8bitDWORD:$offset)), - (vt (Instr_IMM SReg_64:$sbase, IMM8bitDWORD:$offset)) + (constant_load (SIadd64bit32bit i64:$sbase, IMM8bitDWORD:$offset)), + (vt (Instr_IMM $sbase, IMM8bitDWORD:$offset)) >; // 2. Offset loaded in an 32bit SGPR def : Pat < - (constant_load (SIadd64bit32bit SReg_64:$sbase, imm:$offset)), - (vt (Instr_SGPR SReg_64:$sbase, (S_MOV_B32 imm:$offset))) + (constant_load (SIadd64bit32bit i64:$sbase, imm:$offset)), + (vt (Instr_SGPR $sbase, (S_MOV_B32 imm:$offset))) >; // 3. No offset at all def : Pat < - (constant_load SReg_64:$sbase), - (vt (Instr_IMM SReg_64:$sbase, 0)) + (constant_load i64:$sbase), + (vt (Instr_IMM $sbase, 0)) >; } @@ -1550,44 +1546,50 @@ defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>; /********** Indirect adressing **********/ /********** ====================== **********/ -multiclass SI_INDIRECT_Pattern <RegisterClass rc, ValueType vt, - SI_INDIRECT_DST IndDst> { +multiclass SI_INDIRECT_Pattern <ValueType vt, SI_INDIRECT_DST IndDst> { + // 1. Extract with offset def : Pat< - (vector_extract (vt rc:$vec), - (i64 (zext (i32 (add VReg_32:$idx, imm:$off)))) - ), - (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), rc:$vec, VReg_32:$idx, imm:$off)) + (vector_extract vt:$vec, (i64 (zext (add i32:$idx, imm:$off)))), + (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, imm:$off)) >; // 2. Extract without offset def : Pat< - (vector_extract (vt rc:$vec), - (i64 (zext (i32 VReg_32:$idx))) - ), - (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), rc:$vec, VReg_32:$idx, 0)) + (vector_extract vt:$vec, (i64 (zext i32:$idx))), + (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, 0)) >; // 3. Insert with offset def : Pat< - (vector_insert (vt rc:$vec), (f32 VReg_32:$val), - (i64 (zext (i32 (add VReg_32:$idx, imm:$off)))) - ), - (vt (IndDst (IMPLICIT_DEF), rc:$vec, VReg_32:$idx, imm:$off, VReg_32:$val)) + (vector_insert vt:$vec, f32:$val, (i64 (zext (add i32:$idx, imm:$off)))), + (IndDst (IMPLICIT_DEF), $vec, $idx, imm:$off, $val) >; // 4. 
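The AMDGPUurecip pattern a little further up computes an unsigned reciprocal entirely in the float pipeline, which is what the "scales from [0,1] to the unsigned integer range" comment refers to. A rough scalar model, assuming CONST.FP_UINT_MAX_PLUS_1 encodes 2^32 as a float (0x4f800000) and that V_CVT_U32_F32 clamps out-of-range values:

    #include <cstdint>
    uint32_t urecip(uint32_t x) {
      float f = (float)x;            // V_CVT_F32_U32
      float r = 1.0f / f;            // V_RCP_IFLAG_F32, approximate reciprocal in (0,1]
      float s = 4294967296.0f * r;   // V_MUL_F32 by FP_UINT_MAX_PLUS_1
      if (s >= 4294967296.0f)        // mirror the hardware clamp to keep the cast defined
        return 0xFFFFFFFFu;
      return (uint32_t)s;            // V_CVT_U32_F32
    }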
Insert without offset def : Pat< - (vector_insert (vt rc:$vec), (f32 VReg_32:$val), - (i64 (zext (i32 VReg_32:$idx))) - ), - (vt (IndDst (IMPLICIT_DEF), rc:$vec, VReg_32:$idx, 0, VReg_32:$val)) + (vector_insert vt:$vec, f32:$val, (i64 (zext i32:$idx))), + (IndDst (IMPLICIT_DEF), $vec, $idx, 0, $val) >; } -defm : SI_INDIRECT_Pattern <VReg_64, v2f32, SI_INDIRECT_DST_V2>; -defm : SI_INDIRECT_Pattern <VReg_128, v4f32, SI_INDIRECT_DST_V4>; -defm : SI_INDIRECT_Pattern <VReg_256, v8f32, SI_INDIRECT_DST_V8>; -defm : SI_INDIRECT_Pattern <VReg_512, v16f32, SI_INDIRECT_DST_V16>; +defm : SI_INDIRECT_Pattern <v2f32, SI_INDIRECT_DST_V2>; +defm : SI_INDIRECT_Pattern <v4f32, SI_INDIRECT_DST_V4>; +defm : SI_INDIRECT_Pattern <v8f32, SI_INDIRECT_DST_V8>; +defm : SI_INDIRECT_Pattern <v16f32, SI_INDIRECT_DST_V16>; + +/********** =============== **********/ +/********** Conditions **********/ +/********** =============== **********/ + +def : Pat< + (i1 (setcc f32:$src0, f32:$src1, SETO)), + (V_CMP_O_F32_e64 $src0, $src1) +>; + +def : Pat< + (i1 (setcc f32:$src0, f32:$src1, SETUO)), + (V_CMP_U_F32_e64 $src0, $src1) +>; } // End isSI predicate diff --git a/lib/Target/R600/SIIntrinsics.td b/lib/Target/R600/SIIntrinsics.td index 33bb815..16d9d81 100644 --- a/lib/Target/R600/SIIntrinsics.td +++ b/lib/Target/R600/SIIntrinsics.td @@ -16,11 +16,10 @@ let TargetPrefix = "SI", isTarget = 1 in { def int_SI_packf16 : Intrinsic <[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; def int_SI_export : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], []>; - def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrReadMem]>; - def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_v16i8_ty, llvm_i16_ty, llvm_i32_ty], [IntrReadMem]> ; - def int_SI_wqm : Intrinsic <[], [], []>; + def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>; + def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_v16i8_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]> ; - class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_i32_ty, llvm_anyvector_ty, llvm_v32i8_ty, llvm_v16i8_ty, llvm_i32_ty], [IntrReadMem]>; + class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>; def int_SI_sample : Sample; def int_SI_sampleb : Sample; @@ -28,8 +27,8 @@ let TargetPrefix = "SI", isTarget = 1 in { /* Interpolation Intrinsics */ - def int_SI_fs_constant : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>; - def int_SI_fs_interp : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_v2i32_ty], [IntrReadMem]>; + def int_SI_fs_constant : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + def int_SI_fs_interp : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_v2i32_ty], [IntrNoMem]>; /* Control flow Intrinsics */ diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp index 9a027e7..2b60eb9 100644 --- a/lib/Target/R600/SILowerControlFlow.cpp +++ b/lib/Target/R600/SILowerControlFlow.cpp @@ -197,7 +197,8 @@ void SILowerControlFlowPass::Else(MachineInstr &MI) { unsigned Dst = MI.getOperand(0).getReg(); unsigned Src = MI.getOperand(1).getReg(); - BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst) + BuildMI(MBB, MBB.getFirstNonPHI(), DL, + TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst) 
.addReg(Src); // Saved EXEC BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) @@ -409,6 +410,7 @@ void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) { bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) { bool HaveKill = false; + bool NeedWQM = false; unsigned Depth = 0; for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); @@ -478,9 +480,22 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) { case AMDGPU::SI_INDIRECT_DST_V16: IndirectDst(MI); break; + + case AMDGPU::V_INTERP_P1_F32: + case AMDGPU::V_INTERP_P2_F32: + case AMDGPU::V_INTERP_MOV_F32: + NeedWQM = true; + break; + } } } + if (NeedWQM) { + MachineBasicBlock &MBB = MF.front(); + BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64), + AMDGPU::EXEC).addReg(AMDGPU::EXEC); + } + return true; } diff --git a/lib/Target/R600/SIMachineFunctionInfo.cpp b/lib/Target/R600/SIMachineFunctionInfo.cpp index 1a4e4cb..ee0e307 100644 --- a/lib/Target/R600/SIMachineFunctionInfo.cpp +++ b/lib/Target/R600/SIMachineFunctionInfo.cpp @@ -10,25 +10,9 @@ #include "SIMachineFunctionInfo.h" -#include "llvm/IR/Attributes.h" -#include "llvm/IR/Function.h" using namespace llvm; -const char *SIMachineFunctionInfo::ShaderTypeAttribute = "ShaderType"; - SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF) - : MachineFunctionInfo(), - ShaderType(0), - PSInputAddr(0) { - - AttributeSet Set = MF.getFunction()->getAttributes(); - Attribute A = Set.getAttribute(AttributeSet::FunctionIndex, - ShaderTypeAttribute); - - if (A.isStringAttribute()) { - StringRef Str = A.getValueAsString(); - if (Str.getAsInteger(0, ShaderType)) - llvm_unreachable("Can't parse shader type!"); - } -} + : AMDGPUMachineFunction(MF), + PSInputAddr(0) { } diff --git a/lib/Target/R600/SIMachineFunctionInfo.h b/lib/Target/R600/SIMachineFunctionInfo.h index 91a809b..6da9f7f 100644 --- a/lib/Target/R600/SIMachineFunctionInfo.h +++ b/lib/Target/R600/SIMachineFunctionInfo.h @@ -15,18 +15,15 @@ #ifndef SIMACHINEFUNCTIONINFO_H_ #define SIMACHINEFUNCTIONINFO_H_ -#include "llvm/CodeGen/MachineFunction.h" +#include "AMDGPUMachineFunction.h" namespace llvm { /// This class keeps track of the SPI_SP_INPUT_ADDR config register, which /// tells the hardware which interpolation parameters to load. 
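The NeedWQM path above switches EXEC to whole quad mode at the top of the function as soon as any V_INTERP_* instruction is seen. A sketch of what S_WQM_B64 is assumed to compute (standard GCN behaviour, not restated in the patch): every group of four lane bits is filled in whenever any bit of the group is set, so a 2x2 pixel quad runs as a whole if any of its pixels is live.

    #include <cstdint>
    uint64_t wholeQuadMode(uint64_t Exec) {   // models S_WQM_B64
      uint64_t Result = 0;
      for (unsigned Quad = 0; Quad < 64; Quad += 4)
        if ((Exec >> Quad) & 0xF)             // any live lane in this quad?
          Result |= 0xFull << Quad;           // then enable the whole quad
      return Result;
    }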
-class SIMachineFunctionInfo : public MachineFunctionInfo { +class SIMachineFunctionInfo : public AMDGPUMachineFunction { public: - static const char *ShaderTypeAttribute; - SIMachineFunctionInfo(const MachineFunction &MF); - unsigned ShaderType; unsigned PSInputAddr; }; diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp index 88275c5..99278ae 100644 --- a/lib/Target/R600/SIRegisterInfo.cpp +++ b/lib/Target/R600/SIRegisterInfo.cpp @@ -30,6 +30,11 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const { return Reserved; } +unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, + MachineFunction &MF) const { + return RC->getNumRegs(); +} + const TargetRegisterClass * SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const { switch (rc->getID()) { diff --git a/lib/Target/R600/SIRegisterInfo.h b/lib/Target/R600/SIRegisterInfo.h index 40171e4..caec228 100644 --- a/lib/Target/R600/SIRegisterInfo.h +++ b/lib/Target/R600/SIRegisterInfo.h @@ -31,6 +31,9 @@ struct SIRegisterInfo : public AMDGPURegisterInfo { virtual BitVector getReservedRegs(const MachineFunction &MF) const; + virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC, + MachineFunction &MF) const; + /// \param RC is an AMDIL reg class. /// /// \returns the SI register class that is equivalent to \p RC. diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td index 4f14931..244d4c0 100644 --- a/lib/Target/R600/SIRegisterInfo.td +++ b/lib/Target/R600/SIRegisterInfo.td @@ -94,6 +94,12 @@ def VGPR_64 : RegisterTuples<[sub0, sub1], [(add (trunc VGPR_32, 255)), (add (shl VGPR_32, 1))]>; +// VGPR 96-bit registers +def VGPR_96 : RegisterTuples<[sub0, sub1, sub2], + [(add (trunc VGPR_32, 254)), + (add (shl VGPR_32, 1)), + (add (shl VGPR_32, 2))]>; + // VGPR 128-bit registers def VGPR_128 : RegisterTuples<[sub0, sub1, sub2, sub3], [(add (trunc VGPR_32, 253)), @@ -151,7 +157,7 @@ def SReg_64 : RegisterClass<"AMDGPU", [i64, i1], 64, (add SGPR_64, VCCReg, EXECReg) >; -def SReg_128 : RegisterClass<"AMDGPU", [v16i8], 128, (add SGPR_128)>; +def SReg_128 : RegisterClass<"AMDGPU", [v16i8, i128], 128, (add SGPR_128)>; def SReg_256 : RegisterClass<"AMDGPU", [v32i8], 256, (add SGPR_256)>; @@ -162,6 +168,10 @@ def VReg_32 : RegisterClass<"AMDGPU", [i32, f32, v1i32], 32, (add VGPR_32)>; def VReg_64 : RegisterClass<"AMDGPU", [i64, f64, v2i32, v2f32], 64, (add VGPR_64)>; +def VReg_96 : RegisterClass<"AMDGPU", [untyped], 96, (add VGPR_96)> { + let Size = 96; +} + def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32], 128, (add VGPR_128)>; def VReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32], 256, (add VGPR_256)>; diff --git a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h new file mode 100644 index 0000000..aac0e8d --- /dev/null +++ b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h @@ -0,0 +1,62 @@ +//===-- SparcBaseInfo.h - Top level definitions for Sparc ---- --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains small standalone helper functions and enum definitions +// for the Sparc target useful for the compiler back-end and the MC libraries. +// As such, it deliberately does not include references to LLVM core code gen +// types, passes, etc.. 
+// +//===----------------------------------------------------------------------===// + +#ifndef SPARCBASEINFO_H +#define SPARCBASEINFO_H + +namespace llvm { + +/// SPII - This namespace holds target specific flags for instruction info. +namespace SPII { + +/// Target Operand Flags. Sparc specific TargetFlags for MachineOperands and +/// SDNodes. +enum TOF { + MO_NO_FLAG, + + // Extract the low 10 bits of an address. + // Assembler: %lo(addr) + MO_LO, + + // Extract bits 31-10 of an address. Only for sethi. + // Assembler: %hi(addr) or %lm(addr) + MO_HI, + + // Extract bits 43-22 of an adress. Only for sethi. + // Assembler: %h44(addr) + MO_H44, + + // Extract bits 21-12 of an address. + // Assembler: %m44(addr) + MO_M44, + + // Extract bits 11-0 of an address. + // Assembler: %l44(addr) + MO_L44, + + // Extract bits 63-42 of an address. Only for sethi. + // Assembler: %hh(addr) + MO_HH, + + // Extract bits 41-32 of an address. + // Assembler: %hm(addr) + MO_HM +}; + +} // end namespace SPII +} // end namespace llvm + +#endif diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp index 7fdb0c3..1c64e1b 100644 --- a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp +++ b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp @@ -50,14 +50,42 @@ static MCSubtargetInfo *createSparcMCSubtargetInfo(StringRef TT, StringRef CPU, return X; } +// Code models. Some only make sense for 64-bit code. +// +// SunCC Reloc CodeModel Constraints +// abs32 Static Small text+data+bss linked below 2^32 bytes +// abs44 Static Medium text+data+bss linked below 2^44 bytes +// abs64 Static Large text smaller than 2^31 bytes +// pic13 PIC_ Small GOT < 2^13 bytes +// pic32 PIC_ Medium GOT < 2^32 bytes +// +// All code models require that the text segment is smaller than 2GB. + static MCCodeGenInfo *createSparcMCCodeGenInfo(StringRef TT, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) { MCCodeGenInfo *X = new MCCodeGenInfo(); + + // The default 32-bit code model is abs32/pic32. + if (CM == CodeModel::Default) + CM = RM == Reloc::PIC_ ? CodeModel::Medium : CodeModel::Small; + X->InitMCCodeGenInfo(RM, CM, OL); return X; } +static MCCodeGenInfo *createSparcV9MCCodeGenInfo(StringRef TT, Reloc::Model RM, + CodeModel::Model CM, + CodeGenOpt::Level OL) { + MCCodeGenInfo *X = new MCCodeGenInfo(); + + // The default 64-bit code model is abs44/pic32. + if (CM == CodeModel::Default) + CM = CodeModel::Medium; + + X->InitMCCodeGenInfo(RM, CM, OL); + return X; +} extern "C" void LLVMInitializeSparcTargetMC() { // Register the MC asm info. RegisterMCAsmInfo<SparcELFMCAsmInfo> X(TheSparcTarget); @@ -67,7 +95,7 @@ extern "C" void LLVMInitializeSparcTargetMC() { TargetRegistry::RegisterMCCodeGenInfo(TheSparcTarget, createSparcMCCodeGenInfo); TargetRegistry::RegisterMCCodeGenInfo(TheSparcV9Target, - createSparcMCCodeGenInfo); + createSparcV9MCCodeGenInfo); // Register the MC instruction info. 
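The MO_* flags defined in SparcBaseInfo.h above are what SparcAsmPrinter (next hunk) turns into the %lo(/%hi(/%h44(/... operators, and the code-model table determines which of them the compiler needs. Writing the documented bit ranges out as plain arithmetic (illustration only; an abs32 address is %hi << 10 | %lo, an abs44 address is %h44 << 22 | %m44 << 12 | %l44):

    #include <cstdint>
    uint32_t lo (uint32_t A) { return A & 0x3ff; }            // MO_LO : bits 9-0
    uint32_t hi (uint32_t A) { return A >> 10; }              // MO_HI : bits 31-10, for sethi
    uint64_t l44(uint64_t A) { return A & 0xfff; }            // MO_L44: bits 11-0
    uint64_t m44(uint64_t A) { return (A >> 12) & 0x3ff; }    // MO_M44: bits 21-12
    uint64_t h44(uint64_t A) { return (A >> 22) & 0x3fffff; } // MO_H44: bits 43-22
    uint64_t hm (uint64_t A) { return (A >> 32) & 0x3ff; }    // MO_HM : bits 41-32
    uint64_t hh (uint64_t A) { return A >> 42; }              // MO_HH : bits 63-42, for sethi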
TargetRegistry::RegisterMCInstrInfo(TheSparcTarget, createSparcMCInstrInfo); diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp index e14b3cb..108eb90 100644 --- a/lib/Target/Sparc/SparcAsmPrinter.cpp +++ b/lib/Target/Sparc/SparcAsmPrinter.cpp @@ -16,6 +16,7 @@ #include "Sparc.h" #include "SparcInstrInfo.h" #include "SparcTargetMachine.h" +#include "MCTargetDesc/SparcBaseInfo.h" #include "llvm/ADT/SmallString.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineInstr.h" @@ -72,15 +73,39 @@ namespace { void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum, raw_ostream &O) { const MachineOperand &MO = MI->getOperand (opNum); - bool CloseParen = false; - if (MI->getOpcode() == SP::SETHIi && !MO.isReg() && !MO.isImm()) { - O << "%hi("; - CloseParen = true; - } else if ((MI->getOpcode() == SP::ORri || MI->getOpcode() == SP::ADDri) && - !MO.isReg() && !MO.isImm()) { - O << "%lo("; - CloseParen = true; + unsigned TF = MO.getTargetFlags(); +#ifndef NDEBUG + // Verify the target flags. + if (MO.isGlobal() || MO.isSymbol() || MO.isCPI()) { + if (MI->getOpcode() == SP::CALL) + assert(TF == SPII::MO_NO_FLAG && + "Cannot handle target flags on call address"); + else if (MI->getOpcode() == SP::SETHIi) + assert((TF == SPII::MO_HI || TF == SPII::MO_H44 || TF == SPII::MO_HH) && + "Invalid target flags for address operand on sethi"); + else + assert((TF == SPII::MO_LO || TF == SPII::MO_M44 || TF == SPII::MO_L44 || + TF == SPII::MO_HM) && + "Invalid target flags for small address operand"); } +#endif + + bool CloseParen = true; + switch (TF) { + default: + llvm_unreachable("Unknown target flags on operand"); + case SPII::MO_NO_FLAG: + CloseParen = false; + break; + case SPII::MO_LO: O << "%lo("; break; + case SPII::MO_HI: O << "%hi("; break; + case SPII::MO_H44: O << "%h44("; break; + case SPII::MO_M44: O << "%m44("; break; + case SPII::MO_L44: O << "%l44("; break; + case SPII::MO_HH: O << "%hh("; break; + case SPII::MO_HM: O << "%hm("; break; + } + switch (MO.getType()) { case MachineOperand::MO_Register: O << "%" << StringRef(getRegisterName(MO.getReg())).lower(); @@ -127,14 +152,7 @@ void SparcAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum, return; // don't print "+0" O << "+"; - if (MI->getOperand(opNum+1).isGlobal() || - MI->getOperand(opNum+1).isCPI()) { - O << "%lo("; - printOperand(MI, opNum+1, O); - O << ")"; - } else { - printOperand(MI, opNum+1, O); - } + printOperand(MI, opNum+1, O); } bool SparcAsmPrinter::printGetPCX(const MachineInstr *MI, unsigned opNum, diff --git a/lib/Target/Sparc/SparcCallingConv.td b/lib/Target/Sparc/SparcCallingConv.td index d471220..54784e0 100644 --- a/lib/Target/Sparc/SparcCallingConv.td +++ b/lib/Target/Sparc/SparcCallingConv.td @@ -12,17 +12,9 @@ //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// -// Return Value Calling Conventions +// SPARC v8 32-bit. //===----------------------------------------------------------------------===// -// Sparc 32-bit C return-value convention. -def RetCC_Sparc32 : CallingConv<[ - CCIfType<[i32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>, - CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3]>>, - CCIfType<[f64], CCAssignToReg<[D0, D1]>> -]>; - -// Sparc 32-bit C Calling convention. def CC_Sparc32 : CallingConv<[ //Custom assign SRet to [sp+64]. 
CCIfSRet<CCCustom<"CC_Sparc_Assign_SRet">>, @@ -34,3 +26,94 @@ def CC_Sparc32 : CallingConv<[ // Alternatively, they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> ]>; + +def RetCC_Sparc32 : CallingConv<[ + CCIfType<[i32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>, + CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3]>>, + CCIfType<[f64], CCAssignToReg<[D0, D1]>> +]>; + + +//===----------------------------------------------------------------------===// +// SPARC v9 64-bit. +//===----------------------------------------------------------------------===// +// +// The 64-bit ABI conceptually assigns all function arguments to a parameter +// array starting at [%fp+BIAS+128] in the callee's stack frame. All arguments +// occupy a multiple of 8 bytes in the array. Integer arguments are extended to +// 64 bits by the caller. Floats are right-aligned in their 8-byte slot, the +// first 4 bytes in the slot are undefined. +// +// The integer registers %i0 to %i5 shadow the first 48 bytes of the parameter +// array at fixed offsets. Integer arguments are promoted to registers when +// possible. +// +// The floating point registers %f0 to %f31 shadow the first 128 bytes of the +// parameter array at fixed offsets. Float and double parameters are promoted +// to these registers when possible. +// +// Structs up to 16 bytes in size are passed by value. They are right-aligned +// in one or two 8-byte slots in the parameter array. Struct members are +// promoted to both floating point and integer registers when possible. A +// struct containing two floats would thus be passed in %f0 and %f1, while two +// float function arguments would occupy 8 bytes each, and be passed in %f1 and +// %f3. +// +// When a struct { int, float } is passed by value, the int goes in the high +// bits of an integer register while the float goes in a floating point +// register. +// +// The difference is encoded in LLVM IR using the inreg atttribute on function +// arguments: +// +// C: void f(float, float); +// IR: declare void f(float %f1, float %f3) +// +// C: void f(struct { float f0, f1; }); +// IR: declare void f(float inreg %f0, float inreg %f1) +// +// C: void f(int, float); +// IR: declare void f(int signext %i0, float %f3) +// +// C: void f(struct { int i0high; float f1; }); +// IR: declare void f(i32 inreg %i0high, float inreg %f1) +// +// Two ints in a struct are simply coerced to i64: +// +// C: void f(struct { int i0high, i0low; }); +// IR: declare void f(i64 %i0.coerced) +// +// The frontend and backend divide the task of producing ABI compliant code for +// C functions. The C frontend will: +// +// - Annotate integer arguments with zeroext or signext attributes. +// +// - Split structs into one or two 64-bit sized chunks, or 32-bit chunks with +// inreg attributes. +// +// - Pass structs larger than 16 bytes indirectly with an explicit pointer +// argument. The byval attribute is not used. +// +// The backend will: +// +// - Assign all arguments to 64-bit aligned stack slots, 32-bits for inreg. +// +// - Promote to integer or floating point registers depending on type. +// +// Function return values are passed exactly like function arguments, except a +// struct up to 32 bytes in size can be returned in registers. + +// Function arguments AND return values. +def CC_Sparc64 : CallingConv<[ + // The frontend uses the inreg flag to indicate i32 and float arguments from + // structs. These arguments are not promoted to 64 bits, but they can still + // be assigned to integer and float registers. 
+ CCIfInReg<CCIfType<[i32, f32], CCCustom<"CC_Sparc64_Half">>>, + + // All integers are promoted to i64 by the caller. + CCIfType<[i32], CCPromoteToType<i64>>, + + // Custom assignment is required because stack space is reserved for all + // arguments whether they are passed in registers or not. + CCCustom<"CC_Sparc64_Full"> +]>; diff --git a/lib/Target/Sparc/SparcFrameLowering.cpp b/lib/Target/Sparc/SparcFrameLowering.cpp index a0dae6e..7874240 100644 --- a/lib/Target/Sparc/SparcFrameLowering.cpp +++ b/lib/Target/Sparc/SparcFrameLowering.cpp @@ -37,18 +37,27 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF) const { // Get the number of bytes to allocate from the FrameInfo int NumBytes = (int) MFI->getStackSize(); - // Emit the correct save instruction based on the number of bytes in - // the frame. Minimum stack frame size according to V8 ABI is: - // 16 words for register window spill - // 1 word for address of returned aggregate-value - // + 6 words for passing parameters on the stack - // ---------- - // 23 words * 4 bytes per word = 92 bytes - NumBytes += 92; + if (SubTarget.is64Bit()) { + // All 64-bit stack frames must be 16-byte aligned, and must reserve space + // for spilling the 16 window registers at %sp+BIAS..%sp+BIAS+128. + NumBytes += 128; + // Frames with calls must also reserve space for 6 outgoing arguments + // whether they are used or not. LowerCall_64 takes care of that. + assert(NumBytes % 16 == 0 && "Stack size not 16-byte aligned"); + } else { + // Emit the correct save instruction based on the number of bytes in + // the frame. Minimum stack frame size according to V8 ABI is: + // 16 words for register window spill + // 1 word for address of returned aggregate-value + // + 6 words for passing parameters on the stack + // ---------- + // 23 words * 4 bytes per word = 92 bytes + NumBytes += 92; - // Round up to next doubleword boundary -- a double-word boundary - // is required by the ABI. - NumBytes = (NumBytes + 7) & ~7; + // Round up to next doubleword boundary -- a double-word boundary + // is required by the ABI. 
+ NumBytes = RoundUpToAlignment(NumBytes, 8); + } NumBytes = -NumBytes; if (NumBytes >= -4096) { @@ -70,15 +79,18 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF) const { void SparcFrameLowering:: eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const { - MachineInstr &MI = *I; - DebugLoc dl = MI.getDebugLoc(); - int Size = MI.getOperand(0).getImm(); - if (MI.getOpcode() == SP::ADJCALLSTACKDOWN) - Size = -Size; - const SparcInstrInfo &TII = - *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo()); - if (Size) - BuildMI(MBB, I, dl, TII.get(SP::ADDri), SP::O6).addReg(SP::O6).addImm(Size); + if (!hasReservedCallFrame(MF)) { + MachineInstr &MI = *I; + DebugLoc DL = MI.getDebugLoc(); + int Size = MI.getOperand(0).getImm(); + if (MI.getOpcode() == SP::ADJCALLSTACKDOWN) + Size = -Size; + const SparcInstrInfo &TII = + *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo()); + if (Size) + BuildMI(MBB, I, DL, TII.get(SP::ADDri), SP::O6).addReg(SP::O6) + .addImm(Size); + } MBB.erase(I); } diff --git a/lib/Target/Sparc/SparcFrameLowering.h b/lib/Target/Sparc/SparcFrameLowering.h index 464233e..c375662 100644 --- a/lib/Target/Sparc/SparcFrameLowering.h +++ b/lib/Target/Sparc/SparcFrameLowering.h @@ -22,10 +22,12 @@ namespace llvm { class SparcSubtarget; class SparcFrameLowering : public TargetFrameLowering { + const SparcSubtarget &SubTarget; public: - explicit SparcFrameLowering(const SparcSubtarget &/*sti*/) - : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, 0) { - } + explicit SparcFrameLowering(const SparcSubtarget &ST) + : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, + ST.is64Bit() ? 16 : 8, 0, ST.is64Bit() ? 16 : 8), + SubTarget(ST) {} /// emitProlog/emitEpilog - These methods insert prolog and epilog code into /// the function. diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp index 5fa545d..a709685 100644 --- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp +++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp @@ -73,7 +73,7 @@ SDNode* SparcDAGToDAGISel::getGlobalBaseReg() { bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base, SDValue &Offset) { if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { - Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32); + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), TLI.getPointerTy()); Offset = CurDAG->getTargetConstant(0, MVT::i32); return true; } @@ -87,7 +87,8 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr, if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) { // Constant offset from frame ref. - Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32); + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), + TLI.getPointerTy()); } else { Base = Addr.getOperand(0); } @@ -130,7 +131,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) { } R1 = Addr; - R2 = CurDAG->getRegister(SP::G0, MVT::i32); + R2 = CurDAG->getRegister(SP::G0, TLI.getPointerTy()); return true; } @@ -146,6 +147,9 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) { case ISD::SDIV: case ISD::UDIV: { + // sdivx / udivx handle 64-bit divides. + if (N->getValueType(0) == MVT::i64) + break; // FIXME: should use a custom expander to expose the SRA to the dag. 
SDValue DivLHS = N->getOperand(0); SDValue DivRHS = N->getOperand(1); diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 28ac02a..3863e2c 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -15,6 +15,7 @@ #include "SparcISelLowering.h" #include "SparcMachineFunctionInfo.h" #include "SparcTargetMachine.h" +#include "MCTargetDesc/SparcBaseInfo.h" #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" @@ -74,25 +75,117 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT, return true; } +// Allocate a full-sized argument for the 64-bit ABI. +static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, + MVT &LocVT, CCValAssign::LocInfo &LocInfo, + ISD::ArgFlagsTy &ArgFlags, CCState &State) { + assert((LocVT == MVT::f32 || LocVT.getSizeInBits() == 64) && + "Can't handle non-64 bits locations"); + + // Stack space is allocated for all arguments starting from [%fp+BIAS+128]. + unsigned Offset = State.AllocateStack(8, 8); + unsigned Reg = 0; + + if (LocVT == MVT::i64 && Offset < 6*8) + // Promote integers to %i0-%i5. + Reg = SP::I0 + Offset/8; + else if (LocVT == MVT::f64 && Offset < 16*8) + // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15). + Reg = SP::D0 + Offset/8; + else if (LocVT == MVT::f32 && Offset < 16*8) + // Promote floats to %f1, %f3, ... + Reg = SP::F1 + Offset/4; + + // Promote to register when possible, otherwise use the stack slot. + if (Reg) { + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); + return true; + } + + // This argument goes on the stack in an 8-byte slot. + // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to + // the right-aligned float. The first 4 bytes of the stack slot are undefined. + if (LocVT == MVT::f32) + Offset += 4; + + State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); + return true; +} + +// Allocate a half-sized argument for the 64-bit ABI. +// +// This is used when passing { float, int } structs by value in registers. +static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, + MVT &LocVT, CCValAssign::LocInfo &LocInfo, + ISD::ArgFlagsTy &ArgFlags, CCState &State) { + assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations"); + unsigned Offset = State.AllocateStack(4, 4); + + if (LocVT == MVT::f32 && Offset < 16*8) { + // Promote floats to %f0-%f31. + State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4, + LocVT, LocInfo)); + return true; + } + + if (LocVT == MVT::i32 && Offset < 6*8) { + // Promote integers to %i0-%i5, using half the register. + unsigned Reg = SP::I0 + Offset/8; + LocVT = MVT::i64; + LocInfo = CCValAssign::AExt; + + // Set the Custom bit if this i32 goes in the high bits of a register. + if (Offset % 8 == 0) + State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, + LocVT, LocInfo)); + else + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); + return true; + } + + State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); + return true; +} + #include "SparcGenCallingConv.inc" +// The calling conventions in SparcCallingConv.td are described in terms of the +// callee's register window. This function translates registers to the +// corresponding caller window %o register. 
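For instance, a value the callee reads from %i2 has to be set up by the caller in %o2. A trivial sketch of that remapping follows; the enum values are stand-ins for the SP:: register numbering, whose consecutiveness the assert in the real helper below verifies.

// Sketch of the callee-window to caller-window remapping described above.
#include <cassert>

// Stand-ins for the SP:: register enum: I0..I7 and O0..O7 are consecutive.
enum { G3 = 3, O0 = 8, O7 = 15, I0 = 24, I7 = 31 };

static unsigned toCallerWindowSketch(unsigned Reg) {
  if (Reg >= I0 && Reg <= I7)
    return Reg - I0 + O0;   // %i0-%i7 in the callee are %o0-%o7 in the caller
  return Reg;               // other registers (globals, floats) are used as-is
}

int main() {
  assert(toCallerWindowSketch(I0 + 2) == O0 + 2);   // %i2 -> %o2
  assert(toCallerWindowSketch(G3) == G3);           // %g3 is unchanged
  return 0;
}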
+static unsigned toCallerWindow(unsigned Reg) { + assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum"); + if (Reg >= SP::I0 && Reg <= SP::I7) + return Reg - SP::I0 + SP::O0; + return Reg; +} + SDValue SparcTargetLowering::LowerReturn(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, + CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + DebugLoc DL, SelectionDAG &DAG) const { + if (Subtarget->is64Bit()) + return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); + return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); +} +SDValue +SparcTargetLowering::LowerReturn_32(SDValue Chain, + CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + DebugLoc DL, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); // CCValAssign - represent the assignment of the return value to locations. SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. - CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), DAG.getTarget(), RVLocs, *DAG.getContext()); - // Analize return values. + // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32); SDValue Flag; @@ -105,7 +198,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain, CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); - Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), + Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together with flags. @@ -120,8 +213,8 @@ SparcTargetLowering::LowerReturn(SDValue Chain, unsigned Reg = SFI->getSRetReturnReg(); if (!Reg) llvm_unreachable("sret virtual register not created in the entry block"); - SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); - Chain = DAG.getCopyToReg(Chain, dl, SP::I0, Val, Flag); + SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy()); + Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag); Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(SP::I0, getPointerTy())); RetAddrOffset = 12; // CallInst + Delay Slot + Unimp @@ -134,22 +227,114 @@ SparcTargetLowering::LowerReturn(SDValue Chain, if (Flag.getNode()) RetOps.push_back(Flag); - return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, + return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, &RetOps[0], RetOps.size()); } -/// LowerFormalArguments - V8 uses a very simple ABI, where all values are -/// passed in either one or two GPRs, including FP values. TODO: we should -/// pass FP values in FP registers for fastcc functions. +// Lower return values for the 64-bit ABI. +// Return values are passed the exactly the same way as function arguments. SDValue -SparcTargetLowering::LowerFormalArguments(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> - &Ins, - DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) - const { +SparcTargetLowering::LowerReturn_64(SDValue Chain, + CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + DebugLoc DL, SelectionDAG &DAG) const { + // CCValAssign - represent the assignment of the return value to locations. 
+ SmallVector<CCValAssign, 16> RVLocs; + + // CCState - Info about the registers and stack slot. + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), + DAG.getTarget(), RVLocs, *DAG.getContext()); + + // Analyze return values. + CCInfo.AnalyzeReturn(Outs, CC_Sparc64); + + SDValue Flag; + SmallVector<SDValue, 4> RetOps(1, Chain); + + // The second operand on the return instruction is the return address offset. + // The return address is always %i7+8 with the 64-bit ABI. + RetOps.push_back(DAG.getConstant(8, MVT::i32)); + + // Copy the result values into the output registers. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + SDValue OutVal = OutVals[i]; + + // Integer return values must be sign or zero extended by the callee. + switch (VA.getLocInfo()) { + case CCValAssign::SExt: + OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal); + break; + case CCValAssign::ZExt: + OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal); + break; + case CCValAssign::AExt: + OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal); + default: + break; + } + + // The custom bit on an i32 return value indicates that it should be passed + // in the high bits of the register. + if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { + OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal, + DAG.getConstant(32, MVT::i32)); + + // The next value may go in the low bits of the same register. + // Handle both at once. + if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) { + SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]); + OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV); + // Skip the next value, it's already done. + ++i; + } + } + + Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag); + + // Guarantee that all emitted copies are stuck together with flags. + Flag = Chain.getValue(1); + RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); + } + + RetOps[0] = Chain; // Update chain. + + // Add the flag if we have it. + if (Flag.getNode()) + RetOps.push_back(Flag); + + return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, + &RetOps[0], RetOps.size()); +} +SDValue SparcTargetLowering:: +LowerFormalArguments(SDValue Chain, + CallingConv::ID CallConv, + bool IsVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc DL, + SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const { + if (Subtarget->is64Bit()) + return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins, + DL, DAG, InVals); + return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins, + DL, DAG, InVals); +} + +/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are +/// passed in either one or two GPRs, including FP values. TODO: we should +/// pass FP values in FP registers for fastcc functions. +SDValue SparcTargetLowering:: +LowerFormalArguments_32(SDValue Chain, + CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, + SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); @@ -341,9 +526,132 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain, return Chain; } +// Lower formal arguments for the 64 bit ABI. 
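The shift-and-or sequence in LowerReturn_64 above (and repeated for outgoing arguments in LowerCall_64 below) is plain 64-bit packing. A minimal sketch of the same arithmetic, with arbitrary example values, before moving on to the formal-argument lowering:

// Sketch of the packing done above with ISD::SHL / ISD::ZERO_EXTEND / ISD::OR
// when two i32 struct members share one 64-bit register.
#include <cassert>
#include <cstdint>

static uint64_t packHighLow(uint32_t Hi, uint32_t Lo) {
  return (uint64_t(Hi) << 32) | uint64_t(Lo);  // Hi in bits 63..32, Lo in 31..0
}

int main() {
  uint64_t Packed = packHighLow(0xdeadbeef, 0x12345678);
  assert((Packed >> 32) == 0xdeadbeef);        // what the receiver's SRL recovers
  assert(uint32_t(Packed) == 0x12345678);
  return 0;
}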
+SDValue SparcTargetLowering:: +LowerFormalArguments_64(SDValue Chain, + CallingConv::ID CallConv, + bool IsVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc DL, + SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const { + MachineFunction &MF = DAG.getMachineFunction(); + + // Analyze arguments according to CC_Sparc64. + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), + getTargetMachine(), ArgLocs, *DAG.getContext()); + CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64); + + // The argument array begins at %fp+BIAS+128, after the register save area. + const unsigned ArgArea = 128; + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + if (VA.isRegLoc()) { + // This argument is passed in a register. + // All integer register arguments are promoted by the caller to i64. + + // Create a virtual register for the promoted live-in value. + unsigned VReg = MF.addLiveIn(VA.getLocReg(), + getRegClassFor(VA.getLocVT())); + SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT()); + + // Get the high bits for i32 struct elements. + if (VA.getValVT() == MVT::i32 && VA.needsCustom()) + Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg, + DAG.getConstant(32, MVT::i32)); + + // The caller promoted the argument, so insert an Assert?ext SDNode so we + // won't promote the value again in this function. + switch (VA.getLocInfo()) { + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg, + DAG.getValueType(VA.getValVT())); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg, + DAG.getValueType(VA.getValVT())); + break; + default: + break; + } + + // Truncate the register down to the argument type. + if (VA.isExtInLoc()) + Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg); + + InVals.push_back(Arg); + continue; + } + + // The registers are exhausted. This argument was passed on the stack. + assert(VA.isMemLoc()); + // The CC_Sparc64_Full/Half functions compute stack offsets relative to the + // beginning of the arguments area at %fp+BIAS+128. + unsigned Offset = VA.getLocMemOffset() + ArgArea; + unsigned ValSize = VA.getValVT().getSizeInBits() / 8; + // Adjust offset for extended arguments, SPARC is big-endian. + // The caller will have written the full slot with extended bytes, but we + // prefer our own extending loads. + if (VA.isExtInLoc()) + Offset += 8 - ValSize; + int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true); + InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, + DAG.getFrameIndex(FI, getPointerTy()), + MachinePointerInfo::getFixedStack(FI), + false, false, false, 0)); + } + + if (!IsVarArg) + return Chain; + + // This function takes variable arguments, some of which may have been passed + // in registers %i0-%i5. Variable floating point arguments are never passed + // in floating point registers. They go on %i0-%i5 or on the stack like + // integer arguments. + // + // The va_start intrinsic needs to know the offset to the first variable + // argument. + unsigned ArgOffset = CCInfo.getNextStackOffset(); + SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); + // Skip the 128 bytes of register save area. + FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea + + Subtarget->getStackPointerBias()); + + // Save the variable arguments that were passed in registers. 
+ // The caller is required to reserve stack space for 6 arguments regardless + // of how many arguments were actually passed. + SmallVector<SDValue, 8> OutChains; + for (; ArgOffset < 6*8; ArgOffset += 8) { + unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass); + SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); + int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true); + OutChains.push_back(DAG.getStore(Chain, DL, VArg, + DAG.getFrameIndex(FI, getPointerTy()), + MachinePointerInfo::getFixedStack(FI), + false, false, 0)); + } + + if (!OutChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, + &OutChains[0], OutChains.size()); + + return Chain; +} + SDValue SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { + if (Subtarget->is64Bit()) + return LowerCall_64(CLI, InVals); + return LowerCall_32(CLI, InVals); +} + +// Lower a call for the 32-bit ABI. +SDValue +SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; DebugLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; @@ -546,11 +854,7 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { - unsigned Reg = RegsToPass[i].first; - // Remap I0->I7 -> O0->O7. - if (Reg >= SP::I0 && Reg <= SP::I7) - Reg = Reg-SP::I0+SP::O0; - + unsigned Reg = toCallerWindow(RegsToPass[i].first); Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag); InFlag = Chain.getValue(1); } @@ -572,13 +876,9 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Ops.push_back(Callee); if (hasStructRetAttr) Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32)); - for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { - unsigned Reg = RegsToPass[i].first; - if (Reg >= SP::I0 && Reg <= SP::I7) - Reg = Reg-SP::I0+SP::O0; - - Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType())); - } + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) + Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first), + RegsToPass[i].second.getValueType())); if (InFlag.getNode()) Ops.push_back(InFlag); @@ -598,13 +898,7 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { - unsigned Reg = RVLocs[i].getLocReg(); - - // Remap I0->I7 -> O0->O7. - if (Reg >= SP::I0 && Reg <= SP::I7) - Reg = Reg-SP::I0+SP::O0; - - Chain = DAG.getCopyFromReg(Chain, dl, Reg, + Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), RVLocs[i].getValVT(), InFlag).getValue(1); InFlag = Chain.getValue(2); InVals.push_back(Chain.getValue(0)); @@ -637,6 +931,259 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const return getDataLayout()->getTypeAllocSize(ElementTy); } + +// Fixup floating point arguments in the ... part of a varargs call. +// +// The SPARC v9 ABI requires that floating point arguments are treated the same +// as integers when calling a varargs function. This does not apply to the +// fixed arguments that are part of the function's prototype. +// +// This function post-processes a CCValAssign array created by +// AnalyzeCallOperands(). 
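Concretely, for a single variadic f64 the post-processing amounts to the decision sketched below; DRegIndex stands in for VA.getLocReg() - SP::D0, the offsets are the parameter-array offsets used above, and the strings are purely descriptive.

// Sketch of where a variadic f64 ends up after the fixup below; not llc output.
#include <cstdio>

static const char *varargF64Location(unsigned DRegIndex) {
  unsigned Offset = 8 * DRegIndex;          // position in the parameter array
  if (Offset < 6 * 8)
    return "integer register %i0-%i5, bitcast to i64";
  return "stack, in the outgoing argument area";
}

int main() {
  // A double that CC_Sparc64 assigned to D2 (offset 16) is re-routed to %i2;
  // one assigned to D7 (offset 56) is past the six integer slots and goes to
  // memory instead.
  std::printf("D2 -> %s\n", varargF64Location(2));
  std::printf("D7 -> %s\n", varargF64Location(7));
  return 0;
}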
+static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs, + ArrayRef<ISD::OutputArg> Outs) { + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + const CCValAssign &VA = ArgLocs[i]; + // FIXME: What about f32 arguments? C promotes them to f64 when calling + // varargs functions. + if (!VA.isRegLoc() || VA.getLocVT() != MVT::f64) + continue; + // The fixed arguments to a varargs function still go in FP registers. + if (Outs[VA.getValNo()].IsFixed) + continue; + + // This floating point argument should be reassigned. + CCValAssign NewVA; + + // Determine the offset into the argument array. + unsigned Offset = 8 * (VA.getLocReg() - SP::D0); + assert(Offset < 16*8 && "Offset out of range, bad register enum?"); + + if (Offset < 6*8) { + // This argument should go in %i0-%i5. + unsigned IReg = SP::I0 + Offset/8; + // Full register, just bitconvert into i64. + NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), + IReg, MVT::i64, CCValAssign::BCvt); + } else { + // This needs to go to memory, we're out of integer registers. + NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), + Offset, VA.getLocVT(), VA.getLocInfo()); + } + ArgLocs[i] = NewVA; + } +} + +// Lower a call for the 64-bit ABI. +SDValue +SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl<SDValue> &InVals) const { + SelectionDAG &DAG = CLI.DAG; + DebugLoc DL = CLI.DL; + SDValue Chain = CLI.Chain; + + // Analyze operands of the call, assigning locations to each operand. + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), + DAG.getTarget(), ArgLocs, *DAG.getContext()); + CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64); + + // Get the size of the outgoing arguments stack space requirement. + // The stack offset computed by CC_Sparc64 includes all arguments. + // Called functions expect 6 argument words to exist in the stack frame, used + // or not. + unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset()); + + // Keep stack frames 16-byte aligned. + ArgsSize = RoundUpToAlignment(ArgsSize, 16); + + // Varargs calls require special treatment. + if (CLI.IsVarArg) + fixupVariableFloatArgs(ArgLocs, CLI.Outs); + + // Adjust the stack pointer to make room for the arguments. + // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls + // with more than 6 arguments. + Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true)); + + // Collect the set of registers to pass to the function and their values. + // This will be emitted as a sequence of CopyToReg nodes glued to the call + // instruction. + SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; + + // Collect chains from all the memory opeations that copy arguments to the + // stack. They must follow the stack pointer adjustment above and precede the + // call instruction itself. + SmallVector<SDValue, 8> MemOpChains; + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + const CCValAssign &VA = ArgLocs[i]; + SDValue Arg = CLI.OutVals[i]; + + // Promote the value if needed. 
+ switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown location info!"); + case CCValAssign::Full: + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::BCvt: + Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); + break; + } + + if (VA.isRegLoc()) { + // The custom bit on an i32 return value indicates that it should be + // passed in the high bits of the register. + if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { + Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg, + DAG.getConstant(32, MVT::i32)); + + // The next value may go in the low bits of the same register. + // Handle both at once. + if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() && + ArgLocs[i+1].getLocReg() == VA.getLocReg()) { + SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, + CLI.OutVals[i+1]); + Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV); + // Skip the next value, it's already done. + ++i; + } + } + RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg)); + continue; + } + + assert(VA.isMemLoc()); + + // Create a store off the stack pointer for this argument. + SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy()); + // The argument area starts at %fp+BIAS+128 in the callee frame, + // %sp+BIAS+128 in ours. + SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + + Subtarget->getStackPointerBias() + + 128); + PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff); + MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff, + MachinePointerInfo(), + false, false, 0)); + } + + // Emit all stores, make sure they occur before the call. + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, + &MemOpChains[0], MemOpChains.size()); + + // Build a sequence of CopyToReg nodes glued together with token chain and + // glue operands which copy the outgoing args into registers. The InGlue is + // necessary since all emitted instructions must be stuck together in order + // to pass the live physical registers. + SDValue InGlue; + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, DL, + RegsToPass[i].first, RegsToPass[i].second, InGlue); + InGlue = Chain.getValue(1); + } + + // If the callee is a GlobalAddress node (quite common, every direct call is) + // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. + // Likewise ExternalSymbol -> TargetExternalSymbol. + SDValue Callee = CLI.Callee; + if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy()); + else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy()); + + // Build the operands for the call instruction itself. + SmallVector<SDValue, 8> Ops; + Ops.push_back(Chain); + Ops.push_back(Callee); + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) + Ops.push_back(DAG.getRegister(RegsToPass[i].first, + RegsToPass[i].second.getValueType())); + + // Make sure the CopyToReg nodes are glued to the call instruction which + // consumes the registers. + if (InGlue.getNode()) + Ops.push_back(InGlue); + + // Now the call itself. 
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size()); + InGlue = Chain.getValue(1); + + // Revert the stack pointer immediately after the call. + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true), + DAG.getIntPtrConstant(0, true), InGlue); + InGlue = Chain.getValue(1); + + // Now extract the return values. This is more or less the same as + // LowerFormalArguments_64. + + // Assign locations to each value returned by this call. + SmallVector<CCValAssign, 16> RVLocs; + CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), + DAG.getTarget(), RVLocs, *DAG.getContext()); + RVInfo.AnalyzeCallResult(CLI.Ins, CC_Sparc64); + + // Copy all of the result registers out of their specified physreg. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + unsigned Reg = toCallerWindow(VA.getLocReg()); + + // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can + // reside in the same register in the high and low bits. Reuse the + // CopyFromReg previous node to avoid duplicate copies. + SDValue RV; + if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1))) + if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg) + RV = Chain.getValue(0); + + // But usually we'll create a new CopyFromReg for a different register. + if (!RV.getNode()) { + RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue); + Chain = RV.getValue(1); + InGlue = Chain.getValue(2); + } + + // Get the high bits for i32 struct elements. + if (VA.getValVT() == MVT::i32 && VA.needsCustom()) + RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV, + DAG.getConstant(32, MVT::i32)); + + // The callee promoted the return value, so insert an Assert?ext SDNode so + // we won't promote the value again in this function. + switch (VA.getLocInfo()) { + case CCValAssign::SExt: + RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV, + DAG.getValueType(VA.getValVT())); + break; + case CCValAssign::ZExt: + RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV, + DAG.getValueType(VA.getValVT())); + break; + default: + break; + } + + // Truncate the register down to the return value type. + if (VA.isExtInLoc()) + RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV); + + InVals.push_back(RV); + } + + return Chain; +} + //===----------------------------------------------------------------------===// // TargetLowering Implementation //===----------------------------------------------------------------------===// @@ -689,11 +1236,14 @@ static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) { SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) : TargetLowering(TM, new TargetLoweringObjectFileELF()) { + Subtarget = &TM.getSubtarget<SparcSubtarget>(); // Set up the register classes. addRegisterClass(MVT::i32, &SP::IntRegsRegClass); addRegisterClass(MVT::f32, &SP::FPRegsRegClass); addRegisterClass(MVT::f64, &SP::DFPRegsRegClass); + if (Subtarget->is64Bit()) + addRegisterClass(MVT::i64, &SP::I64RegsRegClass); // Turn FP extload into load/fextend setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); @@ -703,9 +1253,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) setTruncStoreAction(MVT::f64, MVT::f32, Expand); // Custom legalize GlobalAddress nodes into LO/HI parts. 
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::ConstantPool , MVT::i32, Custom); + setOperationAction(ISD::GlobalAddress, getPointerTy(), Custom); + setOperationAction(ISD::GlobalTLSAddress, getPointerTy(), Custom); + setOperationAction(ISD::ConstantPool, getPointerTy(), Custom); // Sparc doesn't have sext_inreg, replace them with shl/sra setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); @@ -749,9 +1299,13 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); + if (Subtarget->is64Bit()) { + setOperationAction(ISD::BR_CC, MVT::i64, Custom); + setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); + } + // FIXME: There are instructions available for ATOMIC_FENCE // on SparcV8 and later. - setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); setOperationAction(ISD::FSIN , MVT::f64, Expand); @@ -818,8 +1372,10 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { case SPISD::CMPICC: return "SPISD::CMPICC"; case SPISD::CMPFCC: return "SPISD::CMPFCC"; case SPISD::BRICC: return "SPISD::BRICC"; + case SPISD::BRXCC: return "SPISD::BRXCC"; case SPISD::BRFCC: return "SPISD::BRFCC"; case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC"; + case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC"; case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC"; case SPISD::Hi: return "SPISD::Hi"; case SPISD::Lo: return "SPISD::Lo"; @@ -846,6 +1402,7 @@ void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, switch (Op.getOpcode()) { default: break; case SPISD::SELECT_ICC: + case SPISD::SELECT_XCC: case SPISD::SELECT_FCC: DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); @@ -866,7 +1423,8 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, if (isa<ConstantSDNode>(RHS) && cast<ConstantSDNode>(RHS)->isNullValue() && CC == ISD::SETNE && - ((LHS.getOpcode() == SPISD::SELECT_ICC && + (((LHS.getOpcode() == SPISD::SELECT_ICC || + LHS.getOpcode() == SPISD::SELECT_XCC) && LHS.getOperand(3).getOpcode() == SPISD::CMPICC) || (LHS.getOpcode() == SPISD::SELECT_FCC && LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) && @@ -881,46 +1439,89 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, } } +// Convert to a target node and set target flags. +SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF, + SelectionDAG &DAG) const { + if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) + return DAG.getTargetGlobalAddress(GA->getGlobal(), + GA->getDebugLoc(), + GA->getValueType(0), + GA->getOffset(), TF); + + if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) + return DAG.getTargetConstantPool(CP->getConstVal(), + CP->getValueType(0), + CP->getAlignment(), + CP->getOffset(), TF); + + if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) + return DAG.getTargetExternalSymbol(ES->getSymbol(), + ES->getValueType(0), TF); + + llvm_unreachable("Unhandled address SDNode"); +} + +// Split Op into high and low parts according to HiTF and LoTF. +// Return an ADD node combining the parts. 
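The %hi/%lo-style operators combined by makeHiLoPair (defined next) and makeAddress partition an address into disjoint bit fields. Below is a small self-contained check that the three absolute code models reassemble the original value; the masks follow the standard SPARC assembler operators, and this is an illustration rather than generated code.

// Sketch verifying the bit-field splits used by the absolute code models.
#include <cassert>
#include <cstdint>

static uint64_t lo10(uint64_t x) { return x & 0x3ff; }            // %lo
static uint64_t hi22(uint64_t x) { return (x >> 10) & 0x3fffff; } // %hi
static uint64_t l44(uint64_t x)  { return x & 0xfff; }            // %l44
static uint64_t m44(uint64_t x)  { return (x >> 12) & 0x3ff; }    // %m44
static uint64_t h44(uint64_t x)  { return (x >> 22) & 0x3fffff; } // %h44
static uint64_t hm10(uint64_t x) { return (x >> 32) & 0x3ff; }    // %hm
static uint64_t hh22(uint64_t x) { return (x >> 42) & 0x3fffff; } // %hh

int main() {
  // abs32 (CodeModel::Small): sethi %hi + or %lo.
  uint64_t A32 = 0xfedcba98;
  assert(((hi22(A32) << 10) | lo10(A32)) == A32);

  // abs44 (CodeModel::Medium): (sethi %h44 + or %m44) << 12, plus %l44.
  uint64_t A44 = 0xabcdef12345ULL;
  uint64_t Mid = (h44(A44) << 10) | m44(A44);
  assert(((Mid << 12) | l44(A44)) == A44);

  // abs64 (CodeModel::Large): (sethi %hh + or %hm) << 32, plus sethi %hi + or %lo.
  uint64_t A64 = 0x0123456789abcdefULL;
  uint64_t Hi = (hh22(A64) << 10) | hm10(A64);
  uint64_t Lo = (hi22(A64) << 10) | lo10(A64);
  assert(((Hi << 32) | Lo) == A64);
  return 0;
}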
+SDValue SparcTargetLowering::makeHiLoPair(SDValue Op, + unsigned HiTF, unsigned LoTF, + SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + EVT VT = Op.getValueType(); + SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG)); + SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG)); + return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); +} + +// Build SDNodes for producing an address from a GlobalAddress, ConstantPool, +// or ExternalSymbol SDNode. +SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + EVT VT = getPointerTy(); + + // Handle PIC mode first. + if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { + // This is the pic32 code model, the GOT is known to be smaller than 4GB. + SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); + SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); + SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); + return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, + MachinePointerInfo::getGOT(), false, false, false, 0); + } + + // This is one of the absolute code models. + switch(getTargetMachine().getCodeModel()) { + default: + llvm_unreachable("Unsupported absolute code model"); + case CodeModel::Small: + // abs32. + return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); + case CodeModel::Medium: { + // abs44. + SDValue H44 = makeHiLoPair(Op, SPII::MO_H44, SPII::MO_M44, DAG); + H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32)); + SDValue L44 = withTargetFlags(Op, SPII::MO_L44, DAG); + L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); + return DAG.getNode(ISD::ADD, DL, VT, H44, L44); + } + case CodeModel::Large: { + // abs64. + SDValue Hi = makeHiLoPair(Op, SPII::MO_HH, SPII::MO_HM, DAG); + Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32)); + SDValue Lo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); + return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); + } + } +} + SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { - const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); - // FIXME there isn't really any debug info here - DebugLoc dl = Op.getDebugLoc(); - SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32); - SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA); - SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA); - - if (getTargetMachine().getRelocationModel() != Reloc::PIC_) - return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); - - SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl, - getPointerTy()); - SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); - SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, - GlobalBase, RelAddr); - return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), - AbsAddr, MachinePointerInfo(), false, false, false, 0); + return makeAddress(Op, DAG); } SDValue SparcTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { - ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); - // FIXME there isn't really any debug info here - DebugLoc dl = Op.getDebugLoc(); - const Constant *C = N->getConstVal(); - SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment()); - SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP); - SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP); - if (getTargetMachine().getRelocationModel() != Reloc::PIC_) - return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); - - SDValue GlobalBase = 
DAG.getNode(SPISD::GLOBAL_BASE_REG, dl, - getPointerTy()); - SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); - SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, - GlobalBase, RelAddr); - return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), - AbsAddr, MachinePointerInfo(), false, false, false, 0); + return makeAddress(Op, DAG); } static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { @@ -954,12 +1555,13 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) { // Get the condition flag. SDValue CompareFlag; - if (LHS.getValueType() == MVT::i32) { - EVT VTs[] = { MVT::i32, MVT::Glue }; + if (LHS.getValueType().isInteger()) { + EVT VTs[] = { LHS.getValueType(), MVT::Glue }; SDValue Ops[2] = { LHS, RHS }; CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1); if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); - Opc = SPISD::BRICC; + // 32-bit compares use the icc flags, 64-bit uses the xcc flags. + Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC; } else { CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS); if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); @@ -983,12 +1585,13 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) { LookThroughSetCC(LHS, RHS, CC, SPCC); SDValue CompareFlag; - if (LHS.getValueType() == MVT::i32) { + if (LHS.getValueType().isInteger()) { // subcc returns a value EVT VTs[] = { LHS.getValueType(), MVT::Glue }; SDValue Ops[2] = { LHS, RHS }; CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1); - Opc = SPISD::SELECT_ICC; + Opc = LHS.getValueType() == MVT::i32 ? + SPISD::SELECT_ICC : SPISD::SELECT_XCC; if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); } else { CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS); @@ -1006,14 +1609,13 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - DebugLoc dl = Op.getDebugLoc(); + DebugLoc DL = Op.getDebugLoc(); SDValue Offset = - DAG.getNode(ISD::ADD, dl, MVT::i32, - DAG.getRegister(SP::I6, MVT::i32), - DAG.getConstant(FuncInfo->getVarArgsFrameOffset(), - MVT::i32)); + DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), + DAG.getRegister(SP::I6, TLI.getPointerTy()), + DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset())); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); - return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1), + return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1), MachinePointerInfo(SV), false, false, 0); } @@ -1022,33 +1624,22 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { EVT VT = Node->getValueType(0); SDValue InChain = Node->getOperand(0); SDValue VAListPtr = Node->getOperand(1); + EVT PtrVT = VAListPtr.getValueType(); const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); - DebugLoc dl = Node->getDebugLoc(); - SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr, + DebugLoc DL = Node->getDebugLoc(); + SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV), false, false, false, 0); - // Increment the pointer, VAList, to the next vaarg - SDValue NextPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, VAList, - DAG.getConstant(VT.getSizeInBits()/8, - MVT::i32)); - // Store the incremented VAList to the legalized pointer - InChain = DAG.getStore(VAList.getValue(1), dl, NextPtr, + // Increment the pointer, VAList, to the next vaarg. 
+ SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, + DAG.getIntPtrConstant(VT.getSizeInBits()/8)); + // Store the incremented VAList to the legalized pointer. + InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr, MachinePointerInfo(SV), false, false, 0); - // Load the actual argument out of the pointer VAList, unless this is an - // f64 load. - if (VT != MVT::f64) - return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(), - false, false, false, 0); - - // Otherwise, load it as i64, then do a bitconvert. - SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, MachinePointerInfo(), - false, false, false, 0); - - // Bit-Convert the value to f64. - SDValue Ops[2] = { - DAG.getNode(ISD::BITCAST, dl, MVT::f64, V), - V.getValue(1) - }; - return DAG.getMergeValues(Ops, 2, dl); + // Load the actual argument out of the pointer VAList. + // We can't count on greater alignment than the word size. + return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(), + false, false, false, + std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8); } static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) { diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h index 09148ea..fd706be 100644 --- a/lib/Target/Sparc/SparcISelLowering.h +++ b/lib/Target/Sparc/SparcISelLowering.h @@ -19,14 +19,18 @@ #include "llvm/Target/TargetLowering.h" namespace llvm { + class SparcSubtarget; + namespace SPISD { enum { FIRST_NUMBER = ISD::BUILTIN_OP_END, - CMPICC, // Compare two GPR operands, set icc. + CMPICC, // Compare two GPR operands, set icc+xcc. CMPFCC, // Compare two FP operands, set fcc. BRICC, // Branch to dest on icc condition + BRXCC, // Branch to dest on xcc condition (64-bit only). BRFCC, // Branch to dest on fcc condition SELECT_ICC, // Select between two values using the current ICC flags. + SELECT_XCC, // Select between two values using the current XCC flags. SELECT_FCC, // Select between two values using the current FCC flags. Hi, Lo, // Hi/Lo operations, typically on a global address. 
@@ -42,6 +46,7 @@ namespace llvm { } class SparcTargetLowering : public TargetLowering { + const SparcSubtarget *Subtarget; public: SparcTargetLowering(TargetMachine &TM); virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; @@ -66,6 +71,7 @@ namespace llvm { getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const; virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; + virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; } virtual SDValue LowerFormalArguments(SDValue Chain, @@ -74,10 +80,26 @@ namespace llvm { const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; + SDValue LowerFormalArguments_32(SDValue Chain, + CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const; + SDValue LowerFormalArguments_64(SDValue Chain, + CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const; virtual SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const; + SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl<SDValue> &InVals) const; + SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl<SDValue> &InVals) const; virtual SDValue LowerReturn(SDValue Chain, @@ -85,11 +107,25 @@ namespace llvm { const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const; + SDValue LowerReturn_32(SDValue Chain, + CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + DebugLoc DL, SelectionDAG &DAG) const; + SDValue LowerReturn_64(SDValue Chain, + CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + DebugLoc DL, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const; + SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const; + SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, + SelectionDAG &DAG) const; + SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const; }; } // end namespace llvm diff --git a/lib/Target/Sparc/SparcInstr64Bit.td b/lib/Target/Sparc/SparcInstr64Bit.td new file mode 100644 index 0000000..91805f9 --- /dev/null +++ b/lib/Target/Sparc/SparcInstr64Bit.td @@ -0,0 +1,333 @@ +//===-- SparcInstr64Bit.td - 64-bit instructions for Sparc Target ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains instruction definitions and patterns needed for 64-bit +// code generation on SPARC v9. +// +// Some SPARC v9 instructions are defined in SparcInstrInfo.td because they can +// also be used in 32-bit code running on a SPARC v9 CPU. +// +//===----------------------------------------------------------------------===// + +let Predicates = [Is64Bit] in { +// The same integer registers are used for i32 and i64 values. 
+// When registers hold i32 values, the high bits are don't-care bits.
+// This gives us free trunc and anyext.
+def : Pat<(i64 (anyext i32:$val)), (COPY_TO_REGCLASS $val, I64Regs)>;
+def : Pat<(i32 (trunc i64:$val)), (COPY_TO_REGCLASS $val, IntRegs)>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Shift Instructions.
+//===----------------------------------------------------------------------===//
+//
+// The 32-bit shift instructions are still available. The left shift sll
+// instructions shift all 64 bits, but they only accept a 5-bit shift amount.
+//
+// The srl instructions only shift the low 32 bits and clear the high 32 bits.
+// Finally, sra shifts the low 32 bits and sign-extends to 64 bits.

+let Predicates = [Is64Bit] in {
+
+def : Pat<(i64 (zext i32:$val)), (SRLri $val, 0)>;
+def : Pat<(i64 (sext i32:$val)), (SRAri $val, 0)>;
+
+def : Pat<(i64 (and i64:$val, 0xffffffff)), (SRLri $val, 0)>;
+def : Pat<(i64 (sext_inreg i64:$val, i32)), (SRAri $val, 0)>;
+
+defm SLLX : F3_S<"sllx", 0b100101, 1, shl, i64, I64Regs>;
+defm SRLX : F3_S<"srlx", 0b100110, 1, srl, i64, I64Regs>;
+defm SRAX : F3_S<"srax", 0b100111, 1, sra, i64, I64Regs>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Immediates.
+//===----------------------------------------------------------------------===//
+//
+// All 32-bit immediates can be materialized with sethi+or, but 64-bit
+// immediates may require more code. There may be a point where it is
+// preferable to use a constant pool load instead, depending on the
+// microarchitecture.
+
+// The %g0 register is constant 0.
+// This is useful for stx %g0, [...], for example.
+def : Pat<(i64 0), (i64 G0)>, Requires<[Is64Bit]>;
+
+// Single-instruction patterns.
+
+// The ALU instructions want their simm13 operands as i32 immediates.
+def as_i32imm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i32);
+}]>;
+def : Pat<(i64 simm13:$val), (ORri (i64 G0), (as_i32imm $val))>;
+def : Pat<(i64 SETHIimm:$val), (SETHIi (HI22 $val))>;
+
+// Double-instruction patterns.
+
+// All unsigned i32 immediates can be handled by sethi+or.
+def uimm32 : PatLeaf<(imm), [{ return isUInt<32>(N->getZExtValue()); }]>;
+def : Pat<(i64 uimm32:$val), (ORri (SETHIi (HI22 $val)), (LO10 $val))>,
+      Requires<[Is64Bit]>;
+
+// All negative i33 immediates can be handled by sethi+xor.
+def nimm33 : PatLeaf<(imm), [{
+  int64_t Imm = N->getSExtValue();
+  return Imm < 0 && isInt<33>(Imm);
+}]>;
+// Bits 10-31 inverted. Same as assembler's %hix.
+def HIX22 : SDNodeXForm<imm, [{
+  uint64_t Val = (~N->getZExtValue() >> 10) & ((1u << 22) - 1);
+  return CurDAG->getTargetConstant(Val, MVT::i32);
+}]>;
+// Bits 0-9 with ones in bits 10-31. Same as assembler's %lox.
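The HIX22 transform above and the LOX10 transform defined next feed the sethi+xor recipe that the nimm33 pattern below selects for negative 33-bit immediates. A small self-contained check of that arithmetic; sethi is modelled per the V9 manual as zeroing bits 9..0 and 63..32, and the xor immediate is taken as a sign-extended simm13.

// Sketch verifying HIX22/LOX10 reconstruct a negative 33-bit immediate.
#include <cassert>
#include <cstdint>

static uint64_t sethi(uint64_t Imm22) { return (Imm22 & 0x3fffff) << 10; }

int main() {
  int64_t Val = -0xABCDE123LL;              // negative, needs 33 bits
  uint64_t U = uint64_t(Val);
  uint64_t Hix22 = (~U >> 10) & 0x3fffff;   // bits 10-31, inverted (%hix)
  int64_t Lox10 = int64_t(~(~U & 0x3ff));   // low 10 bits with ones above (%lox)
  assert(Lox10 >= -4096 && Lox10 < 0);      // a legal, negative simm13
  int64_t Rebuilt = int64_t(sethi(Hix22)) ^ Lox10;
  assert(Rebuilt == Val);
  return 0;
}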
+def LOX10 : SDNodeXForm<imm, [{ + return CurDAG->getTargetConstant(~(~N->getZExtValue() & 0x3ff), MVT::i32); +}]>; +def : Pat<(i64 nimm33:$val), (XORri (SETHIi (HIX22 $val)), (LOX10 $val))>, + Requires<[Is64Bit]>; + +// More possible patterns: +// +// (sllx sethi, n) +// (sllx simm13, n) +// +// 3 instrs: +// +// (xor (sllx sethi), simm13) +// (sllx (xor sethi, simm13)) +// +// 4 instrs: +// +// (or sethi, (sllx sethi)) +// (xnor sethi, (sllx sethi)) +// +// 5 instrs: +// +// (or (sllx sethi), (or sethi, simm13)) +// (xnor (sllx sethi), (or sethi, simm13)) +// (or (sllx sethi), (sllx sethi)) +// (xnor (sllx sethi), (sllx sethi)) +// +// Worst case is 6 instrs: +// +// (or (sllx (or sethi, simmm13)), (or sethi, simm13)) + +// Bits 42-63, same as assembler's %hh. +def HH22 : SDNodeXForm<imm, [{ + uint64_t Val = (N->getZExtValue() >> 42) & ((1u << 22) - 1); + return CurDAG->getTargetConstant(Val, MVT::i32); +}]>; +// Bits 32-41, same as assembler's %hm. +def HM10 : SDNodeXForm<imm, [{ + uint64_t Val = (N->getZExtValue() >> 32) & ((1u << 10) - 1); + return CurDAG->getTargetConstant(Val, MVT::i32); +}]>; +def : Pat<(i64 imm:$val), + (ORrr (SLLXri (ORri (SETHIi (HH22 $val)), (HM10 $val)), (i32 32)), + (ORri (SETHIi (HI22 $val)), (LO10 $val)))>, + Requires<[Is64Bit]>; + + +//===----------------------------------------------------------------------===// +// 64-bit Integer Arithmetic and Logic. +//===----------------------------------------------------------------------===// + +let Predicates = [Is64Bit] in { + +// Register-register instructions. + +def : Pat<(and i64:$a, i64:$b), (ANDrr $a, $b)>; +def : Pat<(or i64:$a, i64:$b), (ORrr $a, $b)>; +def : Pat<(xor i64:$a, i64:$b), (XORrr $a, $b)>; + +def : Pat<(and i64:$a, (not i64:$b)), (ANDNrr $a, $b)>; +def : Pat<(or i64:$a, (not i64:$b)), (ORNrr $a, $b)>; +def : Pat<(xor i64:$a, (not i64:$b)), (XNORrr $a, $b)>; + +def : Pat<(add i64:$a, i64:$b), (ADDrr $a, $b)>; +def : Pat<(sub i64:$a, i64:$b), (SUBrr $a, $b)>; + +// Add/sub with carry were renamed to addc/subc in SPARC v9. +def : Pat<(adde i64:$a, i64:$b), (ADDXrr $a, $b)>; +def : Pat<(sube i64:$a, i64:$b), (SUBXrr $a, $b)>; + +def : Pat<(addc i64:$a, i64:$b), (ADDCCrr $a, $b)>; +def : Pat<(subc i64:$a, i64:$b), (SUBCCrr $a, $b)>; + +def : Pat<(SPcmpicc i64:$a, i64:$b), (SUBCCrr $a, $b)>; + +// Register-immediate instructions. + +def : Pat<(and i64:$a, (i64 simm13:$b)), (ANDri $a, (as_i32imm $b))>; +def : Pat<(or i64:$a, (i64 simm13:$b)), (ORri $a, (as_i32imm $b))>; +def : Pat<(xor i64:$a, (i64 simm13:$b)), (XORri $a, (as_i32imm $b))>; + +def : Pat<(add i64:$a, (i64 simm13:$b)), (ADDri $a, (as_i32imm $b))>; +def : Pat<(sub i64:$a, (i64 simm13:$b)), (SUBri $a, (as_i32imm $b))>; + +def : Pat<(SPcmpicc i64:$a, (i64 simm13:$b)), (SUBCCri $a, (as_i32imm $b))>; + +} // Predicates = [Is64Bit] + + +//===----------------------------------------------------------------------===// +// 64-bit Integer Multiply and Divide. +//===----------------------------------------------------------------------===// + +let Predicates = [Is64Bit] in { + +def MULXrr : F3_1<2, 0b001001, + (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2), + "mulx $rs1, $rs2, $rd", + [(set i64:$rd, (mul i64:$rs1, i64:$rs2))]>; +def MULXri : F3_2<2, 0b001001, + (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$i), + "mulx $rs1, $i, $rd", + [(set i64:$rd, (mul i64:$rs1, (i64 simm13:$i)))]>; + +// Division can trap. 
+let hasSideEffects = 1 in { +def SDIVXrr : F3_1<2, 0b101101, + (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2), + "sdivx $rs1, $rs2, $rd", + [(set i64:$rd, (sdiv i64:$rs1, i64:$rs2))]>; +def SDIVXri : F3_2<2, 0b101101, + (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$i), + "sdivx $rs1, $i, $rd", + [(set i64:$rd, (sdiv i64:$rs1, (i64 simm13:$i)))]>; + +def UDIVXrr : F3_1<2, 0b001101, + (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2), + "udivx $rs1, $rs2, $rd", + [(set i64:$rd, (udiv i64:$rs1, i64:$rs2))]>; +def UDIVXri : F3_2<2, 0b001101, + (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$i), + "udivx $rs1, $i, $rd", + [(set i64:$rd, (udiv i64:$rs1, (i64 simm13:$i)))]>; +} // hasSideEffects = 1 + +} // Predicates = [Is64Bit] + + +//===----------------------------------------------------------------------===// +// 64-bit Loads and Stores. +//===----------------------------------------------------------------------===// +// +// All the 32-bit loads and stores are available. The extending loads are sign +// or zero-extending to 64 bits. The LDrr and LDri instructions load 32 bits +// zero-extended to i64. Their mnemonic is lduw in SPARC v9 (Load Unsigned +// Word). +// +// SPARC v9 adds 64-bit loads as well as a sign-extending ldsw i32 loads. + +let Predicates = [Is64Bit] in { + +// 64-bit loads. +def LDXrr : F3_1<3, 0b001011, + (outs I64Regs:$dst), (ins MEMrr:$addr), + "ldx [$addr], $dst", + [(set i64:$dst, (load ADDRrr:$addr))]>; +def LDXri : F3_2<3, 0b001011, + (outs I64Regs:$dst), (ins MEMri:$addr), + "ldx [$addr], $dst", + [(set i64:$dst, (load ADDRri:$addr))]>; + +// Extending loads to i64. +def : Pat<(i64 (zextloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>; +def : Pat<(i64 (zextloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>; +def : Pat<(i64 (extloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>; +def : Pat<(i64 (extloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>; +def : Pat<(i64 (sextloadi8 ADDRrr:$addr)), (LDSBrr ADDRrr:$addr)>; +def : Pat<(i64 (sextloadi8 ADDRri:$addr)), (LDSBri ADDRri:$addr)>; + +def : Pat<(i64 (zextloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>; +def : Pat<(i64 (zextloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>; +def : Pat<(i64 (extloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>; +def : Pat<(i64 (extloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>; +def : Pat<(i64 (sextloadi16 ADDRrr:$addr)), (LDSHrr ADDRrr:$addr)>; +def : Pat<(i64 (sextloadi16 ADDRri:$addr)), (LDSHri ADDRri:$addr)>; + +def : Pat<(i64 (zextloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>; +def : Pat<(i64 (zextloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>; +def : Pat<(i64 (extloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>; +def : Pat<(i64 (extloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>; + +// Sign-extending load of i32 into i64 is a new SPARC v9 instruction. +def LDSWrr : F3_1<3, 0b001011, + (outs I64Regs:$dst), (ins MEMrr:$addr), + "ldsw [$addr], $dst", + [(set i64:$dst, (sextloadi32 ADDRrr:$addr))]>; +def LDSWri : F3_2<3, 0b001011, + (outs I64Regs:$dst), (ins MEMri:$addr), + "ldsw [$addr], $dst", + [(set i64:$dst, (sextloadi32 ADDRri:$addr))]>; + +// 64-bit stores. +def STXrr : F3_1<3, 0b001110, + (outs), (ins MEMrr:$addr, I64Regs:$src), + "stx $src, [$addr]", + [(store i64:$src, ADDRrr:$addr)]>; +def STXri : F3_2<3, 0b001110, + (outs), (ins MEMri:$addr, I64Regs:$src), + "stx $src, [$addr]", + [(store i64:$src, ADDRri:$addr)]>; + +// Truncating stores from i64 are identical to the i32 stores. 
+def : Pat<(truncstorei8 i64:$src, ADDRrr:$addr), (STBrr ADDRrr:$addr, $src)>; +def : Pat<(truncstorei8 i64:$src, ADDRri:$addr), (STBri ADDRri:$addr, $src)>; +def : Pat<(truncstorei16 i64:$src, ADDRrr:$addr), (STHrr ADDRrr:$addr, $src)>; +def : Pat<(truncstorei16 i64:$src, ADDRri:$addr), (STHri ADDRri:$addr, $src)>; +def : Pat<(truncstorei32 i64:$src, ADDRrr:$addr), (STrr ADDRrr:$addr, $src)>; +def : Pat<(truncstorei32 i64:$src, ADDRri:$addr), (STri ADDRri:$addr, $src)>; + +} // Predicates = [Is64Bit] + + +//===----------------------------------------------------------------------===// +// 64-bit Conditionals. +//===----------------------------------------------------------------------===// +// +// Flag-setting instructions like subcc and addcc set both icc and xcc flags. +// The icc flags correspond to the 32-bit result, and the xcc are for the +// full 64-bit result. +// +// We reuse CMPICC SDNodes for compares, but use new BRXCC branch nodes for +// 64-bit compares. See LowerBR_CC. + +let Predicates = [Is64Bit] in { + +let Uses = [ICC] in +def BPXCC : BranchSP<0, (ins brtarget:$dst, CCOp:$cc), + "bp$cc %xcc, $dst", + [(SPbrxcc bb:$dst, imm:$cc)]>; + +// Conditional moves on %xcc. +let Uses = [ICC], Constraints = "$f = $rd" in { +def MOVXCCrr : Pseudo<(outs IntRegs:$rd), + (ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond), + "mov$cond %xcc, $rs2, $rd", + [(set i32:$rd, + (SPselectxcc i32:$rs2, i32:$f, imm:$cond))]>; +def MOVXCCri : Pseudo<(outs IntRegs:$rd), + (ins i32imm:$i, IntRegs:$f, CCOp:$cond), + "mov$cond %xcc, $i, $rd", + [(set i32:$rd, + (SPselecticc simm11:$i, i32:$f, imm:$cond))]>; +} // Uses, Constraints + +def : Pat<(SPselectxcc i64:$t, i64:$f, imm:$cond), + (MOVXCCrr $t, $f, imm:$cond)>; +def : Pat<(SPselectxcc (i64 simm11:$t), i64:$f, imm:$cond), + (MOVXCCri (as_i32imm $t), $f, imm:$cond)>; + +} // Predicates = [Is64Bit] diff --git a/lib/Target/Sparc/SparcInstrFormats.td b/lib/Target/Sparc/SparcInstrFormats.td index dce3312..e7fde08 100644 --- a/lib/Target/Sparc/SparcInstrFormats.td +++ b/lib/Target/Sparc/SparcInstrFormats.td @@ -111,4 +111,41 @@ class F3_3<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins, let Inst{4-0} = rs2; } +// Shift by register rs2. +class F3_Sr<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins, + string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> { + bit x = xVal; // 1 for 64-bit shifts. + bits<5> rs2; + + let op = opVal; + let op3 = op3val; + + let Inst{13} = 0; // i field = 0 + let Inst{12} = x; // extended registers. + let Inst{4-0} = rs2; +} +// Shift by immediate. +class F3_Si<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins, + string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> { + bit x = xVal; // 1 for 64-bit shifts. + bits<6> shcnt; // shcnt32 / shcnt64. + + let op = opVal; + let op3 = op3val; + + let Inst{13} = 1; // i field = 1 + let Inst{12} = x; // extended registers. + let Inst{5-0} = shcnt; +} + +// Define rr and ri shift instructions with patterns. 
+multiclass F3_S<string OpcStr, bits<6> Op3Val, bit XVal, SDNode OpNode, + ValueType VT, RegisterClass RC> { + def rr : F3_Sr<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs, IntRegs:$rs2), + !strconcat(OpcStr, " $rs, $rs2, $rd"), + [(set VT:$rd, (OpNode VT:$rs, i32:$rs2))]>; + def ri : F3_Si<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs, i32imm:$shcnt), + !strconcat(OpcStr, " $rs, $shcnt, $rd"), + [(set VT:$rd, (OpNode VT:$rs, (i32 imm:$shcnt)))]>; +} diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td index 90b698d..baefb06 100644 --- a/lib/Target/Sparc/SparcInstrInfo.td +++ b/lib/Target/Sparc/SparcInstrInfo.td @@ -21,6 +21,12 @@ include "SparcInstrFormats.td" // Feature predicates. //===----------------------------------------------------------------------===// +// True when generating 32-bit code. +def Is32Bit : Predicate<"!Subtarget.is64Bit()">; + +// True when generating 64-bit code. This also implies HasV9. +def Is64Bit : Predicate<"Subtarget.is64Bit()">; + // HasV9 - This predicate is true when the target processor supports V9 // instructions. Note that the machine may be running in 32-bit mode. def HasV9 : Predicate<"Subtarget.isV9()">; @@ -58,22 +64,21 @@ def HI22 : SDNodeXForm<imm, [{ }]>; def SETHIimm : PatLeaf<(imm), [{ - return (((unsigned)N->getZExtValue() >> 10) << 10) == - (unsigned)N->getZExtValue(); + return isShiftedUInt<22, 10>(N->getZExtValue()); }], HI22>; // Addressing modes. -def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>; -def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex], []>; +def ADDRrr : ComplexPattern<iPTR, 2, "SelectADDRrr", [], []>; +def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>; // Address operands -def MEMrr : Operand<i32> { +def MEMrr : Operand<iPTR> { let PrintMethod = "printMemOperand"; - let MIOperandInfo = (ops IntRegs, IntRegs); + let MIOperandInfo = (ops ptr_rc, ptr_rc); } -def MEMri : Operand<i32> { +def MEMri : Operand<iPTR> { let PrintMethod = "printMemOperand"; - let MIOperandInfo = (ops IntRegs, i32imm); + let MIOperandInfo = (ops ptr_rc, i32imm); } // Branch targets have OtherVT type. @@ -98,6 +103,7 @@ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>; def SPcmpicc : SDNode<"SPISD::CMPICC", SDTIntBinOp, [SDNPOutGlue]>; def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutGlue]>; def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>; +def SPbrxcc : SDNode<"SPISD::BRXCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>; def SPbrfcc : SDNode<"SPISD::BRFCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>; def SPhi : SDNode<"SPISD::Hi", SDTIntUnaryOp>; @@ -107,6 +113,7 @@ def SPftoi : SDNode<"SPISD::FTOI", SDTSPFTOI>; def SPitof : SDNode<"SPISD::ITOF", SDTSPITOF>; def SPselecticc : SDNode<"SPISD::SELECT_ICC", SDTSPselectcc, [SDNPInGlue]>; +def SPselectxcc : SDNode<"SPISD::SELECT_XCC", SDTSPselectcc, [SDNPInGlue]>; def SPselectfcc : SDNode<"SPISD::SELECT_FCC", SDTSPselectcc, [SDNPInGlue]>; // These are target-independent nodes, but have target-specific formats. 
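The HH22/HM10 transforms above, together with the existing HI22/LO10, split a 64-bit constant into the assembler fields %hh (bits 63-42), %hm (bits 41-32), %hi (bits 31-10) and %lo (bits 9-0); the final i64 pattern then reassembles the value as (or (sllx (or (sethi %hh), %hm), 32), (or (sethi %hi), %lo)). The rewritten SETHIimm leaf likewise accepts exactly the constants a single sethi can materialize: a 32-bit value whose low ten bits are zero, which is what isShiftedUInt<22, 10> tests. A minimal standalone C++ sketch of the same bit arithmetic (the constant is made up for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t V = 0x1122334455667788ULL;            // hypothetical constant
      uint32_t HH22 = (V >> 42) & ((1u << 22) - 1);  // %hh, bits 63-42
      uint32_t HM10 = (V >> 32) & ((1u << 10) - 1);  // %hm, bits 41-32
      uint32_t HI22 = (V >> 10) & ((1u << 22) - 1);  // %hi, bits 31-10
      uint32_t LO10 = V & ((1u << 10) - 1);          // %lo, bits 9-0
      // sethi %hh + or %hm build the high word, sethi %hi + or %lo the low word.
      uint64_t Hi = ((uint64_t)HH22 << 10) | HM10;
      uint64_t Lo = ((uint64_t)HI22 << 10) | LO10;
      uint64_t Rebuilt = (Hi << 32) | Lo;            // (or (sllx Hi, 32), Lo)
      printf("%d\n", Rebuilt == V);                  // prints 1
      // SETHIimm: constants a lone sethi can produce (low 10 bits zero, fits in 32 bits).
      bool IsSETHIimm = (V & 0x3ff) == 0 && (V >> 32) == 0;
      printf("%d\n", IsSETHIimm);                    // prints 0 for this V
      return 0;
    }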
@@ -182,11 +189,11 @@ multiclass F3_12<string OpcStr, bits<6> Op3Val, SDNode OpNode> { def rr : F3_1<2, Op3Val, (outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat(OpcStr, " $b, $c, $dst"), - [(set IntRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set i32:$dst, (OpNode i32:$b, i32:$c))]>; def ri : F3_2<2, Op3Val, (outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c), !strconcat(OpcStr, " $b, $c, $dst"), - [(set IntRegs:$dst, (OpNode IntRegs:$b, simm13:$c))]>; + [(set i32:$dst, (OpNode i32:$b, (i32 simm13:$c)))]>; } /// F3_12np multiclass - Define a normal F3_1/F3_2 pattern in one shot, with no @@ -243,10 +250,10 @@ let Predicates = [HasNoV9] in { // Only emit these in V8 mode. "!FpMOVD $src, $dst", []>; def FpNEGD : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$src), "!FpNEGD $src, $dst", - [(set DFPRegs:$dst, (fneg DFPRegs:$src))]>; + [(set f64:$dst, (fneg f64:$src))]>; def FpABSD : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$src), "!FpABSD $src, $dst", - [(set DFPRegs:$dst, (fabs DFPRegs:$src))]>; + [(set f64:$dst, (fabs f64:$src))]>; } // SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after @@ -257,19 +264,16 @@ let Uses = [ICC], usesCustomInserter = 1 in { def SELECT_CC_Int_ICC : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond), "; SELECT_CC_Int_ICC PSEUDO!", - [(set IntRegs:$dst, (SPselecticc IntRegs:$T, IntRegs:$F, - imm:$Cond))]>; + [(set i32:$dst, (SPselecticc i32:$T, i32:$F, imm:$Cond))]>; def SELECT_CC_FP_ICC : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond), "; SELECT_CC_FP_ICC PSEUDO!", - [(set FPRegs:$dst, (SPselecticc FPRegs:$T, FPRegs:$F, - imm:$Cond))]>; + [(set f32:$dst, (SPselecticc f32:$T, f32:$F, imm:$Cond))]>; def SELECT_CC_DFP_ICC : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond), "; SELECT_CC_DFP_ICC PSEUDO!", - [(set DFPRegs:$dst, (SPselecticc DFPRegs:$T, DFPRegs:$F, - imm:$Cond))]>; + [(set f64:$dst, (SPselecticc f64:$T, f64:$F, imm:$Cond))]>; } let usesCustomInserter = 1, Uses = [FCC] in { @@ -277,19 +281,16 @@ let usesCustomInserter = 1, Uses = [FCC] in { def SELECT_CC_Int_FCC : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond), "; SELECT_CC_Int_FCC PSEUDO!", - [(set IntRegs:$dst, (SPselectfcc IntRegs:$T, IntRegs:$F, - imm:$Cond))]>; + [(set i32:$dst, (SPselectfcc i32:$T, i32:$F, imm:$Cond))]>; def SELECT_CC_FP_FCC : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond), "; SELECT_CC_FP_FCC PSEUDO!", - [(set FPRegs:$dst, (SPselectfcc FPRegs:$T, FPRegs:$F, - imm:$Cond))]>; + [(set f32:$dst, (SPselectfcc f32:$T, f32:$F, imm:$Cond))]>; def SELECT_CC_DFP_FCC : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond), "; SELECT_CC_DFP_FCC PSEUDO!", - [(set DFPRegs:$dst, (SPselectfcc DFPRegs:$T, DFPRegs:$F, - imm:$Cond))]>; + [(set f64:$dst, (SPselectfcc f64:$T, f64:$F, imm:$Cond))]>; } @@ -309,111 +310,111 @@ let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1 in { def LDSBrr : F3_1<3, 0b001001, (outs IntRegs:$dst), (ins MEMrr:$addr), "ldsb [$addr], $dst", - [(set IntRegs:$dst, (sextloadi8 ADDRrr:$addr))]>; + [(set i32:$dst, (sextloadi8 ADDRrr:$addr))]>; def LDSBri : F3_2<3, 0b001001, (outs IntRegs:$dst), (ins MEMri:$addr), "ldsb [$addr], $dst", - [(set IntRegs:$dst, (sextloadi8 ADDRri:$addr))]>; + [(set i32:$dst, (sextloadi8 ADDRri:$addr))]>; def LDSHrr : F3_1<3, 0b001010, (outs IntRegs:$dst), (ins MEMrr:$addr), "ldsh [$addr], $dst", - [(set IntRegs:$dst, (sextloadi16 ADDRrr:$addr))]>; + [(set i32:$dst, 
(sextloadi16 ADDRrr:$addr))]>; def LDSHri : F3_2<3, 0b001010, (outs IntRegs:$dst), (ins MEMri:$addr), "ldsh [$addr], $dst", - [(set IntRegs:$dst, (sextloadi16 ADDRri:$addr))]>; + [(set i32:$dst, (sextloadi16 ADDRri:$addr))]>; def LDUBrr : F3_1<3, 0b000001, (outs IntRegs:$dst), (ins MEMrr:$addr), "ldub [$addr], $dst", - [(set IntRegs:$dst, (zextloadi8 ADDRrr:$addr))]>; + [(set i32:$dst, (zextloadi8 ADDRrr:$addr))]>; def LDUBri : F3_2<3, 0b000001, (outs IntRegs:$dst), (ins MEMri:$addr), "ldub [$addr], $dst", - [(set IntRegs:$dst, (zextloadi8 ADDRri:$addr))]>; + [(set i32:$dst, (zextloadi8 ADDRri:$addr))]>; def LDUHrr : F3_1<3, 0b000010, (outs IntRegs:$dst), (ins MEMrr:$addr), "lduh [$addr], $dst", - [(set IntRegs:$dst, (zextloadi16 ADDRrr:$addr))]>; + [(set i32:$dst, (zextloadi16 ADDRrr:$addr))]>; def LDUHri : F3_2<3, 0b000010, (outs IntRegs:$dst), (ins MEMri:$addr), "lduh [$addr], $dst", - [(set IntRegs:$dst, (zextloadi16 ADDRri:$addr))]>; + [(set i32:$dst, (zextloadi16 ADDRri:$addr))]>; def LDrr : F3_1<3, 0b000000, (outs IntRegs:$dst), (ins MEMrr:$addr), "ld [$addr], $dst", - [(set IntRegs:$dst, (load ADDRrr:$addr))]>; + [(set i32:$dst, (load ADDRrr:$addr))]>; def LDri : F3_2<3, 0b000000, (outs IntRegs:$dst), (ins MEMri:$addr), "ld [$addr], $dst", - [(set IntRegs:$dst, (load ADDRri:$addr))]>; + [(set i32:$dst, (load ADDRri:$addr))]>; // Section B.2 - Load Floating-point Instructions, p. 92 def LDFrr : F3_1<3, 0b100000, (outs FPRegs:$dst), (ins MEMrr:$addr), "ld [$addr], $dst", - [(set FPRegs:$dst, (load ADDRrr:$addr))]>; + [(set f32:$dst, (load ADDRrr:$addr))]>; def LDFri : F3_2<3, 0b100000, (outs FPRegs:$dst), (ins MEMri:$addr), "ld [$addr], $dst", - [(set FPRegs:$dst, (load ADDRri:$addr))]>; + [(set f32:$dst, (load ADDRri:$addr))]>; def LDDFrr : F3_1<3, 0b100011, (outs DFPRegs:$dst), (ins MEMrr:$addr), "ldd [$addr], $dst", - [(set DFPRegs:$dst, (load ADDRrr:$addr))]>; + [(set f64:$dst, (load ADDRrr:$addr))]>; def LDDFri : F3_2<3, 0b100011, (outs DFPRegs:$dst), (ins MEMri:$addr), "ldd [$addr], $dst", - [(set DFPRegs:$dst, (load ADDRri:$addr))]>; + [(set f64:$dst, (load ADDRri:$addr))]>; // Section B.4 - Store Integer Instructions, p. 95 def STBrr : F3_1<3, 0b000101, (outs), (ins MEMrr:$addr, IntRegs:$src), "stb $src, [$addr]", - [(truncstorei8 IntRegs:$src, ADDRrr:$addr)]>; + [(truncstorei8 i32:$src, ADDRrr:$addr)]>; def STBri : F3_2<3, 0b000101, (outs), (ins MEMri:$addr, IntRegs:$src), "stb $src, [$addr]", - [(truncstorei8 IntRegs:$src, ADDRri:$addr)]>; + [(truncstorei8 i32:$src, ADDRri:$addr)]>; def STHrr : F3_1<3, 0b000110, (outs), (ins MEMrr:$addr, IntRegs:$src), "sth $src, [$addr]", - [(truncstorei16 IntRegs:$src, ADDRrr:$addr)]>; + [(truncstorei16 i32:$src, ADDRrr:$addr)]>; def STHri : F3_2<3, 0b000110, (outs), (ins MEMri:$addr, IntRegs:$src), "sth $src, [$addr]", - [(truncstorei16 IntRegs:$src, ADDRri:$addr)]>; + [(truncstorei16 i32:$src, ADDRri:$addr)]>; def STrr : F3_1<3, 0b000100, (outs), (ins MEMrr:$addr, IntRegs:$src), "st $src, [$addr]", - [(store IntRegs:$src, ADDRrr:$addr)]>; + [(store i32:$src, ADDRrr:$addr)]>; def STri : F3_2<3, 0b000100, (outs), (ins MEMri:$addr, IntRegs:$src), "st $src, [$addr]", - [(store IntRegs:$src, ADDRri:$addr)]>; + [(store i32:$src, ADDRri:$addr)]>; // Section B.5 - Store Floating-point Instructions, p. 
97 def STFrr : F3_1<3, 0b100100, (outs), (ins MEMrr:$addr, FPRegs:$src), "st $src, [$addr]", - [(store FPRegs:$src, ADDRrr:$addr)]>; + [(store f32:$src, ADDRrr:$addr)]>; def STFri : F3_2<3, 0b100100, (outs), (ins MEMri:$addr, FPRegs:$src), "st $src, [$addr]", - [(store FPRegs:$src, ADDRri:$addr)]>; + [(store f32:$src, ADDRri:$addr)]>; def STDFrr : F3_1<3, 0b100111, (outs), (ins MEMrr:$addr, DFPRegs:$src), "std $src, [$addr]", - [(store DFPRegs:$src, ADDRrr:$addr)]>; + [(store f64:$src, ADDRrr:$addr)]>; def STDFri : F3_2<3, 0b100111, (outs), (ins MEMri:$addr, DFPRegs:$src), "std $src, [$addr]", - [(store DFPRegs:$src, ADDRri:$addr)]>; + [(store f64:$src, ADDRri:$addr)]>; // Section B.9 - SETHI Instruction, p. 104 def SETHIi: F2_1<0b100, (outs IntRegs:$dst), (ins i32imm:$src), "sethi $src, $dst", - [(set IntRegs:$dst, SETHIimm:$src)]>; + [(set i32:$dst, SETHIimm:$src)]>; // Section B.10 - NOP Instruction, p. 105 // (It's a special case of SETHI) @@ -426,7 +427,7 @@ defm AND : F3_12<"and", 0b000001, and>; def ANDNrr : F3_1<2, 0b000101, (outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c), "andn $b, $c, $dst", - [(set IntRegs:$dst, (and IntRegs:$b, (not IntRegs:$c)))]>; + [(set i32:$dst, (and i32:$b, (not i32:$c)))]>; def ANDNri : F3_2<2, 0b000101, (outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c), "andn $b, $c, $dst", []>; @@ -436,7 +437,7 @@ defm OR : F3_12<"or", 0b000010, or>; def ORNrr : F3_1<2, 0b000110, (outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c), "orn $b, $c, $dst", - [(set IntRegs:$dst, (or IntRegs:$b, (not IntRegs:$c)))]>; + [(set i32:$dst, (or i32:$b, (not i32:$c)))]>; def ORNri : F3_2<2, 0b000110, (outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c), "orn $b, $c, $dst", []>; @@ -445,7 +446,7 @@ defm XOR : F3_12<"xor", 0b000011, xor>; def XNORrr : F3_1<2, 0b000111, (outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c), "xnor $b, $c, $dst", - [(set IntRegs:$dst, (not (xor IntRegs:$b, IntRegs:$c)))]>; + [(set i32:$dst, (not (xor i32:$b, i32:$c)))]>; def XNORri : F3_2<2, 0b000111, (outs IntRegs:$dst), (ins IntRegs:$b, i32imm:$c), "xnor $b, $c, $dst", []>; @@ -462,7 +463,7 @@ defm ADD : F3_12<"add", 0b000000, add>; def LEA_ADDri : F3_2<2, 0b000000, (outs IntRegs:$dst), (ins MEMri:$addr), "add ${addr:arith}, $dst", - [(set IntRegs:$dst, ADDRri:$addr)]>; + [(set i32:$dst, ADDRri:$addr)]>; let Defs = [ICC] in defm ADDCC : F3_12<"addcc", 0b010000, addc>; @@ -603,11 +604,11 @@ def FDTOI : F3_3<2, 0b110100, 0b011010010, def FSTOD : F3_3<2, 0b110100, 0b011001001, (outs DFPRegs:$dst), (ins FPRegs:$src), "fstod $src, $dst", - [(set DFPRegs:$dst, (fextend FPRegs:$src))]>; + [(set f64:$dst, (fextend f32:$src))]>; def FDTOS : F3_3<2, 0b110100, 0b011000110, (outs FPRegs:$dst), (ins DFPRegs:$src), "fdtos $src, $dst", - [(set FPRegs:$dst, (fround DFPRegs:$src))]>; + [(set f32:$dst, (fround f64:$src))]>; // Floating-point Move Instructions, p. 
144 def FMOVS : F3_3<2, 0b110100, 0b000000001, @@ -616,22 +617,22 @@ def FMOVS : F3_3<2, 0b110100, 0b000000001, def FNEGS : F3_3<2, 0b110100, 0b000000101, (outs FPRegs:$dst), (ins FPRegs:$src), "fnegs $src, $dst", - [(set FPRegs:$dst, (fneg FPRegs:$src))]>; + [(set f32:$dst, (fneg f32:$src))]>; def FABSS : F3_3<2, 0b110100, 0b000001001, (outs FPRegs:$dst), (ins FPRegs:$src), "fabss $src, $dst", - [(set FPRegs:$dst, (fabs FPRegs:$src))]>; + [(set f32:$dst, (fabs f32:$src))]>; // Floating-point Square Root Instructions, p.145 def FSQRTS : F3_3<2, 0b110100, 0b000101001, (outs FPRegs:$dst), (ins FPRegs:$src), "fsqrts $src, $dst", - [(set FPRegs:$dst, (fsqrt FPRegs:$src))]>; + [(set f32:$dst, (fsqrt f32:$src))]>; def FSQRTD : F3_3<2, 0b110100, 0b000101010, (outs DFPRegs:$dst), (ins DFPRegs:$src), "fsqrtd $src, $dst", - [(set DFPRegs:$dst, (fsqrt DFPRegs:$src))]>; + [(set f64:$dst, (fsqrt f64:$src))]>; @@ -639,42 +640,42 @@ def FSQRTD : F3_3<2, 0b110100, 0b000101010, def FADDS : F3_3<2, 0b110100, 0b001000001, (outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2), "fadds $src1, $src2, $dst", - [(set FPRegs:$dst, (fadd FPRegs:$src1, FPRegs:$src2))]>; + [(set f32:$dst, (fadd f32:$src1, f32:$src2))]>; def FADDD : F3_3<2, 0b110100, 0b001000010, (outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2), "faddd $src1, $src2, $dst", - [(set DFPRegs:$dst, (fadd DFPRegs:$src1, DFPRegs:$src2))]>; + [(set f64:$dst, (fadd f64:$src1, f64:$src2))]>; def FSUBS : F3_3<2, 0b110100, 0b001000101, (outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2), "fsubs $src1, $src2, $dst", - [(set FPRegs:$dst, (fsub FPRegs:$src1, FPRegs:$src2))]>; + [(set f32:$dst, (fsub f32:$src1, f32:$src2))]>; def FSUBD : F3_3<2, 0b110100, 0b001000110, (outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2), "fsubd $src1, $src2, $dst", - [(set DFPRegs:$dst, (fsub DFPRegs:$src1, DFPRegs:$src2))]>; + [(set f64:$dst, (fsub f64:$src1, f64:$src2))]>; // Floating-point Multiply and Divide Instructions, p. 147 def FMULS : F3_3<2, 0b110100, 0b001001001, (outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2), "fmuls $src1, $src2, $dst", - [(set FPRegs:$dst, (fmul FPRegs:$src1, FPRegs:$src2))]>; + [(set f32:$dst, (fmul f32:$src1, f32:$src2))]>; def FMULD : F3_3<2, 0b110100, 0b001001010, (outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2), "fmuld $src1, $src2, $dst", - [(set DFPRegs:$dst, (fmul DFPRegs:$src1, DFPRegs:$src2))]>; + [(set f64:$dst, (fmul f64:$src1, f64:$src2))]>; def FSMULD : F3_3<2, 0b110100, 0b001101001, (outs DFPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2), "fsmuld $src1, $src2, $dst", - [(set DFPRegs:$dst, (fmul (fextend FPRegs:$src1), - (fextend FPRegs:$src2)))]>; + [(set f64:$dst, (fmul (fextend f32:$src1), + (fextend f32:$src2)))]>; def FDIVS : F3_3<2, 0b110100, 0b001001101, (outs FPRegs:$dst), (ins FPRegs:$src1, FPRegs:$src2), "fdivs $src1, $src2, $dst", - [(set FPRegs:$dst, (fdiv FPRegs:$src1, FPRegs:$src2))]>; + [(set f32:$dst, (fdiv f32:$src1, f32:$src2))]>; def FDIVD : F3_3<2, 0b110100, 0b001001110, (outs DFPRegs:$dst), (ins DFPRegs:$src1, DFPRegs:$src2), "fdivd $src1, $src2, $dst", - [(set DFPRegs:$dst, (fdiv DFPRegs:$src1, DFPRegs:$src2))]>; + [(set f64:$dst, (fdiv f64:$src1, f64:$src2))]>; // Floating-point Compare Instructions, p. 148 // Note: the 2nd template arg is different for these guys. 
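Most of the hunks above and below are mechanical retyping of instruction selection patterns from register-class operands (IntRegs, FPRegs, DFPRegs) to value types (i32, f32, f64); with IntRegs widened to also carry i64 in the SparcRegisterInfo.td hunk further down, the same instruction definitions can then be shared by the new 64-bit patterns. As a concrete reading of one retyped pattern, FSMULD's (fmul (fextend f32:$src1), (fextend f32:$src2)) is the DAG shape produced by source code like the following (an assumed example, not part of the patch):

    // A single-precision pair multiplied into a double-precision result; the
    // fmul-of-two-fextends shape is what the FSMULD pattern above is written
    // to match on SPARC.
    double scaled(float a, float b) {
      return static_cast<double>(a) * static_cast<double>(b);
    }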
@@ -685,11 +686,11 @@ let Defs = [FCC] in { def FCMPS : F3_3<2, 0b110101, 0b001010001, (outs), (ins FPRegs:$src1, FPRegs:$src2), "fcmps $src1, $src2\n\tnop", - [(SPcmpfcc FPRegs:$src1, FPRegs:$src2)]>; + [(SPcmpfcc f32:$src1, f32:$src2)]>; def FCMPD : F3_3<2, 0b110101, 0b001010010, (outs), (ins DFPRegs:$src1, DFPRegs:$src2), "fcmpd $src1, $src2\n\tnop", - [(SPcmpfcc DFPRegs:$src1, DFPRegs:$src2)]>; + [(SPcmpfcc f64:$src1, f64:$src2)]>; } //===----------------------------------------------------------------------===// @@ -704,52 +705,45 @@ let Predicates = [HasV9], Constraints = "$T = $dst" in { def MOVICCrr : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc), "mov$cc %icc, $F, $dst", - [(set IntRegs:$dst, - (SPselecticc IntRegs:$F, IntRegs:$T, imm:$cc))]>; + [(set i32:$dst, (SPselecticc i32:$F, i32:$T, imm:$cc))]>; def MOVICCri : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc), "mov$cc %icc, $F, $dst", - [(set IntRegs:$dst, - (SPselecticc simm11:$F, IntRegs:$T, imm:$cc))]>; + [(set i32:$dst, (SPselecticc simm11:$F, i32:$T, imm:$cc))]>; } let Uses = [FCC] in { def MOVFCCrr : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc), "mov$cc %fcc0, $F, $dst", - [(set IntRegs:$dst, - (SPselectfcc IntRegs:$F, IntRegs:$T, imm:$cc))]>; + [(set i32:$dst, (SPselectfcc i32:$F, i32:$T, imm:$cc))]>; def MOVFCCri : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc), "mov$cc %fcc0, $F, $dst", - [(set IntRegs:$dst, - (SPselectfcc simm11:$F, IntRegs:$T, imm:$cc))]>; + [(set i32:$dst, (SPselectfcc simm11:$F, i32:$T, imm:$cc))]>; } let Uses = [ICC] in { def FMOVS_ICC : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc), "fmovs$cc %icc, $F, $dst", - [(set FPRegs:$dst, - (SPselecticc FPRegs:$F, FPRegs:$T, imm:$cc))]>; + [(set f32:$dst, + (SPselecticc f32:$F, f32:$T, imm:$cc))]>; def FMOVD_ICC : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc), "fmovd$cc %icc, $F, $dst", - [(set DFPRegs:$dst, - (SPselecticc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>; + [(set f64:$dst, (SPselecticc f64:$F, f64:$T, imm:$cc))]>; } let Uses = [FCC] in { def FMOVS_FCC : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc), "fmovs$cc %fcc0, $F, $dst", - [(set FPRegs:$dst, - (SPselectfcc FPRegs:$F, FPRegs:$T, imm:$cc))]>; + [(set f32:$dst, (SPselectfcc f32:$F, f32:$T, imm:$cc))]>; def FMOVD_FCC : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc), "fmovd$cc %fcc0, $F, $dst", - [(set DFPRegs:$dst, - (SPselectfcc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>; + [(set f64:$dst, (SPselectfcc f64:$F, f64:$T, imm:$cc))]>; } } @@ -762,11 +756,11 @@ let Predicates = [HasV9] in { def FNEGD : F3_3<2, 0b110100, 0b000000110, (outs DFPRegs:$dst), (ins DFPRegs:$src), "fnegd $src, $dst", - [(set DFPRegs:$dst, (fneg DFPRegs:$src))]>; + [(set f64:$dst, (fneg f64:$src))]>; def FABSD : F3_3<2, 0b110100, 0b000001010, (outs DFPRegs:$dst), (ins DFPRegs:$src), "fabsd $src, $dst", - [(set DFPRegs:$dst, (fabs DFPRegs:$src))]>; + [(set f64:$dst, (fabs f64:$src))]>; } // POPCrr - This does a ctpop of a 64-bit register. 
As such, we have to clear @@ -774,8 +768,8 @@ let Predicates = [HasV9] in { def POPCrr : F3_1<2, 0b101110, (outs IntRegs:$dst), (ins IntRegs:$src), "popc $src, $dst", []>, Requires<[HasV9]>; -def : Pat<(ctpop IntRegs:$src), - (POPCrr (SLLri IntRegs:$src, 0))>; +def : Pat<(ctpop i32:$src), + (POPCrr (SLLri $src, 0))>; //===----------------------------------------------------------------------===// // Non-Instruction Patterns @@ -783,28 +777,26 @@ def : Pat<(ctpop IntRegs:$src), // Small immediates. def : Pat<(i32 simm13:$val), - (ORri G0, imm:$val)>; + (ORri (i32 G0), imm:$val)>; // Arbitrary immediates. def : Pat<(i32 imm:$val), (ORri (SETHIi (HI22 imm:$val)), (LO10 imm:$val))>; // subc -def : Pat<(subc IntRegs:$b, IntRegs:$c), - (SUBCCrr IntRegs:$b, IntRegs:$c)>; -def : Pat<(subc IntRegs:$b, simm13:$val), - (SUBCCri IntRegs:$b, imm:$val)>; +def : Pat<(subc i32:$b, i32:$c), + (SUBCCrr $b, $c)>; +def : Pat<(subc i32:$b, simm13:$val), + (SUBCCri $b, imm:$val)>; // Global addresses, constant pool entries def : Pat<(SPhi tglobaladdr:$in), (SETHIi tglobaladdr:$in)>; -def : Pat<(SPlo tglobaladdr:$in), (ORri G0, tglobaladdr:$in)>; +def : Pat<(SPlo tglobaladdr:$in), (ORri (i32 G0), tglobaladdr:$in)>; def : Pat<(SPhi tconstpool:$in), (SETHIi tconstpool:$in)>; -def : Pat<(SPlo tconstpool:$in), (ORri G0, tconstpool:$in)>; +def : Pat<(SPlo tconstpool:$in), (ORri (i32 G0), tconstpool:$in)>; // Add reg, lo. This is used when taking the addr of a global/constpool entry. -def : Pat<(add IntRegs:$r, (SPlo tglobaladdr:$in)), - (ADDri IntRegs:$r, tglobaladdr:$in)>; -def : Pat<(add IntRegs:$r, (SPlo tconstpool:$in)), - (ADDri IntRegs:$r, tconstpool:$in)>; +def : Pat<(add iPTR:$r, (SPlo tglobaladdr:$in)), (ADDri $r, tglobaladdr:$in)>; +def : Pat<(add iPTR:$r, (SPlo tconstpool:$in)), (ADDri $r, tconstpool:$in)>; // Calls: def : Pat<(call tglobaladdr:$dst), @@ -823,3 +815,5 @@ def : Pat<(i32 (extloadi16 ADDRri:$src)), (LDUHri ADDRri:$src)>; // zextload bool -> zextload byte def : Pat<(i32 (zextloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>; def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>; + +include "SparcInstr64Bit.td" diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp index 25e90b7..3af4c61 100644 --- a/lib/Target/Sparc/SparcRegisterInfo.cpp +++ b/lib/Target/Sparc/SparcRegisterInfo.cpp @@ -56,6 +56,12 @@ BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &MF) const { return Reserved; } +const TargetRegisterClass* +SparcRegisterInfo::getPointerRegClass(const MachineFunction &MF, + unsigned Kind) const { + return Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass; +} + void SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, @@ -68,8 +74,9 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // Addressable stack objects are accessed using neg. offsets from %fp MachineFunction &MF = *MI.getParent()->getParent(); - int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) + - MI.getOperand(FIOperandNum + 1).getImm(); + int64_t Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) + + MI.getOperand(FIOperandNum + 1).getImm() + + Subtarget.getStackPointerBias(); // Replace frame index with a frame pointer reference. 
if (Offset >= -4096 && Offset <= 4095) { diff --git a/lib/Target/Sparc/SparcRegisterInfo.h b/lib/Target/Sparc/SparcRegisterInfo.h index b53a1ed..f91df53 100644 --- a/lib/Target/Sparc/SparcRegisterInfo.h +++ b/lib/Target/Sparc/SparcRegisterInfo.h @@ -36,6 +36,9 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo { BitVector getReservedRegs(const MachineFunction &MF) const; + const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF, + unsigned Kind) const; + void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS = NULL) const; diff --git a/lib/Target/Sparc/SparcRegisterInfo.td b/lib/Target/Sparc/SparcRegisterInfo.td index 81bff6c..497e7c5 100644 --- a/lib/Target/Sparc/SparcRegisterInfo.td +++ b/lib/Target/Sparc/SparcRegisterInfo.td @@ -43,7 +43,7 @@ class Rd<bits<5> num, string n, list<Register> subregs> : SparcReg<n> { } // Control Registers -def ICC : SparcCtrlReg<"ICC">; +def ICC : SparcCtrlReg<"ICC">; // This represents icc and xcc in 64-bit code. def FCC : SparcCtrlReg<"FCC">; // Y register @@ -140,7 +140,10 @@ def D15 : Rd<30, "F30", [F30, F31]>, DwarfRegNum<[87]>; // FIXME: the register order should be defined in terms of the preferred // allocation order... // -def IntRegs : RegisterClass<"SP", [i32], 32, +// This register class should not be used to hold i64 values, use the I64Regs +// register class for that. The i64 type is included here to allow i64 patterns +// using the integer instructions. +def IntRegs : RegisterClass<"SP", [i32, i64], 32, (add L0, L1, L2, L3, L4, L5, L6, L7, I0, I1, I2, I3, I4, I5, O0, O1, O2, O3, O4, O5, O7, @@ -155,6 +158,13 @@ def IntRegs : RegisterClass<"SP", [i32], 32, G5, G6, G7 // reserved for kernel )>; +// Register class for 64-bit mode, with a 64-bit spill slot size. +// These are the same as the 32-bit registers, so TableGen will consider this +// to be a sub-class of IntRegs. That works out because requiring a 64-bit +// spill slot is a stricter constraint than only requiring a 32-bit spill slot. +def I64Regs : RegisterClass<"SP", [i64], 64, (add IntRegs)>; + +// Floating point register classes. def FPRegs : RegisterClass<"SP", [f32], 32, (sequence "F%u", 0, 31)>; def DFPRegs : RegisterClass<"SP", [f64], 64, (sequence "D%u", 0, 15)>; diff --git a/lib/Target/Sparc/SparcSubtarget.h b/lib/Target/Sparc/SparcSubtarget.h index a81931b..b94dd11 100644 --- a/lib/Target/Sparc/SparcSubtarget.h +++ b/lib/Target/Sparc/SparcSubtarget.h @@ -52,6 +52,12 @@ public: } return std::string(p); } + + /// The 64-bit ABI uses biased stack and frame pointers, so the stack frame + /// of the current function is the area from [%sp+BIAS] to [%fp+BIAS]. + int64_t getStackPointerBias() const { + return is64Bit() ? 
2047 : 0; + } }; } // end namespace llvm diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp index 9a78ebc..3d92f29 100644 --- a/lib/Target/Target.cpp +++ b/lib/Target/Target.cpp @@ -16,6 +16,7 @@ #include "llvm-c/Initialization.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Value.h" #include "llvm/InitializePasses.h" #include "llvm/PassManager.h" #include "llvm/Target/TargetLibraryInfo.h" @@ -23,6 +24,23 @@ using namespace llvm; +inline DataLayout *unwrap(LLVMTargetDataRef P) { + return reinterpret_cast<DataLayout*>(P); +} + +inline LLVMTargetDataRef wrap(const DataLayout *P) { + return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P)); +} + +inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) { + return reinterpret_cast<TargetLibraryInfo*>(P); +} + +inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) { + TargetLibraryInfo *X = const_cast<TargetLibraryInfo*>(P); + return reinterpret_cast<LLVMTargetLibraryInfoRef>(X); +} + void llvm::initializeTarget(PassRegistry &Registry) { initializeDataLayoutPass(Registry); initializeTargetLibraryInfoPass(Registry); diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp index 79f74bd..01d12e8 100644 --- a/lib/Target/TargetMachineC.cpp +++ b/lib/Target/TargetMachineC.cpp @@ -28,7 +28,36 @@ using namespace llvm; +inline DataLayout *unwrap(LLVMTargetDataRef P) { + return reinterpret_cast<DataLayout*>(P); +} + +inline LLVMTargetDataRef wrap(const DataLayout *P) { + return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P)); +} + +inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) { + return reinterpret_cast<TargetLibraryInfo*>(P); +} + +inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) { + TargetLibraryInfo *X = const_cast<TargetLibraryInfo*>(P); + return reinterpret_cast<LLVMTargetLibraryInfoRef>(X); +} +inline TargetMachine *unwrap(LLVMTargetMachineRef P) { + return reinterpret_cast<TargetMachine*>(P); +} +inline Target *unwrap(LLVMTargetRef P) { + return reinterpret_cast<Target*>(P); +} +inline LLVMTargetMachineRef wrap(const TargetMachine *P) { + return + reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P)); +} +inline LLVMTargetRef wrap(const Target * P) { + return reinterpret_cast<LLVMTargetRef>(const_cast<Target*>(P)); +} LLVMTargetRef LLVMGetFirstTarget() { const Target* target = &*TargetRegistry::begin(); @@ -77,29 +106,9 @@ LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T, char* Triple, break; } - CodeModel::Model CM; - switch (CodeModel) { - case LLVMCodeModelJITDefault: - CM = CodeModel::JITDefault; - break; - case LLVMCodeModelSmall: - CM = CodeModel::Small; - break; - case LLVMCodeModelKernel: - CM = CodeModel::Kernel; - break; - case LLVMCodeModelMedium: - CM = CodeModel::Medium; - break; - case LLVMCodeModelLarge: - CM = CodeModel::Large; - break; - default: - CM = CodeModel::Default; - break; - } - CodeGenOpt::Level OL; + CodeModel::Model CM = unwrap(CodeModel); + CodeGenOpt::Level OL; switch (Level) { case LLVMCodeGenLevelNone: OL = CodeGenOpt::None; @@ -149,8 +158,8 @@ LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) { return wrap(unwrap(T)->getDataLayout()); } -LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, - char* Filename, LLVMCodeGenFileType codegen, char** ErrorMessage) { +static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M, + formatted_raw_ostream &OS, LLVMCodeGenFileType codegen, char 
**ErrorMessage) { TargetMachine* TM = unwrap(T); Module* Mod = unwrap(M); @@ -176,14 +185,7 @@ LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, ft = TargetMachine::CGFT_ObjectFile; break; } - raw_fd_ostream dest(Filename, error, raw_fd_ostream::F_Binary); - formatted_raw_ostream destf(dest); - if (!error.empty()) { - *ErrorMessage = strdup(error.c_str()); - return true; - } - - if (TM->addPassesToEmitFile(pass, destf, ft)) { + if (TM->addPassesToEmitFile(pass, OS, ft)) { error = "TargetMachine can't emit a file of this type"; *ErrorMessage = strdup(error.c_str()); return true; @@ -191,7 +193,35 @@ LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, pass.run(*Mod); - destf.flush(); - dest.flush(); + OS.flush(); return false; } + +LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, + char* Filename, LLVMCodeGenFileType codegen, char** ErrorMessage) { + std::string error; + raw_fd_ostream dest(Filename, error, raw_fd_ostream::F_Binary); + formatted_raw_ostream destf(dest); + if (!error.empty()) { + *ErrorMessage = strdup(error.c_str()); + return true; + } + bool Result = LLVMTargetMachineEmit(T, M, destf, codegen, ErrorMessage); + dest.flush(); + return Result; +} + +LLVMBool LLVMTargetMachineEmitToMemoryBuffer(LLVMTargetMachineRef T, + LLVMModuleRef M, LLVMCodeGenFileType codegen, char** ErrorMessage, + LLVMMemoryBufferRef *OutMemBuf) { + std::string CodeString; + raw_string_ostream OStream(CodeString); + formatted_raw_ostream Out(OStream); + bool Result = LLVMTargetMachineEmit(T, M, Out, codegen, ErrorMessage); + OStream.flush(); + + std::string &Data = OStream.str(); + *OutMemBuf = LLVMCreateMemoryBufferWithMemoryRangeCopy(Data.c_str(), + Data.length(), ""); + return Result; +} diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp index 4ed5534a6..fef5cfe 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -13,6 +13,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Twine.h" +#include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCParser/MCAsmLexer.h" @@ -32,11 +33,445 @@ using namespace llvm; namespace { struct X86Operand; +static const char OpPrecedence[] = { + 0, // IC_PLUS + 0, // IC_MINUS + 1, // IC_MULTIPLY + 1, // IC_DIVIDE + 2, // IC_RPAREN + 3, // IC_LPAREN + 0, // IC_IMM + 0 // IC_REGISTER +}; + class X86AsmParser : public MCTargetAsmParser { MCSubtargetInfo &STI; MCAsmParser &Parser; ParseInstructionInfo *InstInfo; private: + enum InfixCalculatorTok { + IC_PLUS = 0, + IC_MINUS, + IC_MULTIPLY, + IC_DIVIDE, + IC_RPAREN, + IC_LPAREN, + IC_IMM, + IC_REGISTER + }; + + class InfixCalculator { + typedef std::pair< InfixCalculatorTok, int64_t > ICToken; + SmallVector<InfixCalculatorTok, 4> InfixOperatorStack; + SmallVector<ICToken, 4> PostfixStack; + + public: + int64_t popOperand() { + assert (!PostfixStack.empty() && "Poped an empty stack!"); + ICToken Op = PostfixStack.pop_back_val(); + assert ((Op.first == IC_IMM || Op.first == IC_REGISTER) + && "Expected and immediate or register!"); + return Op.second; + } + void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) { + assert ((Op == IC_IMM || Op == IC_REGISTER) && + "Unexpected operand!"); + PostfixStack.push_back(std::make_pair(Op, Val)); + } + + void popOperator() { InfixOperatorStack.pop_back_val(); } + void pushOperator(InfixCalculatorTok Op) { + // Push the 
new operator if the stack is empty. + if (InfixOperatorStack.empty()) { + InfixOperatorStack.push_back(Op); + return; + } + + // Push the new operator if it has a higher precedence than the operator + // on the top of the stack or the operator on the top of the stack is a + // left parentheses. + unsigned Idx = InfixOperatorStack.size() - 1; + InfixCalculatorTok StackOp = InfixOperatorStack[Idx]; + if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) { + InfixOperatorStack.push_back(Op); + return; + } + + // The operator on the top of the stack has higher precedence than the + // new operator. + unsigned ParenCount = 0; + while (1) { + // Nothing to process. + if (InfixOperatorStack.empty()) + break; + + Idx = InfixOperatorStack.size() - 1; + StackOp = InfixOperatorStack[Idx]; + if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount)) + break; + + // If we have an even parentheses count and we see a left parentheses, + // then stop processing. + if (!ParenCount && StackOp == IC_LPAREN) + break; + + if (StackOp == IC_RPAREN) { + ++ParenCount; + InfixOperatorStack.pop_back_val(); + } else if (StackOp == IC_LPAREN) { + --ParenCount; + InfixOperatorStack.pop_back_val(); + } else { + InfixOperatorStack.pop_back_val(); + PostfixStack.push_back(std::make_pair(StackOp, 0)); + } + } + // Push the new operator. + InfixOperatorStack.push_back(Op); + } + int64_t execute() { + // Push any remaining operators onto the postfix stack. + while (!InfixOperatorStack.empty()) { + InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val(); + if (StackOp != IC_LPAREN && StackOp != IC_RPAREN) + PostfixStack.push_back(std::make_pair(StackOp, 0)); + } + + if (PostfixStack.empty()) + return 0; + + SmallVector<ICToken, 16> OperandStack; + for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) { + ICToken Op = PostfixStack[i]; + if (Op.first == IC_IMM || Op.first == IC_REGISTER) { + OperandStack.push_back(Op); + } else { + assert (OperandStack.size() > 1 && "Too few operands."); + int64_t Val; + ICToken Op2 = OperandStack.pop_back_val(); + ICToken Op1 = OperandStack.pop_back_val(); + switch (Op.first) { + default: + report_fatal_error("Unexpected operator!"); + break; + case IC_PLUS: + Val = Op1.second + Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_MINUS: + Val = Op1.second - Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_MULTIPLY: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Multiply operation with an immediate and a register!"); + Val = Op1.second * Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_DIVIDE: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Divide operation with an immediate and a register!"); + assert (Op2.second != 0 && "Division by zero!"); + Val = Op1.second / Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + } + } + } + assert (OperandStack.size() == 1 && "Expected a single result."); + return OperandStack.pop_back_val().second; + } + }; + + enum IntelExprState { + IES_PLUS, + IES_MINUS, + IES_MULTIPLY, + IES_DIVIDE, + IES_LBRAC, + IES_RBRAC, + IES_LPAREN, + IES_RPAREN, + IES_REGISTER, + IES_INTEGER, + IES_IDENTIFIER, + IES_ERROR + }; + + class IntelExprStateMachine { + IntelExprState State, PrevState; + unsigned BaseReg, IndexReg, TmpReg, Scale; + int64_t Imm; + const MCExpr *Sym; + StringRef SymName; + bool StopOnLBrac, AddImmPrefix; + InfixCalculator IC; + InlineAsmIdentifierInfo Info; + public: + 
IntelExprStateMachine(int64_t imm, bool stoponlbrac, bool addimmprefix) : + State(IES_PLUS), PrevState(IES_ERROR), BaseReg(0), IndexReg(0), TmpReg(0), + Scale(1), Imm(imm), Sym(0), StopOnLBrac(stoponlbrac), + AddImmPrefix(addimmprefix) { Info.clear(); } + + unsigned getBaseReg() { return BaseReg; } + unsigned getIndexReg() { return IndexReg; } + unsigned getScale() { return Scale; } + const MCExpr *getSym() { return Sym; } + StringRef getSymName() { return SymName; } + int64_t getImm() { return Imm + IC.execute(); } + bool isValidEndState() { return State == IES_RBRAC; } + bool getStopOnLBrac() { return StopOnLBrac; } + bool getAddImmPrefix() { return AddImmPrefix; } + bool hadError() { return State == IES_ERROR; } + + InlineAsmIdentifierInfo &getIdentifierInfo() { + return Info; + } + + void onPlus() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_PLUS; + IC.pushOperator(IC_PLUS); + if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) { + // If we already have a BaseReg, then assume this is the IndexReg with + // a scale of 1. + if (!BaseReg) { + BaseReg = TmpReg; + } else { + assert (!IndexReg && "BaseReg/IndexReg already set!"); + IndexReg = TmpReg; + Scale = 1; + } + } + break; + } + PrevState = CurrState; + } + void onMinus() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MULTIPLY: + case IES_DIVIDE: + case IES_LPAREN: + case IES_RPAREN: + case IES_LBRAC: + case IES_RBRAC: + case IES_INTEGER: + case IES_REGISTER: + State = IES_MINUS; + // Only push the minus operator if it is not a unary operator. + if (!(CurrState == IES_PLUS || CurrState == IES_MINUS || + CurrState == IES_MULTIPLY || CurrState == IES_DIVIDE || + CurrState == IES_LPAREN || CurrState == IES_LBRAC)) + IC.pushOperator(IC_MINUS); + if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) { + // If we already have a BaseReg, then assume this is the IndexReg with + // a scale of 1. + if (!BaseReg) { + BaseReg = TmpReg; + } else { + assert (!IndexReg && "BaseReg/IndexReg already set!"); + IndexReg = TmpReg; + Scale = 1; + } + } + break; + } + PrevState = CurrState; + } + void onRegister(unsigned Reg) { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_LPAREN: + State = IES_REGISTER; + TmpReg = Reg; + IC.pushOperand(IC_REGISTER); + break; + case IES_MULTIPLY: + // Index Register - Scale * Register + if (PrevState == IES_INTEGER) { + assert (!IndexReg && "IndexReg already set!"); + State = IES_REGISTER; + IndexReg = Reg; + // Get the scale and replace the 'Scale * Register' with '0'. 
+ Scale = IC.popOperand(); + IC.pushOperand(IC_IMM); + IC.popOperator(); + } else { + State = IES_ERROR; + } + break; + } + PrevState = CurrState; + } + void onIdentifierExpr(const MCExpr *SymRef, StringRef SymRefName) { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MINUS: + State = IES_INTEGER; + Sym = SymRef; + SymName = SymRefName; + IC.pushOperand(IC_IMM); + break; + } + } + void onInteger(int64_t TmpInt) { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MINUS: + case IES_DIVIDE: + case IES_MULTIPLY: + case IES_LPAREN: + State = IES_INTEGER; + if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) { + // Index Register - Register * Scale + assert (!IndexReg && "IndexReg already set!"); + IndexReg = TmpReg; + Scale = TmpInt; + // Get the scale and replace the 'Register * Scale' with '0'. + IC.popOperator(); + } else if ((PrevState == IES_PLUS || PrevState == IES_MINUS || + PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE || + PrevState == IES_LPAREN || PrevState == IES_LBRAC) && + CurrState == IES_MINUS) { + // Unary minus. No need to pop the minus operand because it was never + // pushed. + IC.pushOperand(IC_IMM, -TmpInt); // Push -Imm. + } else { + IC.pushOperand(IC_IMM, TmpInt); + } + break; + } + PrevState = CurrState; + } + void onStar() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_REGISTER: + case IES_RPAREN: + State = IES_MULTIPLY; + IC.pushOperator(IC_MULTIPLY); + break; + } + } + void onDivide() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + State = IES_DIVIDE; + IC.pushOperator(IC_DIVIDE); + break; + } + } + void onLBrac() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_RBRAC: + State = IES_PLUS; + IC.pushOperator(IC_PLUS); + break; + } + } + void onRBrac() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_REGISTER: + case IES_RPAREN: + State = IES_RBRAC; + if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) { + // If we already have a BaseReg, then assume this is the IndexReg with + // a scale of 1. + if (!BaseReg) { + BaseReg = TmpReg; + } else { + assert (!IndexReg && "BaseReg/IndexReg already set!"); + IndexReg = TmpReg; + Scale = 1; + } + } + break; + } + PrevState = CurrState; + } + void onLParen() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MINUS: + case IES_MULTIPLY: + case IES_DIVIDE: + case IES_LPAREN: + // FIXME: We don't handle this type of unary minus, yet. 
+ if ((PrevState == IES_PLUS || PrevState == IES_MINUS || + PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE || + PrevState == IES_LPAREN || PrevState == IES_LBRAC) && + CurrState == IES_MINUS) { + State = IES_ERROR; + break; + } + State = IES_LPAREN; + IC.pushOperator(IC_LPAREN); + break; + } + PrevState = CurrState; + } + void onRParen() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_REGISTER: + case IES_RPAREN: + State = IES_RPAREN; + IC.pushOperator(IC_RPAREN); + break; + } + } + }; + MCAsmParser &getParser() const { return Parser; } MCAsmLexer &getLexer() const { return Parser.getLexer(); } @@ -56,14 +491,24 @@ private: X86Operand *ParseOperand(); X86Operand *ParseATTOperand(); X86Operand *ParseIntelOperand(); - X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc); - X86Operand *ParseIntelOperator(SMLoc StartLoc, unsigned OpKind); - X86Operand *ParseIntelMemOperand(unsigned SegReg, SMLoc StartLoc); - X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size); + X86Operand *ParseIntelOffsetOfOperator(); + X86Operand *ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp); + X86Operand *ParseIntelOperator(unsigned OpKind); + X86Operand *ParseIntelMemOperand(unsigned SegReg, int64_t ImmDisp, + SMLoc StartLoc); + X86Operand *ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End); + X86Operand *ParseIntelBracExpression(unsigned SegReg, SMLoc Start, + int64_t ImmDisp, unsigned Size); + X86Operand *ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier, + InlineAsmIdentifierInfo &Info, SMLoc &End); + X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc); - bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp, - SmallString<64> &Err); + X86Operand *CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, + unsigned BaseReg, unsigned IndexReg, + unsigned Scale, SMLoc Start, SMLoc End, + unsigned Size, StringRef Identifier, + InlineAsmIdentifierInfo &Info); bool ParseDirectiveWord(unsigned Size, SMLoc L); bool ParseDirectiveCode(StringRef IDVal, SMLoc L); @@ -93,6 +538,10 @@ private: setAvailableFeatures(FB); } + bool isParsingIntelSyntax() { + return getParser().getAssemblerDialect(); + } + /// @name Auto-generated Matcher Functions /// { @@ -115,10 +564,6 @@ public: SmallVectorImpl<MCParsedAsmOperand*> &Operands); virtual bool ParseDirective(AsmToken DirectiveID); - - bool isParsingIntelSyntax() { - return getParser().getAssemblerDialect(); - } }; } // end anonymous namespace @@ -168,6 +613,8 @@ struct X86Operand : public MCParsedAsmOperand { SMLoc StartLoc, EndLoc; SMLoc OffsetOfLoc; + StringRef SymName; + void *OpDecl; bool AddressOf; struct TokOp { @@ -181,7 +628,6 @@ struct X86Operand : public MCParsedAsmOperand { struct ImmOp { const MCExpr *Val; - bool NeedAsmRewrite; }; struct MemOp { @@ -191,7 +637,6 @@ struct X86Operand : public MCParsedAsmOperand { unsigned IndexReg; unsigned Scale; unsigned Size; - bool NeedSizeDir; }; union { @@ -204,6 +649,9 @@ struct X86Operand : public MCParsedAsmOperand { X86Operand(KindTy K, SMLoc Start, SMLoc End) : Kind(K), StartLoc(Start), EndLoc(End) {} + StringRef getSymName() { return SymName; } + void *getOpDecl() { return OpDecl; } + /// getStartLoc - Get the location of the first token of this operand. SMLoc getStartLoc() const { return StartLoc; } /// getEndLoc - Get the location of the last token of this operand. 
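The InfixCalculator above is a small shunting-yard style evaluator: pushOperator moves already-stacked operators of greater or equal precedence (per the OpPrecedence table) onto the postfix stack before pushing the new one, and execute() then walks the postfix form with an operand stack. A self-contained, simplified sketch of the same idea, without parentheses or register operands (an assumed illustration, not the parser's own code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum Tok { T_PLUS, T_MINUS, T_MUL, T_DIV, T_IMM };
    static const int Prec[] = {0, 0, 1, 1, 0};   // mirrors OpPrecedence

    struct Item { Tok K; int64_t V; };

    int64_t evalInfix(const std::vector<Item> &In) {
      std::vector<Tok> Ops;        // operator stack
      std::vector<Item> Postfix;   // postfix output
      for (const Item &I : In) {
        if (I.K == T_IMM) { Postfix.push_back(I); continue; }
        // Drain operators of greater or equal precedence before pushing.
        while (!Ops.empty() && Prec[Ops.back()] >= Prec[I.K]) {
          Postfix.push_back({Ops.back(), 0});
          Ops.pop_back();
        }
        Ops.push_back(I.K);
      }
      while (!Ops.empty()) { Postfix.push_back({Ops.back(), 0}); Ops.pop_back(); }

      std::vector<int64_t> Vals;   // operand stack for evaluation
      for (const Item &I : Postfix) {
        if (I.K == T_IMM) { Vals.push_back(I.V); continue; }
        assert(Vals.size() >= 2 && "too few operands");
        int64_t B = Vals.back(); Vals.pop_back();
        int64_t A = Vals.back(); Vals.pop_back();
        switch (I.K) {
        case T_PLUS:  Vals.push_back(A + B); break;
        case T_MINUS: Vals.push_back(A - B); break;
        case T_MUL:   Vals.push_back(A * B); break;
        case T_DIV:   Vals.push_back(A / B); break;
        default: break;
        }
      }
      return Vals.back();
    }

    int main() {
      // 8 + 2 * 4 - 6  ==>  postfix 8 2 4 * + 6 -  ==>  10
      std::vector<Item> E = {{T_IMM, 8}, {T_PLUS, 0}, {T_IMM, 2}, {T_MUL, 0},
                             {T_IMM, 4}, {T_MINUS, 0}, {T_IMM, 6}};
      printf("%lld\n", (long long)evalInfix(E));
      return 0;
    }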
@@ -236,11 +684,6 @@ struct X86Operand : public MCParsedAsmOperand { return Imm.Val; } - bool needAsmRewrite() const { - assert(Kind == Immediate && "Invalid access!"); - return Imm.NeedAsmRewrite; - } - const MCExpr *getMemDisp() const { assert(Kind == Memory && "Invalid access!"); return Mem.Disp; @@ -337,11 +780,6 @@ struct X86Operand : public MCParsedAsmOperand { return isImmSExti64i32Value(CE->getValue()); } - unsigned getMemSize() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.Size; - } - bool isOffsetOf() const { return OffsetOfLoc.getPointer(); } @@ -350,11 +788,6 @@ struct X86Operand : public MCParsedAsmOperand { return AddressOf; } - bool needSizeDirective() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.NeedSizeDir; - } - bool isMem() const { return Kind == Memory; } bool isMem8() const { return Kind == Memory && (!Mem.Size || Mem.Size == 8); @@ -482,25 +915,28 @@ struct X86Operand : public MCParsedAsmOperand { static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc, bool AddressOf = false, - SMLoc OffsetOfLoc = SMLoc()) { + SMLoc OffsetOfLoc = SMLoc(), + StringRef SymName = StringRef(), + void *OpDecl = 0) { X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc); Res->Reg.RegNo = RegNo; Res->AddressOf = AddressOf; Res->OffsetOfLoc = OffsetOfLoc; + Res->SymName = SymName; + Res->OpDecl = OpDecl; return Res; } - static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc, - bool NeedRewrite = true){ + static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){ X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc); Res->Imm.Val = Val; - Res->Imm.NeedAsmRewrite = NeedRewrite; return Res; } /// Create an absolute memory operand. static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, - unsigned Size = 0, bool NeedSizeDir = false) { + unsigned Size = 0, StringRef SymName = StringRef(), + void *OpDecl = 0) { X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc); Res->Mem.SegReg = 0; Res->Mem.Disp = Disp; @@ -508,8 +944,9 @@ struct X86Operand : public MCParsedAsmOperand { Res->Mem.IndexReg = 0; Res->Mem.Scale = 1; Res->Mem.Size = Size; - Res->Mem.NeedSizeDir = NeedSizeDir; - Res->AddressOf = false; + Res->SymName = SymName; + Res->OpDecl = OpDecl; + Res->AddressOf = false; return Res; } @@ -517,7 +954,9 @@ struct X86Operand : public MCParsedAsmOperand { static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc, SMLoc EndLoc, - unsigned Size = 0, bool NeedSizeDir = false) { + unsigned Size = 0, + StringRef SymName = StringRef(), + void *OpDecl = 0) { // We should never just have a displacement, that should be parsed as an // absolute memory operand. 
assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!"); @@ -532,8 +971,9 @@ struct X86Operand : public MCParsedAsmOperand { Res->Mem.IndexReg = IndexReg; Res->Mem.Scale = Scale; Res->Mem.Size = Size; - Res->Mem.NeedSizeDir = NeedSizeDir; - Res->AddressOf = false; + Res->SymName = SymName; + Res->OpDecl = OpDecl; + Res->AddressOf = false; return Res; } }; @@ -689,251 +1129,104 @@ static unsigned getIntelMemOperandSize(StringRef OpStr) { return Size; } -enum IntelBracExprState { - IBES_START, - IBES_LBRAC, - IBES_RBRAC, - IBES_REGISTER, - IBES_REGISTER_STAR, - IBES_REGISTER_STAR_INTEGER, - IBES_INTEGER, - IBES_INTEGER_STAR, - IBES_INDEX_REGISTER, - IBES_IDENTIFIER, - IBES_DISP_EXPR, - IBES_MINUS, - IBES_ERROR -}; - -class IntelBracExprStateMachine { - IntelBracExprState State; - unsigned BaseReg, IndexReg, Scale; - int64_t Disp; - - unsigned TmpReg; - int64_t TmpInteger; - - bool isPlus; - -public: - IntelBracExprStateMachine(MCAsmParser &parser) : - State(IBES_START), BaseReg(0), IndexReg(0), Scale(1), Disp(0), - TmpReg(0), TmpInteger(0), isPlus(true) {} - - unsigned getBaseReg() { return BaseReg; } - unsigned getIndexReg() { return IndexReg; } - unsigned getScale() { return Scale; } - int64_t getDisp() { return Disp; } - bool isValidEndState() { return State == IBES_RBRAC; } - - void onPlus() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_INTEGER: - State = IBES_START; - if (isPlus) - Disp += TmpInteger; - else - Disp -= TmpInteger; - break; - case IBES_REGISTER: - State = IBES_START; - // If we already have a BaseReg, then assume this is the IndexReg with a - // scale of 1. - if (!BaseReg) { - BaseReg = TmpReg; - } else { - assert (!IndexReg && "BaseReg/IndexReg already set!"); - IndexReg = TmpReg; - Scale = 1; - } - break; - case IBES_INDEX_REGISTER: - State = IBES_START; - break; - } - isPlus = true; - } - void onMinus() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_MINUS; - break; - case IBES_INTEGER: - State = IBES_START; - if (isPlus) - Disp += TmpInteger; - else - Disp -= TmpInteger; - break; - case IBES_REGISTER: - State = IBES_START; - // If we already have a BaseReg, then assume this is the IndexReg with a - // scale of 1. 
- if (!BaseReg) { - BaseReg = TmpReg; - } else { - assert (!IndexReg && "BaseReg/IndexReg already set!"); - IndexReg = TmpReg; - Scale = 1; - } - break; - case IBES_INDEX_REGISTER: - State = IBES_START; - break; - } - isPlus = false; - } - void onRegister(unsigned Reg) { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_REGISTER; - TmpReg = Reg; - break; - case IBES_INTEGER_STAR: - assert (!IndexReg && "IndexReg already set!"); - State = IBES_INDEX_REGISTER; - IndexReg = Reg; - Scale = TmpInteger; - break; - } - } - void onDispExpr() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_DISP_EXPR; - break; - } - } - void onInteger(int64_t TmpInt) { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_INTEGER; - TmpInteger = TmpInt; - break; - case IBES_MINUS: - State = IBES_INTEGER; - TmpInteger = TmpInt; - break; - case IBES_REGISTER_STAR: - assert (!IndexReg && "IndexReg already set!"); - State = IBES_INDEX_REGISTER; - IndexReg = TmpReg; - Scale = TmpInt; - break; - } - } - void onStar() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_INTEGER: - State = IBES_INTEGER_STAR; - break; - case IBES_REGISTER: - State = IBES_REGISTER_STAR; - break; +X86Operand * +X86AsmParser::CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, + unsigned BaseReg, unsigned IndexReg, + unsigned Scale, SMLoc Start, SMLoc End, + unsigned Size, StringRef Identifier, + InlineAsmIdentifierInfo &Info){ + if (isa<MCSymbolRefExpr>(Disp)) { + // If this is not a VarDecl then assume it is a FuncDecl or some other label + // reference. We need an 'r' constraint here, so we need to create register + // operand to ensure proper matching. Just pick a GPR based on the size of + // a pointer. + if (!Info.IsVarDecl) { + unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; + return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true, + SMLoc(), Identifier, Info.OpDecl); } - } - void onLBrac() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_RBRAC: - State = IBES_START; - isPlus = true; - break; + if (!Size) { + Size = Info.Type * 8; // Size is in terms of bits in this context. + if (Size) + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_SizeDirective, Start, + /*Len=*/0, Size)); } } - void onRBrac() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_DISP_EXPR: - State = IBES_RBRAC; - break; - case IBES_INTEGER: - State = IBES_RBRAC; - if (isPlus) - Disp += TmpInteger; - else - Disp -= TmpInteger; - break; - case IBES_REGISTER: - State = IBES_RBRAC; - // If we already have a BaseReg, then assume this is the IndexReg with a - // scale of 1. - if (!BaseReg) { - BaseReg = TmpReg; - } else { - assert (!IndexReg && "BaseReg/IndexReg already set!"); - IndexReg = TmpReg; - Scale = 1; + + // When parsing inline assembly we set the base register to a non-zero value + // if we don't know the actual value at this time. This is necessary to + // get the matching correct in some cases. + BaseReg = BaseReg ? BaseReg : 1; + return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start, + End, Size, Identifier, Info.OpDecl); +} + +static void +RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites, + StringRef SymName, int64_t ImmDisp, + int64_t FinalImmDisp, SMLoc &BracLoc, + SMLoc &StartInBrac, SMLoc &End) { + // Remove the '[' and ']' from the IR string. 
+ AsmRewrites->push_back(AsmRewrite(AOK_Skip, BracLoc, 1)); + AsmRewrites->push_back(AsmRewrite(AOK_Skip, End, 1)); + + // If ImmDisp is non-zero, then we parsed a displacement before the + // bracketed expression (i.e., ImmDisp [ BaseReg + Scale*IndexReg + Disp]) + // If ImmDisp doesn't match the displacement computed by the state machine + // then we have an additional displacement in the bracketed expression. + if (ImmDisp != FinalImmDisp) { + if (ImmDisp) { + // We have an immediate displacement before the bracketed expression. + // Adjust this to match the final immediate displacement. + bool Found = false; + for (SmallVectorImpl<AsmRewrite>::iterator I = AsmRewrites->begin(), + E = AsmRewrites->end(); I != E; ++I) { + if ((*I).Loc.getPointer() > BracLoc.getPointer()) + continue; + if ((*I).Kind == AOK_ImmPrefix || (*I).Kind == AOK_Imm) { + assert (!Found && "ImmDisp already rewritten."); + (*I).Kind = AOK_Imm; + (*I).Len = BracLoc.getPointer() - (*I).Loc.getPointer(); + (*I).Val = FinalImmDisp; + Found = true; + break; + } } - break; - case IBES_INDEX_REGISTER: - State = IBES_RBRAC; - break; + assert (Found && "Unable to rewrite ImmDisp."); + } else { + // We have a symbolic and an immediate displacement, but no displacement + // before the bracketed expression. Put the immediate displacement + // before the bracketed expression. + AsmRewrites->push_back(AsmRewrite(AOK_Imm, BracLoc, 0, FinalImmDisp)); } } -}; + // Remove all the ImmPrefix rewrites within the brackets. + for (SmallVectorImpl<AsmRewrite>::iterator I = AsmRewrites->begin(), + E = AsmRewrites->end(); I != E; ++I) { + if ((*I).Loc.getPointer() < StartInBrac.getPointer()) + continue; + if ((*I).Kind == AOK_ImmPrefix) + (*I).Kind = AOK_Delete; + } + const char *SymLocPtr = SymName.data(); + // Skip everything before the symbol. + if (unsigned Len = SymLocPtr - StartInBrac.getPointer()) { + assert(Len > 0 && "Expected a non-negative length."); + AsmRewrites->push_back(AsmRewrite(AOK_Skip, StartInBrac, Len)); + } + // Skip everything after the symbol. + if (unsigned Len = End.getPointer() - (SymLocPtr + SymName.size())) { + SMLoc Loc = SMLoc::getFromPointer(SymLocPtr + SymName.size()); + assert(Len > 0 && "Expected a non-negative length."); + AsmRewrites->push_back(AsmRewrite(AOK_Skip, Loc, Len)); + } +} -X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, - unsigned Size) { +X86Operand * +X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) { const AsmToken &Tok = Parser.getTok(); - SMLoc Start = Tok.getLoc(), End = Tok.getEndLoc(); - - // Eat '[' - if (getLexer().isNot(AsmToken::LBrac)) - return ErrorOperand(Start, "Expected '[' token!"); - Parser.Lex(); - unsigned TmpReg = 0; - - // Try to handle '[' 'symbol' ']' - if (getLexer().is(AsmToken::Identifier)) { - if (ParseRegister(TmpReg, Start, End)) { - const MCExpr *Disp; - if (getParser().parseExpression(Disp, End)) - return 0; - - if (getLexer().isNot(AsmToken::RBrac)) - return ErrorOperand(Parser.getTok().getLoc(), "Expected ']' token!"); - // Adjust the EndLoc due to the ']'. - End = SMLoc::getFromPointer(Parser.getTok().getEndLoc().getPointer()-1); - Parser.Lex(); - return X86Operand::CreateMem(Disp, Start, End, Size); - } - } - - // Parse [ BaseReg + Scale*IndexReg + Disp ]. bool Done = false; - IntelBracExprStateMachine SM(Parser); - - // If we parsed a register, then the end loc has already been set and - // the identifier has already been lexed. We also need to update the - // state. 
- if (TmpReg) - SM.onRegister(TmpReg); - - const MCExpr *Disp = 0; while (!Done) { bool UpdateLocLex = true; @@ -941,6 +1234,10 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, // identifier. Don't try an parse it as a register. if (Tok.getString().startswith(".")) break; + + // If we're parsing an immediate expression, we don't expect a '['. + if (SM.getStopOnLBrac() && getLexer().getKind() == AsmToken::LBrac) + break; switch (getLexer().getKind()) { default: { @@ -950,82 +1247,185 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, } return ErrorOperand(Tok.getLoc(), "Unexpected token!"); } + case AsmToken::EndOfStatement: { + Done = true; + break; + } case AsmToken::Identifier: { - // This could be a register or a displacement expression. - if(!ParseRegister(TmpReg, Start, End)) { + // This could be a register or a symbolic displacement. + unsigned TmpReg; + const MCExpr *Val; + SMLoc IdentLoc = Tok.getLoc(); + StringRef Identifier = Tok.getString(); + if(!ParseRegister(TmpReg, IdentLoc, End)) { SM.onRegister(TmpReg); UpdateLocLex = false; break; - } else if (!getParser().parseExpression(Disp, End)) { - SM.onDispExpr(); + } else { + if (!isParsingInlineAsm()) { + if (getParser().parsePrimaryExpr(Val, End)) + return ErrorOperand(Tok.getLoc(), "Unexpected identifier!"); + } else { + InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo(); + if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End)) + return Err; + } + SM.onIdentifierExpr(Val, Identifier); UpdateLocLex = false; break; } return ErrorOperand(Tok.getLoc(), "Unexpected identifier!"); } - case AsmToken::Integer: { - int64_t Val = Tok.getIntVal(); - SM.onInteger(Val); + case AsmToken::Integer: + if (isParsingInlineAsm() && SM.getAddImmPrefix()) + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, + Tok.getLoc())); + SM.onInteger(Tok.getIntVal()); break; - } case AsmToken::Plus: SM.onPlus(); break; case AsmToken::Minus: SM.onMinus(); break; case AsmToken::Star: SM.onStar(); break; + case AsmToken::Slash: SM.onDivide(); break; case AsmToken::LBrac: SM.onLBrac(); break; case AsmToken::RBrac: SM.onRBrac(); break; + case AsmToken::LParen: SM.onLParen(); break; + case AsmToken::RParen: SM.onRParen(); break; } + if (SM.hadError()) + return ErrorOperand(Tok.getLoc(), "Unexpected token!"); + if (!Done && UpdateLocLex) { End = Tok.getLoc(); Parser.Lex(); // Consume the token. } } + return 0; +} - if (!Disp) - Disp = MCConstantExpr::Create(SM.getDisp(), getContext()); +X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, + int64_t ImmDisp, + unsigned Size) { + const AsmToken &Tok = Parser.getTok(); + SMLoc BracLoc = Tok.getLoc(), End = Tok.getEndLoc(); + if (getLexer().isNot(AsmToken::LBrac)) + return ErrorOperand(BracLoc, "Expected '[' token!"); + Parser.Lex(); // Eat '[' + + SMLoc StartInBrac = Tok.getLoc(); + // Parse [ Symbol + ImmDisp ] and [ BaseReg + Scale*IndexReg + ImmDisp ]. We + // may have already parsed an immediate displacement before the bracketed + // expression. + IntelExprStateMachine SM(ImmDisp, /*StopOnLBrac=*/false, /*AddImmPrefix=*/true); + if (X86Operand *Err = ParseIntelExpression(SM, End)) + return Err; + + const MCExpr *Disp; + if (const MCExpr *Sym = SM.getSym()) { + // A symbolic displacement. + Disp = Sym; + if (isParsingInlineAsm()) + RewriteIntelBracExpression(InstInfo->AsmRewrites, SM.getSymName(), + ImmDisp, SM.getImm(), BracLoc, StartInBrac, + End); + } else { + // An immediate displacement only. 
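The replacement parser feeds one token at a time into a state machine and lets it accumulate base, index, scale and displacement. A toy version of that token walk is sketched below; it handles only the canonical "Base + Index*Scale + Disp" shape, the token kinds and sample stream are invented, and the real IntelExprStateMachine also handles '-', '/', parentheses and symbolic displacements.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Token kinds mirroring the switch in the parser loop.
enum class Tok { Reg, Int, Plus, Star, End };

struct Token {
  Tok Kind;
  int64_t Val = 0;   // for Int
  std::string Reg;   // for Reg
};

struct MemOperand {
  std::string Base, Index;
  int64_t Scale = 1, Disp = 0;
};

// Toy recogniser for the canonical "Base + Index*Scale + Disp" shape.
static MemOperand walk(const std::vector<Token> &Toks) {
  MemOperand M;
  for (size_t i = 0; i < Toks.size(); ++i) {
    switch (Toks[i].Kind) {
    case Tok::Reg:
      // A register followed by '*' is the index; otherwise fill base first.
      if (i + 1 < Toks.size() && Toks[i + 1].Kind == Tok::Star)
        M.Index = Toks[i].Reg;
      else if (M.Base.empty())
        M.Base = Toks[i].Reg;
      else
        M.Index = Toks[i].Reg;
      break;
    case Tok::Int:
      // An integer right after '*' is the scale; otherwise it is displacement.
      if (i > 0 && Toks[i - 1].Kind == Tok::Star)
        M.Scale = Toks[i].Val;
      else
        M.Disp += Toks[i].Val;
      break;
    case Tok::Plus:
    case Tok::Star:
    case Tok::End:
      break;
    }
  }
  return M;
}

int main() {
  // [rbx + rcx*4 + 16]
  std::vector<Token> Toks = {
      {Tok::Reg, 0, "rbx"}, {Tok::Plus}, {Tok::Reg, 0, "rcx"}, {Tok::Star},
      {Tok::Int, 4},        {Tok::Plus}, {Tok::Int, 16},       {Tok::End}};
  MemOperand M = walk(Toks);
  std::cout << "base=" << M.Base << " index=" << M.Index
            << " scale=" << M.Scale << " disp=" << M.Disp << "\n";
}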
+ Disp = MCConstantExpr::Create(SM.getImm(), getContext()); + } // Parse the dot operator (e.g., [ebx].foo.bar). if (Tok.getString().startswith(".")) { - SmallString<64> Err; const MCExpr *NewDisp; - if (ParseIntelDotOperator(Disp, &NewDisp, Err)) - return ErrorOperand(Tok.getLoc(), Err); + if (X86Operand *Err = ParseIntelDotOperator(Disp, NewDisp)) + return Err; - End = Parser.getTok().getEndLoc(); + End = Tok.getEndLoc(); Parser.Lex(); // Eat the field. Disp = NewDisp; } int BaseReg = SM.getBaseReg(); int IndexReg = SM.getIndexReg(); - - // handle [-42] - if (!BaseReg && !IndexReg) { - if (!SegReg) - return X86Operand::CreateMem(Disp, Start, End); - else - return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size); + int Scale = SM.getScale(); + if (!isParsingInlineAsm()) { + // handle [-42] + if (!BaseReg && !IndexReg) { + if (!SegReg) + return X86Operand::CreateMem(Disp, Start, End, Size); + else + return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size); + } + return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start, + End, Size); } - int Scale = SM.getScale(); - return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, - Start, End, Size); + InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo(); + return CreateMemForInlineAsm(SegReg, Disp, BaseReg, IndexReg, Scale, Start, + End, Size, SM.getSymName(), Info); +} + +// Inline assembly may use variable names with namespace alias qualifiers. +X86Operand *X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val, + StringRef &Identifier, + InlineAsmIdentifierInfo &Info, + SMLoc &End) { + assert (isParsingInlineAsm() && "Expected to be parsing inline assembly."); + Val = 0; + + StringRef LineBuf(Identifier.data()); + SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info); + unsigned BufLen = LineBuf.size(); + assert (BufLen && "Expected a non-zero length identifier."); + + // Advance the token stream based on what the frontend parsed. + const AsmToken &Tok = Parser.getTok(); + AsmToken IdentEnd = Tok; + while (BufLen > 0) { + IdentEnd = Tok; + BufLen -= Tok.getString().size(); + getLexer().Lex(); // Consume the token. + } + if (BufLen != 0) + return ErrorOperand(IdentEnd.getLoc(), + "Frontend parser mismatch with asm lexer!"); + End = IdentEnd.getEndLoc(); + + // Create the symbol reference. + Identifier = LineBuf; + MCSymbol *Sym = getContext().GetOrCreateSymbol(Identifier); + MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; + Val = MCSymbolRefExpr::Create(Sym, Variant, getParser().getContext()); + return 0; } /// ParseIntelMemOperand - Parse intel style memory operand. -X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) { +X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, + int64_t ImmDisp, + SMLoc Start) { const AsmToken &Tok = Parser.getTok(); SMLoc End; unsigned Size = getIntelMemOperandSize(Tok.getString()); if (Size) { - Parser.Lex(); - assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") && - "Unexpected token!"); - Parser.Lex(); + Parser.Lex(); // Eat operand size (e.g., byte, word). + if (Tok.getString() != "PTR" && Tok.getString() != "ptr") + return ErrorOperand(Start, "Expected 'PTR' or 'ptr' token!"); + Parser.Lex(); // Eat ptr. + } + + // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ]. + if (getLexer().is(AsmToken::Integer)) { + if (isParsingInlineAsm()) + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, + Tok.getLoc())); + int64_t ImmDisp = Tok.getIntVal(); + Parser.Lex(); // Eat the integer. 
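ParseIntelIdentifier has to re-synchronise the asm lexer with whatever the frontend already resolved: it keeps eating lexer tokens until exactly the frontend-reported number of characters has been consumed, and reports a mismatch if the token boundaries never line up. A simplified model of that loop, using plain strings instead of AsmTokens; the sample token stream is hypothetical.

#include <iostream>
#include <string>
#include <vector>

// Advance over lexer tokens until exactly FrontendLen characters have been
// consumed. Returns the index of the first token past the identifier, or -1
// if the token boundaries do not line up with what the frontend parsed.
static int advancePastIdentifier(const std::vector<std::string> &Toks,
                                 size_t Start, size_t FrontendLen) {
  size_t i = Start;
  size_t Remaining = FrontendLen;
  while (Remaining > 0 && i < Toks.size()) {
    if (Toks[i].size() > Remaining)
      return -1;                 // lexer/frontend mismatch
    Remaining -= Toks[i].size();
    ++i;
  }
  return Remaining == 0 ? static_cast<int>(i) : -1;
}

int main() {
  // Suppose the frontend resolved "A::B::var" (9 characters) while the asm
  // lexer sees it as the tokens "A", "::", "B", "::", "var".
  std::vector<std::string> Toks = {"A", "::", "B", "::", "var", ",", "eax"};
  std::cout << advancePastIdentifier(Toks, 0, 9) << "\n";  // prints 5
}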
+ if (getLexer().isNot(AsmToken::LBrac)) + return ErrorOperand(Start, "Expected '[' token!"); + return ParseIntelBracExpression(SegReg, Start, ImmDisp, Size); } if (getLexer().is(AsmToken::LBrac)) - return ParseIntelBracExpression(SegReg, Size); + return ParseIntelBracExpression(SegReg, Start, ImmDisp, Size); if (!ParseRegister(SegReg, Start, End)) { // Handel SegReg : [ ... ] @@ -1034,63 +1434,36 @@ X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) { Parser.Lex(); // Eat : if (getLexer().isNot(AsmToken::LBrac)) return ErrorOperand(Start, "Expected '[' token!"); - return ParseIntelBracExpression(SegReg, Size); + return ParseIntelBracExpression(SegReg, Start, ImmDisp, Size); } - const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext()); - if (getParser().parseExpression(Disp, End)) - return 0; + const MCExpr *Val; + if (!isParsingInlineAsm()) { + if (getParser().parsePrimaryExpr(Val, End)) + return ErrorOperand(Tok.getLoc(), "Unexpected token!"); - bool NeedSizeDir = false; - bool IsVarDecl = false; - if (isParsingInlineAsm()) { - if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Disp)) { - const MCSymbol &Sym = SymRef->getSymbol(); - // FIXME: The SemaLookup will fail if the name is anything other then an - // identifier. - // FIXME: Pass a valid SMLoc. - unsigned tLength, tSize, tType; - SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, tLength, - tSize, tType, IsVarDecl); - if (!Size) - Size = tType * 8; // Size is in terms of bits in this context. - NeedSizeDir = Size > 0; - } + return X86Operand::CreateMem(Val, Start, End, Size); } - if (!isParsingInlineAsm()) - return X86Operand::CreateMem(Disp, Start, End, Size); - else { - // If this is not a VarDecl then assume it is a FuncDecl or some other label - // reference. We need an 'r' constraint here, so we need to create register - // operand to ensure proper matching. Just pick a GPR based on the size of - // a pointer. - if (!IsVarDecl) { - unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; - return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true); - } - // When parsing inline assembly we set the base register to a non-zero value - // as we don't know the actual value at this time. This is necessary to - // get the matching correct in some cases. - return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0, - /*Scale*/1, Start, End, Size, NeedSizeDir); - } + InlineAsmIdentifierInfo Info; + StringRef Identifier = Tok.getString(); + if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End)) + return Err; + return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0, + /*Scale=*/1, Start, End, Size, Identifier, Info); } /// Parse the '.' operator. -bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, - const MCExpr **NewDisp, - SmallString<64> &Err) { - AsmToken Tok = *&Parser.getTok(); - uint64_t OrigDispVal, DotDispVal; +X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, + const MCExpr *&NewDisp) { + const AsmToken &Tok = Parser.getTok(); + int64_t OrigDispVal, DotDispVal; // FIXME: Handle non-constant expressions. - if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp)) { + if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp)) OrigDispVal = OrigDisp->getValue(); - } else { - Err = "Non-constant offsets are not supported!"; - return true; - } + else + return ErrorOperand(Tok.getLoc(), "Non-constant offsets are not supported!"); // Drop the '.'. 
StringRef DotDispStr = Tok.getString().drop_front(1); @@ -1100,23 +1473,15 @@ bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, APInt DotDisp; DotDispStr.getAsInteger(10, DotDisp); DotDispVal = DotDisp.getZExtValue(); - } else if (Tok.is(AsmToken::Identifier)) { - // We should only see an identifier when parsing the original inline asm. - // The front-end should rewrite this in terms of immediates. - assert (isParsingInlineAsm() && "Unexpected field name!"); - + } else if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) { unsigned DotDisp; std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.'); if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second, - DotDisp)) { - Err = "Unable to lookup field reference!"; - return true; - } + DotDisp)) + return ErrorOperand(Tok.getLoc(), "Unable to lookup field reference!"); DotDispVal = DotDisp; - } else { - Err = "Unexpected token type!"; - return true; - } + } else + return ErrorOperand(Tok.getLoc(), "Unexpected token type!"); if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) { SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data()); @@ -1126,22 +1491,23 @@ bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, Val)); } - *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext()); - return false; + NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext()); + return 0; } /// Parse the 'offset' operator. This operator is used to specify the /// location rather then the content of a variable. -X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) { - SMLoc OffsetOfLoc = Start; +X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() { + const AsmToken &Tok = Parser.getTok(); + SMLoc OffsetOfLoc = Tok.getLoc(); Parser.Lex(); // Eat offset. - Start = Parser.getTok().getLoc(); - assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier"); - SMLoc End; const MCExpr *Val; - if (getParser().parseExpression(Val, End)) - return ErrorOperand(Start, "Unable to parse expression!"); + InlineAsmIdentifierInfo Info; + SMLoc Start = Tok.getLoc(), End; + StringRef Identifier = Tok.getString(); + if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End)) + return Err; // Don't emit the offset operator. InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7)); @@ -1151,7 +1517,7 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) { // the size of a pointer. unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true, - OffsetOfLoc); + OffsetOfLoc, Identifier, Info.OpDecl); } enum IntelOperatorKind { @@ -1166,34 +1532,24 @@ enum IntelOperatorKind { /// variable. A variable's size is the product of its LENGTH and TYPE. The /// TYPE operator returns the size of a C or C++ type or variable. If the /// variable is an array, TYPE returns the size of a single element. -X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) { - SMLoc TypeLoc = Start; - Parser.Lex(); // Eat offset. - Start = Parser.getTok().getLoc(); - assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier"); +X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) { + const AsmToken &Tok = Parser.getTok(); + SMLoc TypeLoc = Tok.getLoc(); + Parser.Lex(); // Eat operator. 
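The dot operator folds a frontend-supplied field offset into the existing constant displacement. Below is a small sketch of that addition with a stand-in lookup table; the real offsets come from SemaCallback->LookupInlineAsmField, and the type and field names used here are invented.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Toy stand-in for the frontend field lookup: maps the dotted pair after the
// bracket to a byte offset. Real offsets come from Sema, not from a table.
static bool lookupField(const std::string &Base, const std::string &Member,
                        int64_t &Offset) {
  static const std::map<std::string, int64_t> Offsets = {
      {"Point.x", 0}, {"Point.y", 4}};
  auto It = Offsets.find(Base + "." + Member);
  if (It == Offsets.end())
    return true;  // mimic the parser convention: true means failure
  Offset = It->second;
  return false;
}

int main() {
  // "[ebx].Point.y" with an existing constant displacement of 8:
  int64_t OrigDisp = 8, DotDisp = 0;
  if (!lookupField("Point", "y", DotDisp))
    std::cout << "new disp = " << (OrigDisp + DotDisp) << "\n";  // prints 12
}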
- SMLoc End; - const MCExpr *Val; - if (getParser().parseExpression(Val, End)) - return 0; + const MCExpr *Val = 0; + InlineAsmIdentifierInfo Info; + SMLoc Start = Tok.getLoc(), End; + StringRef Identifier = Tok.getString(); + if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, End)) + return Err; - unsigned Length = 0, Size = 0, Type = 0; - if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Val)) { - const MCSymbol &Sym = SymRef->getSymbol(); - // FIXME: The SemaLookup will fail if the name is anything other then an - // identifier. - // FIXME: Pass a valid SMLoc. - bool IsVarDecl; - if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Length, - Size, Type, IsVarDecl)) - return ErrorOperand(Start, "Unable to lookup expr!"); - } - unsigned CVal; + unsigned CVal = 0; switch(OpKind) { default: llvm_unreachable("Unexpected operand kind!"); - case IOK_LENGTH: CVal = Length; break; - case IOK_SIZE: CVal = Size; break; - case IOK_TYPE: CVal = Type; break; + case IOK_LENGTH: CVal = Info.Length; break; + case IOK_SIZE: CVal = Info.Size; break; + case IOK_TYPE: CVal = Info.Type; break; } // Rewrite the type operator and the C or C++ type or variable in terms of an @@ -1202,32 +1558,58 @@ X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) { InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, CVal)); const MCExpr *Imm = MCConstantExpr::Create(CVal, getContext()); - return X86Operand::CreateImm(Imm, Start, End, /*NeedAsmRewrite*/false); + return X86Operand::CreateImm(Imm, Start, End); } X86Operand *X86AsmParser::ParseIntelOperand() { - SMLoc Start = Parser.getTok().getLoc(), End; - StringRef AsmTokStr = Parser.getTok().getString(); + const AsmToken &Tok = Parser.getTok(); + SMLoc Start = Tok.getLoc(), End; // Offset, length, type and size operators. if (isParsingInlineAsm()) { + StringRef AsmTokStr = Tok.getString(); if (AsmTokStr == "offset" || AsmTokStr == "OFFSET") - return ParseIntelOffsetOfOperator(Start); + return ParseIntelOffsetOfOperator(); if (AsmTokStr == "length" || AsmTokStr == "LENGTH") - return ParseIntelOperator(Start, IOK_LENGTH); + return ParseIntelOperator(IOK_LENGTH); if (AsmTokStr == "size" || AsmTokStr == "SIZE") - return ParseIntelOperator(Start, IOK_SIZE); + return ParseIntelOperator(IOK_SIZE); if (AsmTokStr == "type" || AsmTokStr == "TYPE") - return ParseIntelOperator(Start, IOK_TYPE); + return ParseIntelOperator(IOK_TYPE); } // Immediate. - if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) || - getLexer().is(AsmToken::Minus)) { - const MCExpr *Val; - if (!getParser().parseExpression(Val, End)) { - return X86Operand::CreateImm(Val, Start, End); + if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Minus) || + getLexer().is(AsmToken::LParen)) { + AsmToken StartTok = Tok; + IntelExprStateMachine SM(/*Imm=*/0, /*StopOnLBrac=*/true, + /*AddImmPrefix=*/false); + if (X86Operand *Err = ParseIntelExpression(SM, End)) + return Err; + + int64_t Imm = SM.getImm(); + if (isParsingInlineAsm()) { + unsigned Len = Tok.getLoc().getPointer() - Start.getPointer(); + if (StartTok.getString().size() == Len) + // Just add a prefix if this wasn't a complex immediate expression. + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, Start)); + else + // Otherwise, rewrite the complex expression as a single immediate. 
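As the surrounding comments say, LENGTH is the element count, TYPE the element size, SIZE their product, and the whole operator is folded to a plain immediate. A tiny illustration of that mapping; the IdentInfo struct below is a stand-in for the identifier info the frontend returns via LookupInlineAsmIdentifier.

#include <iostream>

// Stand-in for the frontend's identifier info.
struct IdentInfo {
  unsigned Length;  // number of array elements
  unsigned Size;    // total size in bytes (Length * element size)
  unsigned Type;    // element size in bytes
};

enum OperatorKind { IOK_LENGTH, IOK_SIZE, IOK_TYPE };

static unsigned evalOperator(OperatorKind K, const IdentInfo &Info) {
  switch (K) {
  case IOK_LENGTH: return Info.Length;
  case IOK_SIZE:   return Info.Size;
  case IOK_TYPE:   return Info.Type;
  }
  return 0;
}

int main() {
  IdentInfo Arr = {10, 40, 4};  // e.g. "int a[10]"
  std::cout << "LENGTH a = " << evalOperator(IOK_LENGTH, Arr) << "\n"   // 10
            << "SIZE a   = " << evalOperator(IOK_SIZE, Arr) << "\n"     // 40
            << "TYPE a   = " << evalOperator(IOK_TYPE, Arr) << "\n";    // 4
}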
+ InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, Start, Len, Imm)); + } + + if (getLexer().isNot(AsmToken::LBrac)) { + const MCExpr *ImmExpr = MCConstantExpr::Create(Imm, getContext()); + return X86Operand::CreateImm(ImmExpr, Start, End); } + + // Only positive immediates are valid. + if (Imm < 0) + return ErrorOperand(Start, "expected a positive immediate displacement " + "before bracketed expr."); + + // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ]. + return ParseIntelMemOperand(/*SegReg=*/0, Imm, Start); } // Register. @@ -1239,11 +1621,11 @@ X86Operand *X86AsmParser::ParseIntelOperand() { return X86Operand::CreateReg(RegNo, Start, End); getParser().Lex(); // Eat the colon. - return ParseIntelMemOperand(RegNo, Start); + return ParseIntelMemOperand(/*SegReg=*/RegNo, /*Disp=*/0, Start); } // Memory operand. - return ParseIntelMemOperand(0, Start); + return ParseIntelMemOperand(/*SegReg=*/0, /*Disp=*/0, Start); } X86Operand *X86AsmParser::ParseATTOperand() { @@ -1267,7 +1649,6 @@ X86Operand *X86AsmParser::ParseATTOperand() { if (getLexer().isNot(AsmToken::Colon)) return X86Operand::CreateReg(RegNo, Start, End); - getParser().Lex(); // Eat the colon. return ParseMemOperand(RegNo, Start); } diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt index d14899d..7cb71f0 100644 --- a/lib/Target/X86/CMakeLists.txt +++ b/lib/Target/X86/CMakeLists.txt @@ -33,6 +33,7 @@ set(sources X86TargetObjectFile.cpp X86TargetTransformInfo.cpp X86VZeroUpper.cpp + X86FixupLEAs.cpp ) if( CMAKE_CL_64 ) diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c index 85d8a99..e40edba 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c @@ -61,7 +61,7 @@ static int modRMRequired(OpcodeType type, InstructionContext insnContext, uint8_t opcode) { const struct ContextDecision* decision = 0; - + switch (type) { case ONEBYTE: decision = &ONEBYTE_SYM; @@ -102,7 +102,7 @@ static InstrUID decode(OpcodeType type, uint8_t opcode, uint8_t modRM) { const struct ModRMDecision* dec = 0; - + switch (type) { case ONEBYTE: dec = &ONEBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; @@ -123,7 +123,7 @@ static InstrUID decode(OpcodeType type, dec = &THREEBYTEA7_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; break; } - + switch (dec->modrm_type) { default: debug("Corrupt table! Unknown modrm_type"); @@ -171,10 +171,10 @@ static const struct InstructionSpecifier *specifierForUID(InstrUID uid) { */ static int consumeByte(struct InternalInstruction* insn, uint8_t* byte) { int ret = insn->reader(insn->readerArg, byte, insn->readerCursor); - + if (!ret) ++(insn->readerCursor); - + return ret; } @@ -238,19 +238,19 @@ CONSUME_FUNC(consumeUInt64, uint64_t) */ static void dbgprintf(struct InternalInstruction* insn, const char* format, - ...) { + ...) 
{ char buffer[256]; va_list ap; - + if (!insn->dlog) return; - + va_start(ap, format); (void)vsnprintf(buffer, sizeof(buffer), format, ap); va_end(ap); - + insn->dlog(insn->dlogArg, buffer); - + return; } @@ -305,27 +305,40 @@ static int readPrefixes(struct InternalInstruction* insn) { BOOL prefixGroups[4] = { FALSE }; uint64_t prefixLocation; uint8_t byte = 0; - + BOOL hasAdSize = FALSE; BOOL hasOpSize = FALSE; - + dbgprintf(insn, "readPrefixes()"); - + while (isPrefix) { prefixLocation = insn->readerCursor; - + if (consumeByte(insn, &byte)) return -1; /* - * If the first byte is a LOCK prefix break and let it be disassembled - * as a lock "instruction", by creating an <MCInst #xxxx LOCK_PREFIX>. - * FIXME there is currently no way to get the disassembler to print the - * lock prefix if it is not the first byte. + * If the byte is a LOCK/REP/REPNE prefix and not a part of the opcode, then + * break and let it be disassembled as a normal "instruction". */ - if (insn->readerCursor - 1 == insn->startLocation && byte == 0xf0) - break; - + if (insn->readerCursor - 1 == insn->startLocation + && (byte == 0xf0 || byte == 0xf2 || byte == 0xf3)) { + uint8_t nextByte; + if (byte == 0xf0) + break; + if (lookAtByte(insn, &nextByte)) + return -1; + if (insn->mode == MODE_64BIT && (nextByte & 0xf0) == 0x40) { + if (consumeByte(insn, &nextByte)) + return -1; + if (lookAtByte(insn, &nextByte)) + return -1; + unconsumeByte(insn); + } + if (nextByte != 0x0f && nextByte != 0x90) + break; + } + switch (byte) { case 0xf0: /* LOCK */ case 0xf2: /* REPNE/REPNZ */ @@ -387,21 +400,21 @@ static int readPrefixes(struct InternalInstruction* insn) { isPrefix = FALSE; break; } - + if (isPrefix) dbgprintf(insn, "Found prefix 0x%hhx", byte); } - + insn->vexSize = 0; - + if (byte == 0xc4) { uint8_t byte1; - + if (lookAtByte(insn, &byte1)) { dbgprintf(insn, "Couldn't read second byte of VEX"); return -1; } - + if (insn->mode == MODE_64BIT || (byte1 & 0xc0) == 0xc0) { insn->vexSize = 3; insn->necessaryPrefixLocation = insn->readerCursor - 1; @@ -410,67 +423,67 @@ static int readPrefixes(struct InternalInstruction* insn) { unconsumeByte(insn); insn->necessaryPrefixLocation = insn->readerCursor - 1; } - + if (insn->vexSize == 3) { insn->vexPrefix[0] = byte; consumeByte(insn, &insn->vexPrefix[1]); consumeByte(insn, &insn->vexPrefix[2]); /* We simulate the REX prefix for simplicity's sake */ - + if (insn->mode == MODE_64BIT) { - insn->rexPrefix = 0x40 + insn->rexPrefix = 0x40 | (wFromVEX3of3(insn->vexPrefix[2]) << 3) | (rFromVEX2of3(insn->vexPrefix[1]) << 2) | (xFromVEX2of3(insn->vexPrefix[1]) << 1) | (bFromVEX2of3(insn->vexPrefix[1]) << 0); } - + switch (ppFromVEX3of3(insn->vexPrefix[2])) { default: break; case VEX_PREFIX_66: - hasOpSize = TRUE; + hasOpSize = TRUE; break; } - + dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx 0x%hhx", insn->vexPrefix[0], insn->vexPrefix[1], insn->vexPrefix[2]); } } else if (byte == 0xc5) { uint8_t byte1; - + if (lookAtByte(insn, &byte1)) { dbgprintf(insn, "Couldn't read second byte of VEX"); return -1; } - + if (insn->mode == MODE_64BIT || (byte1 & 0xc0) == 0xc0) { insn->vexSize = 2; } else { unconsumeByte(insn); } - + if (insn->vexSize == 2) { insn->vexPrefix[0] = byte; consumeByte(insn, &insn->vexPrefix[1]); - + if (insn->mode == MODE_64BIT) { - insn->rexPrefix = 0x40 + insn->rexPrefix = 0x40 | (rFromVEX2of2(insn->vexPrefix[1]) << 2); } - + switch (ppFromVEX2of2(insn->vexPrefix[1])) { default: break; case VEX_PREFIX_66: - hasOpSize = TRUE; + hasOpSize = TRUE; break; } - + dbgprintf(insn, 
"Found VEX prefix 0x%hhx 0x%hhx", insn->vexPrefix[0], insn->vexPrefix[1]); } } @@ -478,17 +491,17 @@ static int readPrefixes(struct InternalInstruction* insn) { if (insn->mode == MODE_64BIT) { if ((byte & 0xf0) == 0x40) { uint8_t opcodeByte; - + if (lookAtByte(insn, &opcodeByte) || ((opcodeByte & 0xf0) == 0x40)) { dbgprintf(insn, "Redundant REX prefix"); return -1; } - + insn->rexPrefix = byte; insn->necessaryPrefixLocation = insn->readerCursor - 2; - + dbgprintf(insn, "Found REX prefix 0x%hhx", byte); - } else { + } else { unconsumeByte(insn); insn->necessaryPrefixLocation = insn->readerCursor - 1; } @@ -526,7 +539,7 @@ static int readPrefixes(struct InternalInstruction* insn) { insn->immediateSize = (hasOpSize ? 2 : 4); } } - + return 0; } @@ -537,22 +550,22 @@ static int readPrefixes(struct InternalInstruction* insn) { * @param insn - The instruction whose opcode is to be read. * @return - 0 if the opcode could be read successfully; nonzero otherwise. */ -static int readOpcode(struct InternalInstruction* insn) { +static int readOpcode(struct InternalInstruction* insn) { /* Determine the length of the primary opcode */ - + uint8_t current; - + dbgprintf(insn, "readOpcode()"); - + insn->opcodeType = ONEBYTE; - + if (insn->vexSize == 3) { switch (mmmmmFromVEX2of3(insn->vexPrefix[1])) { default: dbgprintf(insn, "Unhandled m-mmmm field for instruction (0x%hhx)", mmmmmFromVEX2of3(insn->vexPrefix[1])); - return -1; + return -1; case 0: break; case VEX_LOB_0F: @@ -564,7 +577,7 @@ static int readOpcode(struct InternalInstruction* insn) { insn->threeByteEscape = 0x38; insn->opcodeType = THREEBYTE_38; return consumeByte(insn, &insn->opcode); - case VEX_LOB_0F3A: + case VEX_LOB_0F3A: insn->twoByteEscape = 0x0f; insn->threeByteEscape = 0x3a; insn->opcodeType = THREEBYTE_3A; @@ -577,68 +590,68 @@ static int readOpcode(struct InternalInstruction* insn) { insn->opcodeType = TWOBYTE; return consumeByte(insn, &insn->opcode); } - + if (consumeByte(insn, ¤t)) return -1; - + if (current == 0x0f) { dbgprintf(insn, "Found a two-byte escape prefix (0x%hhx)", current); - + insn->twoByteEscape = current; - + if (consumeByte(insn, ¤t)) return -1; - + if (current == 0x38) { dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current); - + insn->threeByteEscape = current; - + if (consumeByte(insn, ¤t)) return -1; - + insn->opcodeType = THREEBYTE_38; } else if (current == 0x3a) { dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current); - + insn->threeByteEscape = current; - + if (consumeByte(insn, ¤t)) return -1; - + insn->opcodeType = THREEBYTE_3A; } else if (current == 0xa6) { dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current); - + insn->threeByteEscape = current; - + if (consumeByte(insn, ¤t)) return -1; - + insn->opcodeType = THREEBYTE_A6; } else if (current == 0xa7) { dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current); - + insn->threeByteEscape = current; - + if (consumeByte(insn, ¤t)) return -1; - + insn->opcodeType = THREEBYTE_A7; } else { dbgprintf(insn, "Didn't find a three-byte escape prefix"); - + insn->opcodeType = TWOBYTE; } } - + /* * At this point we have consumed the full opcode. * Anything we consume from here on must be unconsumed. 
*/ - + insn->opcode = current; - + return 0; } @@ -660,19 +673,19 @@ static int getIDWithAttrMask(uint16_t* instructionID, struct InternalInstruction* insn, uint8_t attrMask) { BOOL hasModRMExtension; - + uint8_t instructionClass; instructionClass = contextForAttrs(attrMask); - + hasModRMExtension = modRMRequired(insn->opcodeType, instructionClass, insn->opcode); - + if (hasModRMExtension) { if (readModRM(insn)) return -1; - + *instructionID = decode(insn->opcodeType, instructionClass, insn->opcode, @@ -683,7 +696,7 @@ static int getIDWithAttrMask(uint16_t* instructionID, insn->opcode, 0); } - + return 0; } @@ -696,7 +709,7 @@ static int getIDWithAttrMask(uint16_t* instructionID, */ static BOOL is16BitEquivalent(const char* orig, const char* equiv) { off_t i; - + for (i = 0;; i++) { if (orig[i] == '\0' && equiv[i] == '\0') return TRUE; @@ -715,8 +728,8 @@ static BOOL is16BitEquivalent(const char* orig, const char* equiv) { } /* - * getID - Determines the ID of an instruction, consuming the ModR/M byte as - * appropriate for extended and escape opcodes. Determines the attributes and + * getID - Determines the ID of an instruction, consuming the ModR/M byte as + * appropriate for extended and escape opcodes. Determines the attributes and * context for the instruction before doing so. * * @param insn - The instruction whose ID is to be determined. @@ -726,21 +739,21 @@ static BOOL is16BitEquivalent(const char* orig, const char* equiv) { static int getID(struct InternalInstruction* insn, const void *miiArg) { uint8_t attrMask; uint16_t instructionID; - + dbgprintf(insn, "getID()"); - + attrMask = ATTR_NONE; if (insn->mode == MODE_64BIT) attrMask |= ATTR_64BIT; - + if (insn->vexSize) { attrMask |= ATTR_VEX; if (insn->vexSize == 3) { switch (ppFromVEX3of3(insn->vexPrefix[2])) { case VEX_PREFIX_66: - attrMask |= ATTR_OPSIZE; + attrMask |= ATTR_OPSIZE; break; case VEX_PREFIX_F3: attrMask |= ATTR_XS; @@ -749,14 +762,14 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) { attrMask |= ATTR_XD; break; } - + if (lFromVEX3of3(insn->vexPrefix[2])) attrMask |= ATTR_VEXL; } else if (insn->vexSize == 2) { switch (ppFromVEX2of2(insn->vexPrefix[1])) { case VEX_PREFIX_66: - attrMask |= ATTR_OPSIZE; + attrMask |= ATTR_OPSIZE; break; case VEX_PREFIX_F3: attrMask |= ATTR_XS; @@ -765,7 +778,7 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) { attrMask |= ATTR_XD; break; } - + if (lFromVEX2of2(insn->vexPrefix[1])) attrMask |= ATTR_VEXL; } @@ -836,26 +849,26 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) { * conservative, but in the specific case where OpSize is present but not * in the right place we check if there's a 16-bit operation. 
*/ - + const struct InstructionSpecifier *spec; uint16_t instructionIDWithOpsize; const char *specName, *specWithOpSizeName; - + spec = specifierForUID(instructionID); - + if (getIDWithAttrMask(&instructionIDWithOpsize, insn, attrMask | ATTR_OPSIZE)) { - /* + /* * ModRM required with OpSize but not present; give up and return version * without OpSize set */ - + insn->instructionID = instructionID; insn->spec = spec; return 0; } - + specName = x86DisassemblerGetInstrName(instructionID, miiArg); specWithOpSizeName = x86DisassemblerGetInstrName(instructionIDWithOpsize, miiArg); @@ -882,10 +895,10 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) { const struct InstructionSpecifier *specWithNewOpcode; spec = specifierForUID(instructionID); - + /* Borrow opcode from one of the other XCHGar opcodes */ insn->opcode = 0x91; - + if (getIDWithAttrMask(&instructionIDWithNewOpcode, insn, attrMask)) { @@ -906,10 +919,10 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) { return 0; } - + insn->instructionID = instructionID; insn->spec = specifierForUID(insn->instructionID); - + return 0; } @@ -924,14 +937,14 @@ static int readSIB(struct InternalInstruction* insn) { SIBIndex sibIndexBase = 0; SIBBase sibBaseBase = 0; uint8_t index, base; - + dbgprintf(insn, "readSIB()"); - + if (insn->consumedSIB) return 0; - + insn->consumedSIB = TRUE; - + switch (insn->addressSize) { case 2: dbgprintf(insn, "SIB-based addressing doesn't work in 16-bit mode"); @@ -949,9 +962,9 @@ static int readSIB(struct InternalInstruction* insn) { if (consumeByte(insn, &insn->sib)) return -1; - + index = indexFromSIB(insn->sib) | (xFromREX(insn->rexPrefix) << 3); - + switch (index) { case 0x4: insn->sibIndex = SIB_INDEX_NONE; @@ -963,7 +976,7 @@ static int readSIB(struct InternalInstruction* insn) { insn->sibIndex = SIB_INDEX_NONE; break; } - + switch (scaleFromSIB(insn->sib)) { case 0: insn->sibScale = 1; @@ -978,9 +991,9 @@ static int readSIB(struct InternalInstruction* insn) { insn->sibScale = 8; break; } - + base = baseFromSIB(insn->sib) | (bFromREX(insn->rexPrefix) << 3); - + switch (base) { case 0x5: switch (modFromModRM(insn->modRM)) { @@ -990,12 +1003,12 @@ static int readSIB(struct InternalInstruction* insn) { break; case 0x1: insn->eaDisplacement = EA_DISP_8; - insn->sibBase = (insn->addressSize == 4 ? + insn->sibBase = (insn->addressSize == 4 ? SIB_BASE_EBP : SIB_BASE_RBP); break; case 0x2: insn->eaDisplacement = EA_DISP_32; - insn->sibBase = (insn->addressSize == 4 ? + insn->sibBase = (insn->addressSize == 4 ? SIB_BASE_EBP : SIB_BASE_RBP); break; case 0x3: @@ -1007,7 +1020,7 @@ static int readSIB(struct InternalInstruction* insn) { insn->sibBase = (SIBBase)(sibBaseBase + base); break; } - + return 0; } @@ -1015,22 +1028,22 @@ static int readSIB(struct InternalInstruction* insn) { * readDisplacement - Consumes the displacement of an instruction. * * @param insn - The instruction whose displacement is to be read. - * @return - 0 if the displacement byte was successfully read; nonzero + * @return - 0 if the displacement byte was successfully read; nonzero * otherwise. 
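readSIB splits the SIB byte into scale, index and base fields and widens index/base with REX.X/REX.B, with index 4 (and REX.X clear) meaning "no index". A standalone sketch of that bit slicing, using raw register numbers instead of the decoder's SIBIndex/SIBBase enums.

#include <cstdint>
#include <cstdio>

// Decode the fields of an x86 SIB byte, folding in the REX.X/REX.B extension
// bits the way the disassembler does.
struct SIBFields {
  unsigned Scale;  // 1, 2, 4 or 8
  unsigned Index;  // 4-bit index register number; 4 (with REX.X=0) means none
  unsigned Base;   // 4-bit base register number
};

static SIBFields decodeSIB(uint8_t Sib, bool RexX, bool RexB) {
  SIBFields F;
  F.Scale = 1u << ((Sib >> 6) & 0x3);            // ss field
  F.Index = ((Sib >> 3) & 0x7) | (RexX ? 8 : 0); // index field + REX.X
  F.Base  = (Sib & 0x7) | (RexB ? 8 : 0);        // base field + REX.B
  return F;
}

int main() {
  // 0x8B = 10 001 011b: scale=4, index=001 (rcx), base=011 (rbx).
  SIBFields F = decodeSIB(0x8B, /*RexX=*/false, /*RexB=*/false);
  std::printf("scale=%u index=%u base=%u\n", F.Scale, F.Index, F.Base);
}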
*/ -static int readDisplacement(struct InternalInstruction* insn) { +static int readDisplacement(struct InternalInstruction* insn) { int8_t d8; int16_t d16; int32_t d32; - + dbgprintf(insn, "readDisplacement()"); - + if (insn->consumedDisplacement) return 0; - + insn->consumedDisplacement = TRUE; insn->displacementOffset = insn->readerCursor - insn->startLocation; - + switch (insn->eaDisplacement) { case EA_DISP_NONE: insn->consumedDisplacement = FALSE; @@ -1051,7 +1064,7 @@ static int readDisplacement(struct InternalInstruction* insn) { insn->displacement = d32; break; } - + insn->consumedDisplacement = TRUE; return 0; } @@ -1063,22 +1076,22 @@ static int readDisplacement(struct InternalInstruction* insn) { * @param insn - The instruction whose addressing information is to be read. * @return - 0 if the information was successfully read; nonzero otherwise. */ -static int readModRM(struct InternalInstruction* insn) { +static int readModRM(struct InternalInstruction* insn) { uint8_t mod, rm, reg; - + dbgprintf(insn, "readModRM()"); - + if (insn->consumedModRM) return 0; - + if (consumeByte(insn, &insn->modRM)) return -1; insn->consumedModRM = TRUE; - + mod = modFromModRM(insn->modRM); rm = rmFromModRM(insn->modRM); reg = regFromModRM(insn->modRM); - + /* * This goes by insn->registerSize to pick the correct register, which messes * up if we're using (say) XMM or 8-bit register operands. That gets fixed in @@ -1098,16 +1111,16 @@ static int readModRM(struct InternalInstruction* insn) { insn->eaRegBase = EA_REG_RAX; break; } - + reg |= rFromREX(insn->rexPrefix) << 3; rm |= bFromREX(insn->rexPrefix) << 3; - + insn->reg = (Reg)(insn->regBase + reg); - + switch (insn->addressSize) { case 2: insn->eaBaseBase = EA_BASE_BX_SI; - + switch (mod) { case 0x0: if (rm == 0x6) { @@ -1142,14 +1155,14 @@ static int readModRM(struct InternalInstruction* insn) { case 4: case 8: insn->eaBaseBase = (insn->addressSize == 4 ? EA_BASE_EAX : EA_BASE_RAX); - + switch (mod) { case 0x0: insn->eaDisplacement = EA_DISP_NONE; /* readSIB may override this */ switch (rm) { case 0x4: case 0xc: /* in case REXW.b is set */ - insn->eaBase = (insn->addressSize == 4 ? + insn->eaBase = (insn->addressSize == 4 ? EA_BASE_sib : EA_BASE_sib64); readSIB(insn); if (readDisplacement(insn)) @@ -1191,7 +1204,7 @@ static int readModRM(struct InternalInstruction* insn) { } break; } /* switch (insn->addressSize) */ - + return 0; } @@ -1274,12 +1287,12 @@ GENERIC_FIXUP_FUNC(fixupRMValue, insn->eaRegBase, EA_REG) * @return - 0 if fixup was successful; -1 if the register returned was * invalid for its class. */ -static int fixupReg(struct InternalInstruction *insn, +static int fixupReg(struct InternalInstruction *insn, const struct OperandSpecifier *op) { uint8_t valid; - + dbgprintf(insn, "fixupReg()"); - + switch ((OperandEncoding)op->encoding) { default: debug("Expected a REG or R/M encoding in fixupReg"); @@ -1311,12 +1324,12 @@ static int fixupReg(struct InternalInstruction *insn, } break; } - + return 0; } /* - * readOpcodeModifier - Reads an operand from the opcode field of an + * readOpcodeModifier - Reads an operand from the opcode field of an * instruction. Handles AddRegFrm instructions. * * @param insn - The instruction whose opcode field is to be read. 
@@ -1326,12 +1339,12 @@ static int fixupReg(struct InternalInstruction *insn, */ static int readOpcodeModifier(struct InternalInstruction* insn) { dbgprintf(insn, "readOpcodeModifier()"); - + if (insn->consumedOpcodeModifier) return 0; - + insn->consumedOpcodeModifier = TRUE; - + switch (insn->spec->modifierType) { default: debug("Unknown modifier type."); @@ -1345,11 +1358,11 @@ static int readOpcodeModifier(struct InternalInstruction* insn) { case MODIFIER_MODRM: insn->opcodeModifier = insn->modRM - insn->spec->modifierBase; return 0; - } + } } /* - * readOpcodeRegister - Reads an operand from the opcode field of an + * readOpcodeRegister - Reads an operand from the opcode field of an * instruction and interprets it appropriately given the operand width. * Handles AddRegFrm instructions. * @@ -1364,39 +1377,39 @@ static int readOpcodeRegister(struct InternalInstruction* insn, uint8_t size) { if (readOpcodeModifier(insn)) return -1; - + if (size == 0) size = insn->registerSize; - + switch (size) { case 1: - insn->opcodeRegister = (Reg)(MODRM_REG_AL + ((bFromREX(insn->rexPrefix) << 3) + insn->opcodeRegister = (Reg)(MODRM_REG_AL + ((bFromREX(insn->rexPrefix) << 3) | insn->opcodeModifier)); - if (insn->rexPrefix && + if (insn->rexPrefix && insn->opcodeRegister >= MODRM_REG_AL + 0x4 && insn->opcodeRegister < MODRM_REG_AL + 0x8) { insn->opcodeRegister = (Reg)(MODRM_REG_SPL + (insn->opcodeRegister - MODRM_REG_AL - 4)); } - + break; case 2: insn->opcodeRegister = (Reg)(MODRM_REG_AX - + ((bFromREX(insn->rexPrefix) << 3) + + ((bFromREX(insn->rexPrefix) << 3) | insn->opcodeModifier)); break; case 4: insn->opcodeRegister = (Reg)(MODRM_REG_EAX - + ((bFromREX(insn->rexPrefix) << 3) + + ((bFromREX(insn->rexPrefix) << 3) | insn->opcodeModifier)); break; case 8: - insn->opcodeRegister = (Reg)(MODRM_REG_RAX - + ((bFromREX(insn->rexPrefix) << 3) + insn->opcodeRegister = (Reg)(MODRM_REG_RAX + + ((bFromREX(insn->rexPrefix) << 3) | insn->opcodeModifier)); break; } - + return 0; } @@ -1414,20 +1427,20 @@ static int readImmediate(struct InternalInstruction* insn, uint8_t size) { uint16_t imm16; uint32_t imm32; uint64_t imm64; - + dbgprintf(insn, "readImmediate()"); - + if (insn->numImmediatesConsumed == 2) { debug("Already consumed two immediates"); return -1; } - + if (size == 0) size = insn->immediateSize; else insn->immediateSize = size; insn->immediateOffset = insn->readerCursor - insn->startLocation; - + switch (size) { case 1: if (consumeByte(insn, &imm8)) @@ -1450,9 +1463,9 @@ static int readImmediate(struct InternalInstruction* insn, uint8_t size) { insn->immediates[insn->numImmediatesConsumed] = imm64; break; } - + insn->numImmediatesConsumed++; - + return 0; } @@ -1465,7 +1478,7 @@ static int readImmediate(struct InternalInstruction* insn, uint8_t size) { */ static int readVVVV(struct InternalInstruction* insn) { dbgprintf(insn, "readVVVV()"); - + if (insn->vexSize == 3) insn->vvvv = vvvvFromVEX3of3(insn->vexPrefix[2]); else if (insn->vexSize == 2) @@ -1490,14 +1503,14 @@ static int readOperands(struct InternalInstruction* insn) { int index; int hasVVVV, needVVVV; int sawRegImm = 0; - + dbgprintf(insn, "readOperands()"); /* If non-zero vvvv specified, need to make sure one of the operands uses it. 
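readOpcodeRegister rebuilds an AddRegFrm register from the low three opcode bits plus REX.B, and remaps AH..BH to SPL..DIL whenever any REX prefix is present. A self-contained sketch of that rule for the byte-register case; the register name tables are written out by hand here rather than taken from the decoder.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Recover the byte register encoded in the opcode's low three bits, plus the
// REX.B extension bit, including the AH..BH -> SPL..DIL remap under REX.
static std::string byteRegFromOpcode(uint8_t Opcode, uint8_t RexPrefix) {
  static const std::vector<std::string> NoRex = {
      "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"};
  static const std::vector<std::string> WithRex = {
      "al",  "cl",  "dl",   "bl",   "spl",  "bpl",  "sil",  "dil",
      "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"};
  unsigned RegNum = (Opcode & 0x7) | ((RexPrefix & 1) << 3);  // REX.B is bit 0
  if (RexPrefix)
    return WithRex[RegNum];
  return NoRex[RegNum & 0x7];
}

int main() {
  // B4 /ib is "mov ah, imm8" without REX, but "mov spl, imm8" with REX.
  std::cout << byteRegFromOpcode(0xB4, 0x00) << "\n";  // ah
  std::cout << byteRegFromOpcode(0xB4, 0x40) << "\n";  // spl
}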
*/ hasVVVV = !readVVVV(insn); needVVVV = hasVVVV && (insn->vvvv != 0); - + for (index = 0; index < X86_MAX_OPERANDS; ++index) { switch (x86OperandSets[insn->spec->operands][index].encoding) { case ENCODING_NONE: @@ -1599,7 +1612,7 @@ static int readOperands(struct InternalInstruction* insn) { /* If we didn't find ENCODING_VVVV operand, but non-zero vvvv present, fail */ if (needVVVV) return -1; - + return 0; } @@ -1607,7 +1620,7 @@ static int readOperands(struct InternalInstruction* insn) { * decodeInstruction - Reads and interprets a full instruction provided by the * user. * - * @param insn - A pointer to the instruction to be populated. Must be + * @param insn - A pointer to the instruction to be populated. Must be * pre-allocated. * @param reader - The function to be used to read the instruction's bytes. * @param readerArg - A generic argument to be passed to the reader to store @@ -1632,7 +1645,7 @@ int decodeInstruction(struct InternalInstruction* insn, uint64_t startLoc, DisassemblerMode mode) { memset(insn, 0, sizeof(struct InternalInstruction)); - + insn->reader = reader; insn->readerArg = readerArg; insn->dlog = logger; @@ -1641,7 +1654,7 @@ int decodeInstruction(struct InternalInstruction* insn, insn->readerCursor = startLoc; insn->mode = mode; insn->numImmediatesConsumed = 0; - + if (readPrefixes(insn) || readOpcode(insn) || getID(insn, miiArg) || @@ -1650,14 +1663,14 @@ int decodeInstruction(struct InternalInstruction* insn, return -1; insn->operands = &x86OperandSets[insn->spec->operands][0]; - + insn->length = insn->readerCursor - insn->startLocation; - + dbgprintf(insn, "Read from 0x%llx to 0x%llx: length %zu", startLoc, insn->readerCursor, insn->length); - + if (insn->length > 15) dbgprintf(insn, "Instruction exceeds 15-byte limit"); - + return 0; } diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h index 9e68388..d8f7278 100644 --- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h +++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h @@ -20,6 +20,7 @@ #include "X86MCTargetDesc.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/MC/MCInstrInfo.h" namespace llvm { @@ -41,7 +42,6 @@ namespace X86 { AddrNumOperands = 5 }; } // end namespace X86; - /// X86II - This namespace holds all of the target specific flags that /// instruction info tracks. @@ -274,11 +274,12 @@ namespace X86II { //// MRM_XX - A mod/rm byte of exactly 0xXX. MRM_C1 = 33, MRM_C2 = 34, MRM_C3 = 35, MRM_C4 = 36, - MRM_C8 = 37, MRM_C9 = 38, MRM_E8 = 39, MRM_F0 = 40, - MRM_F8 = 41, MRM_F9 = 42, MRM_D0 = 45, MRM_D1 = 46, - MRM_D4 = 47, MRM_D5 = 48, MRM_D8 = 49, MRM_D9 = 50, - MRM_DA = 51, MRM_DB = 52, MRM_DC = 53, MRM_DD = 54, - MRM_DE = 55, MRM_DF = 56, + MRM_C8 = 37, MRM_C9 = 38, MRM_CA = 39, MRM_CB = 40, + MRM_E8 = 41, MRM_F0 = 42, MRM_F8 = 45, MRM_F9 = 46, + MRM_D0 = 47, MRM_D1 = 48, MRM_D4 = 49, MRM_D5 = 50, + MRM_D6 = 51, MRM_D8 = 52, MRM_D9 = 53, MRM_DA = 54, + MRM_DB = 55, MRM_DC = 56, MRM_DD = 57, MRM_DE = 58, + MRM_DF = 59, /// RawFrmImm8 - This is used for the ENTER instruction, which has two /// immediates, the first of which is a 16-bit immediate (specified by @@ -521,6 +522,26 @@ namespace X86II { } } + /// getOperandBias - compute any additional adjustment needed to + /// the offset to the start of the memory operand + /// in this instruction. + /// If this is a two-address instruction,skip one of the register operands. + /// FIXME: This should be handled during MCInst lowering. 
+ inline int getOperandBias(const MCInstrDesc& Desc) + { + unsigned NumOps = Desc.getNumOperands(); + unsigned CurOp = 0; + if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0) + ++CurOp; + else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) { + assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1); + // Special case for GATHER with 2 TIED_TO operands + // Skip the first 2 operands: dst, mask_wb + CurOp += 2; + } + return CurOp; + } + /// getMemoryOperandNo - The function returns the MCInst operand # for the /// first field of the memory operand. If the instruction doesn't have a /// memory operand, this returns -1. @@ -574,17 +595,15 @@ namespace X86II { ++FirstMemOp;// Skip the register dest (which is encoded in VEX_VVVV). return FirstMemOp; } - case X86II::MRM_C1: case X86II::MRM_C2: - case X86II::MRM_C3: case X86II::MRM_C4: - case X86II::MRM_C8: case X86II::MRM_C9: - case X86II::MRM_E8: case X86II::MRM_F0: - case X86II::MRM_F8: case X86II::MRM_F9: - case X86II::MRM_D0: case X86II::MRM_D1: - case X86II::MRM_D4: case X86II::MRM_D5: - case X86II::MRM_D8: case X86II::MRM_D9: - case X86II::MRM_DA: case X86II::MRM_DB: - case X86II::MRM_DC: case X86II::MRM_DD: - case X86II::MRM_DE: case X86II::MRM_DF: + case X86II::MRM_C1: case X86II::MRM_C2: case X86II::MRM_C3: + case X86II::MRM_C4: case X86II::MRM_C8: case X86II::MRM_C9: + case X86II::MRM_CA: case X86II::MRM_CB: case X86II::MRM_E8: + case X86II::MRM_F0: case X86II::MRM_F8: case X86II::MRM_F9: + case X86II::MRM_D0: case X86II::MRM_D1: case X86II::MRM_D4: + case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D8: + case X86II::MRM_D9: case X86II::MRM_DA: case X86II::MRM_DB: + case X86II::MRM_DC: case X86II::MRM_DD: case X86II::MRM_DE: + case X86II::MRM_DF: return -1; } } diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp index 5fbefae..016af71 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -237,6 +237,14 @@ StartsWithGlobalOffsetTable(const MCExpr *Expr) { return GOT_Normal; } +static bool HasSecRelSymbolRef(const MCExpr *Expr) { + if (Expr->getKind() == MCExpr::SymbolRef) { + const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr); + return Ref->getKind() == MCSymbolRefExpr::VK_SECREL; + } + return false; +} + void X86MCCodeEmitter:: EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size, MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS, @@ -268,8 +276,13 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size, if (Kind == GOT_Normal) ImmOffset = CurByte; } else if (Expr->getKind() == MCExpr::SymbolRef) { - const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr); - if (Ref->getKind() == MCSymbolRefExpr::VK_SECREL) { + if (HasSecRelSymbolRef(Expr)) { + FixupKind = MCFixupKind(FK_SecRel_4); + } + } else if (Expr->getKind() == MCExpr::Binary) { + const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr); + if (HasSecRelSymbolRef(Bin->getLHS()) + || HasSecRelSymbolRef(Bin->getRHS())) { FixupKind = MCFixupKind(FK_SecRel_4); } } @@ -979,18 +992,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, if ((TSFlags & X86II::FormMask) == X86II::Pseudo) return; - // If this is a two-address instruction, skip one of the register operands. - // FIXME: This should be handled during MCInst lowering. 
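getOperandBias exists so the code emitter and other clients skip tied register operands the same way: one operand for an ordinary two-address instruction, two for the GATHER forms with their extra tied mask write-back operand. A toy model of that rule over a plain tied-to vector, which stands in for the MCInstrDesc operand constraints.

#include <iostream>
#include <vector>

// Toy model of the operand-bias rule: -1 means "not tied", otherwise the
// value names the operand this one is tied to. A plain two-address
// instruction ties operand 1 to operand 0; the GATHER forms also tie the
// trailing mask write-back operand, so two operands are skipped.
static unsigned operandBias(const std::vector<int> &TiedTo) {
  unsigned NumOps = TiedTo.size();
  if (NumOps > 1 && TiedTo[1] == 0)
    return 1;                                         // skip the tied dest copy
  if (NumOps > 3 && TiedTo[2] == 0 && TiedTo[NumOps - 1] == 1)
    return 2;                                         // GATHER: skip dst, mask_wb
  return 0;
}

int main() {
  std::cout << operandBias({-1, 0, -1}) << "\n";            // 1 (two-address)
  std::cout << operandBias({-1, -1, 0, -1, -1, 1}) << "\n"; // 2 (gather-like)
  std::cout << operandBias({-1, -1, -1}) << "\n";           // 0
}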
unsigned NumOps = Desc.getNumOperands(); - unsigned CurOp = 0; - if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0) - ++CurOp; - else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) { - assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1); - // Special case for GATHER with 2 TIED_TO operands - // Skip the first 2 operands: dst, mask_wb - CurOp += 2; - } + unsigned CurOp = X86II::getOperandBias(Desc); // Keep track of the current byte being emitted. unsigned CurByte = 0; @@ -1136,17 +1139,15 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, TSFlags, CurByte, OS, Fixups); CurOp += X86::AddrNumOperands; break; - case X86II::MRM_C1: case X86II::MRM_C2: - case X86II::MRM_C3: case X86II::MRM_C4: - case X86II::MRM_C8: case X86II::MRM_C9: - case X86II::MRM_D0: case X86II::MRM_D1: - case X86II::MRM_D4: case X86II::MRM_D5: - case X86II::MRM_D8: case X86II::MRM_D9: - case X86II::MRM_DA: case X86II::MRM_DB: - case X86II::MRM_DC: case X86II::MRM_DD: - case X86II::MRM_DE: case X86II::MRM_DF: - case X86II::MRM_E8: case X86II::MRM_F0: - case X86II::MRM_F8: case X86II::MRM_F9: + case X86II::MRM_C1: case X86II::MRM_C2: case X86II::MRM_C3: + case X86II::MRM_C4: case X86II::MRM_C8: case X86II::MRM_C9: + case X86II::MRM_CA: case X86II::MRM_CB: case X86II::MRM_D0: + case X86II::MRM_D1: case X86II::MRM_D4: case X86II::MRM_D5: + case X86II::MRM_D6: case X86II::MRM_D8: case X86II::MRM_D9: + case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC: + case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF: + case X86II::MRM_E8: case X86II::MRM_F0: case X86II::MRM_F8: + case X86II::MRM_F9: EmitByte(BaseOpcode, CurByte, OS); unsigned char MRM; @@ -1158,10 +1159,13 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, case X86II::MRM_C4: MRM = 0xC4; break; case X86II::MRM_C8: MRM = 0xC8; break; case X86II::MRM_C9: MRM = 0xC9; break; + case X86II::MRM_CA: MRM = 0xCA; break; + case X86II::MRM_CB: MRM = 0xCB; break; case X86II::MRM_D0: MRM = 0xD0; break; case X86II::MRM_D1: MRM = 0xD1; break; case X86II::MRM_D4: MRM = 0xD4; break; case X86II::MRM_D5: MRM = 0xD5; break; + case X86II::MRM_D6: MRM = 0xD6; break; case X86II::MRM_D8: MRM = 0xD8; break; case X86II::MRM_D9: MRM = 0xD9; break; case X86II::MRM_DA: MRM = 0xDA; break; diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp index bc272ef..ed64a32 100644 --- a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp @@ -9,6 +9,8 @@ #include "MCTargetDesc/X86FixupKinds.h" #include "MCTargetDesc/X86MCTargetDesc.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCValue.h" #include "llvm/MC/MCWinCOFFObjectWriter.h" #include "llvm/Support/COFF.h" #include "llvm/Support/ErrorHandling.h" @@ -27,7 +29,9 @@ namespace { X86WinCOFFObjectWriter(bool Is64Bit_); ~X86WinCOFFObjectWriter(); - virtual unsigned getRelocType(unsigned FixupKind) const; + virtual unsigned getRelocType(const MCValue &Target, + const MCFixup &Fixup, + bool IsCrossSection) const LLVM_OVERRIDE; }; } @@ -38,7 +42,14 @@ X86WinCOFFObjectWriter::X86WinCOFFObjectWriter(bool Is64Bit_) X86WinCOFFObjectWriter::~X86WinCOFFObjectWriter() {} -unsigned X86WinCOFFObjectWriter::getRelocType(unsigned FixupKind) const { +unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target, + const MCFixup &Fixup, + bool IsCrossSection) const { + unsigned FixupKind = IsCrossSection ? 
FK_PCRel_4 : Fixup.getKind(); + + MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ? + MCSymbolRefExpr::VK_None : Target.getSymA()->getKind(); + switch (FixupKind) { case FK_PCRel_4: case X86::reloc_riprel_4byte: @@ -46,6 +57,9 @@ unsigned X86WinCOFFObjectWriter::getRelocType(unsigned FixupKind) const { return Is64Bit ? COFF::IMAGE_REL_AMD64_REL32 : COFF::IMAGE_REL_I386_REL32; case FK_Data_4: case X86::reloc_signed_4byte: + if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32) + return Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32NB : + COFF::IMAGE_REL_I386_DIR32NB; return Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32 : COFF::IMAGE_REL_I386_DIR32; case FK_Data_8: if (Is64Bit) diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h index 1f9919f..947002f 100644 --- a/lib/Target/X86/X86.h +++ b/lib/Target/X86/X86.h @@ -69,6 +69,11 @@ ImmutablePass *createX86TargetTransformInfoPass(const X86TargetMachine *TM); /// createX86PadShortFunctions - Return a pass that pads short functions /// with NOOPs. This will prevent a stall when returning on the Atom. FunctionPass *createX86PadShortFunctions(); +/// createX86FixupLEAs - Return a a pass that selectively replaces +/// certain instructions (like add, sub, inc, dec, some shifts, +/// and some multiplies) by equivalent LEA instructions, in order +/// to eliminate execution delays in some Atom processors. +FunctionPass *createX86FixupLEAs(); } // End llvm namespace diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index 0216252..87bb68d 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -120,8 +120,14 @@ def FeatureBMI2 : SubtargetFeature<"bmi2", "HasBMI2", "true", "Support BMI2 instructions">; def FeatureRTM : SubtargetFeature<"rtm", "HasRTM", "true", "Support RTM instructions">; +def FeatureHLE : SubtargetFeature<"hle", "HasHLE", "true", + "Support HLE">; def FeatureADX : SubtargetFeature<"adx", "HasADX", "true", "Support ADX instructions">; +def FeaturePRFCHW : SubtargetFeature<"prfchw", "HasPRFCHW", "true", + "Support PRFCHW instructions">; +def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true", + "Support RDSEED instruction">; def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true", "Use LEA for adjusting the stack pointer">; def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb", @@ -130,6 +136,11 @@ def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb", def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions", "PadShortFunctions", "true", "Pad short functions">; +def FeatureCallRegIndirect : SubtargetFeature<"call-reg-indirect", + "CallRegIndirect", "true", + "Call register indirect">; +def FeatureLEAUsesAG : SubtargetFeature<"lea-uses-ag", "LEAUsesAG", "true", + "LEA instruction needs inputs at AG stage">; //===----------------------------------------------------------------------===// // X86 processors supported. 
@@ -143,9 +154,6 @@ def ProcIntelAtom : SubtargetFeature<"atom", "X86ProcFamily", "IntelAtom", class Proc<string Name, list<SubtargetFeature> Features> : ProcessorModel<Name, GenericModel, Features>; -class AtomProc<string Name, list<SubtargetFeature> Features> - : ProcessorModel<Name, AtomModel, Features>; - def : Proc<"generic", []>; def : Proc<"i386", []>; def : Proc<"i486", []>; @@ -162,46 +170,62 @@ def : Proc<"pentium4", [FeatureSSE2]>; def : Proc<"pentium4m", [FeatureSSE2, FeatureSlowBTMem]>; def : Proc<"x86-64", [FeatureSSE2, Feature64Bit, FeatureSlowBTMem, FeatureFastUAMem]>; -def : Proc<"yonah", [FeatureSSE3, FeatureSlowBTMem]>; -def : Proc<"prescott", [FeatureSSE3, FeatureSlowBTMem]>; -def : Proc<"nocona", [FeatureSSE3, FeatureCMPXCHG16B, - FeatureSlowBTMem]>; -def : Proc<"core2", [FeatureSSSE3, FeatureCMPXCHG16B, - FeatureSlowBTMem]>; -def : Proc<"penryn", [FeatureSSE41, FeatureCMPXCHG16B, - FeatureSlowBTMem]>; -def : AtomProc<"atom", [ProcIntelAtom, FeatureSSSE3, FeatureCMPXCHG16B, - FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP, - FeatureSlowDivide, FeaturePadShortFunctions]>; +// Intel Core Duo. +def : ProcessorModel<"yonah", SandyBridgeModel, + [FeatureSSE3, FeatureSlowBTMem]>; + +// NetBurst. +def : Proc<"prescott", [FeatureSSE3, FeatureSlowBTMem]>; +def : Proc<"nocona", [FeatureSSE3, FeatureCMPXCHG16B, FeatureSlowBTMem]>; + +// Intel Core 2 Solo/Duo. +def : ProcessorModel<"core2", SandyBridgeModel, + [FeatureSSSE3, FeatureCMPXCHG16B, FeatureSlowBTMem]>; +def : ProcessorModel<"penryn", SandyBridgeModel, + [FeatureSSE41, FeatureCMPXCHG16B, FeatureSlowBTMem]>; + +// Atom. +def : ProcessorModel<"atom", AtomModel, + [ProcIntelAtom, FeatureSSSE3, FeatureCMPXCHG16B, + FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP, + FeatureSlowDivide, + FeatureCallRegIndirect, + FeatureLEAUsesAG, + FeaturePadShortFunctions]>; + // "Arrandale" along with corei3 and corei5 -def : Proc<"corei7", [FeatureSSE42, FeatureCMPXCHG16B, - FeatureSlowBTMem, FeatureFastUAMem, - FeaturePOPCNT, FeatureAES]>; -def : Proc<"nehalem", [FeatureSSE42, FeatureCMPXCHG16B, - FeatureSlowBTMem, FeatureFastUAMem, - FeaturePOPCNT]>; +def : ProcessorModel<"corei7", SandyBridgeModel, + [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem, + FeatureFastUAMem, FeaturePOPCNT, FeatureAES]>; + +def : ProcessorModel<"nehalem", SandyBridgeModel, + [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem, + FeatureFastUAMem, FeaturePOPCNT]>; // Westmere is a similar machine to nehalem with some additional features. // Westmere is the corei3/i5/i7 path from nehalem to sandybridge -def : Proc<"westmere", [FeatureSSE42, FeatureCMPXCHG16B, - FeatureSlowBTMem, FeatureFastUAMem, - FeaturePOPCNT, FeatureAES, FeaturePCLMUL]>; +def : ProcessorModel<"westmere", SandyBridgeModel, + [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem, + FeatureFastUAMem, FeaturePOPCNT, FeatureAES, + FeaturePCLMUL]>; // Sandy Bridge // SSE is not listed here since llvm treats AVX as a reimplementation of SSE, // rather than a superset. 
-def : Proc<"corei7-avx", [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem, - FeaturePOPCNT, FeatureAES, FeaturePCLMUL]>; +def : ProcessorModel<"corei7-avx", SandyBridgeModel, + [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem, + FeaturePOPCNT, FeatureAES, FeaturePCLMUL]>; // Ivy Bridge -def : Proc<"core-avx-i", [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem, - FeaturePOPCNT, FeatureAES, FeaturePCLMUL, - FeatureRDRAND, FeatureF16C, FeatureFSGSBase]>; +def : ProcessorModel<"core-avx-i", SandyBridgeModel, + [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem, + FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND, + FeatureF16C, FeatureFSGSBase]>; // Haswell -def : Proc<"core-avx2", [FeatureAVX2, FeatureCMPXCHG16B, FeatureFastUAMem, - FeaturePOPCNT, FeatureAES, FeaturePCLMUL, - FeatureRDRAND, FeatureF16C, FeatureFSGSBase, - FeatureMOVBE, FeatureLZCNT, FeatureBMI, - FeatureBMI2, FeatureFMA, - FeatureRTM]>; +def : ProcessorModel<"core-avx2", HaswellModel, + [FeatureAVX2, FeatureCMPXCHG16B, FeatureFastUAMem, + FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND, + FeatureF16C, FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, + FeatureBMI, FeatureBMI2, FeatureFMA, FeatureRTM, + FeatureHLE]>; def : Proc<"k6", [FeatureMMX]>; def : Proc<"k6-2", [Feature3DNow]>; @@ -279,6 +303,9 @@ def ATTAsmParser : AsmParser { def ATTAsmParserVariant : AsmParserVariant { int Variant = 0; + // Variant name. + string Name = "att"; + // Discard comments in assembly strings. string CommentDelimiter = "#"; @@ -289,6 +316,9 @@ def ATTAsmParserVariant : AsmParserVariant { def IntelAsmParserVariant : AsmParserVariant { int Variant = 1; + // Variant name. + string Name = "intel"; + // Discard comments in assembly strings. string CommentDelimiter = ";"; diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index ac5daec..6b228b0 100644 --- a/lib/Target/X86/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -201,7 +201,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO, case X86II::MO_TLVP_PIC_BASE: O << "@TLVP" << '-' << *MF->getPICBaseSymbol(); break; - case X86II::MO_SECREL: O << "@SECREL"; break; + case X86II::MO_SECREL: O << "@SECREL32"; break; } } diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td index b516be0..9eafbd5 100644 --- a/lib/Target/X86/X86CallingConv.td +++ b/lib/Target/X86/X86CallingConv.td @@ -387,8 +387,8 @@ def CC_X86_32_ThisCall : CallingConv<[ // Promote i8/i16 arguments to i32. CCIfType<[i8, i16], CCPromoteToType<i32>>, - // Pass sret arguments indirectly through EAX - CCIfSRet<CCAssignToReg<[EAX]>>, + // Pass sret arguments indirectly through stack. 
+ CCIfSRet<CCAssignToStack<4, 4>>, // The first integer argument is passed in ECX CCIfType<[i32], CCAssignToReg<[ECX]>>, diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp index 2518e02..8fea6ed 100644 --- a/lib/Target/X86/X86CodeEmitter.cpp +++ b/lib/Target/X86/X86CodeEmitter.cpp @@ -1451,6 +1451,14 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI, MCE.emitByte(BaseOpcode); MCE.emitByte(0xC9); break; + case X86II::MRM_CA: + MCE.emitByte(BaseOpcode); + MCE.emitByte(0xCA); + break; + case X86II::MRM_CB: + MCE.emitByte(BaseOpcode); + MCE.emitByte(0xCB); + break; case X86II::MRM_E8: MCE.emitByte(BaseOpcode); MCE.emitByte(0xE8); diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 85155f5..cf44bd0 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -68,12 +68,12 @@ public: virtual bool TargetSelectInstruction(const Instruction *I); - /// TryToFoldLoad - The specified machine instr operand is a vreg, and that + /// \brief The specified machine instr operand is a vreg, and that /// vreg is being provided by the specified load instruction. If possible, /// try to fold the load as an operand to the instruction, returning true if /// possible. - virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo, - const LoadInst *LI); + virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI); virtual bool FastLowerArguments(); @@ -107,6 +107,8 @@ private: bool X86SelectShift(const Instruction *I); + bool X86SelectDivRem(const Instruction *I); + bool X86SelectSelect(const Instruction *I); bool X86SelectTrunc(const Instruction *I); @@ -691,11 +693,6 @@ bool X86FastISel::X86SelectStore(const Instruction *I) { if (S->isAtomic()) return false; - unsigned SABIAlignment = - TD.getABITypeAlignment(S->getValueOperand()->getType()); - if (S->getAlignment() != 0 && S->getAlignment() < SABIAlignment) - return false; - MVT VT; if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true)) return false; @@ -816,14 +813,16 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { // The x86-64 ABI for returning structs by value requires that we copy // the sret argument into %rax for the return. We saved the argument into // a virtual register in the entry block, so now we copy the value out - // and into %rax. - if (Subtarget->is64Bit() && F.hasStructRetAttr()) { + // and into %rax. We also do the same with %eax for Win32. + if (F.hasStructRetAttr() && + (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { unsigned Reg = X86MFInfo->getSRetReturnReg(); assert(Reg && "SRetReturnReg should have been set in LowerFormalArguments()!"); + unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), - X86::RAX).addReg(Reg); - RetRegs.push_back(X86::RAX); + RetReg).addReg(Reg); + RetRegs.push_back(RetReg); } // Now emit the RET. 
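The hunk that follows adds X86SelectDivRem, whose table encodes the register conventions for DIV/IDIV. Read off that table, the i32 signed-division case would be emitted roughly as the sequence below (illustrative only; register names are taken from the table, virtual-register copies elided):

    COPY    EAX <- %dividend      ; OpCopy: dividend into the low input register
    CDQ                           ; OpSignExtend: sign-extend EAX into EDX
    IDIV32r %divisor              ; OpDivRem: quotient -> EAX, remainder -> EDX
    COPY    %result <- EAX        ; DivRemResultReg (SRem would copy EDX instead)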
@@ -1233,6 +1232,124 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { return true; } +bool X86FastISel::X86SelectDivRem(const Instruction *I) { + const static unsigned NumTypes = 4; // i8, i16, i32, i64 + const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem + const static bool S = true; // IsSigned + const static bool U = false; // !IsSigned + const static unsigned Copy = TargetOpcode::COPY; + // For the X86 DIV/IDIV instruction, in most cases the dividend + // (numerator) must be in a specific register pair highreg:lowreg, + // producing the quotient in lowreg and the remainder in highreg. + // For most data types, to set up the instruction, the dividend is + // copied into lowreg, and lowreg is sign-extended or zero-extended + // into highreg. The exception is i8, where the dividend is defined + // as a single register rather than a register pair, and we + // therefore directly sign-extend or zero-extend the dividend into + // lowreg, instead of copying, and ignore the highreg. + const static struct DivRemEntry { + // The following portion depends only on the data type. + const TargetRegisterClass *RC; + unsigned LowInReg; // low part of the register pair + unsigned HighInReg; // high part of the register pair + // The following portion depends on both the data type and the operation. + struct DivRemResult { + unsigned OpDivRem; // The specific DIV/IDIV opcode to use. + unsigned OpSignExtend; // Opcode for sign-extending lowreg into + // highreg, or copying a zero into highreg. + unsigned OpCopy; // Opcode for copying dividend into lowreg, or + // zero/sign-extending into lowreg for i8. + unsigned DivRemResultReg; // Register containing the desired result. + bool IsOpSigned; // Whether to use signed or unsigned form. + } ResultTable[NumOps]; + } OpTable[NumTypes] = { + { &X86::GR8RegClass, X86::AX, 0, { + { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv + { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem + { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv + { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem + } + }, // i8 + { &X86::GR16RegClass, X86::AX, X86::DX, { + { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv + { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem + { X86::DIV16r, X86::MOV16r0, Copy, X86::AX, U }, // UDiv + { X86::DIV16r, X86::MOV16r0, Copy, X86::DX, U }, // URem + } + }, // i16 + { &X86::GR32RegClass, X86::EAX, X86::EDX, { + { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv + { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem + { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv + { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem + } + }, // i32 + { &X86::GR64RegClass, X86::RAX, X86::RDX, { + { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv + { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem + { X86::DIV64r, X86::MOV64r0, Copy, X86::RAX, U }, // UDiv + { X86::DIV64r, X86::MOV64r0, Copy, X86::RDX, U }, // URem + } + }, // i64 + }; + + MVT VT; + if (!isTypeLegal(I->getType(), VT)) + return false; + + unsigned TypeIndex, OpIndex; + switch (VT.SimpleTy) { + default: return false; + case MVT::i8: TypeIndex = 0; break; + case MVT::i16: TypeIndex = 1; break; + case MVT::i32: TypeIndex = 2; break; + case MVT::i64: TypeIndex = 3; + if (!Subtarget->is64Bit()) + return false; + break; + } + + switch (I->getOpcode()) { + default: llvm_unreachable("Unexpected div/rem opcode"); + case Instruction::SDiv: OpIndex = 0; break; + case Instruction::SRem: OpIndex = 1; break; + case Instruction::UDiv: 
OpIndex = 2; break; + case Instruction::URem: OpIndex = 3; break; + } + + const DivRemEntry &TypeEntry = OpTable[TypeIndex]; + const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex]; + unsigned Op0Reg = getRegForValue(I->getOperand(0)); + if (Op0Reg == 0) + return false; + unsigned Op1Reg = getRegForValue(I->getOperand(1)); + if (Op1Reg == 0) + return false; + + // Move op0 into low-order input register. + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg); + // Zero-extend or sign-extend into high-order input register. + if (OpEntry.OpSignExtend) { + if (OpEntry.IsOpSigned) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(OpEntry.OpSignExtend)); + else + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(OpEntry.OpSignExtend), TypeEntry.HighInReg); + } + // Generate the DIV/IDIV instruction. + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(OpEntry.OpDivRem)).addReg(Op1Reg); + // Copy output register into result register. + unsigned ResultReg = createResultReg(TypeEntry.RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Copy), ResultReg).addReg(OpEntry.DivRemResultReg); + UpdateValueMap(I, ResultReg); + + return true; +} + bool X86FastISel::X86SelectSelect(const Instruction *I) { MVT VT; if (!isTypeLegal(I->getType(), VT)) @@ -1526,7 +1643,7 @@ bool X86FastISel::FastLowerArguments() { if (!FuncInfo.CanLowerReturn) return false; - if (Subtarget->isTargetWindows()) + if (Subtarget->isTargetWin64()) return false; const Function *F = FuncInfo.Fn; @@ -2082,6 +2199,11 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) { case Instruction::AShr: case Instruction::Shl: return X86SelectShift(I); + case Instruction::SDiv: + case Instruction::UDiv: + case Instruction::SRem: + case Instruction::URem: + return X86SelectDivRem(I); case Instruction::Select: return X86SelectSelect(I); case Instruction::Trunc: @@ -2273,12 +2395,8 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) { } -/// TryToFoldLoad - The specified machine instr operand is a vreg, and that -/// vreg is being provided by the specified load instruction. If possible, -/// try to fold the load as an operand to the instruction, returning true if -/// possible. -bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo, - const LoadInst *LI) { +bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, + const LoadInst *LI) { X86AddressMode AM; if (!X86SelectAddress(LI->getOperand(0), AM)) return false; diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp new file mode 100644 index 0000000..0dd034c --- /dev/null +++ b/lib/Target/X86/X86FixupLEAs.cpp @@ -0,0 +1,253 @@ +//===-- X86FixupLEAs.cpp - use or replace LEA instructions -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the pass which will find instructions which +// can be re-written as LEA instructions in order to reduce pipeline +// delays for some models of the Intel Atom family. 
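// As a concrete (illustrative) sketch of the rewrite: an instruction such as
//     addl $8, %ecx
// whose result is consumed by a nearby address computation can be replaced by
//     leal 8(%ecx), %ecx
// which yields the same register value but is computed in the address
// generation stage, sidestepping the stall the affected Atom cores take when
// an ALU result feeds an address.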
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "x86-fixup-LEAs" +#include "X86.h" +#include "X86InstrInfo.h" +#include "X86Subtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/LiveVariables.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetInstrInfo.h" +using namespace llvm; + +STATISTIC(NumLEAs, "Number of LEA instructions created"); + +namespace { + class FixupLEAPass : public MachineFunctionPass { + enum RegUsageState { RU_NotUsed, RU_Write, RU_Read }; + static char ID; + /// \brief Loop over all of the instructions in the basic block + /// replacing applicable instructions with LEA instructions, + /// where appropriate. + bool processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI); + + virtual const char *getPassName() const { return "X86 Atom LEA Fixup";} + + /// \brief Given a machine register, look for the instruction + /// which writes it in the current basic block. If found, + /// try to replace it with an equivalent LEA instruction. + /// If replacement succeeds, then also process the the newly created + /// instruction. + void seekLEAFixup(MachineOperand& p, MachineBasicBlock::iterator& I, + MachineFunction::iterator MFI); + + /// \brief Given a memory access or LEA instruction + /// whose address mode uses a base and/or index register, look for + /// an opportunity to replace the instruction which sets the base or index + /// register with an equivalent LEA instruction. + void processInstruction(MachineBasicBlock::iterator& I, + MachineFunction::iterator MFI); + + /// \brief Determine if an instruction references a machine register + /// and, if so, whether it reads or writes the register. + RegUsageState usesRegister(MachineOperand& p, + MachineBasicBlock::iterator I); + + /// \brief Step backwards through a basic block, looking + /// for an instruction which writes a register within + /// a maximum of INSTR_DISTANCE_THRESHOLD instruction latency cycles. + MachineBasicBlock::iterator searchBackwards(MachineOperand& p, + MachineBasicBlock::iterator& I, + MachineFunction::iterator MFI); + + /// \brief if an instruction can be converted to an + /// equivalent LEA, insert the new instruction into the basic block + /// and return a pointer to it. Otherwise, return zero. + MachineInstr* postRAConvertToLEA(MachineFunction::iterator &MFI, + MachineBasicBlock::iterator &MBBI) const; + + public: + FixupLEAPass() : MachineFunctionPass(ID) {} + + /// \brief Loop over all of the basic blocks, + /// replacing instructions by equivalent LEA instructions + /// if needed and when possible. + virtual bool runOnMachineFunction(MachineFunction &MF); + + private: + MachineFunction *MF; + const TargetMachine *TM; + const TargetInstrInfo *TII; // Machine instruction info. + + }; + char FixupLEAPass::ID = 0; +} + +MachineInstr * +FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI, + MachineBasicBlock::iterator &MBBI) const { + MachineInstr* MI = MBBI; + MachineInstr* NewMI; + switch (MI->getOpcode()) { + case X86::MOV32rr: + case X86::MOV64rr: { + const MachineOperand& Src = MI->getOperand(1); + const MachineOperand& Dest = MI->getOperand(0); + NewMI = BuildMI(*MF, MI->getDebugLoc(), + TII->get( MI->getOpcode() == X86::MOV32rr ? 
X86::LEA32r : X86::LEA64r)) + .addOperand(Dest) + .addOperand(Src).addImm(1).addReg(0).addImm(0).addReg(0); + MFI->insert(MBBI, NewMI); // Insert the new inst + return NewMI; + } + case X86::ADD64ri32: + case X86::ADD64ri8: + case X86::ADD64ri32_DB: + case X86::ADD64ri8_DB: + case X86::ADD32ri: + case X86::ADD32ri8: + case X86::ADD32ri_DB: + case X86::ADD32ri8_DB: + case X86::ADD16ri: + case X86::ADD16ri8: + case X86::ADD16ri_DB: + case X86::ADD16ri8_DB: + if (!MI->getOperand(2).isImm()) { + // convertToThreeAddress will call getImm() + // which requires isImm() to be true + return 0; + } + } + return TII->convertToThreeAddress(MFI, MBBI, 0); +} + +FunctionPass *llvm::createX86FixupLEAs() { + return new FixupLEAPass(); +} + +bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) { + MF = &Func; + TII = Func.getTarget().getInstrInfo(); + TM = &MF->getTarget(); + + DEBUG(dbgs() << "Start X86FixupLEAs\n";); + // Process all basic blocks. + for (MachineFunction::iterator I = Func.begin(), E = Func.end(); I != E; ++I) + processBasicBlock(Func, I); + DEBUG(dbgs() << "End X86FixupLEAs\n";); + + return true; +} + +FixupLEAPass::RegUsageState FixupLEAPass::usesRegister(MachineOperand& p, + MachineBasicBlock::iterator I) { + RegUsageState RegUsage = RU_NotUsed; + MachineInstr* MI = I; + + for (unsigned int i = 0; i < MI->getNumOperands(); ++i) { + MachineOperand& opnd = MI->getOperand(i); + if (opnd.isReg() && opnd.getReg() == p.getReg()){ + if (opnd.isDef()) + return RU_Write; + RegUsage = RU_Read; + } + } + return RegUsage; +} + +/// getPreviousInstr - Given a reference to an instruction in a basic +/// block, return a reference to the previous instruction in the block, +/// wrapping around to the last instruction of the block if the block +/// branches to itself. +static inline bool getPreviousInstr(MachineBasicBlock::iterator& I, + MachineFunction::iterator MFI) { + if (I == MFI->begin()) { + if (MFI->isPredecessor(MFI)) { + I = --MFI->end(); + return true; + } + else + return false; + } + --I; + return true; +} + +MachineBasicBlock::iterator FixupLEAPass::searchBackwards(MachineOperand& p, + MachineBasicBlock::iterator& I, + MachineFunction::iterator MFI) { + int InstrDistance = 1; + MachineBasicBlock::iterator CurInst; + static const int INSTR_DISTANCE_THRESHOLD = 5; + + CurInst = I; + bool Found; + Found = getPreviousInstr(CurInst, MFI); + while( Found && I != CurInst) { + if (CurInst->isCall() || CurInst->isInlineAsm()) + break; + if (InstrDistance > INSTR_DISTANCE_THRESHOLD) + break; // too far back to make a difference + if (usesRegister(p, CurInst) == RU_Write){ + return CurInst; + } + InstrDistance += TII->getInstrLatency(TM->getInstrItineraryData(), CurInst); + Found = getPreviousInstr(CurInst, MFI); + } + return 0; +} + +void FixupLEAPass::processInstruction(MachineBasicBlock::iterator& I, + MachineFunction::iterator MFI) { + // Process a load, store, or LEA instruction. 
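// Concretely: locate the instruction's memory-operand group via
// X86II::getMemoryOperandNo (adjusted by getOperandBias), then for both the
// base and the index register of that address (ESP excluded) call
// seekLEAFixup to find the defining instruction within range and, if
// possible, replace it with an LEA.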
+ MachineInstr *MI = I; + int opcode = MI->getOpcode(); + const MCInstrDesc& Desc = MI->getDesc(); + int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags, opcode); + if (AddrOffset >= 0) { + AddrOffset += X86II::getOperandBias(Desc); + MachineOperand& p = MI->getOperand(AddrOffset + X86::AddrBaseReg); + if (p.isReg() && p.getReg() != X86::ESP) { + seekLEAFixup(p, I, MFI); + } + MachineOperand& q = MI->getOperand(AddrOffset + X86::AddrIndexReg); + if (q.isReg() && q.getReg() != X86::ESP) { + seekLEAFixup(q, I, MFI); + } + } +} + +void FixupLEAPass::seekLEAFixup(MachineOperand& p, + MachineBasicBlock::iterator& I, + MachineFunction::iterator MFI) { + MachineBasicBlock::iterator MBI = searchBackwards(p, I, MFI); + if (MBI) { + MachineInstr* NewMI = postRAConvertToLEA(MFI, MBI); + if (NewMI) { + ++NumLEAs; + DEBUG(dbgs() << "Candidate to replace:"; MBI->dump();); + // now to replace with an equivalent LEA... + DEBUG(dbgs() << "Replaced by: "; NewMI->dump();); + MFI->erase(MBI); + MachineBasicBlock::iterator J = + static_cast<MachineBasicBlock::iterator> (NewMI); + processInstruction(J, MFI); + } + } +} + +bool FixupLEAPass::processBasicBlock(MachineFunction &MF, + MachineFunction::iterator MFI) { + + for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I) + processInstruction(I, MFI); + return false; +} diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 54cbd40..16e1e42 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -528,11 +528,11 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { if (!MI.getFlag(MachineInstr::FrameSetup)) break; // We don't exect any more prolog instructions. - if (ExpectEnd) return 0; + if (ExpectEnd) return CU::UNWIND_MODE_DWARF; if (Opc == PushInstr) { // If there are too many saved registers, we cannot use compact encoding. - if (SavedRegIdx >= CU_NUM_SAVED_REGS) return 0; + if (SavedRegIdx >= CU_NUM_SAVED_REGS) return CU::UNWIND_MODE_DWARF; SavedRegs[SavedRegIdx++] = MI.getOperand(0).getReg(); StackAdjust += OffsetSize; @@ -542,7 +542,7 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { unsigned DstReg = MI.getOperand(0).getReg(); if (DstReg != FramePtr || SrcReg != StackPtr) - return 0; + return CU::UNWIND_MODE_DWARF; StackAdjust = 0; memset(SavedRegs, 0, sizeof(SavedRegs)); @@ -552,7 +552,7 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { Opc == X86::SUB32ri || Opc == X86::SUB32ri8) { if (StackSize) // We already have a stack size. - return 0; + return CU::UNWIND_MODE_DWARF; if (!MI.getOperand(0).isReg() || MI.getOperand(0).getReg() != MI.getOperand(1).getReg() || @@ -560,7 +560,7 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { // We need this to be a stack adjustment pointer. Something like: // // %RSP<def> = SUB64ri8 %RSP, 48 - return 0; + return CU::UNWIND_MODE_DWARF; StackSize = MI.getOperand(2).getImm() / StackDivide; SubtractInstrIdx += InstrOffset; @@ -574,31 +574,31 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { if (HasFP) { if ((StackAdjust & 0xFF) != StackAdjust) // Offset was too big for compact encoding. - return 0; + return CU::UNWIND_MODE_DWARF; // Get the encoding of the saved registers when we have a frame pointer. 
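// In this mode the 32-bit encoding ends up as: the UNWIND_MODE_BP_FRAME mode
// bits in the top byte, the (at most 8-bit) stack adjustment in bits 16-23,
// and the saved-register encoding masked by UNWIND_BP_FRAME_REGISTERS in the
// low bits; anything that cannot be represented falls back to
// UNWIND_MODE_DWARF.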
uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit); - if (RegEnc == ~0U) return 0; + if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; - CompactUnwindEncoding |= 0x01000000; + CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME; CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16; - CompactUnwindEncoding |= RegEnc & 0x7FFF; + CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS; } else { ++StackAdjust; uint32_t TotalStackSize = StackAdjust + StackSize; if ((TotalStackSize & 0xFF) == TotalStackSize) { // Frameless stack with a small stack size. - CompactUnwindEncoding |= 0x02000000; + CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD; // Encode the stack size. CompactUnwindEncoding |= (TotalStackSize & 0xFF) << 16; } else { if ((StackAdjust & 0x7) != StackAdjust) // The extra stack adjustments are too big for us to handle. - return 0; + return CU::UNWIND_MODE_DWARF; // Frameless stack with an offset too large for us to encode compactly. - CompactUnwindEncoding |= 0x03000000; + CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND; // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP' // instruction. @@ -616,10 +616,11 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const { uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegs, SavedRegIdx, Is64Bit); - if (RegEnc == ~0U) return 0; + if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; // Encode the register encoding. - CompactUnwindEncoding |= RegEnc & 0x3FF; + CompactUnwindEncoding |= + RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION; } return CompactUnwindEncoding; diff --git a/lib/Target/X86/X86FrameLowering.h b/lib/Target/X86/X86FrameLowering.h index 3f08b9a..6e309d8 100644 --- a/lib/Target/X86/X86FrameLowering.h +++ b/lib/Target/X86/X86FrameLowering.h @@ -19,8 +19,35 @@ #include "llvm/Target/TargetFrameLowering.h" namespace llvm { - class MCSymbol; - class X86TargetMachine; + +namespace CU { + + /// Compact unwind encoding values. + enum CompactUnwindEncodings { + /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after + /// the return address, then [RE]SP is moved to [RE]BP. + UNWIND_MODE_BP_FRAME = 0x01000000, + + /// A frameless function with a small constant stack size. + UNWIND_MODE_STACK_IMMD = 0x02000000, + + /// A frameless function with a large constant stack size. + UNWIND_MODE_STACK_IND = 0x03000000, + + /// No compact unwind encoding is available. + UNWIND_MODE_DWARF = 0x04000000, + + /// Mask for encoding the frame registers. + UNWIND_BP_FRAME_REGISTERS = 0x00007FFF, + + /// Mask for encoding the frameless registers. + UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF + }; + +} // end CU namespace + +class MCSymbol; +class X86TargetMachine; class X86FrameLowering : public TargetFrameLowering { const X86TargetMachine &TM; diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 00fbe69..968b358 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -444,7 +444,9 @@ void X86DAGToDAGISel::PreprocessISelDAG() { SDNode *N = I++; // Preincrement iterator to avoid invalidation issues. if (OptLevel != CodeGenOpt::None && - (N->getOpcode() == X86ISD::CALL || + // Only do this when the target doesn't favor register indirect + // calls. + ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) || (N->getOpcode() == X86ISD::TC_RETURN && // Only does this if load can be folded into TC_RETURN.
(Subtarget->is64Bit() || @@ -1501,8 +1503,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain}; SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), - MVT::i32, MVT::i32, MVT::Other, Ops, - array_lengthof(Ops)); + MVT::i32, MVT::i32, MVT::Other, Ops); cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); return ResNode; } @@ -1718,7 +1719,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { Op = ADD; break; } - + Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val); bool isUnOp = !Val.getNode(); bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant); @@ -1770,12 +1771,10 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); if (isUnOp) { SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain }; - Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, - array_lengthof(Ops)), 0); + Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0); } else { SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain }; - Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, - array_lengthof(Ops)), 0); + Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0); } cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1); SDValue RetVals[] = { Undef, Ret }; @@ -1969,8 +1968,7 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) { SDValue Segment = CurDAG->getRegister(0, MVT::i32); const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx, Disp, Segment, VMask, Chain}; - SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), - VTs, Ops, array_lengthof(Ops)); + SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), VTs, Ops); // Node has 2 outputs: VDst and MVT::Other. // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other. 
// We replace VDst of Node with VDst of ResNode, and Other of Node with Other @@ -2184,7 +2182,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32); SDValue Ops[] = {N1, InFlag}; - SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2); + SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1)); @@ -2265,16 +2263,14 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { InFlag }; if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) { SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue); - SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops, - array_lengthof(Ops)); + SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops); ResHi = SDValue(CNode, 0); ResLo = SDValue(CNode, 1); Chain = SDValue(CNode, 2); InFlag = SDValue(CNode, 3); } else { SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue); - SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops, - array_lengthof(Ops)); + SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops); Chain = SDValue(CNode, 0); InFlag = SDValue(CNode, 1); } @@ -2285,15 +2281,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDValue Ops[] = { N1, InFlag }; if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) { SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue); - SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, - array_lengthof(Ops)); + SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); ResHi = SDValue(CNode, 0); ResLo = SDValue(CNode, 1); InFlag = SDValue(CNode, 2); } else { SDVTList VTs = CurDAG->getVTList(MVT::Glue); - SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, - array_lengthof(Ops)); + SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); InFlag = SDValue(CNode, 0); } } @@ -2341,6 +2335,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n'); } + // Propagate ordering to the last node, for now. + CurDAG->AssignOrdering(InFlag.getNode(), CurDAG->GetOrdering(Node)); + return NULL; } @@ -2407,8 +2404,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) }; Move = SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32, - MVT::Other, Ops, - array_lengthof(Ops)), 0); + MVT::Other, Ops), 0); Chain = Move.getValue(1); ReplaceUses(N0.getValue(1), Chain); } else { @@ -2439,8 +2435,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), InFlag }; SDNode *CNode = - CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops, - array_lengthof(Ops)); + CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops); InFlag = SDValue(CNode, 1); // Update the chain. 
ReplaceUses(N1.getValue(1), SDValue(CNode, 0)); @@ -2672,8 +2667,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { unsigned newOpc = getFusedLdStOpcode(LdVT, Opc); MachineSDNode *Result = CurDAG->getMachineNode(newOpc, Node->getDebugLoc(), - MVT::i32, MVT::Other, Ops, - array_lengthof(Ops)); + MVT::i32, MVT::Other, Ops); Result->setMemRefs(MemOp, MemOp + 2); ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1)); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e6858bc..b587336 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -163,10 +163,28 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) Subtarget = &TM.getSubtarget<X86Subtarget>(); X86ScalarSSEf64 = Subtarget->hasSSE2(); X86ScalarSSEf32 = Subtarget->hasSSE1(); - RegInfo = TM.getRegisterInfo(); TD = getDataLayout(); + resetOperationActions(); +} + +void X86TargetLowering::resetOperationActions() { + const TargetMachine &TM = getTargetMachine(); + static bool FirstTimeThrough = true; + + // If none of the target options have changed, then we don't need to reset the + // operation actions. + if (!FirstTimeThrough && TO == TM.Options) return; + + if (!FirstTimeThrough) { + // Reinitialize the actions. + initActions(); + FirstTimeThrough = false; + } + + TO = TM.Options; + // Set up the TargetLowering object. static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; @@ -470,7 +488,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::SETCC , MVT::i64 , Custom); } setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); - // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intened to support + // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support // SjLj exception handling but a light-weight setjmp/longjmp replacement to // support continuation, user-level threading, and etc.. As a result, no // other SjLj exception interfaces are implemented and please don't build @@ -508,16 +526,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) if (Subtarget->hasSSE1()) setOperationAction(ISD::PREFETCH , MVT::Other, Legal); - setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom); setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom); - // On X86 and X86-64, atomic operations are lowered to locked instructions. - // Locked instructions, in turn, have implicit fence semantics (all memory - // operations are flushed before issuing the locked instruction, and they - // are not buffered), so we can fold away the common pattern of - // fence-atomic-fence. - setShouldFoldAtomicFences(true); - // Expand certain atomics for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { MVT VT = IntVTs[i]; @@ -1053,23 +1063,16 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::SRA, MVT::v8i16, Custom); setOperationAction(ISD::SRA, MVT::v16i8, Custom); - if (Subtarget->hasInt256()) { - setOperationAction(ISD::SRL, MVT::v2i64, Legal); - setOperationAction(ISD::SRL, MVT::v4i32, Legal); - - setOperationAction(ISD::SHL, MVT::v2i64, Legal); - setOperationAction(ISD::SHL, MVT::v4i32, Legal); + // In the customized shift lowering, the legal cases in AVX2 will be + // recognized. 
+ setOperationAction(ISD::SRL, MVT::v2i64, Custom); + setOperationAction(ISD::SRL, MVT::v4i32, Custom); - setOperationAction(ISD::SRA, MVT::v4i32, Legal); - } else { - setOperationAction(ISD::SRL, MVT::v2i64, Custom); - setOperationAction(ISD::SRL, MVT::v4i32, Custom); + setOperationAction(ISD::SHL, MVT::v2i64, Custom); + setOperationAction(ISD::SHL, MVT::v4i32, Custom); - setOperationAction(ISD::SHL, MVT::v2i64, Custom); - setOperationAction(ISD::SHL, MVT::v4i32, Custom); + setOperationAction(ISD::SRA, MVT::v4i32, Custom); - setOperationAction(ISD::SRA, MVT::v4i32, Custom); - } setOperationAction(ISD::SDIV, MVT::v8i16, Custom); setOperationAction(ISD::SDIV, MVT::v4i32, Custom); } @@ -1118,6 +1121,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote); setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); @@ -1186,14 +1190,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); - setOperationAction(ISD::SRL, MVT::v4i64, Legal); - setOperationAction(ISD::SRL, MVT::v8i32, Legal); - - setOperationAction(ISD::SHL, MVT::v4i64, Legal); - setOperationAction(ISD::SHL, MVT::v8i32, Legal); - - setOperationAction(ISD::SRA, MVT::v8i32, Legal); - setOperationAction(ISD::SDIV, MVT::v8i32, Custom); } else { setOperationAction(ISD::ADD, MVT::v4i64, Custom); @@ -1210,15 +1206,17 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::MUL, MVT::v8i32, Custom); setOperationAction(ISD::MUL, MVT::v16i16, Custom); // Don't lower v32i8 because there is no 128-bit byte mul + } - setOperationAction(ISD::SRL, MVT::v4i64, Custom); - setOperationAction(ISD::SRL, MVT::v8i32, Custom); + // In the customized shift lowering, the legal cases in AVX2 will be + // recognized. + setOperationAction(ISD::SRL, MVT::v4i64, Custom); + setOperationAction(ISD::SRL, MVT::v8i32, Custom); - setOperationAction(ISD::SHL, MVT::v4i64, Custom); - setOperationAction(ISD::SHL, MVT::v8i32, Custom); + setOperationAction(ISD::SHL, MVT::v4i64, Custom); + setOperationAction(ISD::SHL, MVT::v8i32, Custom); - setOperationAction(ISD::SRA, MVT::v8i32, Custom); - } + setOperationAction(ISD::SRA, MVT::v8i32, Custom); // Custom lower several nodes for 256-bit types. for (int i = MVT::FIRST_VECTOR_VALUETYPE; @@ -1356,7 +1354,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4; setPrefLoopAlignment(4); // 2^4 bytes. - BenefitFromCodePlacementOpt = true; // Predictable cmov don't hurt on atom because it's in-order. PredictableSelectIsExpensive = !Subtarget->isAtom(); @@ -1679,10 +1676,11 @@ X86TargetLowering::LowerReturn(SDValue Chain, // The x86-64 ABIs require that for returning structs by value we copy // the sret argument into %rax/%eax (depending on ABI) for the return. + // Win32 requires us to put the sret argument to %eax as well. // We saved the argument into a virtual register in the entry block, // so now we copy the value out and into %rax/%eax. 
- if (Subtarget->is64Bit() && - DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { + if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() && + (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { MachineFunction &MF = DAG.getMachineFunction(); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); unsigned Reg = FuncInfo->getSRetReturnReg(); @@ -1690,12 +1688,14 @@ X86TargetLowering::LowerReturn(SDValue Chain, "SRetReturnReg should have been set in LowerFormalArguments()."); SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); - unsigned RetValReg = Subtarget->isTarget64BitILP32() ? X86::EAX : X86::RAX; + unsigned RetValReg + = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ? + X86::RAX : X86::EAX; Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag); Flag = Chain.getValue(1); // RAX/EAX now acts like a return value. - RetOps.push_back(DAG.getRegister(RetValReg, MVT::i64)); + RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy())); } RetOps[0] = Chain; // Update chain. @@ -1795,7 +1795,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; SDValue Ops[] = { Chain, InFlag }; Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, - MVT::Other, MVT::Glue, Ops, 2), 1); + MVT::Other, MVT::Glue, Ops), 1); Val = Chain.getValue(0); // Round the f80 to the right size, which also moves it to the appropriate @@ -2049,9 +2049,11 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, // The x86-64 ABIs require that for returning structs by value we copy // the sret argument into %rax/%eax (depending on ABI) for the return. + // Win32 requires us to put the sret argument to %eax as well. // Save the argument into a virtual register so that we can access it // from the return points. - if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { + if (MF.getFunction()->hasStructRetAttr() && + (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); unsigned Reg = FuncInfo->getSRetReturnReg(); if (!Reg) { @@ -4412,13 +4414,15 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, if (Subtarget->hasInt256()) { // AVX2 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; - Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, + array_lengthof(Ops)); } else { // 256-bit logic and arithmetic instructions in AVX are all // floating-point, no support for integer ops. Emit fp zeroed vectors. 
SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; - Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, + array_lengthof(Ops)); } } else llvm_unreachable("Unexpected vector type"); @@ -4439,7 +4443,8 @@ static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG, if (VT.is256BitVector()) { if (HasInt256) { // AVX2 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; - Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, + array_lengthof(Ops)); } else { // AVX Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); @@ -5109,7 +5114,8 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; SDValue ResNode = - DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64, + DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, + array_lengthof(Ops), MVT::i64, LDBase->getPointerInfo(), LDBase->getAlignment(), false/*isVolatile*/, true/*ReadMem*/, @@ -7632,10 +7638,10 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, if (InFlag) { SDValue Ops[] = { Chain, TGA, *InFlag }; - Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 3); + Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops)); } else { SDValue Ops[] = { Chain, TGA }; - Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 2); + Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops)); } // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 
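The recurring change in these lowering hunks is to stop passing hand-counted operand totals: call sites either rely on getMachineNode overloads that now deduce the operand count themselves (presumably ArrayRef-based) or compute it with array_lengthof(Ops). A minimal sketch of what that helper does (the _sketch name is made up here; LLVM's own version lives in llvm/ADT/STLExtras.h):

    #include <cstddef>

    // Deduces the element count from the array type at compile time, so the
    // count can never drift out of sync with the initializer list.
    template <class T, std::size_t N>
    inline std::size_t array_lengthof_sketch(T (&)[N]) {
      return N;
    }

    // Usage with a hypothetical operand array:
    //   int Ops[] = { 1, 2, 3 };
    //   std::size_t NumOps = array_lengthof_sketch(Ops);  // 3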
@@ -7945,7 +7951,7 @@ SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ } SDValue Ops[2] = { Lo, Hi }; - return DAG.getMergeValues(Ops, 2, dl); + return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); } SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, @@ -8228,8 +8234,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; - SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3, - MVT::i64, MMO); + SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, + array_lengthof(Ops), MVT::i64, MMO); APInt FF(32, 0x5F800000ULL); @@ -8321,8 +8327,8 @@ X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, MachineMemOperand *MMO = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), MachineMemOperand::MOLoad, MemSize, MemSize); - Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, - DstTy, MMO); + Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, + array_lengthof(Ops), DstTy, MMO); Chain = Value.getValue(1); SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); @@ -8336,7 +8342,8 @@ X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, // Build the FP_TO_INT*_IN_MEM SDValue Ops[] = { Chain, Value, StackSlot }; SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), - Ops, 3, DstTy, MMO); + Ops, array_lengthof(Ops), DstTy, + MMO); return std::make_pair(FIST, StackSlot); } else { SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL, @@ -8348,8 +8355,8 @@ X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, MVT::i32, eax.getValue(2)); SDValue Ops[] = { eax, edx }; SDValue pair = IsReplace - ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, 2) - : DAG.getMergeValues(Ops, 2, DL); + ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, array_lengthof(Ops)) + : DAG.getMergeValues(Ops, array_lengthof(Ops), DL); return std::make_pair(pair, SDValue()); } } @@ -9340,11 +9347,49 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, if (Swap) std::swap(Op0, Op1); + // Since SSE has no unsigned integer comparisons, we need to flip the sign + // bits of the inputs before performing those operations. + if (FlipSigns) { + EVT EltVT = VT.getVectorElementType(); + SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), + EltVT); + std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); + SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], + SignBits.size()); + Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); + Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); + } + // Check that the operation in question is available (most are plain SSE2, // but PCMPGTQ and PCMPEQQ have different requirements). 
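// For the SSE2-only PCMPGT case below, the 64-bit compare is assembled from
// 32-bit lane operations using the identity spelled out in the hunk:
//   x > y  ==  (hi_x > hi_y) | ((hi_x == hi_y) & (lo_x > lo_y))
// where PCMPGTD/PCMPEQD produce the lane-wise results and the shuffles with
// masks {1,1,3,3} and {0,0,2,2} (PSHUFD) broadcast the high and low halves of
// each 64-bit element.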
if (VT == MVT::v2i64) { - if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) - return SDValue(); + if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) { + assert(Subtarget->hasSSE2() && "Don't know how to lower!"); + + // First cast everything to the right type, + Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); + Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); + + // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)) + SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1); + SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1); + + // Create masks for only the low parts/high parts of the 64 bit integers. + const int MaskHi[] = { 1, 1, 3, 3 }; + const int MaskLo[] = { 0, 0, 2, 2 }; + SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi); + SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo); + SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi); + + SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo); + Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi); + + if (Invert) + Result = DAG.getNOT(dl, Result, MVT::v4i32); + + return DAG.getNode(ISD::BITCAST, dl, VT, Result); + } + if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) { // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with // pcmpeqd + pshufd + pand. @@ -9369,19 +9414,6 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, } } - // Since SSE has no unsigned integer comparisons, we need to flip the sign - // bits of the inputs before performing those operations. - if (FlipSigns) { - EVT EltVT = VT.getVectorElementType(); - SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), - EltVT); - std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); - SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], - SignBits.size()); - Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); - Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); - } - SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); // If the logical-not of the result is required, perform that now. @@ -10922,28 +10954,47 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. - // RDRAND intrinsics. + // RDRAND/RDSEED intrinsics. case Intrinsic::x86_rdrand_16: case Intrinsic::x86_rdrand_32: - case Intrinsic::x86_rdrand_64: { + case Intrinsic::x86_rdrand_64: + case Intrinsic::x86_rdseed_16: + case Intrinsic::x86_rdseed_32: + case Intrinsic::x86_rdseed_64: { + unsigned Opcode = (IntNo == Intrinsic::x86_rdseed_16 || + IntNo == Intrinsic::x86_rdseed_32 || + IntNo == Intrinsic::x86_rdseed_64) ? X86ISD::RDSEED : + X86ISD::RDRAND; // Emit the node with the right value type. SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); - SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, VTs, Op.getOperand(0)); + SDValue Result = DAG.getNode(Opcode, dl, VTs, Op.getOperand(0)); - // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise - // return the value from Rand, which is always 0, casted to i32. + // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1. + // Otherwise return the value from Rand, which is always 0, casted to i32. 
SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), DAG.getConstant(1, Op->getValueType(1)), DAG.getConstant(X86::COND_B, MVT::i32), SDValue(Result.getNode(), 1) }; SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, DAG.getVTList(Op->getValueType(1), MVT::Glue), - Ops, 4); + Ops, array_lengthof(Ops)); // Return { result, isValid, chain }. return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, SDValue(Result.getNode(), 2)); } + + // XTEST intrinsics. + case Intrinsic::x86_xtest: { + SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other); + SDValue InTrans = DAG.getNode(X86ISD::XTEST, dl, VTs, Op.getOperand(0)); + SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, + DAG.getConstant(X86::COND_NE, MVT::i8), + InTrans); + SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC); + return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), + Ret, SDValue(InTrans.getNode(), 1)); + } } } @@ -10979,7 +11030,10 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; + unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); + assert(((FrameReg == X86::RBP && VT == MVT::i64) || + (FrameReg == X86::EBP && VT == MVT::i32)) && + "Invalid Frame Register!"); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); while (Depth--) FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, @@ -10999,21 +11053,23 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue Handler = Op.getOperand(2); DebugLoc dl = Op.getDebugLoc(); - SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, - Subtarget->is64Bit() ? X86::RBP : X86::EBP, - getPointerTy()); - unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX); - - SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, - DAG.getIntPtrConstant(RegInfo->getSlotSize())); - StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); + EVT PtrVT = getPointerTy(); + unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); + assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) || + (FrameReg == X86::EBP && PtrVT == MVT::i32)) && + "Invalid Frame Register!"); + SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT); + unsigned StoreAddrReg = (PtrVT == MVT::i64) ? 
X86::RCX : X86::ECX; + + SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame, + DAG.getIntPtrConstant(RegInfo->getSlotSize())); + StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset); Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), false, false, 0); Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); - return DAG.getNode(X86ISD::EH_RETURN, dl, - MVT::Other, - Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); + return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain, + DAG.getRegister(StoreAddrReg, PtrVT)); } SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, @@ -11224,7 +11280,8 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), - Ops, 2, MVT::i16, MMO); + Ops, array_lengthof(Ops), MVT::i16, + MMO); // Load FP Control Word from stack slot SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, @@ -11491,16 +11548,13 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { return SDValue(); } -SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { - +static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, + const X86Subtarget *Subtarget) { EVT VT = Op.getValueType(); DebugLoc dl = Op.getDebugLoc(); SDValue R = Op.getOperand(0); SDValue Amt = Op.getOperand(1); - if (!Subtarget->hasSSE2()) - return SDValue(); - // Optimize shl/srl/sra with constant shift amount. if (isSplatVector(Amt.getNode())) { SDValue SclrAmt = Amt->getOperand(0); @@ -11611,6 +11665,224 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { } } + // Special case in 32-bit mode, where i64 is expanded into high and low parts. + if (!Subtarget->is64Bit() && + (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) && + Amt.getOpcode() == ISD::BITCAST && + Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { + Amt = Amt.getOperand(0); + unsigned Ratio = Amt.getValueType().getVectorNumElements() / + VT.getVectorNumElements(); + unsigned RatioInLog2 = Log2_32_Ceil(Ratio); + uint64_t ShiftAmt = 0; + for (unsigned i = 0; i != Ratio; ++i) { + ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i)); + if (C == 0) + return SDValue(); + // 6 == Log2(64) + ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2))); + } + // Check remaining shift amounts. 
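// The first 64-bit lane's sub-element amounts were packed into ShiftAmt above
// (each sub-element contributes 64/Ratio bits, hence the "6 == Log2(64)"
// shifts); every following group of Ratio operands must pack to the same
// value, i.e. the requested shift must be a splat across the wide lanes, or
// we give up and return SDValue().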
+ for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { + uint64_t ShAmt = 0; + for (unsigned j = 0; j != Ratio; ++j) { + ConstantSDNode *C = + dyn_cast<ConstantSDNode>(Amt.getOperand(i + j)); + if (C == 0) + return SDValue(); + // 6 == Log2(64) + ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2))); + } + if (ShAmt != ShiftAmt) + return SDValue(); + } + switch (Op.getOpcode()) { + default: + llvm_unreachable("Unknown shift opcode!"); + case ISD::SHL: + return DAG.getNode(X86ISD::VSHLI, dl, VT, R, + DAG.getConstant(ShiftAmt, MVT::i32)); + case ISD::SRL: + return DAG.getNode(X86ISD::VSRLI, dl, VT, R, + DAG.getConstant(ShiftAmt, MVT::i32)); + case ISD::SRA: + return DAG.getNode(X86ISD::VSRAI, dl, VT, R, + DAG.getConstant(ShiftAmt, MVT::i32)); + } + } + + return SDValue(); +} + +static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, + const X86Subtarget* Subtarget) { + EVT VT = Op.getValueType(); + DebugLoc dl = Op.getDebugLoc(); + SDValue R = Op.getOperand(0); + SDValue Amt = Op.getOperand(1); + + if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) || + VT == MVT::v4i32 || VT == MVT::v8i16 || + (Subtarget->hasInt256() && + ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) || + VT == MVT::v8i32 || VT == MVT::v16i16))) { + SDValue BaseShAmt; + EVT EltVT = VT.getVectorElementType(); + + if (Amt.getOpcode() == ISD::BUILD_VECTOR) { + unsigned NumElts = VT.getVectorNumElements(); + unsigned i, j; + for (i = 0; i != NumElts; ++i) { + if (Amt.getOperand(i).getOpcode() == ISD::UNDEF) + continue; + break; + } + for (j = i; j != NumElts; ++j) { + SDValue Arg = Amt.getOperand(j); + if (Arg.getOpcode() == ISD::UNDEF) continue; + if (Arg != Amt.getOperand(i)) + break; + } + if (i != NumElts && j == NumElts) + BaseShAmt = Amt.getOperand(i); + } else { + if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR) + Amt = Amt.getOperand(0); + if (Amt.getOpcode() == ISD::VECTOR_SHUFFLE && + cast<ShuffleVectorSDNode>(Amt)->isSplat()) { + SDValue InVec = Amt.getOperand(0); + if (InVec.getOpcode() == ISD::BUILD_VECTOR) { + unsigned NumElts = InVec.getValueType().getVectorNumElements(); + unsigned i = 0; + for (; i != NumElts; ++i) { + SDValue Arg = InVec.getOperand(i); + if (Arg.getOpcode() == ISD::UNDEF) continue; + BaseShAmt = Arg; + break; + } + } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { + if (ConstantSDNode *C = + dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { + unsigned SplatIdx = + cast<ShuffleVectorSDNode>(Amt)->getSplatIndex(); + if (C->getZExtValue() == SplatIdx) + BaseShAmt = InVec.getOperand(1); + } + } + if (BaseShAmt.getNode() == 0) + BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Amt, + DAG.getIntPtrConstant(0)); + } + } + + if (BaseShAmt.getNode()) { + if (EltVT.bitsGT(MVT::i32)) + BaseShAmt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BaseShAmt); + else if (EltVT.bitsLT(MVT::i32)) + BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt); + + switch (Op.getOpcode()) { + default: + llvm_unreachable("Unknown shift opcode!"); + case ISD::SHL: + switch (VT.getSimpleVT().SimpleTy) { + default: return SDValue(); + case MVT::v2i64: + case MVT::v4i32: + case MVT::v8i16: + case MVT::v4i64: + case MVT::v8i32: + case MVT::v16i16: + return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG); + } + case ISD::SRA: + switch (VT.getSimpleVT().SimpleTy) { + default: return SDValue(); + case MVT::v4i32: + case MVT::v8i16: + case MVT::v8i32: + case MVT::v16i16: + return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG); + } + case 
ISD::SRL: + switch (VT.getSimpleVT().SimpleTy) { + default: return SDValue(); + case MVT::v2i64: + case MVT::v4i32: + case MVT::v8i16: + case MVT::v4i64: + case MVT::v8i32: + case MVT::v16i16: + return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG); + } + } + } + } + + // Special case in 32-bit mode, where i64 is expanded into high and low parts. + if (!Subtarget->is64Bit() && + (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) && + Amt.getOpcode() == ISD::BITCAST && + Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { + Amt = Amt.getOperand(0); + unsigned Ratio = Amt.getValueType().getVectorNumElements() / + VT.getVectorNumElements(); + std::vector<SDValue> Vals(Ratio); + for (unsigned i = 0; i != Ratio; ++i) + Vals[i] = Amt.getOperand(i); + for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { + for (unsigned j = 0; j != Ratio; ++j) + if (Vals[j] != Amt.getOperand(i + j)) + return SDValue(); + } + switch (Op.getOpcode()) { + default: + llvm_unreachable("Unknown shift opcode!"); + case ISD::SHL: + return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1)); + case ISD::SRL: + return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1)); + case ISD::SRA: + return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1)); + } + } + + return SDValue(); +} + +SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { + + EVT VT = Op.getValueType(); + DebugLoc dl = Op.getDebugLoc(); + SDValue R = Op.getOperand(0); + SDValue Amt = Op.getOperand(1); + SDValue V; + + if (!Subtarget->hasSSE2()) + return SDValue(); + + V = LowerScalarImmediateShift(Op, DAG, Subtarget); + if (V.getNode()) + return V; + + V = LowerScalarVariableShift(Op, DAG, Subtarget); + if (V.getNode()) + return V; + + // AVX2 has VPSLLV/VPSRAV/VPSRLV. + if (Subtarget->hasInt256()) { + if (Op.getOpcode() == ISD::SRL && + (VT == MVT::v2i64 || VT == MVT::v4i32 || + VT == MVT::v4i64 || VT == MVT::v8i32)) + return Op; + if (Op.getOpcode() == ISD::SHL && + (VT == MVT::v2i64 || VT == MVT::v4i32 || + VT == MVT::v4i64 || VT == MVT::v8i32)) + return Op; + if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32)) + return Op; + } + // Lower SHL with variable shift amount. if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT)); @@ -11827,59 +12099,28 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, // fall through case MVT::v4i32: case MVT::v8i16: { - SDValue Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, - Op.getOperand(0), ShAmt, DAG); + // (sext (vzext x)) -> (vsext x) + SDValue Op0 = Op.getOperand(0); + SDValue Op00 = Op0.getOperand(0); + SDValue Tmp1; + // Hopefully, this VECTOR_SHUFFLE is just a VZEXT. + if (Op0.getOpcode() == ISD::BITCAST && + Op00.getOpcode() == ISD::VECTOR_SHUFFLE) + Tmp1 = LowerVectorIntExtend(Op00, DAG); + if (Tmp1.getNode()) { + SDValue Tmp1Op0 = Tmp1.getOperand(0); + assert(Tmp1Op0.getOpcode() == X86ISD::VZEXT && + "This optimization is invalid without a VZEXT."); + return DAG.getNode(X86ISD::VSEXT, dl, VT, Tmp1Op0.getOperand(0)); + } + + // If the above didn't work, then just use Shift-Left + Shift-Right. 
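// That is, the sign_extend_inreg is lowered as a vector shift left by ShAmt
// followed by an arithmetic shift right by the same amount, which pushes the
// narrow value to the top of each element and then smears its sign bit back
// down across the element.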
+ Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, Op0, ShAmt, DAG); return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); } } } -static SDValue LowerMEMBARRIER(SDValue Op, const X86Subtarget *Subtarget, - SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); - - // Go ahead and emit the fence on x86-64 even if we asked for no-sse2. - // There isn't any reason to disable it if the target processor supports it. - if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) { - SDValue Chain = Op.getOperand(0); - SDValue Zero = DAG.getConstant(0, MVT::i32); - SDValue Ops[] = { - DAG.getRegister(X86::ESP, MVT::i32), // Base - DAG.getTargetConstant(1, MVT::i8), // Scale - DAG.getRegister(0, MVT::i32), // Index - DAG.getTargetConstant(0, MVT::i32), // Disp - DAG.getRegister(0, MVT::i32), // Segment. - Zero, - Chain - }; - SDNode *Res = - DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, - array_lengthof(Ops)); - return SDValue(Res, 0); - } - - unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); - if (!isDev) - return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); - - unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); - unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); - unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); - unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); - - // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; - if (!Op1 && !Op2 && !Op3 && Op4) - return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); - - // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; - if (Op1 && !Op2 && !Op3 && !Op4) - return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); - - // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), - // (MFENCE)>; - return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); -} - static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { DebugLoc dl = Op.getDebugLoc(); @@ -11908,9 +12149,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, Zero, Chain }; - SDNode *Res = - DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, - array_lengthof(Ops)); + SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops); return SDValue(Res, 0); } @@ -11944,7 +12183,7 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, - Ops, 5, T, MMO); + Ops, array_lengthof(Ops), T, MMO); SDValue cpOut = DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); return cpOut; @@ -11966,7 +12205,7 @@ static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), rdx.getValue(1) }; - return DAG.getMergeValues(Ops, 2, dl); + return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); } SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { @@ -12060,7 +12299,8 @@ SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit()); // For MacOSX, we want to call an alternative entry point: __sincos_stret, - // which returns the values in two XMM registers. 
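The shift-left then arithmetic-shift-right fallback used just above for SIGN_EXTEND_INREG is the classic in-register sign-extension trick. A minimal standalone C++ sketch of the same idea on a single 32-bit lane (illustrative only; the helper name and lane width are assumptions, and it relies on the usual two's-complement arithmetic right shift, not on the SelectionDAG code itself):

#include <cassert>
#include <cstdint>

// Sign-extend the low FromBits bits of a 32-bit lane: shift the narrow value
// up so its top bit becomes the lane's sign bit, then shift back arithmetically.
static int32_t sign_extend_inreg(uint32_t Lane, unsigned FromBits) {
  unsigned ShAmt = 32 - FromBits;
  return (int32_t)(Lane << ShAmt) >> ShAmt;
}

int main() {
  assert(sign_extend_inreg(0x0000FFFFu, 16) == -1);     // low 16 bits hold -1
  assert(sign_extend_inreg(0x00007FFFu, 16) == 0x7FFF); // positive value unchanged
  assert(sign_extend_inreg(0x00000080u, 8)  == -128);   // i8 -128 widened
  return 0;
}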
+ // which returns the values as { float, float } (in XMM0) or + // { double, double } (which is returned in XMM0, XMM1). DebugLoc dl = Op.getDebugLoc(); SDValue Arg = Op.getOperand(0); EVT ArgVT = Arg.getValueType(); @@ -12075,14 +12315,16 @@ SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { Entry.isZExt = false; Args.push_back(Entry); + bool isF64 = ArgVT == MVT::f64; // Only optimize x86_64 for now. i386 is a bit messy. For f32, // the small struct {f32, f32} is returned in (eax, edx). For f64, // the results are returned via SRet in memory. - const char *LibcallName = (ArgVT == MVT::f64) - ? "__sincos_stret" : "__sincosf_stret"; + const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret"; SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy()); - StructType *RetTy = StructType::get(ArgTy, ArgTy, NULL); + Type *RetTy = isF64 + ? (Type*)StructType::get(ArgTy, ArgTy, NULL) + : (Type*)VectorType::get(ArgTy, 4); TargetLowering:: CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, false, false, false, false, 0, @@ -12090,7 +12332,18 @@ SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { /*doesNotRet=*/false, /*isReturnValueUsed*/true, Callee, Args, DAG, dl); std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); - return CallResult.first; + + if (isF64) + // Returned in xmm0 and xmm1. + return CallResult.first; + + // Returned in bits 0:31 and 32:64 xmm0. + SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, + CallResult.first, DAG.getIntPtrConstant(0)); + SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, + CallResult.first, DAG.getIntPtrConstant(1)); + SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); + return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal); } /// LowerOperation - Provide custom lowering hooks for some operations. @@ -12099,7 +12352,6 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { default: llvm_unreachable("Should not custom lower this!"); case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); - case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, Subtarget, DAG); case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG); case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG); case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); @@ -12216,7 +12468,7 @@ ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, SDValue Ops[] = { Chain, In1, In2L, In2H }; SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); SDValue Result = - DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, + DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, array_lengthof(Ops), MVT::i64, cast<MemSDNode>(Node)->getMemOperand()); SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); @@ -12296,7 +12548,8 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, eax.getValue(2)); // Use a buildpair to merge the two 32-bit values into a 64-bit one. SDValue Ops[] = { eax, edx }; - Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); + Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, + array_lengthof(Ops))); Results.push_back(edx.getValue(1)); return; } @@ -12335,7 +12588,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, unsigned Opcode = Regs64bit ? 
X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG; SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, - Ops, 3, T, MMO); + Ops, array_lengthof(Ops), T, MMO); SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, Regs64bit ? X86::RAX : X86::EAX, HalfT, Result.getValue(1)); @@ -12547,6 +12800,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; case X86ISD::SAHF: return "X86ISD::SAHF"; case X86ISD::RDRAND: return "X86ISD::RDRAND"; + case X86ISD::RDSEED: return "X86ISD::RDSEED"; case X86ISD::FMADD: return "X86ISD::FMADD"; case X86ISD::FMSUB: return "X86ISD::FMSUB"; case X86ISD::FNMADD: return "X86ISD::FNMADD"; @@ -12555,6 +12809,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI"; case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI"; + case X86ISD::XTEST: return "X86ISD::XTEST"; } } @@ -14820,7 +15075,8 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; SDValue ResNode = - DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, + DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, + array_lengthof(Ops), Ld->getMemoryVT(), Ld->getPointerInfo(), Ld->getAlignment(), @@ -15512,6 +15768,51 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, if (unsigned Op = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget)) return DAG.getNode(Op, DL, N->getValueType(0), LHS, RHS); + // Simplify vector selection if the selector will be produced by CMPP*/PCMP*. + if (!DCI.isBeforeLegalize() && N->getOpcode() == ISD::VSELECT && + Cond.getOpcode() == ISD::SETCC) { + + assert(Cond.getValueType().isVector() && + "vector select expects a vector selector!"); + + EVT IntVT = Cond.getValueType(); + bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode()); + bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); + + if (!TValIsAllOnes && !FValIsAllZeros) { + // Try invert the condition if true value is not all 1s and false value + // is not all 0s. + bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode()); + bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode()); + + if (TValIsAllZeros || FValIsAllOnes) { + SDValue CC = Cond.getOperand(2); + ISD::CondCode NewCC = + ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), + Cond.getOperand(0).getValueType().isInteger()); + Cond = DAG.getSetCC(DL, IntVT, Cond.getOperand(0), Cond.getOperand(1), NewCC); + std::swap(LHS, RHS); + TValIsAllOnes = FValIsAllOnes; + FValIsAllZeros = TValIsAllZeros; + } + } + + if (TValIsAllOnes || FValIsAllZeros) { + SDValue Ret; + + if (TValIsAllOnes && FValIsAllZeros) + Ret = Cond; + else if (TValIsAllOnes) + Ret = DAG.getNode(ISD::OR, DL, IntVT, Cond, + DAG.getNode(ISD::BITCAST, DL, IntVT, RHS)); + else if (FValIsAllZeros) + Ret = DAG.getNode(ISD::AND, DL, IntVT, Cond, + DAG.getNode(ISD::BITCAST, DL, IntVT, LHS)); + + return DAG.getNode(ISD::BITCAST, DL, VT, Ret); + } + } + // If we know that this node is legal then we know that it is going to be // matched by one of the SSE/AVX BLEND instructions. These instructions only // depend on the highest bit in each word. 
Try to use SimplifyDemandedBits @@ -15572,6 +15873,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { SDValue SetCC; const ConstantSDNode* C = 0; bool needOppositeCond = (CC == X86::COND_E); + bool checkAgainstTrue = false; // Is it a comparison against 1? if ((C = dyn_cast<ConstantSDNode>(Op1))) SetCC = Op2; @@ -15580,17 +15882,46 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { else // Quit if all operands are not constants. return SDValue(); - if (C->getZExtValue() == 1) + if (C->getZExtValue() == 1) { needOppositeCond = !needOppositeCond; - else if (C->getZExtValue() != 0) + checkAgainstTrue = true; + } else if (C->getZExtValue() != 0) // Quit if the constant is neither 0 or 1. return SDValue(); - // Skip 'zext' node. - if (SetCC.getOpcode() == ISD::ZERO_EXTEND) - SetCC = SetCC.getOperand(0); + bool truncatedToBoolWithAnd = false; + // Skip (zext $x), (trunc $x), or (and $x, 1) node. + while (SetCC.getOpcode() == ISD::ZERO_EXTEND || + SetCC.getOpcode() == ISD::TRUNCATE || + SetCC.getOpcode() == ISD::AND) { + if (SetCC.getOpcode() == ISD::AND) { + int OpIdx = -1; + ConstantSDNode *CS; + if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) && + CS->getZExtValue() == 1) + OpIdx = 1; + if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) && + CS->getZExtValue() == 1) + OpIdx = 0; + if (OpIdx == -1) + break; + SetCC = SetCC.getOperand(OpIdx); + truncatedToBoolWithAnd = true; + } else + SetCC = SetCC.getOperand(0); + } switch (SetCC.getOpcode()) { + case X86ISD::SETCC_CARRY: + // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to + // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1, + // i.e. it's a comparison against true but the result of SETCC_CARRY is not + // truncated to i1 using 'and'. + if (checkAgainstTrue && !truncatedToBoolWithAnd) + break; + assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B && + "Invalid use of SETCC_CARRY!"); + // FALL THROUGH case X86ISD::SETCC: // Set the condition code or opposite one if necessary. CC = X86::CondCode(SetCC.getConstantOperandVal(0)); @@ -15606,9 +15937,15 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { return SDValue(); // Quit if false value is not a constant. if (!FVal) { - // A special case for rdrand, where 0 is set if false cond is found. SDValue Op = SetCC.getOperand(0); - if (Op.getOpcode() != X86ISD::RDRAND) + // Skip 'zext' or 'trunc' node. + if (Op.getOpcode() == ISD::ZERO_EXTEND || + Op.getOpcode() == ISD::TRUNCATE) + Op = Op.getOperand(0); + // A special case for rdrand/rdseed, where 0 is set if false cond is + // found. + if ((Op.getOpcode() != X86ISD::RDRAND && + Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0) return SDValue(); } // Quit if false value is not the constant 0 or 1. @@ -15920,124 +16257,12 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - EVT VT = N->getValueType(0); if (N->getOpcode() == ISD::SHL) { SDValue V = PerformSHLCombine(N, DAG); if (V.getNode()) return V; } - // On X86 with SSE2 support, we can transform this to a vector shift if - // all elements are shifted by the same amount. We can't do this in legalize - // because the a constant vector is typically transformed to a constant pool - // so we have no knowledge of the shift amount. 
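The vselect simplification added in PerformSELECTCombine a little further up (condition produced by a vector setcc, true value all-ones and/or false value all-zeros) rests on a per-lane identity that can be checked in plain C++. A small sketch, not the DAG combine itself; the lane width and names are assumptions:

#include <cassert>
#include <cstdint>

// Reference semantics of one lane of a vector select whose condition is an
// all-zeros / all-ones mask, as produced by CMPP*/PCMP*.
static uint32_t select_lane(uint32_t Mask, uint32_t TVal, uint32_t FVal) {
  return (Mask & TVal) | (~Mask & FVal);
}

int main() {
  const uint32_t T = 0xDEADBEEFu, F = 0x12345678u;
  for (uint32_t Mask : {0u, ~0u}) {
    assert(select_lane(Mask, ~0u, 0u) == Mask);        // select(c, -1, 0) -> c
    assert(select_lane(Mask, ~0u, F)  == (Mask | F));  // select(c, -1, f) -> or(c, f)
    assert(select_lane(Mask, T,   0u) == (Mask & T));  // select(c, t, 0)  -> and(c, t)
  }
  return 0;
}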
- if (!Subtarget->hasSSE2()) - return SDValue(); - - if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && - (!Subtarget->hasInt256() || - (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) - return SDValue(); - - SDValue ShAmtOp = N->getOperand(1); - EVT EltVT = VT.getVectorElementType(); - DebugLoc DL = N->getDebugLoc(); - SDValue BaseShAmt = SDValue(); - if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) { - unsigned NumElts = VT.getVectorNumElements(); - unsigned i = 0; - for (; i != NumElts; ++i) { - SDValue Arg = ShAmtOp.getOperand(i); - if (Arg.getOpcode() == ISD::UNDEF) continue; - BaseShAmt = Arg; - break; - } - // Handle the case where the build_vector is all undef - // FIXME: Should DAG allow this? - if (i == NumElts) - return SDValue(); - - for (; i != NumElts; ++i) { - SDValue Arg = ShAmtOp.getOperand(i); - if (Arg.getOpcode() == ISD::UNDEF) continue; - if (Arg != BaseShAmt) { - return SDValue(); - } - } - } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && - cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { - SDValue InVec = ShAmtOp.getOperand(0); - if (InVec.getOpcode() == ISD::BUILD_VECTOR) { - unsigned NumElts = InVec.getValueType().getVectorNumElements(); - unsigned i = 0; - for (; i != NumElts; ++i) { - SDValue Arg = InVec.getOperand(i); - if (Arg.getOpcode() == ISD::UNDEF) continue; - BaseShAmt = Arg; - break; - } - } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { - if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { - unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); - if (C->getZExtValue() == SplatIdx) - BaseShAmt = InVec.getOperand(1); - } - } - if (BaseShAmt.getNode() == 0) { - // Don't create instructions with illegal types after legalize - // types has run. - if (!DAG.getTargetLoweringInfo().isTypeLegal(EltVT) && - !DCI.isBeforeLegalize()) - return SDValue(); - - BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, - DAG.getIntPtrConstant(0)); - } - } else - return SDValue(); - - // The shift amount is an i32. - if (EltVT.bitsGT(MVT::i32)) - BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); - else if (EltVT.bitsLT(MVT::i32)) - BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); - - // The shift amount is identical so we can do a vector shift. - SDValue ValOp = N->getOperand(0); - switch (N->getOpcode()) { - default: - llvm_unreachable("Unknown shift opcode!"); - case ISD::SHL: - switch (VT.getSimpleVT().SimpleTy) { - default: return SDValue(); - case MVT::v2i64: - case MVT::v4i32: - case MVT::v8i16: - case MVT::v4i64: - case MVT::v8i32: - case MVT::v16i16: - return getTargetVShiftNode(X86ISD::VSHLI, DL, VT, ValOp, BaseShAmt, DAG); - } - case ISD::SRA: - switch (VT.getSimpleVT().SimpleTy) { - default: return SDValue(); - case MVT::v4i32: - case MVT::v8i16: - case MVT::v8i32: - case MVT::v16i16: - return getTargetVShiftNode(X86ISD::VSRAI, DL, VT, ValOp, BaseShAmt, DAG); - } - case ISD::SRL: - switch (VT.getSimpleVT().SimpleTy) { - default: return SDValue(); - case MVT::v2i64: - case MVT::v4i32: - case MVT::v8i16: - case MVT::v4i64: - case MVT::v8i32: - case MVT::v16i16: - return getTargetVShiftNode(X86ISD::VSRLI, DL, VT, ValOp, BaseShAmt, DAG); - } - } + return SDValue(); } // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) @@ -16348,13 +16573,19 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, // Validate that the Mask operand is a vector sra node. 
// FIXME: what to do for bytes, since there is a psignb/pblendvb, but // there is no psrai.b - if (Mask.getOpcode() != X86ISD::VSRAI) - return SDValue(); - - // Check that the SRA is all signbits. - SDValue SraC = Mask.getOperand(1); - unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); + unsigned SraAmt = ~0; + if (Mask.getOpcode() == ISD::SRA) { + SDValue Amt = Mask.getOperand(1); + if (isSplatVector(Amt.getNode())) { + SDValue SclrAmt = Amt->getOperand(0); + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) + SraAmt = C->getZExtValue(); + } + } else if (Mask.getOpcode() == X86ISD::VSRAI) { + SDValue SraC = Mask.getOperand(1); + SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); + } if ((SraAmt + 1) != EltBits) return SDValue(); @@ -16528,11 +16759,10 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI = DAG.getTargetLoweringInfo(); unsigned RegSz = RegVT.getSizeInBits(); + // On Sandybridge unaligned 256bit loads are inefficient. ISD::LoadExtType Ext = Ld->getExtensionType(); unsigned Alignment = Ld->getAlignment(); - bool IsAligned = Alignment == 0 || Alignment == MemVT.getSizeInBits()/8; - - // On Sandybridge unaligned 256bit loads are inefficient. + bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8; if (RegVT.is256BitVector() && !Subtarget->hasInt256() && !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) { unsigned NumElems = RegVT.getVectorNumElements(); @@ -16552,7 +16782,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(), - std::max(Alignment/2U, 1U)); + std::min(16U, Alignment)); SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Load1.getValue(1), Load2.getValue(1)); @@ -16723,13 +16953,13 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, DebugLoc dl = St->getDebugLoc(); SDValue StoredVal = St->getOperand(1); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - unsigned Alignment = St->getAlignment(); - bool IsAligned = Alignment == 0 || Alignment == VT.getSizeInBits()/8; // If we are saving a concatenation of two XMM registers, perform two stores. // On Sandy Bridge, 256-bit memory operations are executed by two // 128-bit ports. However, on Haswell it is better to issue a single 256-bit // memory operation. + unsigned Alignment = St->getAlignment(); + bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8; if (VT.is256BitVector() && !Subtarget->hasInt256() && StVT == VT && !IsAligned) { unsigned NumElems = VT.getVectorNumElements(); @@ -16749,7 +16979,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, St->getPointerInfo(), St->isVolatile(), St->isNonTemporal(), - std::max(Alignment/2U, 1U)); + std::min(16U, Alignment)); return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); } diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index da1dad0..2727e22 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -356,10 +356,17 @@ namespace llvm { // RDRAND - Get a random integer and indicate whether it is valid in CF. RDRAND, + // RDSEED - Get a NIST SP800-90B & C compliant random integer and + // indicate whether it is valid in CF. 
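The PerformLOADCombine/PerformSTORECombine hunks above change the alignment recorded for the second 128-bit half of a split 256-bit access from max(Alignment/2, 1) to min(16, Alignment). A standalone check of why min(16, Alignment) is the right guarantee for a pointer advanced by 16 bytes (illustrative only; the old formula over-claims for 64-byte-aligned accesses and under-claims for alignments below 16):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Largest power of two dividing Addr (Addr != 0).
static unsigned alignment_of(uint64_t Addr) {
  unsigned Align = 1;
  while (Addr % (2ull * Align) == 0)
    Align *= 2;
  return Align;
}

int main() {
  for (unsigned A : {4u, 8u, 32u, 64u}) {
    uint64_t Base = 3ull * A;  // an address aligned to exactly A
    // The high half lives 16 bytes further on.
    assert(alignment_of(Base + 16) == std::min(16u, A));
  }
  return 0;
}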
+ RDSEED, + // PCMP*STRI PCMPISTRI, PCMPESTRI, + // XTEST - Test if in transactional execution. + XTEST, + // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG, // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG - // Atomic 64-bit binary operations. @@ -716,6 +723,9 @@ namespace llvm { SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot, SelectionDAG &DAG) const; + /// \brief Reset the operation actions based on target options. + virtual void resetOperationActions(); + protected: std::pair<const TargetRegisterClass*, uint8_t> findRepresentativeClass(MVT VT) const; @@ -727,6 +737,10 @@ namespace llvm { const X86RegisterInfo *RegInfo; const DataLayout *TD; + /// Used to store the TargetOptions so that we don't waste time resetting + /// the operation actions unless we have to. + TargetOptions TO; + /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 /// floating point ops. /// When SSE is available, use it for f32 operations. diff --git a/lib/Target/X86/X86Instr3DNow.td b/lib/Target/X86/X86Instr3DNow.td index bb362f5..ba1aede 100644 --- a/lib/Target/X86/X86Instr3DNow.td +++ b/lib/Target/X86/X86Instr3DNow.td @@ -84,13 +84,16 @@ defm PI2FD : I3DNow_conv_rm_int<0x0D, "pi2fd">; defm PMULHRW : I3DNow_binop_rm_int<0xB7, "pmulhrw">; -def FEMMS : I3DNow<0x0E, RawFrm, (outs), (ins), "femms", [(int_x86_mmx_femms)]>; +def FEMMS : I3DNow<0x0E, RawFrm, (outs), (ins), "femms", + [(int_x86_mmx_femms)]>; -def PREFETCH : I3DNow<0x0D, MRM0m, (outs), (ins i32mem:$addr), - "prefetch\t$addr", []>; +def PREFETCH : I3DNow<0x0D, MRM0m, (outs), (ins i8mem:$addr), + "prefetch\t$addr", + [(prefetch addr:$addr, (i32 0), imm, (i32 1))]>; -def PREFETCHW : I3DNow<0x0D, MRM1m, (outs), (ins i16mem:$addr), - "prefetchw\t$addr", []>; +def PREFETCHW : I<0x0D, MRM1m, (outs), (ins i8mem:$addr), "prefetchw\t$addr", + [(prefetch addr:$addr, (i32 1), (i32 3), (i32 1))]>, TB, + Requires<[HasPrefetchW]>; // "3DNowA" instructions defm PF2IW : I3DNow_conv_rm_int<0x1C, "pf2iw", "a">; diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td index f406416..225e972 100644 --- a/lib/Target/X86/X86InstrArithmetic.td +++ b/lib/Target/X86/X86InstrArithmetic.td @@ -932,7 +932,8 @@ class BinOpMI8<string mnemonic, X86TypeInfo typeinfo, Format f, list<dag> pattern> : ITy<0x82, f, typeinfo, (outs), (ins typeinfo.MemOperand:$dst, typeinfo.Imm8Operand:$src), - mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM> { + mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM>, + Sched<[WriteALULd, WriteRMW]> { let ImmT = Imm8; // Always 8-bit immediate. } @@ -964,7 +965,7 @@ class BinOpAI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo, Register areg, string operands> : ITy<opcode, RawFrm, typeinfo, (outs), (ins typeinfo.ImmOperand:$src), - mnemonic, operands, []> { + mnemonic, operands, []>, Sched<[WriteALU]> { let ImmT = typeinfo.ImmEncoding; let Uses = [areg]; let Defs = [areg]; @@ -1250,7 +1251,7 @@ let isCompare = 1, Defs = [EFLAGS] in { // register class is constrained to GR8_NOREX. 
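The new PREFETCH/PREFETCHW patterns above match the prefetch node with operands (address, rw, locality, cache type); PREFETCHW is the rw = 1 form. As a rough source-level sketch only (this is the GCC/Clang builtin, not the TableGen semantics, and the loop is a made-up example):

#include <cstddef>

// Prefetch a later element for write before updating the current one;
// rw = 1 and locality = 3 mirror the operand values PREFETCHW matches above.
void bump_all(int *Data, size_t N) {
  for (size_t I = 0; I < N; ++I) {
    if (I + 16 < N)
      __builtin_prefetch(&Data[I + 16], /*rw=*/1, /*locality=*/3);
    Data[I] += 1;
  }
}

int main() {
  int Buf[256] = {};
  bump_all(Buf, 256);
  return Buf[0] - 1; // 0 on success
}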
let isPseudo = 1 in def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask), - "", [], IIC_BIN_NONMEM>; + "", [], IIC_BIN_NONMEM>, Sched<[WriteALU]>; } //===----------------------------------------------------------------------===// @@ -1293,12 +1294,12 @@ let neverHasSideEffects = 1 in { let isCommutable = 1 in def rr : I<0xF6, MRMSrcReg, (outs RC:$dst1, RC:$dst2), (ins RC:$src), !strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"), - [], IIC_MUL8>, T8XD, VEX_4V; + [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMul]>; let mayLoad = 1 in def rm : I<0xF6, MRMSrcMem, (outs RC:$dst1, RC:$dst2), (ins x86memop:$src), !strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"), - [], IIC_MUL8>, T8XD, VEX_4V; + [], IIC_MUL8>, T8XD, VEX_4V, Sched<[WriteIMulLd]>; } } @@ -1313,6 +1314,7 @@ let Predicates = [HasBMI2] in { // ADCX Instruction // let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in { + let SchedRW = [WriteALU] in { def ADCX32rr : I<0xF6, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "adcx{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_NONMEM>, T8, OpSize; @@ -1320,8 +1322,9 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in { def ADCX64rr : I<0xF6, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "adcx{q}\t{$src, $dst|$dst, $src}", [], IIC_BIN_NONMEM>, T8, OpSize, REX_W, Requires<[In64BitMode]>; + } // SchedRW - let mayLoad = 1 in { + let mayLoad = 1, SchedRW = [WriteALULd] in { def ADCX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "adcx{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8, OpSize; @@ -1336,6 +1339,7 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in { // ADOX Instruction // let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in { + let SchedRW = [WriteALU] in { def ADOX32rr : I<0xF6, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "adox{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_NONMEM>, T8XS; @@ -1343,8 +1347,9 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in { def ADOX64rr : I<0xF6, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "adox{q}\t{$src, $dst|$dst, $src}", [], IIC_BIN_NONMEM>, T8XS, REX_W, Requires<[In64BitMode]>; + } // SchedRW - let mayLoad = 1 in { + let mayLoad = 1, SchedRW = [WriteALULd] in { def ADOX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "adox{l}\t{$src, $dst|$dst, $src}", [], IIC_BIN_MEM>, T8XS; diff --git a/lib/Target/X86/X86InstrCMovSetCC.td b/lib/Target/X86/X86InstrCMovSetCC.td index 8f2d0a1..a967a4d 100644 --- a/lib/Target/X86/X86InstrCMovSetCC.td +++ b/lib/Target/X86/X86InstrCMovSetCC.td @@ -16,7 +16,7 @@ // SetCC instructions. 
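The ADCX/ADOX definitions above only gain scheduling classes here, but for context they exist to serve carry chains in multi-precision arithmetic. A plain C++ sketch of the add-with-carry step they implement (illustrative; the limb layout and names are assumptions):

#include <cassert>
#include <cstdint>

// Add two 128-bit values stored as {low, high} 64-bit limbs.
static void add128(uint64_t ALo, uint64_t AHi, uint64_t BLo, uint64_t BHi,
                   uint64_t &OutLo, uint64_t &OutHi) {
  OutLo = ALo + BLo;
  uint64_t Carry = (OutLo < ALo) ? 1 : 0; // carry out of the low limb
  OutHi = AHi + BHi + Carry;              // the add-with-carry step
}

int main() {
  uint64_t Lo, Hi;
  add128(~0ull, 0, 1, 0, Lo, Hi); // (2^64 - 1) + 1 = 2^64
  assert(Lo == 0 && Hi == 1);
  return 0;
}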
multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> { let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst", - isCommutable = 1 in { + isCommutable = 1, SchedRW = [WriteALU] in { def NAME#16rr : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"), @@ -37,7 +37,8 @@ multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> { IIC_CMOV32_RR>, TB; } - let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in { + let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst", + SchedRW = [WriteALULd, ReadAfterLd] in { def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"), @@ -83,11 +84,11 @@ multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> { def r : I<opc, MRM0r, (outs GR8:$dst), (ins), !strconcat(Mnemonic, "\t$dst"), [(set GR8:$dst, (X86setcc OpNode, EFLAGS))], - IIC_SET_R>, TB; + IIC_SET_R>, TB, Sched<[WriteALU]>; def m : I<opc, MRM0m, (outs), (ins i8mem:$dst), !strconcat(Mnemonic, "\t$dst"), [(store (X86setcc OpNode, EFLAGS), addr:$dst)], - IIC_SET_M>, TB; + IIC_SET_M>, TB, Sched<[WriteALU, WriteStore]>; } // Uses = [EFLAGS] } diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td index 734e598..d9ff0c6 100644 --- a/lib/Target/X86/X86InstrCompiler.td +++ b/lib/Target/X86/X86InstrCompiler.td @@ -149,11 +149,12 @@ let Defs = [EAX, EDX, EFLAGS], FPForm = SpecialFP in { //===----------------------------------------------------------------------===// // EH Pseudo Instructions // +let SchedRW = [WriteSystem] in { let isTerminator = 1, isReturn = 1, isBarrier = 1, hasCtrlDep = 1, isCodeGenOnly = 1 in { def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr), "ret\t#eh_return, addr: $addr", - [(X86ehret GR32:$addr)], IIC_RET>; + [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>; } @@ -161,7 +162,7 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1, hasCtrlDep = 1, isCodeGenOnly = 1 in { def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr), "ret\t#eh_return, addr: $addr", - [(X86ehret GR64:$addr)], IIC_RET>; + [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>; } @@ -186,6 +187,7 @@ let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1, Requires<[In64BitMode]>; } } +} // SchedRW let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in { def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst), @@ -220,7 +222,7 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1 in { def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "", - [(set GR8:$dst, 0)], IIC_ALU_NONMEM>; + [(set GR8:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>; // We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller // encoding and avoids a partial-register update sometimes, but doing so @@ -229,11 +231,12 @@ def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "", // to an MCInst. def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins), "", - [(set GR16:$dst, 0)], IIC_ALU_NONMEM>, OpSize; + [(set GR16:$dst, 0)], IIC_ALU_NONMEM>, OpSize, + Sched<[WriteZero]>; // FIXME: Set encoding to pseudo. 
def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "", - [(set GR32:$dst, 0)], IIC_ALU_NONMEM>; + [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>; } // We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a @@ -245,7 +248,7 @@ def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "", let Defs = [EFLAGS], isCodeGenOnly=1, AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "", - [(set GR64:$dst, 0)], IIC_ALU_NONMEM>; + [(set GR64:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>; // Materialize i64 constant where top 32-bits are zero. This could theoretically // use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however @@ -254,10 +257,10 @@ let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1 in def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src), "", [(set GR64:$dst, i64immZExt32:$src)], - IIC_ALU_NONMEM>; + IIC_ALU_NONMEM>, Sched<[WriteALU]>; // Use sbb to materialize carry bit. -let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1 in { +let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in { // FIXME: These are pseudo ops that should be replaced with Pat<> patterns. // However, Pat<> can't replicate the destination reg into the inputs of the // result. @@ -320,6 +323,7 @@ def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))), //===----------------------------------------------------------------------===// // String Pseudo Instructions // +let SchedRW = [WriteMicrocoded] in { let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in { def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}", [(X86rep_movs i8)], IIC_REP_MOVS>, REP, @@ -382,6 +386,7 @@ let Defs = [RCX,RDI], isCodeGenOnly = 1 in { [(X86rep_stos i64)], IIC_REP_STOS>, REP, Requires<[In64BitMode]>; } +} // SchedRW //===----------------------------------------------------------------------===// // Thread Local Storage Instructions @@ -594,12 +599,13 @@ defm ATOMSWAP : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSWAP">; let isCodeGenOnly = 1, Defs = [EFLAGS] in def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero), "or{l}\t{$zero, $dst|$dst, $zero}", - [], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK; + [], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK, + Sched<[WriteALULd, WriteRMW]>; let hasSideEffects = 1 in def Int_MemBarrier : I<0, Pseudo, (outs), (ins), "#MEMBARRIER", - [(X86MemBarrier)]>; + [(X86MemBarrier)]>, Sched<[WriteLoad]>; // RegOpc corresponds to the mr version of the instruction // ImmOpc corresponds to the mi version of the instruction @@ -607,7 +613,8 @@ def Int_MemBarrier : I<0, Pseudo, (outs), (ins), // ImmMod corresponds to the instruction format of the mi and mi8 versions multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8, Format ImmMod, string mnemonic> { -let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in { +let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1, + SchedRW = [WriteALULd, WriteRMW] in { def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 }, @@ -694,7 +701,8 @@ defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">; // Optimized codegen when the non-memory output is not used. 
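The "use sbb to materialize carry bit" pseudos in the hunk above turn the carry flag into a 0 / all-ones register value without a branch. The same idea in portable C++ (illustrative only, not the selected instruction sequence):

#include <cassert>
#include <cstdint>

// 0xFFFFFFFF when the unsigned subtract A - B would borrow, 0 otherwise.
static uint32_t borrow_mask(uint32_t A, uint32_t B) {
  return (uint32_t)0 - (uint32_t)(A < B);
}

int main() {
  assert(borrow_mask(1, 2) == 0xFFFFFFFFu);
  assert(borrow_mask(2, 1) == 0u);
  assert(borrow_mask(7, 7) == 0u);
  return 0;
}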
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form, string mnemonic> { -let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in { +let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1, + SchedRW = [WriteALULd, WriteRMW] in { def NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst), !strconcat(mnemonic, "{b}\t$dst"), @@ -728,7 +736,7 @@ let isCodeGenOnly = 1 in { multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form, string mnemonic, SDPatternOperator frag, InstrItinClass itin8, InstrItinClass itin> { -let isCodeGenOnly = 1 in { +let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in { let Defs = [AL, EFLAGS], Uses = [AL] in def NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap), !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"), @@ -748,14 +756,15 @@ let isCodeGenOnly = 1 in { } } -let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in { +let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX], + SchedRW = [WriteALULd, WriteRMW] in { defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b", X86cas8, i64mem, IIC_CMPX_LOCK_8B>; } let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX], - Predicates = [HasCmpxchg16b] in { + Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in { defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b", X86cas16, i128mem, IIC_CMPX_LOCK_16B>, REX_W; @@ -768,7 +777,8 @@ defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic, string frag, InstrItinClass itin8, InstrItinClass itin> { - let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in { + let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1, + SchedRW = [WriteALULd, WriteRMW] in { def NAME#8 : I<opc8, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"), @@ -990,9 +1000,6 @@ def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)), // This corresponds to add $foo@tpoff, %rax def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)), (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>; -// This corresponds to mov foo@tpoff(%rbx), %eax -def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))), - (MOV64rm tglobaltlsaddr :$dst)>; // Direct PC relative function call for small code model. 32-bit displacement @@ -1192,7 +1199,8 @@ def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{ // (or x1, x2) -> (add x1, x2) if two operands are known not to share bits. -let AddedComplexity = 5 in { // Try this before the selecting to OR +// Try this before the selecting to OR. +let AddedComplexity = 5, SchedRW = [WriteALU] in { let isConvertibleToThreeAddress = 1, Constraints = "$src1 = $dst", Defs = [EFLAGS] in { @@ -1239,7 +1247,7 @@ def ADD64ri32_DB : I<0, Pseudo, [(set GR64:$dst, (or_is_add GR64:$src1, i64immSExt32:$src2))]>; } -} // AddedComplexity +} // AddedComplexity, SchedRW //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td index bfe9541..0e69651 100644 --- a/lib/Target/X86/X86InstrControl.td +++ b/lib/Target/X86/X86InstrControl.td @@ -20,7 +20,7 @@ // The X86retflag return instructions are variadic because we may add ST0 and // ST1 arguments when returning values on the x87 stack. 
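The or_is_add fragment above (selecting OR as ADD or LEA when the operands are known not to share bits) rests on a simple identity, checked here in standalone C++ (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t A : {0u, 1u, 0x10u, 0xF0u, 0xABCD0000u})
    for (uint32_t B : {0u, 2u, 0x0Fu, 0x0000ABCDu})
      if ((A & B) == 0)             // operands share no set bits...
        assert((A | B) == (A + B)); // ...so OR and ADD compute the same value
  return 0;
}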
let isTerminator = 1, isReturn = 1, isBarrier = 1, - hasCtrlDep = 1, FPForm = SpecialFP in { + hasCtrlDep = 1, FPForm = SpecialFP, SchedRW = [WriteJumpLd] in { def RET : I <0xC3, RawFrm, (outs), (ins variable_ops), "ret", [(X86retflag 0)], IIC_RET>; @@ -46,7 +46,7 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1, } // Unconditional branches. -let isBarrier = 1, isBranch = 1, isTerminator = 1 in { +let isBarrier = 1, isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in { def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst), "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>; def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst), @@ -58,7 +58,7 @@ let isBarrier = 1, isBranch = 1, isTerminator = 1 in { } // Conditional Branches. -let isBranch = 1, isTerminator = 1, Uses = [EFLAGS] in { +let isBranch = 1, isTerminator = 1, Uses = [EFLAGS], SchedRW = [WriteJump] in { multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> { def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, [], IIC_Jcc>; @@ -85,7 +85,7 @@ defm JLE : ICBr<0x7E, 0x8E, "jle\t$dst", X86_COND_LE>; defm JG : ICBr<0x7F, 0x8F, "jg\t$dst" , X86_COND_G>; // jcx/jecx/jrcx instructions. -let isBranch = 1, isTerminator = 1 in { +let isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in { // These are the 32-bit versions of this instruction for the asmparser. In // 32-bit mode, the address size prefix is jcxz and the unprefixed version is // jecxz. @@ -110,36 +110,46 @@ let isBranch = 1, isTerminator = 1 in { // Indirect branches let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst", - [(brind GR32:$dst)], IIC_JMP_REG>, Requires<[In32BitMode]>; + [(brind GR32:$dst)], IIC_JMP_REG>, Requires<[In32BitMode]>, + Sched<[WriteJump]>; def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst", - [(brind (loadi32 addr:$dst))], IIC_JMP_MEM>, Requires<[In32BitMode]>; + [(brind (loadi32 addr:$dst))], IIC_JMP_MEM>, + Requires<[In32BitMode]>, Sched<[WriteJumpLd]>; def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst", - [(brind GR64:$dst)], IIC_JMP_REG>, Requires<[In64BitMode]>; + [(brind GR64:$dst)], IIC_JMP_REG>, Requires<[In64BitMode]>, + Sched<[WriteJump]>; def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst", - [(brind (loadi64 addr:$dst))], IIC_JMP_MEM>, Requires<[In64BitMode]>; + [(brind (loadi64 addr:$dst))], IIC_JMP_MEM>, + Requires<[In64BitMode]>, Sched<[WriteJumpLd]>; def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs), (ins i16imm:$off, i16imm:$seg), - "ljmp{w}\t{$seg, $off|$off, $seg}", [], IIC_JMP_FAR_PTR>, OpSize; + "ljmp{w}\t{$seg, $off|$off, $seg}", [], + IIC_JMP_FAR_PTR>, OpSize, Sched<[WriteJump]>; def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs), (ins i32imm:$off, i16imm:$seg), - "ljmp{l}\t{$seg, $off|$off, $seg}", [], IIC_JMP_FAR_PTR>; + "ljmp{l}\t{$seg, $off|$off, $seg}", [], + IIC_JMP_FAR_PTR>, Sched<[WriteJump]>; def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst), - "ljmp{q}\t{*}$dst", [], IIC_JMP_FAR_MEM>; + "ljmp{q}\t{*}$dst", [], IIC_JMP_FAR_MEM>, + Sched<[WriteJump]>; def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst), - "ljmp{w}\t{*}$dst", [], IIC_JMP_FAR_MEM>, OpSize; + "ljmp{w}\t{*}$dst", [], IIC_JMP_FAR_MEM>, OpSize, + Sched<[WriteJumpLd]>; def FARJMP32m : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst), - "ljmp{l}\t{*}$dst", [], IIC_JMP_FAR_MEM>; + "ljmp{l}\t{*}$dst", [], IIC_JMP_FAR_MEM>, + 
Sched<[WriteJumpLd]>; } // Loop instructions - +let SchedRW = [WriteJump] in { def LOOP : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", [], IIC_LOOP>; def LOOPE : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", [], IIC_LOOPE>; def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", [], IIC_LOOPNE>; +} //===----------------------------------------------------------------------===// // Call Instructions... @@ -152,27 +162,32 @@ let isCall = 1 in let Uses = [ESP] in { def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm, (outs), (ins i32imm_pcrel:$dst), - "call{l}\t$dst", [], IIC_CALL_RI>, Requires<[In32BitMode]>; + "call{l}\t$dst", [], IIC_CALL_RI>, + Requires<[In32BitMode]>, Sched<[WriteJump]>; def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst), "call{l}\t{*}$dst", [(X86call GR32:$dst)], IIC_CALL_RI>, - Requires<[In32BitMode]>; + Requires<[In32BitMode]>, Sched<[WriteJump]>; def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst), - "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))], IIC_CALL_MEM>, - Requires<[In32BitMode]>; + "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))], + IIC_CALL_MEM>, + Requires<[In32BitMode,FavorMemIndirectCall]>, + Sched<[WriteJumpLd]>; def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs), (ins i16imm:$off, i16imm:$seg), "lcall{w}\t{$seg, $off|$off, $seg}", [], - IIC_CALL_FAR_PTR>, OpSize; + IIC_CALL_FAR_PTR>, OpSize, Sched<[WriteJump]>; def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs), (ins i32imm:$off, i16imm:$seg), "lcall{l}\t{$seg, $off|$off, $seg}", [], - IIC_CALL_FAR_PTR>; + IIC_CALL_FAR_PTR>, Sched<[WriteJump]>; def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst), - "lcall{w}\t{*}$dst", [], IIC_CALL_FAR_MEM>, OpSize; + "lcall{w}\t{*}$dst", [], IIC_CALL_FAR_MEM>, OpSize, + Sched<[WriteJumpLd]>; def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst), - "lcall{l}\t{*}$dst", [], IIC_CALL_FAR_MEM>; + "lcall{l}\t{*}$dst", [], IIC_CALL_FAR_MEM>, + Sched<[WriteJumpLd]>; // callw for 16 bit code for the assembler. let isAsmParserOnly = 1 in @@ -185,7 +200,7 @@ let isCall = 1 in // Tail call stuff. let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, - isCodeGenOnly = 1 in + isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in let Uses = [ESP] in { def TCRETURNdi : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$offset), []>; @@ -216,7 +231,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, // RSP is marked as a use to prevent stack-pointer assignments that appear // immediately before calls from potentially appearing dead. Uses for argument // registers are added manually. -let isCall = 1, Uses = [RSP] in { +let isCall = 1, Uses = [RSP], SchedRW = [WriteJump] in { // NOTE: this pattern doesn't match "X86call imm", because we do not know // that the offset between an arbitrary immediate and the call will fit in // the 32-bit pcrel field that we have. 
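The note above about not knowing whether the offset fits in the 32-bit pcrel field can be made concrete: a 5-byte E8 rel32 call encodes a displacement measured from the end of the instruction, and it must be representable as a signed 32-bit value. A minimal sketch (illustrative; not how the backend performs the check):

#include <cassert>
#include <cstdint>

// True if Target is reachable from a direct rel32 call placed at CallAddr.
static bool fits_in_rel32(uint64_t CallAddr, uint64_t Target) {
  int64_t Disp = (int64_t)(Target - (CallAddr + 5)); // 5-byte E8 rel32 call
  return Disp == (int64_t)(int32_t)Disp;
}

int main() {
  assert(fits_in_rel32(0x400000, 0x400000 + (1u << 20)));    // nearby target
  assert(!fits_in_rel32(0x400000, 0x400000 + (1ull << 40))); // too far for rel32
  return 0;
}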
@@ -231,7 +246,7 @@ let isCall = 1, Uses = [RSP] in { def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst), "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))], IIC_CALL_MEM>, - Requires<[In64BitMode]>; + Requires<[In64BitMode,FavorMemIndirectCall]>; def FARCALL64 : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst), "lcall{q}\t{*}$dst", [], IIC_CALL_FAR_MEM>; @@ -245,13 +260,12 @@ let isCall = 1, isCodeGenOnly = 1 in def W64ALLOCA : Ii32PCRel<0xE8, RawFrm, (outs), (ins i64i32imm_pcrel:$dst), "call{q}\t$dst", [], IIC_CALL_RI>, - Requires<[IsWin64]>; + Requires<[IsWin64]>, Sched<[WriteJump]>; } let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, - isCodeGenOnly = 1 in - let Uses = [RSP], - usesCustomInserter = 1 in { + isCodeGenOnly = 1, Uses = [RSP], usesCustomInserter = 1, + SchedRW = [WriteJump] in { def TCRETURNdi64 : PseudoI<(outs), (ins i64i32imm_pcrel:$dst, i32imm:$offset), []>; diff --git a/lib/Target/X86/X86InstrExtension.td b/lib/Target/X86/X86InstrExtension.td index 2eb454d..6dc7175 100644 --- a/lib/Target/X86/X86InstrExtension.td +++ b/lib/Target/X86/X86InstrExtension.td @@ -42,48 +42,54 @@ let neverHasSideEffects = 1 in { let neverHasSideEffects = 1 in { def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src), "movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_R8>, - TB, OpSize; + TB, OpSize, Sched<[WriteALU]>; let mayLoad = 1 in def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src), "movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_M8>, - TB, OpSize; + TB, OpSize, Sched<[WriteALULd]>; } // neverHasSideEffects = 1 def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8:$src), "movs{bl|x}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (sext GR8:$src))], IIC_MOVSX>, TB; + [(set GR32:$dst, (sext GR8:$src))], IIC_MOVSX>, TB, + Sched<[WriteALU]>; def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src), "movs{bl|x}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (sextloadi32i8 addr:$src))], IIC_MOVSX>, TB; + [(set GR32:$dst, (sextloadi32i8 addr:$src))], IIC_MOVSX>, TB, + Sched<[WriteALULd]>; def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src), "movs{wl|x}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (sext GR16:$src))], IIC_MOVSX>, TB; + [(set GR32:$dst, (sext GR16:$src))], IIC_MOVSX>, TB, + Sched<[WriteALU]>; def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src), "movs{wl|x}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (sextloadi32i16 addr:$src))], IIC_MOVSX>, - TB; + TB, Sched<[WriteALULd]>; let neverHasSideEffects = 1 in { def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src), "movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_R8>, - TB, OpSize; + TB, OpSize, Sched<[WriteALU]>; let mayLoad = 1 in def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src), "movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_M8>, - TB, OpSize; + TB, OpSize, Sched<[WriteALULd]>; } // neverHasSideEffects = 1 def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src), "movz{bl|x}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (zext GR8:$src))], IIC_MOVZX>, TB; + [(set GR32:$dst, (zext GR8:$src))], IIC_MOVZX>, TB, + Sched<[WriteALU]>; def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src), "movz{bl|x}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (zextloadi32i8 addr:$src))], IIC_MOVZX>, TB; + [(set GR32:$dst, (zextloadi32i8 addr:$src))], IIC_MOVZX>, TB, + Sched<[WriteALULd]>; def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins 
GR16:$src), "movz{wl|x}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (zext GR16:$src))], IIC_MOVZX>, TB; + [(set GR32:$dst, (zext GR16:$src))], IIC_MOVZX>, TB, + Sched<[WriteALU]>; def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src), "movz{wl|x}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (zextloadi32i16 addr:$src))], IIC_MOVZX>, - TB; + TB, Sched<[WriteALULd]>; // These are the same as the regular MOVZX32rr8 and MOVZX32rm8 // except that they use GR32_NOREX for the output operand register class @@ -92,12 +98,12 @@ let neverHasSideEffects = 1, isCodeGenOnly = 1 in { def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg, (outs GR32_NOREX:$dst), (ins GR8_NOREX:$src), "movz{bl|x}\t{$src, $dst|$dst, $src}", - [], IIC_MOVZX>, TB; + [], IIC_MOVZX>, TB, Sched<[WriteALU]>; let mayLoad = 1 in def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem, (outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src), "movz{bl|x}\t{$src, $dst|$dst, $src}", - [], IIC_MOVZX>, TB; + [], IIC_MOVZX>, TB, Sched<[WriteALULd]>; } // MOVSX64rr8 always has a REX prefix and it has an 8-bit register @@ -106,38 +112,42 @@ def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem, // were generalized, this would require a special register class. def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src), "movs{bq|x}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (sext GR8:$src))], IIC_MOVSX>, TB; + [(set GR64:$dst, (sext GR8:$src))], IIC_MOVSX>, TB, + Sched<[WriteALU]>; def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src), "movs{bq|x}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (sextloadi64i8 addr:$src))], IIC_MOVSX>, - TB; + TB, Sched<[WriteALULd]>; def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src), "movs{wq|x}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (sext GR16:$src))], IIC_MOVSX>, TB; + [(set GR64:$dst, (sext GR16:$src))], IIC_MOVSX>, TB, + Sched<[WriteALU]>; def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), "movs{wq|x}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (sextloadi64i16 addr:$src))], IIC_MOVSX>, - TB; + TB, Sched<[WriteALULd]>; def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src), "movs{lq|xd}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (sext GR32:$src))], IIC_MOVSX>; + [(set GR64:$dst, (sext GR32:$src))], IIC_MOVSX>, + Sched<[WriteALU]>; def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src), "movs{lq|xd}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (sextloadi64i32 addr:$src))], IIC_MOVSX>; + [(set GR64:$dst, (sextloadi64i32 addr:$src))], IIC_MOVSX>, + Sched<[WriteALULd]>; // movzbq and movzwq encodings for the disassembler def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src), "movz{bq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>, - TB; + TB, Sched<[WriteALU]>; def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src), "movz{bq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>, - TB; + TB, Sched<[WriteALULd]>; def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src), "movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>, - TB; + TB, Sched<[WriteALU]>; def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), "movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>, - TB; + TB, Sched<[WriteALULd]>; // FIXME: These should be Pat patterns. let isCodeGenOnly = 1 in { @@ -145,17 +155,19 @@ let isCodeGenOnly = 1 in { // Use movzbl instead of movzbq when the destination is a register; it's // equivalent due to implicit zero-extending, and it has a smaller encoding. 
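The "use movzbl instead of movzbq" comment just above relies on x86-64's implicit zero extension: writing a 32-bit register clears bits 63:32, so zero-extending i8 to i32 and then to i64 gives the same value as zero-extending i8 to i64 directly. The same equivalence holds for plain C++ integer conversions (illustrative sketch only):

#include <cstdint>

constexpr uint64_t zext8_via_32(uint8_t V) { return (uint64_t)(uint32_t)V; }
constexpr uint64_t zext8_direct(uint8_t V) { return (uint64_t)V; }

static_assert(zext8_via_32(0xFF) == zext8_direct(0xFF), "upper bits stay zero");
static_assert(zext8_via_32(0x80) == 0x80u, "no sign bits are smeared");

int main() { return 0; }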
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src), - "", [(set GR64:$dst, (zext GR8:$src))], IIC_MOVZX>, TB; + "", [(set GR64:$dst, (zext GR8:$src))], IIC_MOVZX>, TB, + Sched<[WriteALU]>; def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src), "", [(set GR64:$dst, (zextloadi64i8 addr:$src))], IIC_MOVZX>, - TB; + TB, Sched<[WriteALULd]>; // Use movzwl instead of movzwq when the destination is a register; it's // equivalent due to implicit zero-extending, and it has a smaller encoding. def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src), - "", [(set GR64:$dst, (zext GR16:$src))], IIC_MOVZX>, TB; + "", [(set GR64:$dst, (zext GR16:$src))], IIC_MOVZX>, TB, + Sched<[WriteALU]>; def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), "", [(set GR64:$dst, (zextloadi64i16 addr:$src))], - IIC_MOVZX>, TB; + IIC_MOVZX>, TB, Sched<[WriteALULd]>; // There's no movzlq instruction, but movl can be used for this purpose, using // implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero @@ -165,9 +177,10 @@ def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), // necessarily all zero. In such cases, we fall back to these explicit zext // instructions. def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src), - "", [(set GR64:$dst, (zext GR32:$src))], IIC_MOVZX>; + "", [(set GR64:$dst, (zext GR32:$src))], IIC_MOVZX>, + Sched<[WriteALU]>; def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src), "", [(set GR64:$dst, (zextloadi64i32 addr:$src))], - IIC_MOVZX>; + IIC_MOVZX>, Sched<[WriteALULd]>; } diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index 568726e..2224a08 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -422,7 +422,7 @@ def IST_Fp32m80 : FpI_<(outs), (ins i32mem:$op, RFP80:$src), OneArgFP, []>; def IST_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP, []>; } -let mayLoad = 1 in { +let mayLoad = 1, SchedRW = [WriteLoad] in { def LD_F32m : FPI<0xD9, MRM0m, (outs), (ins f32mem:$src), "fld{s}\t$src", IIC_FLD>; def LD_F64m : FPI<0xDD, MRM0m, (outs), (ins f64mem:$src), "fld{l}\t$src", @@ -436,7 +436,7 @@ def ILD_F32m : FPI<0xDB, MRM0m, (outs), (ins i32mem:$src), "fild{l}\t$src", def ILD_F64m : FPI<0xDF, MRM5m, (outs), (ins i64mem:$src), "fild{ll}\t$src", IIC_FILD>; } -let mayStore = 1 in { +let mayStore = 1, SchedRW = [WriteStore] in { def ST_F32m : FPI<0xD9, MRM2m, (outs), (ins f32mem:$dst), "fst{s}\t$dst", IIC_FST>; def ST_F64m : FPI<0xDD, MRM2m, (outs), (ins f64mem:$dst), "fst{l}\t$dst", @@ -481,7 +481,7 @@ def ISTT_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP, [(X86fp_to_i64mem RFP80:$src, addr:$op)]>; } // Predicates = [HasSSE3] -let mayStore = 1 in { +let mayStore = 1, SchedRW = [WriteStore] in { def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst", IIC_FST>; def ISTT_FP32m : FPI<0xDB, MRM1m, (outs), (ins i32mem:$dst), "fisttp{l}\t$dst", @@ -491,6 +491,7 @@ def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst), } // FP Stack manipulation instructions. 
+let SchedRW = [WriteMove] in { def LD_Frr : FPI<0xC0, AddRegFrm, (outs), (ins RST:$op), "fld\t$op", IIC_FLD>, D9; def ST_Frr : FPI<0xD0, AddRegFrm, (outs), (ins RST:$op), "fst\t$op", @@ -499,6 +500,7 @@ def ST_FPrr : FPI<0xD8, AddRegFrm, (outs), (ins RST:$op), "fstp\t$op", IIC_FST>, DD; def XCH_F : FPI<0xC8, AddRegFrm, (outs), (ins RST:$op), "fxch\t$op", IIC_FXCH>, D9; +} // Floating point constant loads. let isReMaterializable = 1 in { @@ -516,19 +518,23 @@ def LD_Fp180 : FpI_<(outs RFP80:$dst), (ins), ZeroArgFP, [(set RFP80:$dst, fpimm1)]>; } +let SchedRW = [WriteZero] in { def LD_F0 : FPI<0xEE, RawFrm, (outs), (ins), "fldz", IIC_FLDZ>, D9; def LD_F1 : FPI<0xE8, RawFrm, (outs), (ins), "fld1", IIC_FIST>, D9; - +} // Floating point compares. +let SchedRW = [WriteFAdd] in { def UCOM_Fpr32 : FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP, [(set FPSW, (trunc (X86cmp RFP32:$lhs, RFP32:$rhs)))]>; def UCOM_Fpr64 : FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP, [(set FPSW, (trunc (X86cmp RFP64:$lhs, RFP64:$rhs)))]>; def UCOM_Fpr80 : FpI_ <(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP, [(set FPSW, (trunc (X86cmp RFP80:$lhs, RFP80:$rhs)))]>; +} // SchedRW } // Defs = [FPSW] +let SchedRW = [WriteFAdd] in { // CC = ST(0) cmp ST(i) let Defs = [EFLAGS, FPSW] in { def UCOM_FpIr32: FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP, @@ -566,8 +572,10 @@ def COM_FIr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg), def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg), "fcompi\t$reg", IIC_FCOMI>, DF; } +} // SchedRW // Floating point flag ops. +let SchedRW = [WriteALU] in { let Defs = [AX], Uses = [FPSW] in def FNSTSW16r : I<0xE0, RawFrm, // AX = fp flags (outs), (ins), "fnstsw %ax", @@ -576,23 +584,26 @@ def FNSTSW16r : I<0xE0, RawFrm, // AX = fp flags def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control world (outs), (ins i16mem:$dst), "fnstcw\t$dst", [(X86fp_cwd_get16 addr:$dst)], IIC_FNSTCW>; - +} // SchedRW let mayLoad = 1 in def FLDCW16m : I<0xD9, MRM5m, // X87 control world = [mem16] - (outs), (ins i16mem:$dst), "fldcw\t$dst", [], IIC_FLDCW>; + (outs), (ins i16mem:$dst), "fldcw\t$dst", [], IIC_FLDCW>, + Sched<[WriteLoad]>; // FPU control instructions +let SchedRW = [WriteMicrocoded] in { let Defs = [FPSW] in def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", [], IIC_FNINIT>, DB; def FFREE : FPI<0xC0, AddRegFrm, (outs), (ins RST:$reg), "ffree\t$reg", IIC_FFREE>, DD; - // Clear exceptions let Defs = [FPSW] in def FNCLEX : I<0xE2, RawFrm, (outs), (ins), "fnclex", [], IIC_FNCLEX>, DB; +} // SchedRW // Operandless floating-point instructions for the disassembler. 
+let SchedRW = [WriteMicrocoded] in { def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", [], IIC_WAIT>; def FNOP : I<0xD0, RawFrm, (outs), (ins), "fnop", [], IIC_FNOP>, D9; @@ -627,6 +638,7 @@ def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src), def FXRSTOR64 : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src), "fxrstorq\t$src", [], IIC_FXRSTOR>, TB, REX_W, Requires<[In64BitMode]>; +} // SchedRW //===----------------------------------------------------------------------===// // Non-Instruction Patterns diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index 44e574d..a71e024 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -35,24 +35,27 @@ def MRM_C3 : Format<35>; def MRM_C4 : Format<36>; def MRM_C8 : Format<37>; def MRM_C9 : Format<38>; -def MRM_E8 : Format<39>; -def MRM_F0 : Format<40>; -def MRM_F8 : Format<41>; -def MRM_F9 : Format<42>; +def MRM_CA : Format<39>; +def MRM_CB : Format<40>; +def MRM_E8 : Format<41>; +def MRM_F0 : Format<42>; def RawFrmImm8 : Format<43>; def RawFrmImm16 : Format<44>; -def MRM_D0 : Format<45>; -def MRM_D1 : Format<46>; -def MRM_D4 : Format<47>; -def MRM_D5 : Format<48>; -def MRM_D8 : Format<49>; -def MRM_D9 : Format<50>; -def MRM_DA : Format<51>; -def MRM_DB : Format<52>; -def MRM_DC : Format<53>; -def MRM_DD : Format<54>; -def MRM_DE : Format<55>; -def MRM_DF : Format<56>; +def MRM_F8 : Format<45>; +def MRM_F9 : Format<46>; +def MRM_D0 : Format<47>; +def MRM_D1 : Format<48>; +def MRM_D4 : Format<49>; +def MRM_D5 : Format<50>; +def MRM_D6 : Format<51>; +def MRM_D8 : Format<52>; +def MRM_D9 : Format<53>; +def MRM_DA : Format<54>; +def MRM_DB : Format<55>; +def MRM_DC : Format<56>; +def MRM_DD : Format<57>; +def MRM_DE : Format<58>; +def MRM_DF : Format<59>; // ImmType - This specifies the immediate type used by an instruction. 
This is // part of the ad-hoc solution used to emit machine instruction encodings by our @@ -208,47 +211,47 @@ class PseudoI<dag oops, dag iops, list<dag> pattern> } class I<bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT, + list<dag> pattern, InstrItinClass itin = NoItinerary, Domain d = GenericDomain> : X86Inst<o, f, NoImm, outs, ins, asm, itin, d> { let Pattern = pattern; let CodeSize = 3; } class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT, + list<dag> pattern, InstrItinClass itin = NoItinerary, Domain d = GenericDomain> : X86Inst<o, f, Imm8, outs, ins, asm, itin, d> { let Pattern = pattern; let CodeSize = 3; } class Ii8PCRel<bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm8PCRel, outs, ins, asm, itin> { let Pattern = pattern; let CodeSize = 3; } class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm16, outs, ins, asm, itin> { let Pattern = pattern; let CodeSize = 3; } class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm32, outs, ins, asm, itin> { let Pattern = pattern; let CodeSize = 3; } class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm16PCRel, outs, ins, asm, itin> { let Pattern = pattern; let CodeSize = 3; } class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm32PCRel, outs, ins, asm, itin> { let Pattern = pattern; let CodeSize = 3; @@ -257,12 +260,12 @@ class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm, // FPStack Instruction Templates: // FPI - Floating Point Instruction template. class FPI<bits<8> o, Format F, dag outs, dag ins, string asm, - InstrItinClass itin = IIC_DEFAULT> + InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, [], itin> {} // FpI_ - Floating Point Pseudo Instruction template. Not Predicated. 
class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern, - InstrItinClass itin = IIC_DEFAULT> + InstrItinClass itin = NoItinerary> : X86Inst<0, Pseudo, NoImm, outs, ins, "", itin> { let FPForm = fp; let Pattern = pattern; @@ -275,14 +278,14 @@ class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern, // Iseg32 - 16-bit segment selector, 32-bit offset class Iseg16 <bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm16, outs, ins, asm, itin> { let Pattern = pattern; let CodeSize = 3; } class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm32, outs, ins, asm, itin> { let Pattern = pattern; let CodeSize = 3; @@ -292,7 +295,7 @@ def __xs : XS; // SI - SSE 1 & 2 scalar instructions class SI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin> { let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX], !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2])); @@ -303,7 +306,7 @@ class SI<bits<8> o, Format F, dag outs, dag ins, string asm, // SIi8 - SSE 1 & 2 scalar instructions class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin> { let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX], !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2])); @@ -350,25 +353,25 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // VPSI - SSE1 instructions with TB prefix in AVX form. class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>; class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>; class PSI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB, Requires<[UseSSE1]>; class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB, Requires<[UseSSE1]>; class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS, Requires<[HasAVX]>; class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedSingle>, TB, Requires<[HasAVX]>; @@ -388,42 +391,42 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm, // MMX operands. 
class SDI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[UseSSE2]>; class SDIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[UseSSE2]>; class S2SI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE2]>; class S2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE2]>; class PDI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize, Requires<[UseSSE2]>; class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize, Requires<[UseSSE2]>; class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XD, Requires<[HasAVX]>; class VS2SI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS, Requires<[HasAVX]>; class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>, TB, OpSize, Requires<[HasAVX]>; class MMXSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>; class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>; // SSE3 Instruction Templates: @@ -433,15 +436,15 @@ class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // S3DI - SSE3 instructions with XD prefix. 
class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, XS, Requires<[UseSSE3]>; class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, XD, Requires<[UseSSE3]>; class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize, Requires<[UseSSE3]>; @@ -458,19 +461,19 @@ class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, // classes. They need to be enabled even if AVX is enabled. class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, Requires<[UseSSSE3]>; class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, Requires<[UseSSSE3]>; class MMXSS38I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, Requires<[HasSSSE3]>; class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, Requires<[HasSSSE3]>; @@ -480,11 +483,11 @@ class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm, // SS41AIi8 - SSE 4.1 instructions with TA prefix and ImmT == Imm8. // class SS48I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, Requires<[UseSSE41]>; class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, Requires<[UseSSE41]>; @@ -492,19 +495,19 @@ class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // // SS428I - SSE 4.2 instructions with T8 prefix. class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, Requires<[UseSSE42]>; // SS42FI - SSE 4.2 instructions with T8XD prefix. // NOTE: 'HasSSE42' is used as SS42FI is only used for CRC32 insns. 
class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, T8XD, Requires<[HasSSE42]>; // SS42AI = SSE 4.2 instructions with TA prefix class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, Requires<[UseSSE42]>; @@ -514,11 +517,11 @@ class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm, // AVX8I - AVX instructions with T8 and OpSize prefix. // AVXAIi8 - AVX instructions with TA, OpSize prefix and ImmT = Imm8. class AVX8I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize, Requires<[HasAVX]>; class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize, Requires<[HasAVX]>; @@ -528,11 +531,11 @@ class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // AVX28I - AVX2 instructions with T8 and OpSize prefix. // AVX2AIi8 - AVX2 instructions with TA, OpSize prefix and ImmT = Imm8. class AVX28I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize, Requires<[HasAVX2]>; class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize, Requires<[HasAVX2]>; @@ -541,53 +544,53 @@ class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // AES8I // These use the same encoding as the SSE4.2 T8 and TA encodings. 
class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag>pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag>pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, Requires<[HasAES]>; class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, Requires<[HasAES]>; // PCLMUL Instruction Templates class PCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag>pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag>pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize, Requires<[HasPCLMUL]>; class AVXPCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag>pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag>pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize, VEX_4V, Requires<[HasAVX, HasPCLMUL]>; // FMA3 Instruction Templates class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag>pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag>pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, T8, - OpSize, VEX_4V, Requires<[HasFMA]>; + OpSize, VEX_4V, FMASC, Requires<[HasFMA]>; // FMA4 Instruction Templates class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag>pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag>pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, TA, - OpSize, VEX_4V, VEX_I8IMM, Requires<[HasFMA4]>; + OpSize, VEX_4V, VEX_I8IMM, FMASC, Requires<[HasFMA4]>; // XOP 2, 3 and 4 Operand Instruction Template class IXOP<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, XOP, XOP9, Requires<[HasXOP]>; // XOP 2, 3 and 4 Operand Instruction Templates with imm byte class IXOPi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, XOP, XOP8, Requires<[HasXOP]>; // XOP 5 operand instruction (VEX encoding!) 
class IXOP5<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag>pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag>pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize, VEX_4V, VEX_I8IMM, Requires<[HasXOP]>; @@ -595,33 +598,33 @@ class IXOP5<bits<8> o, Format F, dag outs, dag ins, string asm, // class RI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, REX_W; class RIi8 <bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, REX_W; class RIi32 <bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii32<o, F, outs, ins, asm, pattern, itin>, REX_W; class RIi64<bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : X86Inst<o, f, Imm64, outs, ins, asm, itin>, REX_W { let Pattern = pattern; let CodeSize = 3; } class RSSI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : SSI<o, F, outs, ins, asm, pattern, itin>, REX_W; class RSDI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : SDI<o, F, outs, ins, asm, pattern, itin>, REX_W; class RPDI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : PDI<o, F, outs, ins, asm, pattern, itin>, REX_W; class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : VPDI<o, F, outs, ins, asm, pattern, itin>, VEX_W; // MMX Instruction templates @@ -635,23 +638,23 @@ class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm, // MMXID - MMX instructions with XD prefix. // MMXIS - MMX instructions with XS prefix. 
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>; class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,In64BitMode]>; class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, TB, REX_W, Requires<[HasMMX]>; class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : I<o, F, outs, ins, asm, pattern, itin>, TB, OpSize, Requires<[HasMMX]>; class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>; class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasMMX]>; class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern, InstrItinClass itin = IIC_DEFAULT> + list<dag> pattern, InstrItinClass itin = NoItinerary> : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasMMX]>; diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 17714ac..7c0423f 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -3655,7 +3655,16 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, const SmallVectorImpl<MachineOperand> &MOs, unsigned Size, unsigned Align) const { const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0; + bool isCallRegIndirect = TM.getSubtarget<X86Subtarget>().callRegIndirect(); bool isTwoAddrFold = false; + + // Atom favors register form of call. So, we do not fold loads into calls + // when X86Subtarget is Atom. + if (isCallRegIndirect && + (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r)) { + return NULL; + } + unsigned NumOps = MI->getDesc().getNumOperands(); bool isTwoAddr = NumOps > 1 && MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; @@ -4272,7 +4281,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= Alignment; Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl, - VT, MVT::Other, &AddrOps[0], AddrOps.size()); + VT, MVT::Other, AddrOps); NewNodes.push_back(Load); // Preserve memory reference information. @@ -4294,8 +4303,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, if (Load) BeforeOps.push_back(SDValue(Load, 0)); std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps)); - SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, &BeforeOps[0], - BeforeOps.size()); + SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps); NewNodes.push_back(NewNode); // Emit the store instruction. 
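
The getMachineNode calls rewritten in this hunk (and the store counterpart in the next one) trade an explicit pointer-plus-count pair for an ArrayRef-style operand list, so the length can no longer drift out of sync with the buffer. A standalone sketch of the same call-site shape, using a plain int payload instead of SDValue so it builds against just LLVM's ADT headers (assumed example, not part of the patch):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <numeric>
#include <cstdio>

// Before: static int sumOps(const int *Ops, unsigned NumOps);
static int sumOps(llvm::ArrayRef<int> Ops) {
  return std::accumulate(Ops.begin(), Ops.end(), 0);
}

int main() {
  llvm::SmallVector<int, 8> AddrOps;
  for (int i = 1; i <= 4; ++i)
    AddrOps.push_back(i);
  // Before: sumOps(&AddrOps[0], AddrOps.size()); the SmallVector now binds to
  // the ArrayRef parameter implicitly.
  std::printf("%d\n", sumOps(AddrOps));
  return 0;
}
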
@@ -4317,8 +4325,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, (*MMOs.first)->getAlignment() >= Alignment; SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, TM), - dl, MVT::Other, - &AddrOps[0], AddrOps.size()); + dl, MVT::Other, AddrOps); NewNodes.push_back(Store); // Preserve memory reference information. diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index d989ec7..3380d8c 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -142,6 +142,9 @@ def X86sahf : SDNode<"X86ISD::SAHF", SDTX86sahf>; def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand, [SDNPHasChain, SDNPSideEffect]>; +def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand, + [SDNPHasChain, SDNPSideEffect]>; + def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas, [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; @@ -603,7 +606,12 @@ def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">; def HasBMI : Predicate<"Subtarget->hasBMI()">; def HasBMI2 : Predicate<"Subtarget->hasBMI2()">; def HasRTM : Predicate<"Subtarget->hasRTM()">; +def HasHLE : Predicate<"Subtarget->hasHLE()">; +def HasTSX : Predicate<"Subtarget->hasRTM() || Subtarget->hasHLE()">; def HasADX : Predicate<"Subtarget->hasADX()">; +def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">; +def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">; +def HasPrefetchW : Predicate<"Subtarget->has3DNow() || Subtarget->hasPRFCHW()">; def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">; def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">; def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">; @@ -626,6 +634,7 @@ def OptForSize : Predicate<"OptForSize">; def OptForSpeed : Predicate<"!OptForSize">; def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">; def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">; +def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">; //===----------------------------------------------------------------------===// // X86 Instruction Format Definitions. @@ -758,7 +767,7 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{ // // Nop -let neverHasSideEffects = 1 in { +let neverHasSideEffects = 1, SchedRW = [WriteZero] in { def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", [], IIC_NOP>; def NOOPW : I<0x1f, MRM0m, (outs), (ins i16mem:$zero), "nop{w}\t$zero", [], IIC_NOP>, TB, OpSize; @@ -769,8 +778,9 @@ let neverHasSideEffects = 1 in { // Constructing a stack frame. def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl), - "enter\t$len, $lvl", [], IIC_ENTER>; + "enter\t$len, $lvl", [], IIC_ENTER>, Sched<[WriteMicrocoded]>; +let SchedRW = [WriteALU] in { let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", [], IIC_LEAVE>, @@ -780,13 +790,14 @@ let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", [], IIC_LEAVE>, Requires<[In64BitMode]>; +} // SchedRW //===----------------------------------------------------------------------===// // Miscellaneous Instructions. 
// let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in { -let mayLoad = 1 in { +let mayLoad = 1, SchedRW = [WriteLoad] in { def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [], IIC_POP_REG16>, OpSize; def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", [], @@ -803,9 +814,9 @@ def POP32rmm: I<0x8F, MRM0m, (outs i32mem:$dst), (ins), "pop{l}\t$dst", [], def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>, OpSize; def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>, Requires<[In32BitMode]>; -} +} // mayLoad, SchedRW -let mayStore = 1 in { +let mayStore = 1, SchedRW = [WriteStore] in { def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[], IIC_PUSH_REG>, OpSize; def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[], @@ -832,29 +843,30 @@ def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", [], IIC_PUSH_F>, def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", [], IIC_PUSH_F>, Requires<[In32BitMode]>; -} +} // mayStore, SchedRW } let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in { -let mayLoad = 1 in { +let mayLoad = 1, SchedRW = [WriteLoad] in { def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", [], IIC_POP_REG>; def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", [], IIC_POP_REG>; def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", [], IIC_POP_MEM>; -} -let mayStore = 1 in { +} // mayLoad, SchedRW +let mayStore = 1, SchedRW = [WriteStore] in { def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", [], IIC_PUSH_REG>; def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", [], IIC_PUSH_REG>; def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", [], IIC_PUSH_MEM>; -} +} // mayStore, SchedRW } -let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in { +let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1, + SchedRW = [WriteStore] in { def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm), "push{q}\t$imm", [], IIC_PUSH_IMM>; def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm), @@ -865,23 +877,24 @@ def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm), let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", [], IIC_POP_FD>, - Requires<[In64BitMode]>; + Requires<[In64BitMode]>, Sched<[WriteLoad]>; let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>, - Requires<[In64BitMode]>; + Requires<[In64BitMode]>, Sched<[WriteStore]>; let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP], - mayLoad=1, neverHasSideEffects=1 in { + mayLoad = 1, neverHasSideEffects = 1, SchedRW = [WriteLoad] in { def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l|d}", [], IIC_POP_A>, Requires<[In32BitMode]>; } let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], - mayStore=1, neverHasSideEffects=1 in { + mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in { def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l|d}", [], IIC_PUSH_A>, Requires<[In32BitMode]>; } -let Constraints = "$src = $dst" in { // GR32 = bswap GR32 +let Constraints = "$src = $dst", SchedRW = [WriteALU] in { +// GR32 = bswap GR32 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "bswap{l}\t$dst", @@ -890,60 +903,63 
@@ def BSWAP32r : I<0xC8, AddRegFrm, def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src), "bswap{q}\t$dst", [(set GR64:$dst, (bswap GR64:$src))], IIC_BSWAP>, TB; -} // Constraints = "$src = $dst" +} // Constraints = "$src = $dst", SchedRW // Bit scan instructions. let Defs = [EFLAGS] in { def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), "bsf{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))], - IIC_BSF>, TB, OpSize; + IIC_BSF>, TB, OpSize, Sched<[WriteShift]>; def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "bsf{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))], - IIC_BSF>, TB, OpSize; + IIC_BSF>, TB, OpSize, Sched<[WriteShiftLd]>; def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "bsf{l}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))], IIC_BSF>, TB; + [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))], IIC_BSF>, TB, + Sched<[WriteShift]>; def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "bsf{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))], - IIC_BSF>, TB; + IIC_BSF>, TB, Sched<[WriteShiftLd]>; def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "bsf{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))], - IIC_BSF>, TB; + IIC_BSF>, TB, Sched<[WriteShift]>; def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "bsf{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))], - IIC_BSF>, TB; + IIC_BSF>, TB, Sched<[WriteShiftLd]>; def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), "bsr{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))], IIC_BSR>, - TB, OpSize; + TB, OpSize, Sched<[WriteShift]>; def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "bsr{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))], IIC_BSR>, TB, - OpSize; + OpSize, Sched<[WriteShiftLd]>; def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "bsr{l}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))], IIC_BSR>, TB; + [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))], IIC_BSR>, TB, + Sched<[WriteShift]>; def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "bsr{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))], - IIC_BSR>, TB; + IIC_BSR>, TB, Sched<[WriteShiftLd]>; def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "bsr{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BSR>, TB; + [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BSR>, TB, + Sched<[WriteShift]>; def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "bsr{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))], - IIC_BSR>, TB; + IIC_BSR>, TB, Sched<[WriteShiftLd]>; } // Defs = [EFLAGS] - +let SchedRW = [WriteMicrocoded] in { // These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in { def MOVSB : I<0xA4, RawFrm, (outs), (ins), "movsb", [], IIC_MOVS>; @@ -971,12 +987,12 @@ def CMPS8 : I<0xA6, RawFrm, (outs), (ins), "cmpsb", [], IIC_CMPS>; def CMPS16 : I<0xA7, RawFrm, (outs), (ins), "cmpsw", [], IIC_CMPS>, OpSize; def CMPS32 : I<0xA7, RawFrm, (outs), (ins), "cmps{l|d}", [], IIC_CMPS>; def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), 
"cmpsq", [], IIC_CMPS>; - +} // SchedRW //===----------------------------------------------------------------------===// // Move Instructions. // - +let SchedRW = [WriteMove] in { let neverHasSideEffects = 1 in { def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src), "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; @@ -987,6 +1003,7 @@ def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; } + let isReMaterializable = 1, isAsCheapAsAMove = 1 in { def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src), "mov{b}\t{$src, $dst|$dst, $src}", @@ -1004,7 +1021,9 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, i64immSExt32:$src)], IIC_MOV>; } +} // SchedRW +let SchedRW = [WriteStore] in { def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src), "mov{b}\t{$src, $dst|$dst, $src}", [(store (i8 imm:$src), addr:$dst)], IIC_MOV_MEM>; @@ -1017,9 +1036,11 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src), def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(store i64immSExt32:$src, addr:$dst)], IIC_MOV_MEM>; +} // SchedRW /// moffs8, moffs16 and moffs32 versions of moves. The immediate is a /// 32-bit offset from the PC. These are only valid in x86-32 mode. +let SchedRW = [WriteALU] in { def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src), "mov{b}\t{$src, %al|AL, $src}", [], IIC_MOV_MEM>, Requires<[In32BitMode]>; @@ -1038,6 +1059,7 @@ def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins), def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins), "mov{l}\t{%eax, $dst|$dst, EAX}", [], IIC_MOV_MEM>, Requires<[In32BitMode]>; +} // FIXME: These definitions are utterly broken // Just leave them commented out for now because they're useless outside @@ -1055,7 +1077,7 @@ def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins), */ -let isCodeGenOnly = 1, hasSideEffects = 0 in { +let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in { def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src), "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), @@ -1066,7 +1088,7 @@ def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; } -let canFoldAsLoad = 1, isReMaterializable = 1 in { +let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in { def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src), "mov{b}\t{$src, $dst|$dst, $src}", [(set GR8:$dst, (loadi8 addr:$src))], IIC_MOV_MEM>; @@ -1081,6 +1103,7 @@ def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), [(set GR64:$dst, (load addr:$src))], IIC_MOV_MEM>; } +let SchedRW = [WriteStore] in { def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src), "mov{b}\t{$src, $dst|$dst, $src}", [(store GR8:$src, addr:$dst)], IIC_MOV_MEM>; @@ -1093,6 +1116,7 @@ def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(store GR64:$src, addr:$dst)], IIC_MOV_MEM>; +} // SchedRW // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so // that they can be used for 
copying and storing h registers, which can't be @@ -1101,34 +1125,37 @@ let isCodeGenOnly = 1 in { let neverHasSideEffects = 1 in def MOV8rr_NOREX : I<0x88, MRMDestReg, (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src), - "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>; + "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>, + Sched<[WriteMove]>; let mayStore = 1 in def MOV8mr_NOREX : I<0x88, MRMDestMem, (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src), "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], - IIC_MOV_MEM>; + IIC_MOV_MEM>, Sched<[WriteStore]>; let mayLoad = 1, neverHasSideEffects = 1, canFoldAsLoad = 1, isReMaterializable = 1 in def MOV8rm_NOREX : I<0x8A, MRMSrcMem, (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src), "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], - IIC_MOV_MEM>; + IIC_MOV_MEM>, Sched<[WriteLoad]>; } // Condition code ops, incl. set if equal/not equal/... +let SchedRW = [WriteALU] in { let Defs = [EFLAGS], Uses = [AH] in def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", [(set EFLAGS, (X86sahf AH))], IIC_AHF>; let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", [], IIC_AHF>; // AH = flags - +} // SchedRW //===----------------------------------------------------------------------===// // Bit tests instructions: BT, BTS, BTR, BTC. let Defs = [EFLAGS] in { +let SchedRW = [WriteALU] in { def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))], IIC_BT_RR>, @@ -1139,13 +1166,14 @@ def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))], IIC_BT_RR>, TB; +} // SchedRW // Unlike with the register+register form, the memory+register form of the // bt instruction does not ignore the high bits of the index. From ISel's // perspective, this is pretty bizarre. Make these instructions disassembly // only for now. -let mayLoad = 1, hasSideEffects = 0 in { +let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteALULd] in { def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi16 addr:$src1), GR16:$src2), @@ -1166,6 +1194,7 @@ let mayLoad = 1, hasSideEffects = 0 in { >, TB; } +let SchedRW = [WriteALU] in { def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))], @@ -1178,10 +1207,12 @@ def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))], IIC_BT_RI>, TB; +} // SchedRW // Note that these instructions don't need FastBTMem because that // only applies when the other operand is in a register. When it's // an immediate, bt is still fast. 
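
The comment above is the whole reason the memory+register BT forms stay disassembly-only: hardware folds the entire bit index, high bits included, into the effective address rather than masking it to the operand width, which has no clean ISel equivalent. A simplified byte-granular model of that addressing (illustrative sketch, not LLVM code; the real instruction scales by the operand size):

#include <cstdint>
#include <cstdio>

// BT [base], reg: the byte actually read is base + (bitindex >> 3), so a large
// index walks past the operand that appears in the assembly text.
static bool bt_mem_reg(const uint8_t *base, int64_t bitindex) {
  const uint8_t byte = base[bitindex >> 3];
  return (byte >> (bitindex & 7)) & 1;
}

int main() {
  uint8_t buf[16] = {};
  buf[5] = 0x01;                                  // bit 40 of the buffer
  std::printf("%d\n", (int)bt_mem_reg(buf, 40));  // prints 1: the access reached byte 5
  return 0;
}
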
+let SchedRW = [WriteALU] in { def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt (loadi16 addr:$src1), i16immSExt8:$src2)) @@ -1194,8 +1225,10 @@ def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt (loadi64 addr:$src1), i64immSExt8:$src2))], IIC_BT_MI>, TB; +} // SchedRW let hasSideEffects = 0 in { +let SchedRW = [WriteALU] in { def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, OpSize, TB; @@ -1203,8 +1236,9 @@ def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; +} // SchedRW -let mayLoad = 1, mayStore = 1 in { +let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, OpSize, TB; @@ -1214,6 +1248,7 @@ def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB; } +let SchedRW = [WriteALU] in { def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, OpSize, TB; @@ -1221,8 +1256,9 @@ def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2), "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; +} // SchedRW -let mayLoad = 1, mayStore = 1 in { +let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, OpSize, TB; @@ -1232,6 +1268,7 @@ def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB; } +let SchedRW = [WriteALU] in { def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, OpSize, TB; @@ -1239,8 +1276,9 @@ def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +} // SchedRW -let mayLoad = 1, mayStore = 1 in { +let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, OpSize, TB; @@ -1250,6 +1288,7 @@ def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB; } +let SchedRW = [WriteALU] in { def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, OpSize, TB; @@ -1257,8 +1296,9 @@ def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2), "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", [], 
IIC_BTX_RI>, TB; +} // SchedRW -let mayLoad = 1, mayStore = 1 in { +let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, OpSize, TB; @@ -1268,6 +1308,7 @@ def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB; } +let SchedRW = [WriteALU] in { def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, OpSize, TB; @@ -1275,8 +1316,9 @@ def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; +} // SchedRW -let mayLoad = 1, mayStore = 1 in { +let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, OpSize, TB; @@ -1286,6 +1328,7 @@ def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB; } +let SchedRW = [WriteALU] in { def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2), "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, OpSize, TB; @@ -1293,8 +1336,9 @@ def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2), "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2), "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; +} // SchedRW -let mayLoad = 1, mayStore = 1 in { +let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, OpSize, TB; @@ -1315,7 +1359,7 @@ def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2), // operand is referenced, the atomicity is ensured. multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag, InstrItinClass itin> { - let Constraints = "$val = $dst" in { + let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in { def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"), @@ -1350,6 +1394,7 @@ multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag, defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap", IIC_XCHG_MEM>; // Swap between registers. 
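
The ATOMIC_SWAP multiclass above now carries the load-plus-RMW scheduling pair; at the source level this is what an ordinary atomic exchange typically lowers to on x86, where the memory form of xchg is implicitly locked. A small usage sketch (assumed example, not part of the patch); the register-register forms follow below:

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> flag{0};
  // On x86 this typically compiles to an xchg with a memory operand, which is
  // atomic without an explicit lock prefix.
  int old = flag.exchange(1, std::memory_order_acq_rel);
  std::printf("previous value: %d\n", old);
  return 0;
}
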
+let SchedRW = [WriteALU] in { let Constraints = "$val = $dst" in { def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src), "xchg{b}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>; @@ -1374,9 +1419,9 @@ def XCHG32ar64 : I<0x90, AddRegFrm, (outs), (ins GR32_NOAX:$src), Requires<[In64BitMode]>; def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src), "xchg{q}\t{$src, %rax|RAX, $src}", [], IIC_XCHG_REG>; +} // SchedRW - - +let SchedRW = [WriteALU] in { def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB; def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), @@ -1386,8 +1431,9 @@ def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB; def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), "xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB; +} // SchedRW -let mayLoad = 1, mayStore = 1 in { +let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB; def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), @@ -1400,6 +1446,7 @@ def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), } +let SchedRW = [WriteALU] in { def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), "cmpxchg{b}\t{$src, $dst|$dst, $src}", [], IIC_CMPXCHG_REG8>, TB; @@ -1412,7 +1459,9 @@ def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), "cmpxchg{q}\t{$src, $dst|$dst, $src}", [], IIC_CMPXCHG_REG>, TB; +} // SchedRW +let SchedRW = [WriteALULd, WriteRMW] in { let mayLoad = 1, mayStore = 1 in { def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), "cmpxchg{b}\t{$src, $dst|$dst, $src}", [], @@ -1436,7 +1485,7 @@ let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst), "cmpxchg16b\t$dst", [], IIC_CMPXCHG_16B>, TB, Requires<[HasCmpxchg16b]>; - +} // SchedRW // Lock instruction prefix @@ -1459,17 +1508,21 @@ def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>; // String manipulation instructions +let SchedRW = [WriteMicrocoded] in { def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", [], IIC_LODS>; def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", [], IIC_LODS>, OpSize; def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", [], IIC_LODS>; def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", [], IIC_LODS>; +} +let SchedRW = [WriteSystem] in { def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", [], IIC_OUTS>; def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", [], IIC_OUTS>, OpSize; def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", [], IIC_OUTS>; - +} // Flag instructions +let SchedRW = [WriteALU] in { def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", [], IIC_CLC>; def STC : I<0xF9, RawFrm, (outs), (ins), "stc", [], IIC_STC>; def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", [], IIC_CLI>; @@ -1479,10 +1532,13 @@ def STD : I<0xFD, RawFrm, (outs), (ins), "std", [], IIC_STD>; def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", [], IIC_CMC>; def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", [], IIC_CLTS>, TB; +} // Table lookup instructions -def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", [], IIC_XLAT>; +def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", [], 
IIC_XLAT>, + Sched<[WriteLoad]>; +let SchedRW = [WriteMicrocoded] in { // ASCII Adjust After Addition // sets AL, AH and CF and AF of EFLAGS and uses AL and AF of EFLAGS def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", [], IIC_AAA>, @@ -1512,7 +1568,9 @@ def DAA : I<0x27, RawFrm, (outs), (ins), "daa", [], IIC_DAA>, // sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS def DAS : I<0x2F, RawFrm, (outs), (ins), "das", [], IIC_DAS>, Requires<[In32BitMode]>; +} // SchedRW +let SchedRW = [WriteSystem] in { // Check Array Index Against Bounds def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize, @@ -1528,11 +1586,13 @@ def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), "arpl\t{$src, $dst|$dst, $src}", [], IIC_ARPL_MEM>, Requires<[In32BitMode]>; +} // SchedRW //===----------------------------------------------------------------------===// // MOVBE Instructions // let Predicates = [HasMOVBE] in { + let SchedRW = [WriteALULd] in { def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "movbe{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, (bswap (loadi16 addr:$src)))], IIC_MOVBE>, @@ -1545,6 +1605,8 @@ let Predicates = [HasMOVBE] in { "movbe{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (bswap (loadi64 addr:$src)))], IIC_MOVBE>, T8; + } + let SchedRW = [WriteStore] in { def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), "movbe{w}\t{$src, $dst|$dst, $src}", [(store (bswap GR16:$src), addr:$dst)], IIC_MOVBE>, @@ -1557,6 +1619,7 @@ let Predicates = [HasMOVBE] in { "movbe{q}\t{$src, $dst|$dst, $src}", [(store (bswap GR64:$src), addr:$dst)], IIC_MOVBE>, T8; + } } //===----------------------------------------------------------------------===// @@ -1575,6 +1638,21 @@ let Predicates = [HasRDRAND], Defs = [EFLAGS] in { } //===----------------------------------------------------------------------===// +// RDSEED Instruction +// +let Predicates = [HasRDSEED], Defs = [EFLAGS] in { + def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), + "rdseed{w}\t$dst", + [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize, TB; + def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), + "rdseed{l}\t$dst", + [(set GR32:$dst, EFLAGS, (X86rdseed))]>, TB; + def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), + "rdseed{q}\t$dst", + [(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB; +} + +//===----------------------------------------------------------------------===// // LZCNT Instruction // let Predicates = [HasLZCNT], Defs = [EFLAGS] in { @@ -1755,90 +1833,90 @@ include "X86InstrCompiler.td" // Assembler Mnemonic Aliases //===----------------------------------------------------------------------===// -def : MnemonicAlias<"call", "calll">, Requires<[In32BitMode]>; -def : MnemonicAlias<"call", "callq">, Requires<[In64BitMode]>; +def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>; -def : MnemonicAlias<"cbw", "cbtw">; -def : MnemonicAlias<"cwde", "cwtl">; -def : MnemonicAlias<"cwd", "cwtd">; -def : MnemonicAlias<"cdq", "cltd">; -def : MnemonicAlias<"cdqe", "cltq">; -def : MnemonicAlias<"cqo", "cqto">; +def : MnemonicAlias<"cbw", "cbtw", "att">; +def : MnemonicAlias<"cwde", "cwtl", "att">; +def : MnemonicAlias<"cwd", "cwtd", "att">; +def : MnemonicAlias<"cdq", "cltd", "att">; +def : MnemonicAlias<"cdqe", "cltq", "att">; +def : 
MnemonicAlias<"cqo", "cqto", "att">; // lret maps to lretl, it is not ambiguous with lretq. -def : MnemonicAlias<"lret", "lretl">; +def : MnemonicAlias<"lret", "lretl", "att">; -def : MnemonicAlias<"leavel", "leave">, Requires<[In32BitMode]>; -def : MnemonicAlias<"leaveq", "leave">, Requires<[In64BitMode]>; +def : MnemonicAlias<"leavel", "leave", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>; -def : MnemonicAlias<"loopz", "loope">; -def : MnemonicAlias<"loopnz", "loopne">; +def : MnemonicAlias<"loopz", "loope", "att">; +def : MnemonicAlias<"loopnz", "loopne", "att">; -def : MnemonicAlias<"pop", "popl">, Requires<[In32BitMode]>; -def : MnemonicAlias<"pop", "popq">, Requires<[In64BitMode]>; -def : MnemonicAlias<"popf", "popfl">, Requires<[In32BitMode]>; -def : MnemonicAlias<"popf", "popfq">, Requires<[In64BitMode]>; -def : MnemonicAlias<"popfd", "popfl">; +def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"popfd", "popfl", "att">; // FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in // all modes. However: "push (addr)" and "push $42" should default to // pushl/pushq depending on the current mode. Similar for "pop %bx" -def : MnemonicAlias<"push", "pushl">, Requires<[In32BitMode]>; -def : MnemonicAlias<"push", "pushq">, Requires<[In64BitMode]>; -def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>; -def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>; -def : MnemonicAlias<"pushfd", "pushfl">; +def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"pushfd", "pushfl", "att">; -def : MnemonicAlias<"repe", "rep">; -def : MnemonicAlias<"repz", "rep">; -def : MnemonicAlias<"repnz", "repne">; +def : MnemonicAlias<"repe", "rep", "att">; +def : MnemonicAlias<"repz", "rep", "att">; +def : MnemonicAlias<"repnz", "repne", "att">; -def : MnemonicAlias<"retl", "ret">, Requires<[In32BitMode]>; -def : MnemonicAlias<"retq", "ret">, Requires<[In64BitMode]>; +def : MnemonicAlias<"retl", "ret", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"retq", "ret", "att">, Requires<[In64BitMode]>; -def : MnemonicAlias<"salb", "shlb">; -def : MnemonicAlias<"salw", "shlw">; -def : MnemonicAlias<"sall", "shll">; -def : MnemonicAlias<"salq", "shlq">; +def : MnemonicAlias<"salb", "shlb", "att">; +def : MnemonicAlias<"salw", "shlw", "att">; +def : MnemonicAlias<"sall", "shll", "att">; +def : MnemonicAlias<"salq", "shlq", "att">; -def : MnemonicAlias<"smovb", "movsb">; -def : MnemonicAlias<"smovw", "movsw">; -def : MnemonicAlias<"smovl", "movsl">; -def : MnemonicAlias<"smovq", "movsq">; +def : MnemonicAlias<"smovb", "movsb", "att">; +def : MnemonicAlias<"smovw", "movsw", "att">; +def : MnemonicAlias<"smovl", "movsl", "att">; +def : MnemonicAlias<"smovq", "movsq", "att">; -def : MnemonicAlias<"ud2a", "ud2">; -def : MnemonicAlias<"verrw", "verr">; +def : MnemonicAlias<"ud2a", "ud2", "att">; +def : MnemonicAlias<"verrw", "verr", "att">; // System instruction aliases. 
-def : MnemonicAlias<"iret", "iretl">; -def : MnemonicAlias<"sysret", "sysretl">; -def : MnemonicAlias<"sysexit", "sysexitl">; +def : MnemonicAlias<"iret", "iretl", "att">; +def : MnemonicAlias<"sysret", "sysretl", "att">; +def : MnemonicAlias<"sysexit", "sysexitl", "att">; -def : MnemonicAlias<"lgdtl", "lgdt">, Requires<[In32BitMode]>; -def : MnemonicAlias<"lgdtq", "lgdt">, Requires<[In64BitMode]>; -def : MnemonicAlias<"lidtl", "lidt">, Requires<[In32BitMode]>; -def : MnemonicAlias<"lidtq", "lidt">, Requires<[In64BitMode]>; -def : MnemonicAlias<"sgdtl", "sgdt">, Requires<[In32BitMode]>; -def : MnemonicAlias<"sgdtq", "sgdt">, Requires<[In64BitMode]>; -def : MnemonicAlias<"sidtl", "sidt">, Requires<[In32BitMode]>; -def : MnemonicAlias<"sidtq", "sidt">, Requires<[In64BitMode]>; +def : MnemonicAlias<"lgdtl", "lgdt", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"lgdtq", "lgdt", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"lidtl", "lidt", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"lidtq", "lidt", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"sgdtl", "sgdt", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"sgdtq", "sgdt", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"sidtl", "sidt", "att">, Requires<[In32BitMode]>; +def : MnemonicAlias<"sidtq", "sidt", "att">, Requires<[In64BitMode]>; // Floating point stack aliases. -def : MnemonicAlias<"fcmovz", "fcmove">; -def : MnemonicAlias<"fcmova", "fcmovnbe">; -def : MnemonicAlias<"fcmovnae", "fcmovb">; -def : MnemonicAlias<"fcmovna", "fcmovbe">; -def : MnemonicAlias<"fcmovae", "fcmovnb">; -def : MnemonicAlias<"fcomip", "fcompi">; -def : MnemonicAlias<"fildq", "fildll">; -def : MnemonicAlias<"fistpq", "fistpll">; -def : MnemonicAlias<"fisttpq", "fisttpll">; -def : MnemonicAlias<"fldcww", "fldcw">; -def : MnemonicAlias<"fnstcww", "fnstcw">; -def : MnemonicAlias<"fnstsww", "fnstsw">; -def : MnemonicAlias<"fucomip", "fucompi">; -def : MnemonicAlias<"fwait", "wait">; +def : MnemonicAlias<"fcmovz", "fcmove", "att">; +def : MnemonicAlias<"fcmova", "fcmovnbe", "att">; +def : MnemonicAlias<"fcmovnae", "fcmovb", "att">; +def : MnemonicAlias<"fcmovna", "fcmovbe", "att">; +def : MnemonicAlias<"fcmovae", "fcmovnb", "att">; +def : MnemonicAlias<"fcomip", "fcompi", "att">; +def : MnemonicAlias<"fildq", "fildll", "att">; +def : MnemonicAlias<"fistpq", "fistpll", "att">; +def : MnemonicAlias<"fisttpq", "fisttpll", "att">; +def : MnemonicAlias<"fldcww", "fldcw", "att">; +def : MnemonicAlias<"fnstcww", "fnstcw", "att">; +def : MnemonicAlias<"fnstsww", "fnstsw", "att">; +def : MnemonicAlias<"fucomip", "fucompi", "att">; +def : MnemonicAlias<"fwait", "wait", "att">; class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond> diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index 127af6f..49721df 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -20,6 +20,7 @@ // MMX Multiclasses //===----------------------------------------------------------------------===// +let Sched = WriteVecALU in { def MMX_INTALU_ITINS : OpndItins< IIC_MMX_ALU_RR, IIC_MMX_ALU_RM >; @@ -35,11 +36,14 @@ def MMX_PHADDSUBW : OpndItins< def MMX_PHADDSUBD : OpndItins< IIC_MMX_PHADDSUBD_RR, IIC_MMX_PHADDSUBD_RM >; +} +let Sched = WriteVecIMul in def MMX_PMUL_ITINS : OpndItins< IIC_MMX_PMUL, IIC_MMX_PMUL >; +let Sched = WriteVecALU in { def MMX_PSADBW_ITINS : OpndItins< IIC_MMX_PSADBW, IIC_MMX_PSADBW >; @@ -47,11 +51,13 @@ def MMX_PSADBW_ITINS : OpndItins< def 
MMX_MISC_FUNC_ITINS : OpndItins< IIC_MMX_MISC_FUNC_MEM, IIC_MMX_MISC_FUNC_REG >; +} def MMX_SHIFT_ITINS : ShiftOpndItins< IIC_MMX_SHIFT_RR, IIC_MMX_SHIFT_RM, IIC_MMX_SHIFT_RI >; +let Sched = WriteShuffle in { def MMX_UNPCK_H_ITINS : OpndItins< IIC_MMX_UNPCK_H_RR, IIC_MMX_UNPCK_H_RM >; @@ -67,7 +73,9 @@ def MMX_PCK_ITINS : OpndItins< def MMX_PSHUF_ITINS : OpndItins< IIC_MMX_PSHUF, IIC_MMX_PSHUF >; +} // Sched +let Sched = WriteCvtF2I in { def MMX_CVT_PD_ITINS : OpndItins< IIC_MMX_CVT_PD_RR, IIC_MMX_CVT_PD_RM >; @@ -75,6 +83,7 @@ def MMX_CVT_PD_ITINS : OpndItins< def MMX_CVT_PS_ITINS : OpndItins< IIC_MMX_CVT_PS_RR, IIC_MMX_CVT_PS_RM >; +} let Constraints = "$src1 = $dst" in { // MMXI_binop_rm_int - Simple MMX binary operator based on intrinsic. @@ -84,7 +93,8 @@ let Constraints = "$src1 = $dst" in { def irr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src1, VR64:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), - [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))], itins.rr> { + [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))], itins.rr>, + Sched<[itins.Sched]> { let isCommutable = Commutable; } def irm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), @@ -92,7 +102,7 @@ let Constraints = "$src1 = $dst" in { !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (IntId VR64:$src1, (bitconvert (load_mmx addr:$src2))))], - itins.rm>; + itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>; } multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm, @@ -101,17 +111,19 @@ let Constraints = "$src1 = $dst" in { def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src1, VR64:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), - [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))], itins.rr>; + [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))], itins.rr>, + Sched<[WriteVecShift]>; def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (IntId VR64:$src1, (bitconvert (load_mmx addr:$src2))))], - itins.rm>; + itins.rm>, Sched<[WriteVecShiftLd, ReadAfterLd]>; def ri : MMXIi8<opc2, ImmForm, (outs VR64:$dst), (ins VR64:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), - [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))], itins.ri>; + [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))], itins.ri>, + Sched<[WriteVecShift]>; } } @@ -120,13 +132,14 @@ multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr, Intrinsic IntId64, OpndItins itins> { def rr64 : MMXSS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), - [(set VR64:$dst, (IntId64 VR64:$src))], itins.rr>; + [(set VR64:$dst, (IntId64 VR64:$src))], itins.rr>, + Sched<[itins.Sched]>; def rm64 : MMXSS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR64:$dst, (IntId64 (bitconvert (memopmmx addr:$src))))], - itins.rm>; + itins.rm>, Sched<[itins.Sched.Folded]>; } /// Binary MMX instructions requiring SSSE3. 
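The recurring pattern in these hunks is to hang a default SchedWrite off each OpndItins bundle via a top-level `let Sched = ...`, and then have the instruction multiclasses splice `itins.Sched` into their own scheduling list. A minimal, self-contained sketch of that TableGen idiom, with simplified stand-in classes rather than the real X86Schedule.td definitions:

// Simplified stand-ins; only the let/field-access idiom mirrors the patch.
class SchedWrite;
def WriteVecALU  : SchedWrite;
def WriteVecIMul : SchedWrite;

class OpndItins {
  SchedWrite Sched = WriteVecALU;    // default, overridden by the let below
}

let Sched = WriteVecIMul in
def PMUL_ITINS : OpndItins;          // PMUL_ITINS.Sched is WriteVecIMul

class Inst<OpndItins itins> {
  list<SchedWrite> SchedRW = [itins.Sched];   // consumer splices in the default
}
def MULINSTR : Inst<PMUL_ITINS>;     // picks up WriteVecIMul

In the real file the memory forms additionally use the load-folded variant of the write plus ReadAfterLd (`Sched<[itins.Sched.Folded, ReadAfterLd]>`); that part is omitted from the sketch.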
@@ -137,13 +150,15 @@ multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr, def rr64 : MMXSS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src1, VR64:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), - [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))], itins.rr>; + [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))], itins.rr>, + Sched<[itins.Sched]>; def rm64 : MMXSS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (IntId64 VR64:$src1, - (bitconvert (memopmmx addr:$src2))))], itins.rm>; + (bitconvert (memopmmx addr:$src2))))], itins.rm>, + Sched<[itins.Sched.Folded, ReadAfterLd]>; } } @@ -164,9 +179,11 @@ multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag, string asm, OpndItins itins, Domain d> { def irr : MMXPI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, - [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr, d>; + [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr, d>, + Sched<[itins.Sched]>; def irm : MMXPI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, - [(set DstRC:$dst, (Int (ld_frag addr:$src)))], itins.rm, d>; + [(set DstRC:$dst, (Int (ld_frag addr:$src)))], itins.rm, d>, + Sched<[itins.Sched.Folded]>; } multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC, @@ -174,11 +191,11 @@ multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC, PatFrag ld_frag, string asm, Domain d> { def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst),(ins DstRC:$src1, SrcRC:$src2), asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], - IIC_DEFAULT, d>; + NoItinerary, d>; def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins DstRC:$src1, x86memop:$src2), asm, [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], - IIC_DEFAULT, d>; + NoItinerary, d>; } //===----------------------------------------------------------------------===// @@ -197,16 +214,17 @@ def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (x86mmx (scalar_to_vector GR32:$src)))], - IIC_MMX_MOV_MM_RM>; + IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>; let canFoldAsLoad = 1 in def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (x86mmx (scalar_to_vector (loadi32 addr:$src))))], - IIC_MMX_MOV_MM_RM>; + IIC_MMX_MOV_MM_RM>, Sched<[WriteLoad]>; let mayStore = 1 in def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src), - "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_MM_RM>; + "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_MM_RM>, + Sched<[WriteStore]>; // Low word of MMX to GPR. 
def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1, @@ -214,16 +232,18 @@ def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1, def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR64:$src), "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, - (MMX_X86movd2w (x86mmx VR64:$src)))], IIC_MMX_MOV_REG_MM>; + (MMX_X86movd2w (x86mmx VR64:$src)))], + IIC_MMX_MOV_REG_MM>, Sched<[WriteMove]>; let neverHasSideEffects = 1 in def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src), "movd\t{$src, $dst|$dst, $src}", - [], IIC_MMX_MOV_MM_RM>; + [], IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>; // These are 64 bit moves, but since the OS X assembler doesn't // recognize a register-register movq, we write them as // movd. +let SchedRW = [WriteMove] in { def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR64:$src), "movd\t{$src, $dst|$dst, $src}", @@ -237,6 +257,9 @@ let neverHasSideEffects = 1 in def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), "movq\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOVQ_RR>; +} // SchedRW + +let SchedRW = [WriteLoad] in { let canFoldAsLoad = 1 in def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src), "movq\t{$src, $dst|$dst, $src}", @@ -246,7 +269,9 @@ def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src), "movq\t{$src, $dst|$dst, $src}", [(store (x86mmx VR64:$src), addr:$dst)], IIC_MMX_MOVQ_RM>; +} // SchedRW +let SchedRW = [WriteMove] in { def MMX_MOVDQ2Qrr : MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "movdq2q\t{$src, $dst|$dst, $src}", [(set VR64:$dst, @@ -271,11 +296,12 @@ def MMX_MOVQ2FR64rr: MMXS2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst), def MMX_MOVFR642Qrr: MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOVQ_RR>; +} // SchedRW def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src), "movntq\t{$src, $dst|$dst, $src}", [(int_x86_mmx_movnt_dq addr:$dst, VR64:$src)], - IIC_MMX_MOVQ_RM>; + IIC_MMX_MOVQ_RM>, Sched<[WriteStore]>; let AddedComplexity = 15 in // movd to MMX register zero-extends @@ -283,7 +309,7 @@ def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (x86mmx (X86vzmovl (x86mmx (scalar_to_vector GR32:$src)))))], - IIC_MMX_MOV_MM_RM>; + IIC_MMX_MOV_MM_RM>, Sched<[WriteMove]>; let AddedComplexity = 20 in def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src), @@ -291,7 +317,7 @@ def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), [(set VR64:$dst, (x86mmx (X86vzmovl (x86mmx (scalar_to_vector (loadi32 addr:$src))))))], - IIC_MMX_MOV_MM_RM>; + IIC_MMX_MOV_MM_RM>, Sched<[WriteLoad]>; // Arithmetic Instructions defm MMX_PABSB : SS3I_unop_rm_int_mm<0x1C, "pabsb", int_x86_ssse3_pabs_b, @@ -491,14 +517,14 @@ def MMX_PSHUFWri : MMXIi8<0x70, MRMSrcReg, "pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR64:$dst, (int_x86_sse_pshuf_w VR64:$src1, imm:$src2))], - IIC_MMX_PSHUF>; + IIC_MMX_PSHUF>, Sched<[WriteShuffle]>; def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src1, i8imm:$src2), "pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR64:$dst, (int_x86_sse_pshuf_w (load_mmx addr:$src1), imm:$src2))], - IIC_MMX_PSHUF>; + IIC_MMX_PSHUF>, Sched<[WriteShuffleLd]>; @@ -532,7 +558,7 @@ def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg, "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}", 
[(set GR32:$dst, (int_x86_mmx_pextr_w VR64:$src1, (iPTR imm:$src2)))], - IIC_MMX_PEXTR>; + IIC_MMX_PEXTR>, Sched<[WriteShuffle]>; let Constraints = "$src1 = $dst" in { def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg, (outs VR64:$dst), @@ -540,7 +566,7 @@ let Constraints = "$src1 = $dst" in { "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1, GR32:$src2, (iPTR imm:$src3)))], - IIC_MMX_PINSRW>; + IIC_MMX_PINSRW>, Sched<[WriteShuffle]>; def MMX_PINSRWirmi : MMXIi8<0xC4, MRMSrcMem, (outs VR64:$dst), @@ -549,7 +575,7 @@ let Constraints = "$src1 = $dst" in { [(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1, (i32 (anyext (loadi16 addr:$src2))), (iPTR imm:$src3)))], - IIC_MMX_PINSRW>; + IIC_MMX_PINSRW>, Sched<[WriteShuffleLd, ReadAfterLd]>; } // Mask creation @@ -570,6 +596,7 @@ def : Pat<(x86mmx (MMX_X86movdq2q (loadv2i64 addr:$src))), (x86mmx (MMX_MOVQ64rm addr:$src))>; // Misc. +let SchedRW = [WriteShuffle] in { let Uses = [EDI] in def MMX_MASKMOVQ : MMXI<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask), "maskmovq\t{$mask, $src|$src, $mask}", @@ -580,6 +607,7 @@ def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask), "maskmovq\t{$mask, $src|$src, $mask}", [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, RDI)], IIC_MMX_MASKMOV>; +} // 64-bit bit convert. let Predicates = [HasSSE2] in { diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 105963f..cce938b 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -35,6 +35,7 @@ class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm, // scalar +let Sched = WriteFAdd in { def SSE_ALU_F32S : OpndItins< IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM >; @@ -42,6 +43,7 @@ def SSE_ALU_F32S : OpndItins< def SSE_ALU_F64S : OpndItins< IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM >; +} def SSE_ALU_ITINS_S : SizeItins< SSE_ALU_F32S, SSE_ALU_F64S @@ -76,6 +78,7 @@ def SSE_DIV_ITINS_S : SizeItins< >; // parallel +let Sched = WriteFAdd in { def SSE_ALU_F32P : OpndItins< IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM >; @@ -83,6 +86,7 @@ def SSE_ALU_F32P : OpndItins< def SSE_ALU_F64P : OpndItins< IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM >; +} def SSE_ALU_ITINS_P : SizeItins< SSE_ALU_F32P, SSE_ALU_F64P @@ -184,14 +188,16 @@ multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC, !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set RC:$dst, (!cast<Intrinsic>( !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr)) - RC:$src1, RC:$src2))], itins.rr>; + RC:$src1, RC:$src2))], itins.rr>, + Sched<[itins.Sched]>; def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2), !if(Is2Addr, !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr)) - RC:$src1, mem_cpat:$src2))], itins.rm>; + RC:$src1, mem_cpat:$src2))], itins.rm>, + Sched<[itins.Sched.Folded, ReadAfterLd]>; } /// sse12_fp_packed - SSE 1 & 2 packed instructions class @@ -226,13 +232,13 @@ multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d, !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - pat_rr, IIC_DEFAULT, d>, + pat_rr, NoItinerary, d>, Sched<[WriteVecLogic]>; def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, !strconcat(OpcodeStr, 
"\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - pat_rm, IIC_DEFAULT, d>, + pat_rm, NoItinerary, d>, Sched<[WriteVecLogicLd, ReadAfterLd]>; } @@ -364,7 +370,7 @@ let Predicates = [HasAVX] in { // Alias instructions that map fld0 to xorps for sse or vxorps for avx. // This is expanded by ExpandPostRAPseudos. let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, - isPseudo = 1 in { + isPseudo = 1, SchedRW = [WriteZero] in { def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "", [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>; def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "", @@ -381,7 +387,7 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, // We set canFoldAsLoad because this can be converted to a constant-pool // load of an all-zeros value if folding it would be beneficial. let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, - isPseudo = 1 in { + isPseudo = 1, SchedRW = [WriteZero] in { def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "", [(set VR128:$dst, (v4f32 immAllZerosV))]>; } @@ -398,7 +404,7 @@ def : Pat<(v16i8 immAllZerosV), (V_SET0)>; // at the rename stage without using any execution unit, so SET0PSY // and SET0PDY can be used for vector int instructions without penalty let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, - isPseudo = 1, Predicates = [HasAVX] in { + isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in { def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "", [(set VR256:$dst, (v8f32 immAllZerosV))]>; } @@ -436,7 +442,7 @@ def : Pat<(bc_v4i64 (v8f32 immAllZerosV)), // We set canFoldAsLoad because this can be converted to a constant-pool // load of an all-ones value if folding it would be beneficial. let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, - isPseudo = 1 in { + isPseudo = 1, SchedRW = [WriteZero] in { def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "", [(set VR128:$dst, (v4i32 immAllOnesV))]>; let Predicates = [HasAVX2] in @@ -470,7 +476,7 @@ multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt, def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), !strconcat(base_opc, asm_opr), - [], IIC_SSE_MOV_S_RR>; + [], IIC_SSE_MOV_S_RR>, Sched<[WriteMove]>; } multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt, @@ -848,7 +854,7 @@ def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), } // SchedRW // For disassembler -let isCodeGenOnly = 1, hasSideEffects = 0 in { +let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in { def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src), "movaps\t{$src, $dst|$dst, $src}", [], @@ -924,7 +930,7 @@ def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), } // SchedRW // For disassembler -let isCodeGenOnly = 1, hasSideEffects = 0 in { +let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in { def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src), "movaps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>; @@ -1070,7 +1076,7 @@ let Predicates = [UseSSE1] in { // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper // bits are disregarded. FIXME: Set encoding to pseudo! 
-let neverHasSideEffects = 1 in { +let neverHasSideEffects = 1, SchedRW = [WriteMove] in { def FsVMOVAPSrr : VPSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src), "movaps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>, VEX; @@ -1087,7 +1093,7 @@ def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src), // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper // bits are disregarded. FIXME: Set encoding to pseudo! -let canFoldAsLoad = 1, isReMaterializable = 1 in { +let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in { let isCodeGenOnly = 1 in { def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src), "movaps\t{$src, $dst|$dst, $src}", @@ -1436,11 +1442,13 @@ multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, X86MemOperand x86memop, string asm> { let neverHasSideEffects = 1 in { def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src), - !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>; + !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>, + Sched<[WriteCvtI2F]>; let mayLoad = 1 in def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins DstRC:$src1, x86memop:$src), - !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>; + !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>, + Sched<[WriteCvtI2FLd, ReadAfterLd]>; } // neverHasSideEffects = 1 } @@ -1740,13 +1748,15 @@ let neverHasSideEffects = 1 in { def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src1, FR64:$src2), "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [], - IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG; + IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG, + Sched<[WriteCvtF2F]>; let mayLoad = 1 in def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins FR64:$src1, f64mem:$src2), "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [], IIC_SSE_CVT_Scalar_RM>, - XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG; + XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG, + Sched<[WriteCvtF2FLd, ReadAfterLd]>; } def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>, @@ -1755,26 +1765,28 @@ def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>, def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src), "cvtsd2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (fround FR64:$src))], - IIC_SSE_CVT_Scalar_RR>; + IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>; def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src), "cvtsd2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (fround (loadf64 addr:$src)))], IIC_SSE_CVT_Scalar_RM>, XD, - Requires<[UseSSE2, OptForSize]>; + Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>; def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))], - IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>; + IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>, + Sched<[WriteCvtF2F]>; def Int_VCVTSD2SSrm: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2), "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, sse_load_f64:$src2))], - IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>; + IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>, + Sched<[WriteCvtF2FLd, ReadAfterLd]>; let Constraints = "$src1 = $dst" in { def Int_CVTSD2SSrr: 
I<0x5A, MRMSrcReg, @@ -1782,13 +1794,15 @@ def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg, "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))], - IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>; + IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>, + Sched<[WriteCvtF2F]>; def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2), "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, sse_load_f64:$src2))], - IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>; + IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>, + Sched<[WriteCvtF2FLd, ReadAfterLd]>; } // Convert scalar single to scalar double @@ -1798,13 +1812,15 @@ def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src1, FR32:$src2), "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [], IIC_SSE_CVT_Scalar_RR>, - XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG; + XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG, + Sched<[WriteCvtF2F]>; let mayLoad = 1 in def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins FR32:$src1, f32mem:$src2), "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [], IIC_SSE_CVT_Scalar_RM>, - XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>; + XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>, + Sched<[WriteCvtF2FLd, ReadAfterLd]>; } def : Pat<(f64 (fextend FR32:$src)), @@ -1823,12 +1839,12 @@ def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src), "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (fextend FR32:$src))], IIC_SSE_CVT_Scalar_RR>, XS, - Requires<[UseSSE2]>; + Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>; def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src), "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (extloadf32 addr:$src))], IIC_SSE_CVT_Scalar_RM>, XS, - Requires<[UseSSE2, OptForSize]>; + Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>; // extload f32 -> f64. 
This matches load+fextend because we have a hack in // the isel (PreprocessForFPConvert) that can introduce loads after dag @@ -1845,57 +1861,61 @@ def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg, "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))], - IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>; + IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>, + Sched<[WriteCvtF2F]>; def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2), "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))], - IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>; + IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>, + Sched<[WriteCvtF2FLd, ReadAfterLd]>; let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), "cvtss2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))], - IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>; + IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>, + Sched<[WriteCvtF2F]>; def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2), "cvtss2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))], - IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>; + IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>, + Sched<[WriteCvtF2FLd, ReadAfterLd]>; } // Convert packed single/double fp to doubleword def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))], - IIC_SSE_CVT_PS_RR>, VEX; + IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>; def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>, VEX; + IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>; def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2dq_256 VR256:$src))], - IIC_SSE_CVT_PS_RR>, VEX, VEX_L; + IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>; def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>, VEX, VEX_L; + IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>; def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))], - IIC_SSE_CVT_PS_RR>; + IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>; def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>; + IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>; // Convert Packed Double FP to Packed DW Integers @@ -1906,7 +1926,7 @@ let Predicates = [HasAVX] in { def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "vcvtpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>, - VEX; + VEX, Sched<[WriteCvtF2I]>; // XMM only def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}", @@ -1914,18 
+1934,20 @@ def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}", def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "vcvtpd2dqx\t{$src, $dst|$dst, $src}", [(set VR128:$dst, - (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))]>, VEX; + (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))]>, VEX, + Sched<[WriteCvtF2ILd]>; // YMM only def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src), "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, - (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L; + (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L, + Sched<[WriteCvtF2I]>; def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src), "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)))]>, - VEX, VEX_L; + VEX, VEX_L, Sched<[WriteCvtF2ILd]>; def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}", (VCVTPD2DQYrr VR128:$dst, VR256:$src)>; } @@ -1934,11 +1956,11 @@ def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvtpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))], - IIC_SSE_CVT_PD_RM>; + IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>; def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))], - IIC_SSE_CVT_PD_RR>; + IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>; // Convert with truncation packed single/double fp to doubleword // SSE2 packed instructions with XS prefix @@ -1946,32 +1968,33 @@ def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))], - IIC_SSE_CVT_PS_RR>, VEX; + IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>; def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>, VEX; + IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>; def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256 VR256:$src))], - IIC_SSE_CVT_PS_RR>, VEX, VEX_L; + IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>; def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>, VEX, VEX_L; + IIC_SSE_CVT_PS_RM>, VEX, VEX_L, + Sched<[WriteCvtF2ILd]>; def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))], - IIC_SSE_CVT_PS_RR>; + IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>; def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>; + IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>; let Predicates = [HasAVX] in { def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))), @@ -2021,7 +2044,7 @@ def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvttpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))], - IIC_SSE_CVT_PD_RR>, VEX; + IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>; // The assembler can 
recognize rr 256-bit instructions by seeing a ymm // register, but the same isn't true when using memory operands instead. @@ -2034,19 +2057,19 @@ def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvttpd2dqx\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttpd2dq (memopv2f64 addr:$src)))], - IIC_SSE_CVT_PD_RM>, VEX; + IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>; // YMM only def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src), "cvttpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_avx_cvtt_pd2dq_256 VR256:$src))], - IIC_SSE_CVT_PD_RR>, VEX, VEX_L; + IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>; def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src), "cvttpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)))], - IIC_SSE_CVT_PD_RM>, VEX, VEX_L; + IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>; def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}", (VCVTTPD2DQYrr VR128:$dst, VR256:$src)>; @@ -2060,12 +2083,13 @@ let Predicates = [HasAVX] in { def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvttpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))], - IIC_SSE_CVT_PD_RR>; + IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>; def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src), "cvttpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttpd2dq (memopv2f64 addr:$src)))], - IIC_SSE_CVT_PD_RM>; + IIC_SSE_CVT_PD_RM>, + Sched<[WriteCvtF2ILd]>; // Convert packed single to packed double let Predicates = [HasAVX] in { @@ -2073,32 +2097,32 @@ let Predicates = [HasAVX] in { def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))], - IIC_SSE_CVT_PD_RR>, TB, VEX; + IIC_SSE_CVT_PD_RR>, TB, VEX, Sched<[WriteCvtF2F]>; def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))], - IIC_SSE_CVT_PD_RM>, TB, VEX; + IIC_SSE_CVT_PD_RM>, TB, VEX, Sched<[WriteCvtF2FLd]>; def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src), "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2_pd_256 VR128:$src))], - IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L; + IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L, Sched<[WriteCvtF2F]>; def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src), "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))], - IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L; + IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L, Sched<[WriteCvtF2FLd]>; } let Predicates = [UseSSE2] in { def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))], - IIC_SSE_CVT_PD_RR>, TB; + IIC_SSE_CVT_PD_RR>, TB, Sched<[WriteCvtF2F]>; def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), "cvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))], - IIC_SSE_CVT_PD_RM>, TB; + IIC_SSE_CVT_PD_RM>, TB, Sched<[WriteCvtF2FLd]>; } // Convert Packed DW Integers to Packed Double FP @@ -2106,30 +2130,33 @@ let Predicates = [HasAVX] in { let neverHasSideEffects = 1, mayLoad = 1 in def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins 
i64mem:$src), "vcvtdq2pd\t{$src, $dst|$dst, $src}", - []>, VEX; + []>, VEX, Sched<[WriteCvtI2FLd]>; def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "vcvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, - (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX; + (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX, + Sched<[WriteCvtI2F]>; def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src), "vcvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvtdq2_pd_256 - (bitconvert (memopv2i64 addr:$src))))]>, VEX, VEX_L; + (bitconvert (memopv2i64 addr:$src))))]>, VEX, VEX_L, + Sched<[WriteCvtI2FLd]>; def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src), "vcvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, - (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L; + (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L, + Sched<[WriteCvtI2F]>; } let neverHasSideEffects = 1, mayLoad = 1 in def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), "cvtdq2pd\t{$src, $dst|$dst, $src}", [], - IIC_SSE_CVT_PD_RR>; + IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2FLd]>; def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))], - IIC_SSE_CVT_PD_RM>; + IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2F]>; // AVX 256-bit register conversion intrinsics let Predicates = [HasAVX] in { @@ -2146,7 +2173,7 @@ let Predicates = [HasAVX] in { def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtpd2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))], - IIC_SSE_CVT_PD_RR>, VEX; + IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>; // XMM only def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}", @@ -2155,31 +2182,31 @@ def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvtpd2psx\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))], - IIC_SSE_CVT_PD_RM>, VEX; + IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>; // YMM only def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src), "cvtpd2ps{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_avx_cvt_pd2_ps_256 VR256:$src))], - IIC_SSE_CVT_PD_RR>, VEX, VEX_L; + IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>; def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src), "cvtpd2ps{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)))], - IIC_SSE_CVT_PD_RM>, VEX, VEX_L; + IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>; def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}", (VCVTPD2PSYrr VR128:$dst, VR256:$src)>; def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtpd2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))], - IIC_SSE_CVT_PD_RR>; + IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>; def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvtpd2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))], - IIC_SSE_CVT_PD_RM>; + IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>; // AVX 256-bit register conversion intrinsics @@ -2244,11 +2271,12 @@ multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop, let neverHasSideEffects = 1 in { def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [], - 
IIC_SSE_ALU_F32S_RR>; + IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>; let mayLoad = 1 in def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, [], - IIC_SSE_ALU_F32S_RM>; + IIC_SSE_ALU_F32S_RM>, + Sched<[itins.Sched.Folded, ReadAfterLd]>; } } @@ -2394,10 +2422,11 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop, let neverHasSideEffects = 1 in { def rri_alt : PIi8<0xC2, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc), - asm_alt, [], IIC_SSE_CMPP_RR, d>; + asm_alt, [], IIC_SSE_CMPP_RR, d>, Sched<[WriteFAdd]>; def rmi_alt : PIi8<0xC2, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc), - asm_alt, [], IIC_SSE_CMPP_RM, d>; + asm_alt, [], IIC_SSE_CMPP_RM, d>, + Sched<[WriteFAddLd, ReadAfterLd]>; } } @@ -2694,18 +2723,18 @@ let Predicates = [HasAVX] in { // Assembler Only def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK, - SSEPackedSingle>, TB, VEX; + SSEPackedSingle>, TB, VEX, Sched<[WriteVecLogic]>; def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK, SSEPackedDouble>, TB, - OpSize, VEX; + OpSize, VEX, Sched<[WriteVecLogic]>; def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src), "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK, - SSEPackedSingle>, TB, VEX, VEX_L; + SSEPackedSingle>, TB, VEX, VEX_L, Sched<[WriteVecLogic]>; def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src), "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK, SSEPackedDouble>, TB, - OpSize, VEX, VEX_L; + OpSize, VEX, VEX_L, Sched<[WriteVecLogic]>; } defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps", @@ -3458,7 +3487,7 @@ def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst), //===----------------------------------------------------------------------===// // Prefetch intrinsic. -let Predicates = [HasSSE1] in { +let Predicates = [HasSSE1], SchedRW = [WriteLoad] in { def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src), "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))], IIC_SSE_PREFETCH>, TB; @@ -3473,6 +3502,8 @@ def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src), IIC_SSE_PREFETCH>, TB; } +// FIXME: How should these memory instructions be modeled? 
+let SchedRW = [WriteLoad] in { // Flush cache def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src), "clflush\t$src", [(int_x86_sse2_clflush addr:$src)], @@ -3492,6 +3523,7 @@ def LFENCE : I<0xAE, MRM_E8, (outs), (ins), def MFENCE : I<0xAE, MRM_F0, (outs), (ins), "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>, TB, Requires<[HasSSE2]>; +} // SchedRW def : Pat<(X86SFence), (SFENCE)>; def : Pat<(X86LFence), (LFENCE)>; @@ -3503,17 +3535,17 @@ def : Pat<(X86MFence), (MFENCE)>; def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src), "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)], - IIC_SSE_LDMXCSR>, VEX; + IIC_SSE_LDMXCSR>, VEX, Sched<[WriteLoad]>; def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst), "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)], - IIC_SSE_STMXCSR>, VEX; + IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>; def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src), "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)], - IIC_SSE_LDMXCSR>; + IIC_SSE_LDMXCSR>, Sched<[WriteLoad]>; def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst), "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)], - IIC_SSE_STMXCSR>; + IIC_SSE_STMXCSR>, Sched<[WriteStore]>; //===---------------------------------------------------------------------===// // SSE2 - Move Aligned/Unaligned Packed Integer Instructions @@ -4430,12 +4462,12 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src), // Move Packed Doubleword Int first element to Doubleword Int // let SchedRW = [WriteMove] in { -def VMOVPQIto64rr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), - "vmov{d|q}\t{$src, $dst|$dst, $src}", +def VMOVPQIto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), + "mov{d|q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (vector_extract (v2i64 VR128:$src), (iPTR 0)))], IIC_SSE_MOVD_ToGP>, - TB, OpSize, VEX, VEX_W, Requires<[HasAVX, In64BitMode]>; + VEX; def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), "mov{d|q}\t{$src, $dst|$dst, $src}", @@ -4480,23 +4512,24 @@ def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src), def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (bitconvert FR32:$src))], - IIC_SSE_MOVD_ToGP>, VEX; + IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>; def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), "movd\t{$src, $dst|$dst, $src}", [(store (i32 (bitconvert FR32:$src)), addr:$dst)], - IIC_SSE_MOVDQ>, VEX; + IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>; def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (bitconvert FR32:$src))], - IIC_SSE_MOVD_ToGP>; + IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>; def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), "movd\t{$src, $dst|$dst, $src}", [(store (i32 (bitconvert FR32:$src)), addr:$dst)], - IIC_SSE_MOVDQ>; + IIC_SSE_MOVDQ>, Sched<[WriteStore]>; //===---------------------------------------------------------------------===// // Patterns and instructions to describe movd/movq to XMM register zero-extends // +let SchedRW = [WriteMove] in { let AddedComplexity = 15 in { def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", @@ -4522,8 +4555,9 @@ def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), (v2i64 (scalar_to_vector GR64:$src)))))], IIC_SSE_MOVDQ>; } +} // SchedRW -let AddedComplexity = 
20 in { +let AddedComplexity = 20, SchedRW = [WriteLoad] in { def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, @@ -4536,7 +4570,7 @@ def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), (v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))))], IIC_SSE_MOVDQ>; -} +} // AddedComplexity, SchedRW let Predicates = [HasAVX] in { // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part. @@ -4585,6 +4619,8 @@ def : InstAlias<"movq\t{$src, $dst|$dst, $src}", //===---------------------------------------------------------------------===// // Move Quadword Int to Packed Quadword Int // + +let SchedRW = [WriteLoad] in { def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), "vmovq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, @@ -4596,10 +4632,12 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), (v2i64 (scalar_to_vector (loadi64 addr:$src))))], IIC_SSE_MOVDQ>, XS, Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix +} // SchedRW //===---------------------------------------------------------------------===// // Move Packed Quadword Int to Quadword Int // +let SchedRW = [WriteStore] in { def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), "movq\t{$src, $dst|$dst, $src}", [(store (i64 (vector_extract (v2i64 VR128:$src), @@ -4610,17 +4648,19 @@ def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), [(store (i64 (vector_extract (v2i64 VR128:$src), (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>; +} // SchedRW //===---------------------------------------------------------------------===// // Store / copy lower 64-bits of a XMM register. // def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), "movq\t{$src, $dst|$dst, $src}", - [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX; + [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX, + Sched<[WriteStore]>; def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), "movq\t{$src, $dst|$dst, $src}", [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)], - IIC_SSE_MOVDQ>; + IIC_SSE_MOVDQ>, Sched<[WriteStore]>; let AddedComplexity = 20 in def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), @@ -4629,7 +4669,7 @@ def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), (v2i64 (X86vzmovl (v2i64 (scalar_to_vector (loadi64 addr:$src))))))], IIC_SSE_MOVDQ>, - XS, VEX, Requires<[HasAVX]>; + XS, VEX, Requires<[HasAVX]>, Sched<[WriteLoad]>; let AddedComplexity = 20 in def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), @@ -4638,7 +4678,7 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), (v2i64 (X86vzmovl (v2i64 (scalar_to_vector (loadi64 addr:$src))))))], IIC_SSE_MOVDQ>, - XS, Requires<[UseSSE2]>; + XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>; let Predicates = [HasAVX], AddedComplexity = 20 in { def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))), @@ -4668,6 +4708,7 @@ def : Pat<(v4i64 (X86vzload addr:$src)), // Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in // IA32 document. movq xmm1, xmm2 does clear the high bits. 
// +let SchedRW = [WriteVecLogic] in { let AddedComplexity = 15 in def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "vmovq\t{$src, $dst|$dst, $src}", @@ -4680,7 +4721,9 @@ def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))], IIC_SSE_MOVQ_RR>, XS, Requires<[UseSSE2]>; +} // SchedRW +let SchedRW = [WriteVecLogicLd] in { let AddedComplexity = 20 in def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "vmovq\t{$src, $dst|$dst, $src}", @@ -4696,6 +4739,7 @@ def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), IIC_SSE_MOVDQ>, XS, Requires<[UseSSE2]>; } +} // SchedRW let AddedComplexity = 20 in { let Predicates = [HasAVX] in { @@ -4713,6 +4757,7 @@ let AddedComplexity = 20 in { } // Instructions to match in the assembler +let SchedRW = [WriteMove] in { def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>, VEX, VEX_W; @@ -4723,16 +4768,19 @@ def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), "movd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>, VEX, VEX_W; +} // SchedRW // Instructions for the disassembler // xr = XMM register // xm = mem64 +let SchedRW = [WriteMove] in { let Predicates = [HasAVX] in def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS; def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, XS; +} // SchedRW //===---------------------------------------------------------------------===// // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP @@ -4743,11 +4791,11 @@ multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr, def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set RC:$dst, (vt (OpNode RC:$src)))], - IIC_SSE_MOV_LH>; + IIC_SSE_MOV_LH>, Sched<[WriteShuffle]>; def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set RC:$dst, (OpNode (mem_frag addr:$src)))], - IIC_SSE_MOV_LH>; + IIC_SSE_MOV_LH>, Sched<[WriteShuffleLd]>; } let Predicates = [HasAVX] in { @@ -4803,25 +4851,27 @@ multiclass sse3_replicate_dfp<string OpcodeStr> { let neverHasSideEffects = 1 in def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), - [], IIC_SSE_MOV_LH>; + [], IIC_SSE_MOV_LH>, Sched<[WriteShuffle]>; def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR128:$dst, (v2f64 (X86Movddup (scalar_to_vector (loadf64 addr:$src)))))], - IIC_SSE_MOV_LH>; + IIC_SSE_MOV_LH>, Sched<[WriteShuffleLd]>; } // FIXME: Merge with above classe when there're patterns for the ymm version multiclass sse3_replicate_dfp_y<string OpcodeStr> { def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), - [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>; + [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>, + Sched<[WriteShuffle]>; def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR256:$dst, (v4f64 (X86Movddup - (scalar_to_vector 
(loadf64 addr:$src)))))]>; + (scalar_to_vector (loadf64 addr:$src)))))]>, + Sched<[WriteShuffleLd]>; } let Predicates = [HasAVX] in { @@ -4869,6 +4919,7 @@ let Predicates = [UseSSE3] in { // SSE3 - Move Unaligned Integer //===---------------------------------------------------------------------===// +let SchedRW = [WriteLoad] in { let Predicates = [HasAVX] in { def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "vlddqu\t{$src, $dst|$dst, $src}", @@ -4882,6 +4933,7 @@ def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "lddqu\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))], IIC_SSE_LDDQU>; +} //===---------------------------------------------------------------------===// // SSE3 - Arithmetic @@ -4895,13 +4947,15 @@ multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC, !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>; + [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>, + Sched<[itins.Sched]>; def rm : I<0xD0, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rr>; + [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rr>, + Sched<[itins.Sched.Folded, ReadAfterLd]>; } let Predicates = [HasAVX] in { @@ -4938,14 +4992,15 @@ multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC, !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>; + [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>, + Sched<[WriteFAdd]>; def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))], - IIC_SSE_HADDSUB_RM>; + IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>; } multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC, X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> { @@ -4953,14 +5008,15 @@ multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC, !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>; + [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>, + Sched<[WriteFAdd]>; def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))], - IIC_SSE_HADDSUB_RM>; + IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>; } let Predicates = [HasAVX] in { @@ -5009,7 +5065,7 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, (ins VR128:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>, - OpSize; + OpSize, Sched<[WriteVecALU]>; def rm128 : SS38I<opc, 
MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), @@ -5017,7 +5073,7 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, [(set VR128:$dst, (IntId128 (bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>, - OpSize; + OpSize, Sched<[WriteVecALULd]>; } /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}. @@ -5027,16 +5083,27 @@ multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr, (ins VR256:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR256:$dst, (IntId256 VR256:$src))]>, - OpSize; + OpSize, Sched<[WriteVecALU]>; def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR256:$dst, (IntId256 - (bitconvert (memopv4i64 addr:$src))))]>, OpSize; + (bitconvert (memopv4i64 addr:$src))))]>, OpSize, + Sched<[WriteVecALULd]>; } +// Helper fragments to match sext vXi1 to vXiY. +def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)), + VR128:$src))>; +def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i32 15)))>; +def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i32 31)))>; +def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)), + VR256:$src))>; +def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i32 15)))>; +def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i32 31)))>; + let Predicates = [HasAVX] in { defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128>, VEX; @@ -5044,6 +5111,19 @@ let Predicates = [HasAVX] in { int_x86_ssse3_pabs_w_128>, VEX; defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128>, VEX; + + def : Pat<(xor + (bc_v2i64 (v16i1sextv16i8)), + (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))), + (VPABSBrr128 VR128:$src)>; + def : Pat<(xor + (bc_v2i64 (v8i1sextv8i16)), + (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))), + (VPABSWrr128 VR128:$src)>; + def : Pat<(xor + (bc_v2i64 (v4i1sextv4i32)), + (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))), + (VPABSDrr128 VR128:$src)>; } let Predicates = [HasAVX2] in { @@ -5053,6 +5133,19 @@ let Predicates = [HasAVX2] in { int_x86_avx2_pabs_w>, VEX, VEX_L; defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd", int_x86_avx2_pabs_d>, VEX, VEX_L; + + def : Pat<(xor + (bc_v4i64 (v32i1sextv32i8)), + (bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))), + (VPABSBrr256 VR256:$src)>; + def : Pat<(xor + (bc_v4i64 (v16i1sextv16i16)), + (bc_v4i64 (add (v16i16 VR256:$src), (v16i1sextv16i16)))), + (VPABSWrr256 VR256:$src)>; + def : Pat<(xor + (bc_v4i64 (v8i1sextv8i32)), + (bc_v4i64 (add (v8i32 VR256:$src), (v8i1sextv8i32)))), + (VPABSDrr256 VR256:$src)>; } defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", @@ -5062,10 +5155,26 @@ defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128>; +let Predicates = [HasSSSE3] in { + def : Pat<(xor + (bc_v2i64 (v16i1sextv16i8)), + (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))), + (PABSBrr128 VR128:$src)>; + def : Pat<(xor + (bc_v2i64 (v8i1sextv8i16)), + (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))), + (PABSWrr128 VR128:$src)>; + def : Pat<(xor + (bc_v2i64 (v4i1sextv4i32)), + (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))), + (PABSDrr128 VR128:$src)>; +} + //===---------------------------------------------------------------------===// // SSSE3 - Packed Binary Operator Instructions //===---------------------------------------------------------------------===// +let Sched = WriteVecALU in { 
def SSE_PHADDSUBD : OpndItins< IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM >; @@ -5075,12 +5184,16 @@ def SSE_PHADDSUBSW : OpndItins< def SSE_PHADDSUBW : OpndItins< IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM >; +} +let Sched = WriteShuffle in def SSE_PSHUFB : OpndItins< IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM >; +let Sched = WriteVecALU in def SSE_PSIGN : OpndItins< IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM >; +let Sched = WriteVecIMul in def SSE_PMULHRSW : OpndItins< IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW >; @@ -5097,7 +5210,7 @@ multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>, - OpSize; + OpSize, Sched<[itins.Sched]>; def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, @@ -5105,7 +5218,8 @@ multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set RC:$dst, (OpVT (OpNode RC:$src1, - (bitconvert (memop_frag addr:$src2)))))], itins.rm>, OpSize; + (bitconvert (memop_frag addr:$src2)))))], itins.rm>, OpSize, + Sched<[itins.Sched.Folded, ReadAfterLd]>; } /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}. @@ -5119,7 +5233,7 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, - OpSize; + OpSize, Sched<[itins.Sched]>; def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), !if(Is2Addr, @@ -5127,7 +5241,8 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr, !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set VR128:$dst, (IntId128 VR128:$src1, - (bitconvert (memopv2i64 addr:$src2))))]>, OpSize; + (bitconvert (memopv2i64 addr:$src2))))]>, OpSize, + Sched<[itins.Sched.Folded, ReadAfterLd]>; } multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr, @@ -5269,7 +5384,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> { !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), !strconcat(asm, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")), - [], IIC_SSE_PALIGNR>, OpSize; + [], IIC_SSE_PALIGNR>, OpSize, Sched<[WriteShuffle]>; let mayLoad = 1 in def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2, i8imm:$src3), @@ -5277,7 +5392,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> { !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), !strconcat(asm, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")), - [], IIC_SSE_PALIGNR>, OpSize; + [], IIC_SSE_PALIGNR>, OpSize, Sched<[WriteShuffleLd, ReadAfterLd]>; } } @@ -5287,13 +5402,13 @@ multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> { (ins VR256:$src1, VR256:$src2, i8imm:$src3), !strconcat(asm, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), - []>, OpSize; + []>, OpSize, Sched<[WriteShuffle]>; let mayLoad = 1 in def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2, i8imm:$src3), !strconcat(asm, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), - []>, OpSize; + []>, OpSize, Sched<[WriteShuffleLd, ReadAfterLd]>; } } @@ -5341,6 +5456,7 @@ def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))), // SSSE3 - 
Thread synchronization //===---------------------------------------------------------------------===// +let SchedRW = [WriteSystem] in { let usesCustomInserter = 1 in { def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3), [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>, @@ -5354,6 +5470,7 @@ let Uses = [ECX, EAX] in def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>, TB, Requires<[HasSSE3]>; +} // SchedRW def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>; def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>; @@ -6773,7 +6890,7 @@ multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr, !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))], - IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; + NoItinerary, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2, RC:$src3), @@ -6782,7 +6899,7 @@ multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr, [(set RC:$dst, (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)), RC:$src3))], - IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; + NoItinerary, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; } let Predicates = [HasAVX] in { diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td index 1185941..5b6298b 100644 --- a/lib/Target/X86/X86InstrShiftRotate.td +++ b/lib/Target/X86/X86InstrShiftRotate.td @@ -15,7 +15,7 @@ let Defs = [EFLAGS] in { -let Constraints = "$src1 = $dst" in { +let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1), "shl{b}\t{%cl, $dst|$dst, CL}", @@ -62,9 +62,10 @@ def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1), "shl{q}\t$dst", [], IIC_SR>; } // hasSideEffects = 0 } // isConvertibleToThreeAddress = 1 -} // Constraints = "$src = $dst" +} // Constraints = "$src = $dst", SchedRW +let SchedRW = [WriteShiftLd, WriteRMW] in { // FIXME: Why do we need an explicit "Uses = [CL]" when the instr has a pattern // using CL? 
let Uses = [CL] in { @@ -118,8 +119,9 @@ def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst), "shl{q}\t$dst", [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)], IIC_SR>; +} // SchedRW -let Constraints = "$src1 = $dst" in { +let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1), "shr{b}\t{%cl, $dst|$dst, CL}", @@ -163,9 +165,10 @@ def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1), def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1), "shr{q}\t$dst", [(set GR64:$dst, (srl GR64:$src1, (i8 1)))], IIC_SR>; -} // Constraints = "$src = $dst" +} // Constraints = "$src = $dst", SchedRW +let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst), "shr{b}\t{%cl, $dst|$dst, CL}", @@ -216,8 +219,9 @@ def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst), "shr{q}\t$dst", [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)], IIC_SR>; +} // SchedRW -let Constraints = "$src1 = $dst" in { +let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1), "sar{b}\t{%cl, $dst|$dst, CL}", @@ -273,9 +277,10 @@ def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1), "sar{q}\t$dst", [(set GR64:$dst, (sra GR64:$src1, (i8 1)))], IIC_SR>; -} // Constraints = "$src = $dst" +} // Constraints = "$src = $dst", SchedRW +let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst), "sar{b}\t{%cl, $dst|$dst, CL}", @@ -330,13 +335,14 @@ def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst), "sar{q}\t$dst", [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)], IIC_SR>; +} // SchedRW //===----------------------------------------------------------------------===// // Rotate instructions //===----------------------------------------------------------------------===// let hasSideEffects = 0 in { -let Constraints = "$src1 = $dst" in { +let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1), "rcl{b}\t$dst", [], IIC_SR>; def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt), @@ -405,6 +411,7 @@ def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1), } // Constraints = "$src = $dst" +let SchedRW = [WriteShiftLd, WriteRMW] in { def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst), "rcl{b}\t$dst", [], IIC_SR>; def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, i8imm:$cnt), @@ -458,9 +465,10 @@ def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst), def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst), "rcr{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>; } +} // SchedRW } // hasSideEffects = 0 -let Constraints = "$src1 = $dst" in { +let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { // FIXME: provide shorter instructions when imm8 == 1 let Uses = [CL] in { def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1), @@ -512,8 +520,9 @@ def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1), "rol{q}\t$dst", [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))], IIC_SR>; -} // Constraints = "$src = $dst" +} // Constraints = "$src = $dst", SchedRW +let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst), "rol{b}\t{%cl, $dst|$dst, CL}", @@ -568,8 +577,9 @@ def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst), 
"rol{q}\t$dst", [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)], IIC_SR>; +} // SchedRW -let Constraints = "$src1 = $dst" in { +let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1), "ror{b}\t{%cl, $dst|$dst, CL}", @@ -620,8 +630,9 @@ def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1), "ror{q}\t$dst", [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))], IIC_SR>; -} // Constraints = "$src = $dst" +} // Constraints = "$src = $dst", SchedRW +let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst), "ror{b}\t{%cl, $dst|$dst, CL}", @@ -676,13 +687,14 @@ def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst), "ror{q}\t$dst", [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)], IIC_SR>; +} // SchedRW //===----------------------------------------------------------------------===// // Double shift instructions (generalizations of rotate) //===----------------------------------------------------------------------===// -let Constraints = "$src1 = $dst" in { +let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in { let Uses = [CL] in { def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), @@ -765,8 +777,9 @@ def SHRD64rri8 : RIi8<0xAC, MRMDestReg, (i8 imm:$src3)))], IIC_SHD64_REG_IM>, TB; } -} // Constraints = "$src = $dst" +} // Constraints = "$src = $dst", SchedRW +let SchedRW = [WriteShiftLd, WriteRMW] in { let Uses = [CL] in { def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2), "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", @@ -840,6 +853,7 @@ def SHRD64mri8 : RIi8<0xAC, MRMDestMem, (i8 imm:$src3)), addr:$dst)], IIC_SHD64_MEM_IM>, TB; +} // SchedRW } // Defs = [EFLAGS] @@ -857,12 +871,12 @@ multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> { let neverHasSideEffects = 1 in { def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, i8imm:$src2), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - []>, TAXD, VEX; + []>, TAXD, VEX, Sched<[WriteShift]>; let mayLoad = 1 in def mi : Ii8<0xF0, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, i8imm:$src2), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - []>, TAXD, VEX; + []>, TAXD, VEX, Sched<[WriteShiftLd]>; } } @@ -870,11 +884,17 @@ multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> { let neverHasSideEffects = 1 in { def rr : I<0xF7, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, - VEX_4VOp3; + VEX_4VOp3, Sched<[WriteShift]>; let mayLoad = 1 in def rm : I<0xF7, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, - VEX_4VOp3; + VEX_4VOp3, + Sched<[WriteShiftLd, + // x86memop:$src1 + ReadDefault, ReadDefault, ReadDefault, ReadDefault, + ReadDefault, + // RC:$src1 + ReadAfterLd]>; } } diff --git a/lib/Target/X86/X86InstrSystem.td b/lib/Target/X86/X86InstrSystem.td index 3caa1b5..bab3cdd 100644 --- a/lib/Target/X86/X86InstrSystem.td +++ b/lib/Target/X86/X86InstrSystem.td @@ -13,6 +13,7 @@ // //===----------------------------------------------------------------------===// +let SchedRW = [WriteSystem] in { let Defs = [RAX, RDX] in def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)], IIC_RDTSC>, TB; @@ -35,6 +36,7 @@ let Uses = [EFLAGS] in def INTO : I<0xce, RawFrm, (outs), (ins), "into", []>; def INT3 : I<0xcc, RawFrm, 
(outs), (ins), "int3", [(int_x86_int (i8 3))], IIC_INT3>; +} // SchedRW def : Pat<(debugtrap), (INT3)>; @@ -43,6 +45,7 @@ def : Pat<(debugtrap), // FIXME: This doesn't work because InstAlias can't match immediate constants. //def : InstAlias<"int\t$3", (INT3)>; +let SchedRW = [WriteSystem] in { def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap", [(int_x86_int imm:$trap)], IIC_INT>; @@ -65,11 +68,13 @@ def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>, OpSize; def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", [], IIC_IRET>; def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>, Requires<[In64BitMode]>; +} // SchedRW //===----------------------------------------------------------------------===// // Input/Output Instructions. // +let SchedRW = [WriteSystem] in { let Defs = [AL], Uses = [DX] in def IN8rr : I<0xEC, RawFrm, (outs), (ins), "in{b}\t{%dx, %al|AL, DX}", [], IIC_IN_RR>; @@ -113,10 +118,12 @@ def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port), def IN8 : I<0x6C, RawFrm, (outs), (ins), "ins{b}", [], IIC_INS>; def IN16 : I<0x6D, RawFrm, (outs), (ins), "ins{w}", [], IIC_INS>, OpSize; def IN32 : I<0x6D, RawFrm, (outs), (ins), "ins{l}", [], IIC_INS>; +} // SchedRW //===----------------------------------------------------------------------===// // Moves to and from debug registers +let SchedRW = [WriteSystem] in { def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src), "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_DR>, TB; def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src), @@ -126,10 +133,12 @@ def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src), "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB; def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB; +} // SchedRW //===----------------------------------------------------------------------===// // Moves to and from control registers +let SchedRW = [WriteSystem] in { def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG:$src), "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_CR>, TB; def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src), @@ -139,6 +148,7 @@ def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR32:$src), "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB; def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB; +} // SchedRW //===----------------------------------------------------------------------===// // Segment override instruction prefixes @@ -155,6 +165,7 @@ def GS_PREFIX : I<0x65, RawFrm, (outs), (ins), "gs", []>; // Moves to and from segment registers. // +let SchedRW = [WriteMove] in { def MOV16rs : I<0x8C, MRMDestReg, (outs GR16:$dst), (ins SEGMENT_REG:$src), "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>, OpSize; def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src), @@ -182,10 +193,12 @@ def MOV32sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i32mem:$src), "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>; def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src), "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>; +} // SchedRW //===----------------------------------------------------------------------===// // Segmentation support instructions. 
+let SchedRW = [WriteSystem] in { def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", [], IIC_SWAPGS>, TB; def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), @@ -347,10 +360,12 @@ def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg), "verw\t$seg", [], IIC_VERW_MEM>, TB; def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg), "verw\t$seg", [], IIC_VERW_REG>, TB; +} // SchedRW //===----------------------------------------------------------------------===// // Descriptor-table support instructions +let SchedRW = [WriteSystem] in { def SGDT16m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins), "sgdt{w}\t$dst", [], IIC_SGDT>, TB, OpSize, Requires<[In32BitMode]>; def SGDTm : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins), @@ -385,9 +400,11 @@ def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src), "lldt{w}\t$src", [], IIC_LLDT_REG>, TB; def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src), "lldt{w}\t$src", [], IIC_LLDT_MEM>, TB; - +} // SchedRW + //===----------------------------------------------------------------------===// // Specialized register support +let SchedRW = [WriteSystem] in { def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", [], IIC_WRMSR>, TB; def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", [], IIC_RDMSR>, TB; def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", [], IIC_RDPMC>, TB; @@ -410,14 +427,18 @@ def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src), "lmsw{w}\t$src", [], IIC_LMSW_REG>, TB; def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", [], IIC_CPUID>, TB; +} // SchedRW //===----------------------------------------------------------------------===// // Cache instructions +let SchedRW = [WriteSystem] in { def INVD : I<0x08, RawFrm, (outs), (ins), "invd", [], IIC_INVD>, TB; def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", [], IIC_INVD>, TB; +} // SchedRW //===----------------------------------------------------------------------===// // XSAVE instructions +let SchedRW = [WriteSystem] in { let Defs = [RDX, RAX], Uses = [RCX] in def XGETBV : I<0x01, MRM_D0, (outs), (ins), "xgetbv", []>, TB; @@ -428,16 +449,17 @@ let Uses = [RDX, RAX] in { def XSAVE : I<0xAE, MRM4m, (outs opaque512mem:$dst), (ins), "xsave\t$dst", []>, TB; def XSAVE64 : I<0xAE, MRM4m, (outs opaque512mem:$dst), (ins), - "xsaveq\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>; + "xsave{q|64}\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>; def XRSTOR : I<0xAE, MRM5m, (outs), (ins opaque512mem:$dst), "xrstor\t$dst", []>, TB; def XRSTOR64 : I<0xAE, MRM5m, (outs), (ins opaque512mem:$dst), - "xrstorq\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>; + "xrstor{q|64}\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>; def XSAVEOPT : I<0xAE, MRM6m, (outs opaque512mem:$dst), (ins), "xsaveopt\t$dst", []>, TB; def XSAVEOPT64 : I<0xAE, MRM6m, (outs opaque512mem:$dst), (ins), - "xsaveoptq\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>; + "xsaveopt{q|64}\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>; } +} // SchedRW //===----------------------------------------------------------------------===// // VIA PadLock crypto instructions @@ -493,8 +515,15 @@ let Predicates = [HasFSGSBase, In64BitMode] in { //===----------------------------------------------------------------------===// // INVPCID Instruction def INVPCID32 : I<0x82, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2), - "invpcid {$src2, $src1|$src1, $src2}", []>, OpSize, T8, + "invpcid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8, Requires<[In32BitMode]>; def INVPCID64 : I<0x82, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2), - "invpcid 
{$src2, $src1|$src1, $src2}", []>, OpSize, T8, + "invpcid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8, Requires<[In64BitMode]>; + +//===----------------------------------------------------------------------===// +// SMAP Instruction +let Defs = [EFLAGS], Uses = [EFLAGS] in { + def CLAC : I<0x01, MRM_CA, (outs), (ins), "clac", []>, TB; + def STAC : I<0x01, MRM_CB, (outs), (ins), "stac", []>, TB; +} diff --git a/lib/Target/X86/X86InstrTSX.td b/lib/Target/X86/X86InstrTSX.td index a37a8cc..363a190 100644 --- a/lib/Target/X86/X86InstrTSX.td +++ b/lib/Target/X86/X86InstrTSX.td @@ -15,6 +15,9 @@ //===----------------------------------------------------------------------===// // TSX instructions +def X86xtest: SDNode<"X86ISD::XTEST", SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>, + [SDNPHasChain, SDNPSideEffect]>; + let usesCustomInserter = 1 in def XBEGIN : I<0, Pseudo, (outs GR32:$dst), (ins), "# XBEGIN", [(set GR32:$dst, (int_x86_xbegin))]>, @@ -27,6 +30,10 @@ def XBEGIN_4 : Ii32PCRel<0xc7, MRM_F8, (outs), (ins brtarget:$dst), def XEND : I<0x01, MRM_D5, (outs), (ins), "xend", [(int_x86_xend)]>, TB, Requires<[HasRTM]>; +let Defs = [EFLAGS] in +def XTEST : I<0x01, MRM_D6, (outs), (ins), + "xtest", [(set EFLAGS, (X86xtest))]>, TB, Requires<[HasTSX]>; + def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm), "xabort\t$imm", [(int_x86_xabort imm:$imm)]>, Requires<[HasRTM]>; diff --git a/lib/Target/X86/X86SchedHaswell.td b/lib/Target/X86/X86SchedHaswell.td new file mode 100644 index 0000000..84c9203 --- /dev/null +++ b/lib/Target/X86/X86SchedHaswell.td @@ -0,0 +1,126 @@ +//=- X86SchedHaswell.td - X86 Haswell Scheduling -------------*- tablegen -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the machine model for Haswell to support instruction +// scheduling and other instruction cost heuristics. +// +//===----------------------------------------------------------------------===// + +def HaswellModel : SchedMachineModel { + // All x86 instructions are modeled as a single micro-op, and HW can decode 4 + // instructions per cycle. + let IssueWidth = 4; + let MinLatency = 0; // 0 = Out-of-order execution. + let LoadLatency = 4; + let ILPWindow = 30; + let MispredictPenalty = 16; +} + +let SchedModel = HaswellModel in { + +// Haswell can issue micro-ops to 8 different ports in one cycle. + +// Ports 0, 1, 5, 6 and 7 handle all computation. +// Port 4 gets the data half of stores. Store data can be available later than +// the store address, but since we don't model the latency of stores, we can +// ignore that. +// Ports 2 and 3 are identical. They handle loads and the address half of +// stores. Port 7 can handle address calculations. +def HWPort0 : ProcResource<1>; +def HWPort1 : ProcResource<1>; +def HWPort2 : ProcResource<1>; +def HWPort3 : ProcResource<1>; +def HWPort4 : ProcResource<1>; +def HWPort5 : ProcResource<1>; +def HWPort6 : ProcResource<1>; +def HWPort7 : ProcResource<1>; + +// Many micro-ops are capable of issuing on multiple ports. 
+def HWPort23 : ProcResGroup<[HWPort2, HWPort3]>; +def HWPort237 : ProcResGroup<[HWPort2, HWPort3, HWPort7]>; +def HWPort05 : ProcResGroup<[HWPort0, HWPort5]>; +def HWPort056 : ProcResGroup<[HWPort0, HWPort5, HWPort6]>; +def HWPort15 : ProcResGroup<[HWPort1, HWPort5]>; +def HWPort015 : ProcResGroup<[HWPort0, HWPort1, HWPort5]>; +def HWPort0156: ProcResGroup<[HWPort0, HWPort1, HWPort5, HWPort6]>; + +// Integer division issued on port 0. +def HWDivider : ProcResource<1>; + +// Loads are 4 cycles, so ReadAfterLd registers needn't be available until 4 +// cycles after the memory operand. +def : ReadAdvance<ReadAfterLd, 4>; + +// Many SchedWrites are defined in pairs with and without a folded load. +// Instructions with folded loads are usually micro-fused, so they only appear +// as two micro-ops when queued in the reservation station. +// This multiclass defines the resource usage for variants with and without +// folded loads. +multiclass HWWriteResPair<X86FoldableSchedWrite SchedRW, + ProcResourceKind ExePort, + int Lat> { + // Register variant is using a single cycle on ExePort. + def : WriteRes<SchedRW, [ExePort]> { let Latency = Lat; } + + // Memory variant also uses a cycle on port 2/3 and adds 4 cycles to the + // latency. + def : WriteRes<SchedRW.Folded, [HWPort23, ExePort]> { + let Latency = !add(Lat, 4); + } +} + +// A folded store needs a cycle on port 4 for the store data, but it does not +// need an extra port 2/3 cycle to recompute the address. +def : WriteRes<WriteRMW, [HWPort4]>; + +def : WriteRes<WriteStore, [HWPort237, HWPort4]>; +def : WriteRes<WriteLoad, [HWPort23]> { let Latency = 4; } +def : WriteRes<WriteMove, [HWPort0156]>; +def : WriteRes<WriteZero, []>; + +defm : HWWriteResPair<WriteALU, HWPort0156, 1>; +defm : HWWriteResPair<WriteIMul, HWPort1, 3>; +defm : HWWriteResPair<WriteShift, HWPort056, 1>; +defm : HWWriteResPair<WriteJump, HWPort5, 1>; + +// This is for simple LEAs with one or two input operands. +// The complex ones can only execute on port 1, and they require two cycles on +// the port to read all inputs. We don't model that. +def : WriteRes<WriteLEA, [HWPort15]>; + +// This is quite rough, latency depends on the dividend. +def : WriteRes<WriteIDiv, [HWPort0, HWDivider]> { + let Latency = 25; + let ResourceCycles = [1, 10]; +} +def : WriteRes<WriteIDivLd, [HWPort23, HWPort0, HWDivider]> { + let Latency = 29; + let ResourceCycles = [1, 1, 10]; +} + +// Scalar and vector floating point. +defm : HWWriteResPair<WriteFAdd, HWPort1, 3>; +defm : HWWriteResPair<WriteFMul, HWPort0, 5>; +defm : HWWriteResPair<WriteFDiv, HWPort0, 12>; // 10-14 cycles. +defm : HWWriteResPair<WriteFRcp, HWPort0, 5>; +defm : HWWriteResPair<WriteFSqrt, HWPort0, 15>; +defm : HWWriteResPair<WriteCvtF2I, HWPort1, 3>; +defm : HWWriteResPair<WriteCvtI2F, HWPort1, 4>; +defm : HWWriteResPair<WriteCvtF2F, HWPort1, 3>; + +// Vector integer operations. 
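For illustration only (not part of this patch): each vector pair below is an instance of the HWWriteResPair multiclass defined above, so a pair such as WriteVecALU roughly expands to a register variant and a folded-load variant with the extra port 2/3 cycle and four added cycles of latency:
// Approximate expansion of: defm : HWWriteResPair<WriteVecALU, HWPort15, 1>;
def : WriteRes<WriteVecALU,   [HWPort15]>           { let Latency = 1; }
def : WriteRes<WriteVecALULd, [HWPort23, HWPort15]> { let Latency = 5; } // 1 + 4-cycle load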
+defm : HWWriteResPair<WriteVecShift, HWPort05, 1>; +defm : HWWriteResPair<WriteVecLogic, HWPort015, 1>; +defm : HWWriteResPair<WriteVecALU, HWPort15, 1>; +defm : HWWriteResPair<WriteVecIMul, HWPort0, 5>; +defm : HWWriteResPair<WriteShuffle, HWPort15, 1>; + +def : WriteRes<WriteSystem, [HWPort0156]> { let Latency = 100; } +def : WriteRes<WriteMicrocoded, [HWPort0156]> { let Latency = 100; } +} // SchedModel diff --git a/lib/Target/X86/X86SchedSandyBridge.td b/lib/Target/X86/X86SchedSandyBridge.td new file mode 100644 index 0000000..b36b3ad --- /dev/null +++ b/lib/Target/X86/X86SchedSandyBridge.td @@ -0,0 +1,122 @@ +//=- X86SchedSandyBridge.td - X86 Sandy Bridge Scheduling ----*- tablegen -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the machine model for Sandy Bridge to support instruction +// scheduling and other instruction cost heuristics. +// +//===----------------------------------------------------------------------===// + +def SandyBridgeModel : SchedMachineModel { + // All x86 instructions are modeled as a single micro-op, and SB can decode 4 + // instructions per cycle. + // FIXME: Identify instructions that aren't a single fused micro-op. + let IssueWidth = 4; + let MinLatency = 0; // 0 = Out-of-order execution. + let LoadLatency = 4; + let ILPWindow = 20; + let MispredictPenalty = 16; +} + +let SchedModel = SandyBridgeModel in { + +// Sandy Bridge can issue micro-ops to 6 different ports in one cycle. + +// Ports 0, 1, and 5 handle all computation. +def SBPort0 : ProcResource<1>; +def SBPort1 : ProcResource<1>; +def SBPort5 : ProcResource<1>; + +// Ports 2 and 3 are identical. They handle loads and the address half of +// stores. +def SBPort23 : ProcResource<2>; + +// Port 4 gets the data half of stores. Store data can be available later than +// the store address, but since we don't model the latency of stores, we can +// ignore that. +def SBPort4 : ProcResource<1>; + +// Many micro-ops are capable of issuing on multiple ports. +def SBPort05 : ProcResGroup<[SBPort0, SBPort5]>; +def SBPort15 : ProcResGroup<[SBPort1, SBPort5]>; +def SBPort015 : ProcResGroup<[SBPort0, SBPort1, SBPort5]>; + +// Integer division issued on port 0. +def SBDivider : ProcResource<1>; + +// Loads are 4 cycles, so ReadAfterLd registers needn't be available until 4 +// cycles after the memory operand. +def : ReadAdvance<ReadAfterLd, 4>; + +// Many SchedWrites are defined in pairs with and without a folded load. +// Instructions with folded loads are usually micro-fused, so they only appear +// as two micro-ops when queued in the reservation station. +// This multiclass defines the resource usage for variants with and without +// folded loads. +multiclass SBWriteResPair<X86FoldableSchedWrite SchedRW, + ProcResourceKind ExePort, + int Lat> { + // Register variant is using a single cycle on ExePort. + def : WriteRes<SchedRW, [ExePort]> { let Latency = Lat; } + + // Memory variant also uses a cycle on port 2/3 and adds 4 cycles to the + // latency. + def : WriteRes<SchedRW.Folded, [SBPort23, ExePort]> { + let Latency = !add(Lat, 4); + } +} + +// A folded store needs a cycle on port 4 for the store data, but it does not +// need an extra port 2/3 cycle to recompute the address. 
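// Memory-destination (read-modify-write) instructions therefore carry both a
// folded-load write and WriteRMW -- e.g. SchedRW = [WriteShiftLd, WriteRMW] on the
// memory forms of the shifts and rotates earlier in this patch -- so the port 2/3
// load cycle and the port 4 store-data cycle are both modeled.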
+def : WriteRes<WriteRMW, [SBPort4]>; + +def : WriteRes<WriteStore, [SBPort23, SBPort4]>; +def : WriteRes<WriteLoad, [SBPort23]> { let Latency = 4; } +def : WriteRes<WriteMove, [SBPort015]>; +def : WriteRes<WriteZero, []>; + +defm : SBWriteResPair<WriteALU, SBPort015, 1>; +defm : SBWriteResPair<WriteIMul, SBPort1, 3>; +defm : SBWriteResPair<WriteShift, SBPort05, 1>; +defm : SBWriteResPair<WriteJump, SBPort5, 1>; + +// This is for simple LEAs with one or two input operands. +// The complex ones can only execute on port 1, and they require two cycles on +// the port to read all inputs. We don't model that. +def : WriteRes<WriteLEA, [SBPort15]>; + +// This is quite rough, latency depends on the dividend. +def : WriteRes<WriteIDiv, [SBPort0, SBDivider]> { + let Latency = 25; + let ResourceCycles = [1, 10]; +} +def : WriteRes<WriteIDivLd, [SBPort23, SBPort0, SBDivider]> { + let Latency = 29; + let ResourceCycles = [1, 1, 10]; +} + +// Scalar and vector floating point. +defm : SBWriteResPair<WriteFAdd, SBPort1, 3>; +defm : SBWriteResPair<WriteFMul, SBPort0, 5>; +defm : SBWriteResPair<WriteFDiv, SBPort0, 12>; // 10-14 cycles. +defm : SBWriteResPair<WriteFRcp, SBPort0, 5>; +defm : SBWriteResPair<WriteFSqrt, SBPort0, 15>; +defm : SBWriteResPair<WriteCvtF2I, SBPort1, 3>; +defm : SBWriteResPair<WriteCvtI2F, SBPort1, 4>; +defm : SBWriteResPair<WriteCvtF2F, SBPort1, 3>; + +// Vector integer operations. +defm : SBWriteResPair<WriteVecShift, SBPort05, 1>; +defm : SBWriteResPair<WriteVecLogic, SBPort015, 1>; +defm : SBWriteResPair<WriteVecALU, SBPort15, 1>; +defm : SBWriteResPair<WriteVecIMul, SBPort0, 5>; +defm : SBWriteResPair<WriteShuffle, SBPort15, 1>; + +def : WriteRes<WriteSystem, [SBPort015]> { let Latency = 100; } +def : WriteRes<WriteMicrocoded, [SBPort015]> { let Latency = 100; } +} // SchedModel diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td index da0ca7d..9fbde88 100644 --- a/lib/Target/X86/X86Schedule.td +++ b/lib/Target/X86/X86Schedule.td @@ -53,6 +53,10 @@ def WriteLoad : SchedWrite; def WriteStore : SchedWrite; def WriteMove : SchedWrite; +// Idioms that clear a register, like xorps %xmm0, %xmm0. +// These can often bypass execution ports completely. +def WriteZero : SchedWrite; + // Branches don't produce values, so they have no latency, but they still // consume resources. Indirect branches can fold loads. defm WriteJump : X86SchedWritePair; @@ -63,6 +67,10 @@ defm WriteFMul : X86SchedWritePair; // Floating point multiplication. defm WriteFDiv : X86SchedWritePair; // Floating point division. defm WriteFSqrt : X86SchedWritePair; // Floating point square root. defm WriteFRcp : X86SchedWritePair; // Floating point reciprocal. +defm WriteFMA : X86SchedWritePair; // Fused Multiply Add. + +// FMA Scheduling helper class. +class FMASC { X86FoldableSchedWrite Sched = WriteFAdd; } // Vector integer operations. defm WriteVecALU : X86SchedWritePair; // Vector integer ALU op, no logicals. @@ -79,9 +87,14 @@ defm WriteCvtF2I : X86SchedWritePair; // Float -> Integer. defm WriteCvtI2F : X86SchedWritePair; // Integer -> Float. defm WriteCvtF2F : X86SchedWritePair; // Float -> Float size conversion. +// Catch-all for expensive system instructions. +def WriteSystem : SchedWrite; + +// Old microcoded instructions that nobody uses.
+def WriteMicrocoded : SchedWrite; + //===----------------------------------------------------------------------===// // Instruction Itinerary classes used for X86 -def IIC_DEFAULT : InstrItinClass; def IIC_ALU_MEM : InstrItinClass; def IIC_ALU_NONMEM : InstrItinClass; def IIC_LEA : InstrItinClass; @@ -556,3 +569,5 @@ def GenericModel : SchedMachineModel { } include "X86ScheduleAtom.td" +include "X86SchedSandyBridge.td" +include "X86SchedHaswell.td" diff --git a/lib/Target/X86/X86ScheduleAtom.td b/lib/Target/X86/X86ScheduleAtom.td index 1e5f2d6..cce8f1b 100644 --- a/lib/Target/X86/X86ScheduleAtom.td +++ b/lib/Target/X86/X86ScheduleAtom.td @@ -33,7 +33,6 @@ def AtomItineraries : ProcessorItineraries< // InstrItinData<class, [InstrStage<N, [P0], 0>, InstrStage<N, [P1]>] >, // // Default is 1 cycle, port0 or port1 - InstrItinData<IIC_DEFAULT, [InstrStage<1, [Port0, Port1]>] >, InstrItinData<IIC_ALU_MEM, [InstrStage<1, [Port0]>] >, InstrItinData<IIC_ALU_NONMEM, [InstrStage<1, [Port0, Port1]>] >, InstrItinData<IIC_LEA, [InstrStage<1, [Port1]>] >, diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 0f2c008..448d2e6 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -37,8 +37,7 @@ using namespace llvm; /// ClassifyBlockAddressReference - Classify a blockaddress reference for the /// current subtarget according to how we should reference it in a non-pcrel /// context. -unsigned char X86Subtarget:: -ClassifyBlockAddressReference() const { +unsigned char X86Subtarget::ClassifyBlockAddressReference() const { if (isPICStyleGOT()) // 32-bit ELF targets. return X86II::MO_GOTOFF; @@ -283,6 +282,10 @@ void X86Subtarget::AutoDetectSubtargetFeatures() { HasLZCNT = true; ToggleFeature(X86::FeatureLZCNT); } + if (IsIntel && ((ECX >> 8) & 0x1)) { + HasPRFCHW = true; + ToggleFeature(X86::FeaturePRFCHW); + } if (IsAMD) { if ((ECX >> 6) & 0x1) { HasSSE4A = true; @@ -310,6 +313,10 @@ void X86Subtarget::AutoDetectSubtargetFeatures() { HasBMI = true; ToggleFeature(X86::FeatureBMI); } + if ((EBX >> 4) & 0x1) { + HasHLE = true; + ToggleFeature(X86::FeatureHLE); + } if (IsIntel && ((EBX >> 5) & 0x1)) { X86SSELevel = AVX2; ToggleFeature(X86::FeatureAVX2); @@ -322,6 +329,14 @@ void X86Subtarget::AutoDetectSubtargetFeatures() { HasRTM = true; ToggleFeature(X86::FeatureRTM); } + if (IsIntel && ((EBX >> 19) & 0x1)) { + HasADX = true; + ToggleFeature(X86::FeatureADX); + } + if (IsIntel && ((EBX >> 18) & 0x1)) { + HasRDSEED = true; + ToggleFeature(X86::FeatureRDSEED); + } } } } @@ -439,7 +454,10 @@ void X86Subtarget::initializeEnvironment() { HasBMI = false; HasBMI2 = false; HasRTM = false; + HasHLE = false; HasADX = false; + HasPRFCHW = false; + HasRDSEED = false; IsBTMemSlow = false; IsUAMemFast = false; HasVectorUAMem = false; @@ -448,6 +466,8 @@ void X86Subtarget::initializeEnvironment() { HasSlowDivide = false; PostRAScheduler = false; PadShortFunctions = false; + CallRegIndirect = false; + LEAUsesAG = false; stackAlignment = 4; // FIXME: this is a known good value for Yonah. How about others? MaxInlineSizeThreshold = 128; diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h index e97da4b..66832b9 100644 --- a/lib/Target/X86/X86Subtarget.h +++ b/lib/Target/X86/X86Subtarget.h @@ -121,9 +121,18 @@ protected: /// HasRTM - Processor has RTM instructions. bool HasRTM; + /// HasHLE - Processor has HLE. + bool HasHLE; + /// HasADX - Processor has ADX instructions. bool HasADX; + /// HasPRFCHW - Processor has PRFCHW instructions. 
+ bool HasPRFCHW; + + /// HasRDSEED - Processor has RDSEED instructions. + bool HasRDSEED; + /// IsBTMemSlow - True if BT (bit test) of memory instructions are slow. bool IsBTMemSlow; @@ -153,6 +162,13 @@ protected: /// a stall when returning too early. bool PadShortFunctions; + /// CallRegIndirect - True if the Calls with memory reference should be converted + /// to a register-based indirect call. + bool CallRegIndirect; + /// LEAUsesAG - True if the LEA instruction inputs have to be ready at + /// address generation (AG) time. + bool LEAUsesAG; + /// stackAlignment - The minimum alignment known to hold of the stack frame on /// entry to the function and which must be maintained by every function. unsigned stackAlignment; @@ -253,7 +269,10 @@ public: bool hasBMI() const { return HasBMI; } bool hasBMI2() const { return HasBMI2; } bool hasRTM() const { return HasRTM; } + bool hasHLE() const { return HasHLE; } bool hasADX() const { return HasADX; } + bool hasPRFCHW() const { return HasPRFCHW; } + bool hasRDSEED() const { return HasRDSEED; } bool isBTMemSlow() const { return IsBTMemSlow; } bool isUnalignedMemAccessFast() const { return IsUAMemFast; } bool hasVectorUAMem() const { return HasVectorUAMem; } @@ -261,6 +280,8 @@ public: bool useLeaForSP() const { return UseLeaForSP; } bool hasSlowDivide() const { return HasSlowDivide; } bool padShortFunctions() const { return PadShortFunctions; } + bool callRegIndirect() const { return CallRegIndirect; } + bool LEAusesAG() const { return LEAUsesAG; } bool isAtom() const { return X86ProcFamily == IntelAtom; } diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index 8aa58a2..00fa47f 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -215,6 +215,11 @@ bool X86PassConfig::addPreEmitPass() { addPass(createX86PadShortFunctions()); ShouldPrint = true; } + if (getOptLevel() != CodeGenOpt::None && + getX86Subtarget().LEAusesAG()){ + addPass(createX86FixupLEAs()); + ShouldPrint = true; + } return ShouldPrint; } diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp index be2a997..eba9d78 100644 --- a/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/lib/Target/X86/X86TargetTransformInfo.cpp @@ -86,7 +86,9 @@ public: virtual unsigned getNumberOfRegisters(bool Vector) const; virtual unsigned getRegisterBitWidth(bool Vector) const; virtual unsigned getMaximumUnrollFactor() const; - virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const; + virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, + OperandValueKind, + OperandValueKind) const; virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index, Type *SubTp) const; virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, @@ -162,13 +164,109 @@ unsigned X86TTI::getMaximumUnrollFactor() const { return 2; } -unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const { +unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, + OperandValueKind Op1Info, + OperandValueKind Op2Info) const { // Legalize the type. std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty); int ISD = TLI->InstructionOpcodeToISD(Opcode); assert(ISD && "Invalid opcode"); + static const CostTblEntry<MVT> AVX2CostTable[] = { + // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to + // customize them to detect the cases where shift amount is a scalar one. 
+ { ISD::SHL, MVT::v4i32, 1 }, + { ISD::SRL, MVT::v4i32, 1 }, + { ISD::SRA, MVT::v4i32, 1 }, + { ISD::SHL, MVT::v8i32, 1 }, + { ISD::SRL, MVT::v8i32, 1 }, + { ISD::SRA, MVT::v8i32, 1 }, + { ISD::SHL, MVT::v2i64, 1 }, + { ISD::SRL, MVT::v2i64, 1 }, + { ISD::SHL, MVT::v4i64, 1 }, + { ISD::SRL, MVT::v4i64, 1 }, + + { ISD::SHL, MVT::v32i8, 42 }, // cmpeqb sequence. + { ISD::SHL, MVT::v16i16, 16*10 }, // Scalarized. + + { ISD::SRL, MVT::v32i8, 32*10 }, // Scalarized. + { ISD::SRL, MVT::v16i16, 8*10 }, // Scalarized. + + { ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized. + { ISD::SRA, MVT::v16i16, 16*10 }, // Scalarized. + { ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized. + }; + + // Look for AVX2 lowering tricks. + if (ST->hasAVX2()) { + int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable), + ISD, LT.second); + if (Idx != -1) + return LT.first * AVX2CostTable[Idx].Cost; + } + + static const CostTblEntry<MVT> SSE2UniformConstCostTable[] = { + // We don't correctly identify costs of casts because they are marked as + // custom. + // Constant splats are cheaper for the following instructions. + { ISD::SHL, MVT::v16i8, 1 }, // psllw. + { ISD::SHL, MVT::v8i16, 1 }, // psllw. + { ISD::SHL, MVT::v4i32, 1 }, // pslld + { ISD::SHL, MVT::v2i64, 1 }, // psllq. + + { ISD::SRL, MVT::v16i8, 1 }, // psrlw. + { ISD::SRL, MVT::v8i16, 1 }, // psrlw. + { ISD::SRL, MVT::v4i32, 1 }, // psrld. + { ISD::SRL, MVT::v2i64, 1 }, // psrlq. + + { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. + { ISD::SRA, MVT::v8i16, 1 }, // psraw. + { ISD::SRA, MVT::v4i32, 1 }, // psrad. + }; + + if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && + ST->hasSSE2()) { + int Idx = CostTableLookup<MVT>(SSE2UniformConstCostTable, + array_lengthof(SSE2UniformConstCostTable), + ISD, LT.second); + if (Idx != -1) + return LT.first * SSE2UniformConstCostTable[Idx].Cost; + } + + + static const CostTblEntry<MVT> SSE2CostTable[] = { + // We don't correctly identify costs of casts because they are marked as + // custom. + // For some cases, where the shift amount is a scalar we would be able + // to generate better code. Unfortunately, when this is the case the value + // (the splat) will get hoisted out of the loop, thereby making it invisible + // to ISel. The cost model must return worst case assumptions because it is + // used for vectorization and we don't want to make vectorized code worse + // than scalar code. + { ISD::SHL, MVT::v16i8, 30 }, // cmpeqb sequence. + { ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized. + { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul. + { ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized. + + { ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized. + { ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized. + { ISD::SRL, MVT::v4i32, 4*10 }, // Scalarized. + { ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized. + + { ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized. + { ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized. + { ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized. + { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized. + }; + + if (ST->hasSSE2()) { + int Idx = CostTableLookup<MVT>(SSE2CostTable, array_lengthof(SSE2CostTable), + ISD, LT.second); + if (Idx != -1) + return LT.first * SSE2CostTable[Idx].Cost; + } + static const CostTblEntry<MVT> AVX1CostTable[] = { // We don't have to scalarize unsupported ops. We can issue two half-sized // operations and we only need to extract the upper YMM half. 
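A minimal sketch of how a client consumes these tables (illustrative only, not part of this commit; it assumes the LLVM 3.3-era TargetTransformInfo interfaces that this patch extends, and the header paths are assumptions): the opcode and legalized type select a table row, and the row's cost is scaled by LT.first, the number of legalized parts.
#include "llvm/Analysis/TargetTransformInfo.h" // assumed header location for this revision
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Cost of a v4i32 shift-left as a vectorizer would query it. On an SSE2-only
// subtarget, a uniform-constant shift amount hits SSE2UniformConstCostTable
// (cost 1, a single pslld), while a variable amount falls through to the
// SSE2CostTable entry (cost 2*5, lowered via multiplies).
static unsigned shlCostV4I32(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  Type *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 4);
  return TTI.getArithmeticInstrCost(Instruction::Shl, VecTy,
                                    TargetTransformInfo::OK_AnyValue,
                                    TargetTransformInfo::OK_UniformConstantValue);
}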
@@ -213,7 +311,8 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const { return 6; // Fallback to the default implementation. - return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty); + return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info, + Op2Info); } unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index, @@ -235,9 +334,44 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const { int ISD = TLI->InstructionOpcodeToISD(Opcode); assert(ISD && "Invalid opcode"); + std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src); + std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst); + + static const TypeConversionCostTblEntry<MVT> SSE2ConvTbl[] = { + // These are somewhat magic numbers justified by looking at the output of + // Intel's IACA, running some kernels and making sure when we take + // legalization into account the throughput will be overestimated. + { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, + { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, + { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, + { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, + { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, + { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, + { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, + { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, + // There are faster sequences for float conversions. + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, + }; + + if (ST->hasSSE2() && !ST->hasAVX()) { + int Idx = ConvertCostTableLookup<MVT>(SSE2ConvTbl, + array_lengthof(SSE2ConvTbl), + ISD, LTDest.second, LTSrc.second); + if (Idx != -1) + return LTSrc.first * SSE2ConvTbl[Idx].Cost; + } + EVT SrcTy = TLI->getValueType(Src); EVT DstTy = TLI->getValueType(Dst); + // The function getSimpleVT only handles simple value types. 
if (!SrcTy.isSimple() || !DstTy.isSimple()) return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src); @@ -248,17 +382,40 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const { { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1 }, - { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 }, - { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 }, - { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 }, - { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 }, + + { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, + { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, + { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, + { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, + { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, + { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, + { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, + { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, + + { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, + { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, + { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, + { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 }, + { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, + { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, + { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, + { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, + { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 1 }, { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 }, { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 6 }, { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 9 }, { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 8 }, - { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, - { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 8 }, + { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 }, + { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 }, { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 }, }; diff --git a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp index 7e7d396..7b99967 100644 --- a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp +++ b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp @@ -89,6 +89,11 @@ static DecodeStatus DecodeGRRegsRegisterClass(MCInst &Inst, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeRRegsRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + static DecodeStatus DecodeBitpOperand(MCInst &Inst, unsigned Val, uint64_t Address, const void *Decoder); @@ -214,6 +219,18 @@ static DecodeStatus DecodeGRRegsRegisterClass(MCInst &Inst, return MCDisassembler::Success; } +static DecodeStatus DecodeRRegsRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) +{ + if (RegNo > 15) + return MCDisassembler::Fail; + unsigned Reg = getReg(Decoder, XCore::RRegsRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeBitpOperand(MCInst &Inst, unsigned Val, uint64_t Address, const void *Decoder) { if (Val > 11) diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp index 019c457..beeb07f 100644 --- 
a/lib/Target/XCore/XCoreFrameLowering.cpp +++ b/lib/Target/XCore/XCoreFrameLowering.cpp @@ -261,7 +261,7 @@ void XCoreFrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize); MBB.erase(MBBI); } else { - int Opcode = (isU6) ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs; + int Opcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6; BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(FrameSize); } } @@ -371,7 +371,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, .addImm(Amount); } else { assert(Old->getOpcode() == XCore::ADJCALLSTACKUP); - int Opcode = isU6 ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs; + int Opcode = isU6 ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6; New=BuildMI(MF, Old->getDebugLoc(), TII.get(Opcode), XCore::SP) .addImm(Amount); } @@ -409,7 +409,7 @@ XCoreFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, } if (RegInfo->requiresRegisterScavenging(MF)) { // Reserve a slot close to SP or frame pointer. - RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), + RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false)); } diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp index fbf86c5..d16811c 100644 --- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp +++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp @@ -185,36 +185,36 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) }; return CurDAG->getMachineNode(XCore::LADD_l5r, dl, MVT::i32, MVT::i32, - Ops, 3); + Ops); } case XCoreISD::LSUB: { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) }; return CurDAG->getMachineNode(XCore::LSUB_l5r, dl, MVT::i32, MVT::i32, - Ops, 3); + Ops); } case XCoreISD::MACCU: { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3) }; return CurDAG->getMachineNode(XCore::MACCU_l4r, dl, MVT::i32, MVT::i32, - Ops, 4); + Ops); } case XCoreISD::MACCS: { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3) }; return CurDAG->getMachineNode(XCore::MACCS_l4r, dl, MVT::i32, MVT::i32, - Ops, 4); + Ops); } case XCoreISD::LMUL: { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3) }; return CurDAG->getMachineNode(XCore::LMUL_l6r, dl, MVT::i32, MVT::i32, - Ops, 4); + Ops); } case XCoreISD::CRC8: { SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) }; return CurDAG->getMachineNode(XCore::CRC8_l4r, dl, MVT::i32, MVT::i32, - Ops, 3); + Ops); } case ISD::BRIND: if (SDNode *ResNode = SelectBRIND(N)) diff --git a/lib/Target/XCore/XCoreInstrInfo.td b/lib/Target/XCore/XCoreInstrInfo.td index e140ef2..03653cb 100644 --- a/lib/Target/XCore/XCoreInstrInfo.td +++ b/lib/Target/XCore/XCoreInstrInfo.td @@ -281,9 +281,9 @@ multiclass FRU6_LRU6_backwards_branch<bits<6> opc, string OpcStr> { } multiclass FRU6_LRU6_cp<bits<6> opc, string OpcStr> { - def _ru6: _FRU6<opc, (outs GRRegs:$a), (ins i32imm:$b), + def _ru6: _FRU6<opc, (outs RRegs:$a), (ins i32imm:$b), !strconcat(OpcStr, " $a, cp[$b]"), []>; - def _lru6: _FLRU6<opc, (outs GRRegs:$a), (ins i32imm:$b), + def _lru6: _FLRU6<opc, (outs RRegs:$a), (ins i32imm:$b), !strconcat(OpcStr, " $a, cp[$b]"), []>; } @@ -515,29 +515,29 @@ def LMUL_l6r : _FL6R< //let Uses = [DP] in ... 
let neverHasSideEffects = 1, isReMaterializable = 1 in -def LDAWDP_ru6: _FRU6<0b011000, (outs GRRegs:$a), (ins MEMii:$b), +def LDAWDP_ru6: _FRU6<0b011000, (outs RRegs:$a), (ins MEMii:$b), "ldaw $a, dp[$b]", []>; let isReMaterializable = 1 in -def LDAWDP_lru6: _FLRU6<0b011000, (outs GRRegs:$a), (ins MEMii:$b), +def LDAWDP_lru6: _FLRU6<0b011000, (outs RRegs:$a), (ins MEMii:$b), "ldaw $a, dp[$b]", - [(set GRRegs:$a, ADDRdpii:$b)]>; + [(set RRegs:$a, ADDRdpii:$b)]>; let mayLoad=1 in -def LDWDP_ru6: _FRU6<0b010110, (outs GRRegs:$a), (ins MEMii:$b), +def LDWDP_ru6: _FRU6<0b010110, (outs RRegs:$a), (ins MEMii:$b), "ldw $a, dp[$b]", []>; -def LDWDP_lru6: _FLRU6<0b010110, (outs GRRegs:$a), (ins MEMii:$b), +def LDWDP_lru6: _FLRU6<0b010110, (outs RRegs:$a), (ins MEMii:$b), "ldw $a, dp[$b]", - [(set GRRegs:$a, (load ADDRdpii:$b))]>; + [(set RRegs:$a, (load ADDRdpii:$b))]>; let mayStore=1 in -def STWDP_ru6 : _FRU6<0b010100, (outs), (ins GRRegs:$a, MEMii:$b), +def STWDP_ru6 : _FRU6<0b010100, (outs), (ins RRegs:$a, MEMii:$b), "stw $a, dp[$b]", []>; -def STWDP_lru6 : _FLRU6<0b010100, (outs), (ins GRRegs:$a, MEMii:$b), +def STWDP_lru6 : _FLRU6<0b010100, (outs), (ins RRegs:$a, MEMii:$b), "stw $a, dp[$b]", - [(store GRRegs:$a, ADDRdpii:$b)]>; + [(store RRegs:$a, ADDRdpii:$b)]>; //let Uses = [CP] in .. let mayLoad = 1, isReMaterializable = 1, neverHasSideEffects = 1 in @@ -545,46 +545,38 @@ defm LDWCP : FRU6_LRU6_cp<0b011011, "ldw">; let Uses = [SP] in { let mayStore=1 in { -def STWSP_ru6 : _FRU6<0b010101, (outs), (ins GRRegs:$a, i32imm:$b), +def STWSP_ru6 : _FRU6<0b010101, (outs), (ins RRegs:$a, i32imm:$b), "stw $a, sp[$b]", - [(XCoreStwsp GRRegs:$a, immU6:$b)]>; + [(XCoreStwsp RRegs:$a, immU6:$b)]>; -def STWSP_lru6 : _FLRU6<0b010101, (outs), (ins GRRegs:$a, i32imm:$b), +def STWSP_lru6 : _FLRU6<0b010101, (outs), (ins RRegs:$a, i32imm:$b), "stw $a, sp[$b]", - [(XCoreStwsp GRRegs:$a, immU16:$b)]>; + [(XCoreStwsp RRegs:$a, immU16:$b)]>; } let mayLoad=1 in { -def LDWSP_ru6 : _FRU6<0b010111, (outs GRRegs:$a), (ins i32imm:$b), +def LDWSP_ru6 : _FRU6<0b010111, (outs RRegs:$a), (ins i32imm:$b), "ldw $a, sp[$b]", []>; -def LDWSP_lru6 : _FLRU6<0b010111, (outs GRRegs:$a), (ins i32imm:$b), +def LDWSP_lru6 : _FLRU6<0b010111, (outs RRegs:$a), (ins i32imm:$b), "ldw $a, sp[$b]", []>; } let neverHasSideEffects = 1 in { -def LDAWSP_ru6 : _FRU6<0b011001, (outs GRRegs:$a), (ins i32imm:$b), +def LDAWSP_ru6 : _FRU6<0b011001, (outs RRegs:$a), (ins i32imm:$b), "ldaw $a, sp[$b]", []>; -def LDAWSP_lru6 : _FLRU6<0b011001, (outs GRRegs:$a), (ins i32imm:$b), +def LDAWSP_lru6 : _FLRU6<0b011001, (outs RRegs:$a), (ins i32imm:$b), "ldaw $a, sp[$b]", []>; - -let isCodeGenOnly = 1 in -def LDAWSP_ru6_RRegs : _FRU6<0b011001, (outs RRegs:$a), (ins i32imm:$b), - "ldaw $a, sp[$b]", []>; - -let isCodeGenOnly = 1 in -def LDAWSP_lru6_RRegs : _FLRU6<0b011001, (outs RRegs:$a), (ins i32imm:$b), - "ldaw $a, sp[$b]", []>; } } let isReMaterializable = 1 in { -def LDC_ru6 : _FRU6<0b011010, (outs GRRegs:$a), (ins i32imm:$b), - "ldc $a, $b", [(set GRRegs:$a, immU6:$b)]>; +def LDC_ru6 : _FRU6<0b011010, (outs RRegs:$a), (ins i32imm:$b), + "ldc $a, $b", [(set RRegs:$a, immU6:$b)]>; -def LDC_lru6 : _FLRU6<0b011010, (outs GRRegs:$a), (ins i32imm:$b), - "ldc $a, $b", [(set GRRegs:$a, immU16:$b)]>; +def LDC_lru6 : _FLRU6<0b011010, (outs RRegs:$a), (ins i32imm:$b), + "ldc $a, $b", [(set RRegs:$a, immU16:$b)]>; } def SETC_ru6 : _FRU6<0b111010, (outs), (ins GRRegs:$a, i32imm:$b), @@ -932,6 +924,9 @@ def BR_JT32 : PseudoInstXCore<(outs), (ins InlineJT32:$t, GRRegs:$i), 
"bru $i\n$t", [(XCoreBR_JT32 tjumptable:$t, GRRegs:$i)]>; +let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in +def BRU_1r : _F1R<0b001010, (outs), (ins GRRegs:$a), "bru $a", []>; + let Defs=[SP], neverHasSideEffects=1 in def SETSP_1r : _F1R<0b001011, (outs), (ins GRRegs:$a), "set sp, $a", []>; diff --git a/lib/Target/XCore/XCoreRegisterInfo.td b/lib/Target/XCore/XCoreRegisterInfo.td index 4c771e9..6694b28 100644 --- a/lib/Target/XCore/XCoreRegisterInfo.td +++ b/lib/Target/XCore/XCoreRegisterInfo.td @@ -51,6 +51,9 @@ def GRRegs : RegisterClass<"XCore", [i32], 32, R11)>; // Reserved -def RRegs : RegisterClass<"XCore", [i32], 32, (add CP, DP, SP, LR)> { +def RRegs : RegisterClass<"XCore", [i32], 32, + (add R0, R1, R2, R3, + R4, R5, R6, R7, R8, R9, R10, + R11, CP, DP, SP, LR)> { let isAllocatable = 0; } diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp index 8336d3a..b63495b 100644 --- a/lib/Transforms/IPO/ConstantMerge.cpp +++ b/lib/Transforms/IPO/ConstantMerge.cpp @@ -66,9 +66,8 @@ ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); } static void FindUsedValues(GlobalVariable *LLVMUsed, SmallPtrSet<const GlobalValue*, 8> &UsedValues) { if (LLVMUsed == 0) return; - ConstantArray *Inits = dyn_cast<ConstantArray>(LLVMUsed->getInitializer()); - if (Inits == 0) return; - + ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer()); + for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) if (GlobalValue *GV = dyn_cast<GlobalValue>(Inits->getOperand(i)->stripPointerCasts())) diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp index a75212a..bc5109b 100644 --- a/lib/Transforms/IPO/FunctionAttrs.cpp +++ b/lib/Transforms/IPO/FunctionAttrs.cpp @@ -1,4 +1,4 @@ -//===- FunctionAttrs.cpp - Pass which marks functions readnone or readonly ===// +//===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===// // // The LLVM Compiler Infrastructure // @@ -14,6 +14,8 @@ // to the function does not create any copies of the pointer value that // outlive the call. This more or less means that the pointer is only // dereferenced, and not returned from the function or stored in a global. +// Finally, well-known library call declarations are marked with all +// attributes that are consistent with the function's standard definition. // This pass is implemented as a bottom-up traversal of the call-graph. // //===----------------------------------------------------------------------===// @@ -32,12 +34,14 @@ #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/InstIterator.h" +#include "llvm/Target/TargetLibraryInfo.h" using namespace llvm; STATISTIC(NumReadNone, "Number of functions marked readnone"); STATISTIC(NumReadOnly, "Number of functions marked readonly"); STATISTIC(NumNoCapture, "Number of arguments marked nocapture"); STATISTIC(NumNoAlias, "Number of function returns marked noalias"); +STATISTIC(NumAnnotated, "Number of attributes added to library functions"); namespace { struct FunctionAttrs : public CallGraphSCCPass { @@ -62,14 +66,63 @@ namespace { // AddNoAliasAttrs - Deduce noalias attributes for the SCC. bool AddNoAliasAttrs(const CallGraphSCC &SCC); + // Utility methods used by inferPrototypeAttributes to add attributes + // and maintain annotation statistics. 
+ + void setDoesNotAccessMemory(Function &F) { + if (!F.doesNotAccessMemory()) { + F.setDoesNotAccessMemory(); + ++NumAnnotated; + } + } + + void setOnlyReadsMemory(Function &F) { + if (!F.onlyReadsMemory()) { + F.setOnlyReadsMemory(); + ++NumAnnotated; + } + } + + void setDoesNotThrow(Function &F) { + if (!F.doesNotThrow()) { + F.setDoesNotThrow(); + ++NumAnnotated; + } + } + + void setDoesNotCapture(Function &F, unsigned n) { + if (!F.doesNotCapture(n)) { + F.setDoesNotCapture(n); + ++NumAnnotated; + } + } + + void setDoesNotAlias(Function &F, unsigned n) { + if (!F.doesNotAlias(n)) { + F.setDoesNotAlias(n); + ++NumAnnotated; + } + } + + // inferPrototypeAttributes - Analyze the name and prototype of the + // given function and set any applicable attributes. Returns true + // if any attributes were set and false otherwise. + bool inferPrototypeAttributes(Function &F); + + // annotateLibraryCalls - Adds attributes to well-known standard library + // call declarations. + bool annotateLibraryCalls(const CallGraphSCC &SCC); + virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); AU.addRequired<AliasAnalysis>(); + AU.addRequired<TargetLibraryInfo>(); CallGraphSCCPass::getAnalysisUsage(AU); } private: AliasAnalysis *AA; + TargetLibraryInfo *TLI; }; } @@ -77,6 +130,7 @@ char FunctionAttrs::ID = 0; INITIALIZE_PASS_BEGIN(FunctionAttrs, "functionattrs", "Deduce function attributes", false, false) INITIALIZE_AG_DEPENDENCY(CallGraph) +INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo) INITIALIZE_PASS_END(FunctionAttrs, "functionattrs", "Deduce function attributes", false, false) @@ -598,10 +652,693 @@ bool FunctionAttrs::AddNoAliasAttrs(const CallGraphSCC &SCC) { return MadeChange; } +/// inferPrototypeAttributes - Analyze the name and prototype of the +/// given function and set any applicable attributes. Returns true +/// if any attributes were set and false otherwise. 
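+/// For example, a matching declaration of strlen is marked readonly and nounwind, +/// and its string argument is marked nocapture (the LibFunc::strlen case below); +/// prototypes that do not match the expected signature are left untouched.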
+bool FunctionAttrs::inferPrototypeAttributes(Function &F) { + FunctionType *FTy = F.getFunctionType(); + LibFunc::Func TheLibFunc; + if (!(TLI->getLibFunc(F.getName(), TheLibFunc) && TLI->has(TheLibFunc))) + return false; + + switch (TheLibFunc) { + case LibFunc::strlen: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setOnlyReadsMemory(F); + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::strchr: + case LibFunc::strrchr: + if (FTy->getNumParams() != 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isIntegerTy()) + return false; + setOnlyReadsMemory(F); + setDoesNotThrow(F); + break; + case LibFunc::strcpy: + case LibFunc::stpcpy: + case LibFunc::strcat: + case LibFunc::strtol: + case LibFunc::strtod: + case LibFunc::strtof: + case LibFunc::strtoul: + case LibFunc::strtoll: + case LibFunc::strtold: + case LibFunc::strncat: + case LibFunc::strncpy: + case LibFunc::stpncpy: + case LibFunc::strtoull: + if (FTy->getNumParams() < 2 || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::strxfrm: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::strcmp: + case LibFunc::strspn: + case LibFunc::strncmp: + case LibFunc::strcspn: + case LibFunc::strcoll: + case LibFunc::strcasecmp: + case LibFunc::strncasecmp: + if (FTy->getNumParams() < 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setOnlyReadsMemory(F); + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::strstr: + case LibFunc::strpbrk: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setOnlyReadsMemory(F); + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::strtok: + case LibFunc::strtok_r: + if (FTy->getNumParams() < 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::scanf: + case LibFunc::setbuf: + case LibFunc::setvbuf: + if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::strdup: + case LibFunc::strndup: + if (FTy->getNumParams() < 1 || !FTy->getReturnType()->isPointerTy() || + !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 1); + break; + case LibFunc::stat: + case LibFunc::sscanf: + case LibFunc::sprintf: + case LibFunc::statvfs: + if (FTy->getNumParams() < 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::snprintf: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(2)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 3); + break; + case LibFunc::setitimer: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(1)->isPointerTy() || + !FTy->getParamType(2)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + setDoesNotCapture(F, 3); + break; + case LibFunc::system: + if (FTy->getNumParams() 
!= 1 || + !FTy->getParamType(0)->isPointerTy()) + return false; + // May throw; "system" is a valid pthread cancellation point. + setDoesNotCapture(F, 1); + break; + case LibFunc::malloc: + if (FTy->getNumParams() != 1 || + !FTy->getReturnType()->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + break; + case LibFunc::memcmp: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setOnlyReadsMemory(F); + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::memchr: + case LibFunc::memrchr: + if (FTy->getNumParams() != 3) + return false; + setOnlyReadsMemory(F); + setDoesNotThrow(F); + break; + case LibFunc::modf: + case LibFunc::modff: + case LibFunc::modfl: + case LibFunc::memcpy: + case LibFunc::memccpy: + case LibFunc::memmove: + if (FTy->getNumParams() < 2 || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::memalign: + if (!FTy->getReturnType()->isPointerTy()) + return false; + setDoesNotAlias(F, 0); + break; + case LibFunc::mkdir: + case LibFunc::mktime: + if (FTy->getNumParams() == 0 || + !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::realloc: + if (FTy->getNumParams() != 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getReturnType()->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 1); + break; + case LibFunc::read: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(1)->isPointerTy()) + return false; + // May throw; "read" is a valid pthread cancellation point. + setDoesNotCapture(F, 2); + break; + case LibFunc::rmdir: + case LibFunc::rewind: + case LibFunc::remove: + case LibFunc::realpath: + if (FTy->getNumParams() < 1 || + !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::rename: + case LibFunc::readlink: + if (FTy->getNumParams() < 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::write: + if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy()) + return false; + // May throw; "write" is a valid pthread cancellation point. 
+ setDoesNotCapture(F, 2); + break; + case LibFunc::bcopy: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::bcmp: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setOnlyReadsMemory(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::bzero: + if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::calloc: + if (FTy->getNumParams() != 2 || + !FTy->getReturnType()->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + break; + case LibFunc::chmod: + case LibFunc::chown: + case LibFunc::ctermid: + case LibFunc::clearerr: + case LibFunc::closedir: + if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::atoi: + case LibFunc::atol: + case LibFunc::atof: + case LibFunc::atoll: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setOnlyReadsMemory(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::access: + if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::fopen: + if (FTy->getNumParams() != 2 || + !FTy->getReturnType()->isPointerTy() || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::fdopen: + if (FTy->getNumParams() != 2 || + !FTy->getReturnType()->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 2); + break; + case LibFunc::feof: + case LibFunc::free: + case LibFunc::fseek: + case LibFunc::ftell: + case LibFunc::fgetc: + case LibFunc::fseeko: + case LibFunc::ftello: + case LibFunc::fileno: + case LibFunc::fflush: + case LibFunc::fclose: + case LibFunc::fsetpos: + case LibFunc::flockfile: + case LibFunc::funlockfile: + case LibFunc::ftrylockfile: + if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::ferror: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setOnlyReadsMemory(F); + break; + case LibFunc::fputc: + case LibFunc::fstat: + case LibFunc::frexp: + case LibFunc::frexpf: + case LibFunc::frexpl: + case LibFunc::fstatvfs: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::fgets: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(2)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 3); + break; + case LibFunc::fread: + case LibFunc::fwrite: + if (FTy->getNumParams() != 4 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(3)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F,
4); + break; + case LibFunc::fputs: + case LibFunc::fscanf: + case LibFunc::fprintf: + case LibFunc::fgetpos: + if (FTy->getNumParams() < 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::getc: + case LibFunc::getlogin_r: + case LibFunc::getc_unlocked: + if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::getenv: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setOnlyReadsMemory(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::gets: + case LibFunc::getchar: + setDoesNotThrow(F); + break; + case LibFunc::getitimer: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::getpwnam: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::ungetc: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::uname: + case LibFunc::unlink: + case LibFunc::unsetenv: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::utime: + case LibFunc::utimes: + if (FTy->getNumParams() != 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::putc: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::puts: + case LibFunc::printf: + case LibFunc::perror: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::pread: + case LibFunc::pwrite: + if (FTy->getNumParams() != 4 || !FTy->getParamType(1)->isPointerTy()) + return false; + // May throw; these are valid pthread cancellation points.
+ setDoesNotCapture(F, 2); + break; + case LibFunc::putchar: + setDoesNotThrow(F); + break; + case LibFunc::popen: + if (FTy->getNumParams() != 2 || + !FTy->getReturnType()->isPointerTy() || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::pclose: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::vscanf: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::vsscanf: + case LibFunc::vfscanf: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(1)->isPointerTy() || + !FTy->getParamType(2)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::valloc: + if (!FTy->getReturnType()->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + break; + case LibFunc::vprintf: + if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::vfprintf: + case LibFunc::vsprintf: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::vsnprintf: + if (FTy->getNumParams() != 4 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(2)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 3); + break; + case LibFunc::open: + if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy()) + return false; + // May throw; "open" is a valid pthread cancellation point. + setDoesNotCapture(F, 1); + break; + case LibFunc::opendir: + if (FTy->getNumParams() != 1 || + !FTy->getReturnType()->isPointerTy() || + !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 1); + break; + case LibFunc::tmpfile: + if (!FTy->getReturnType()->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + break; + case LibFunc::times: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::htonl: + case LibFunc::htons: + case LibFunc::ntohl: + case LibFunc::ntohs: + setDoesNotThrow(F); + setDoesNotAccessMemory(F); + break; + case LibFunc::lstat: + if (FTy->getNumParams() != 2 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::lchown: + if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::qsort: + if (FTy->getNumParams() != 4 || !FTy->getParamType(3)->isPointerTy()) + return false; + // May throw; places call through function pointer. 
+ setDoesNotCapture(F, 4); + break; + case LibFunc::dunder_strdup: + case LibFunc::dunder_strndup: + if (FTy->getNumParams() < 1 || + !FTy->getReturnType()->isPointerTy() || + !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 1); + break; + case LibFunc::dunder_strtok_r: + if (FTy->getNumParams() != 3 || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::under_IO_getc: + if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::under_IO_putc: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::dunder_isoc99_scanf: + if (FTy->getNumParams() < 1 || + !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::stat64: + case LibFunc::lstat64: + case LibFunc::statvfs64: + case LibFunc::dunder_isoc99_sscanf: + if (FTy->getNumParams() < 1 || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::fopen64: + if (FTy->getNumParams() != 2 || + !FTy->getReturnType()->isPointerTy() || + !FTy->getParamType(0)->isPointerTy() || + !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + setDoesNotCapture(F, 1); + setDoesNotCapture(F, 2); + break; + case LibFunc::fseeko64: + case LibFunc::ftello64: + if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 1); + break; + case LibFunc::tmpfile64: + if (!FTy->getReturnType()->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotAlias(F, 0); + break; + case LibFunc::fstat64: + case LibFunc::fstatvfs64: + if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) + return false; + setDoesNotThrow(F); + setDoesNotCapture(F, 2); + break; + case LibFunc::open64: + if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy()) + return false; + // May throw; "open" is a valid pthread cancellation point. + setDoesNotCapture(F, 1); + break; + default: + // Didn't mark any attributes. + return false; + } + + return true; +} + +/// annotateLibraryCalls - Adds attributes to well-known standard library +/// call declarations. +bool FunctionAttrs::annotateLibraryCalls(const CallGraphSCC &SCC) { + bool MadeChange = false; + + // Check each function in turn annotating well-known library function + // declarations with attributes. 
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { + Function *F = (*I)->getFunction(); + + if (F != 0 && F->isDeclaration()) + MadeChange |= inferPrototypeAttributes(*F); + } + + return MadeChange; +} + bool FunctionAttrs::runOnSCC(CallGraphSCC &SCC) { AA = &getAnalysis<AliasAnalysis>(); + TLI = &getAnalysis<TargetLibraryInfo>(); - bool Changed = AddReadAttrs(SCC); + bool Changed = annotateLibraryCalls(SCC); + Changed |= AddReadAttrs(SCC); Changed |= AddNoCaptureAttrs(SCC); Changed |= AddNoAliasAttrs(SCC); return Changed; diff --git a/lib/Transforms/IPO/GlobalDCE.cpp b/lib/Transforms/IPO/GlobalDCE.cpp index dc99492..201f320 100644 --- a/lib/Transforms/IPO/GlobalDCE.cpp +++ b/lib/Transforms/IPO/GlobalDCE.cpp @@ -42,6 +42,7 @@ namespace { private: SmallPtrSet<GlobalValue*, 32> AliveGlobals; + SmallPtrSet<Constant *, 8> SeenConstants; /// GlobalIsNeeded - mark the specific global value as needed, and /// recursively mark anything that it uses as also needed. @@ -151,6 +152,7 @@ bool GlobalDCE::runOnModule(Module &M) { // Make sure that all memory is released AliveGlobals.clear(); + SeenConstants.clear(); return Changed; } @@ -190,12 +192,15 @@ void GlobalDCE::GlobalIsNeeded(GlobalValue *G) { void GlobalDCE::MarkUsedGlobalsAsNeeded(Constant *C) { if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) return GlobalIsNeeded(GV); - + // Loop over all of the operands of the constant, adding any globals they // use to the list of needed globals. - for (User::op_iterator I = C->op_begin(), E = C->op_end(); I != E; ++I) - if (Constant *OpC = dyn_cast<Constant>(*I)) - MarkUsedGlobalsAsNeeded(OpC); + for (User::op_iterator I = C->op_begin(), E = C->op_end(); I != E; ++I) { + // If we've already processed this constant there's no need to do it again. + Constant *Op = dyn_cast<Constant>(*I); + if (Op && SeenConstants.insert(Op)) + MarkUsedGlobalsAsNeeded(Op); + } } // RemoveUnusedGlobalValue - Loop over all of the uses of the specified diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp index 2b9d667..b035a82 100644 --- a/lib/Transforms/IPO/GlobalOpt.cpp +++ b/lib/Transforms/IPO/GlobalOpt.cpp @@ -470,8 +470,9 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV, static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, DataLayout *TD, TargetLibraryInfo *TLI) { bool Changed = false; - for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) { - User *U = *UI++; + SmallVector<User*, 8> WorkList(V->use_begin(), V->use_end()); + while (!WorkList.empty()) { + User *U = WorkList.pop_back_val(); if (LoadInst *LI = dyn_cast<LoadInst>(U)) { if (Init) { @@ -534,7 +535,6 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, // us, and if they are all dead, nuke them without remorse. if (SafeToDestroyConstant(C)) { C->destroyConstant(); - // This could have invalidated UI, start over from scratch. CleanupConstantGlobalUsers(V, Init, TD, TLI); return true; } diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp index 892100f..4ce749c 100644 --- a/lib/Transforms/IPO/MergeFunctions.cpp +++ b/lib/Transforms/IPO/MergeFunctions.cpp @@ -72,6 +72,15 @@ STATISTIC(NumThunksWritten, "Number of thunks generated"); STATISTIC(NumAliasesWritten, "Number of aliases generated"); STATISTIC(NumDoubleWeak, "Number of new functions created"); +/// Returns the type id for a type to be hashed. 
We turn pointer types into +/// integers here because the actual compare logic below considers pointers and +/// integers of the same size as equal. +static Type::TypeID getTypeIDForHash(Type *Ty) { + if (Ty->isPointerTy()) + return Type::IntegerTyID; + return Ty->getTypeID(); +} + /// Creates a hash-code for the function which is the same for any two /// functions that will compare equal, without looking at the instructions /// inside the function. @@ -83,9 +92,9 @@ static unsigned profileFunction(const Function *F) { ID.AddInteger(F->getCallingConv()); ID.AddBoolean(F->hasGC()); ID.AddBoolean(FTy->isVarArg()); - ID.AddInteger(FTy->getReturnType()->getTypeID()); + ID.AddInteger(getTypeIDForHash(FTy->getReturnType())); for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) - ID.AddInteger(FTy->getParamType(i)->getTypeID()); + ID.AddInteger(getTypeIDForHash(FTy->getParamType(i))); return ID.ComputeHash(); } @@ -200,8 +209,7 @@ private: // Any two pointers in the same address space are equivalent, intptr_t and // pointers are equivalent. Otherwise, standard type equivalence rules apply. -bool FunctionComparator::isEquivalentType(Type *Ty1, - Type *Ty2) const { +bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const { if (Ty1 == Ty2) return true; if (Ty1->getTypeID() != Ty2->getTypeID()) { @@ -740,7 +748,13 @@ void MergeFunctions::writeThunk(Function *F, Function *G) { if (NewG->getReturnType()->isVoidTy()) { Builder.CreateRetVoid(); } else { - Builder.CreateRet(Builder.CreateBitCast(CI, NewG->getReturnType())); + Type *RetTy = NewG->getReturnType(); + if (CI->getType()->isIntegerTy() && RetTy->isPointerTy()) + Builder.CreateRet(Builder.CreateIntToPtr(CI, RetTy)); + else if (CI->getType()->isPointerTy() && RetTy->isIntegerTy()) + Builder.CreateRet(Builder.CreatePtrToInt(CI, RetTy)); + else + Builder.CreateRet(Builder.CreateBitCast(CI, RetTy)); } NewG->copyAttributesFrom(G); diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp index 027a9f2..986c0b8 100644 --- a/lib/Transforms/IPO/PassManagerBuilder.cpp +++ b/lib/Transforms/IPO/PassManagerBuilder.cpp @@ -33,7 +33,12 @@ RunLoopVectorization("vectorize-loops", cl::desc("Run the Loop vectorization passes")); static cl::opt<bool> -RunBBVectorization("vectorize", cl::desc("Run the BB vectorization passes")); +RunSLPVectorization("vectorize-slp", + cl::desc("Run the SLP vectorization passes")); + +static cl::opt<bool> +RunBBVectorization("vectorize-slp-aggressive", + cl::desc("Run the BB vectorization passes")); static cl::opt<bool> UseGVNAfterVectorization("use-gvn-after-vectorization", @@ -52,7 +57,8 @@ PassManagerBuilder::PassManagerBuilder() { DisableSimplifyLibCalls = false; DisableUnitAtATime = false; DisableUnrollLoops = false; - Vectorize = RunBBVectorization; + BBVectorize = RunBBVectorization; + SLPVectorize = RunSLPVectorization; LoopVectorize = RunLoopVectorization; } @@ -207,7 +213,10 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) { addExtensionsToPM(EP_ScalarOptimizerLate, MPM); - if (Vectorize) { + if (SLPVectorize) + MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains. 
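// Usage sketch (hypothetical driver code; only the SLPVectorize/BBVectorize
// members come from this patch, the rest is illustrative): a front end can now
// request the two straight-line vectorizers independently, e.g.
//   PassManagerBuilder PMB;
//   PMB.OptLevel = 2;
//   PMB.SLPVectorize = true;    // corresponds to -vectorize-slp
//   PMB.BBVectorize = false;    // corresponds to -vectorize-slp-aggressive
//   PMB.populateModulePassManager(MPM);
// whereas the old boolean Vectorize member controlled only the BB vectorizer.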
+ + if (BBVectorize) { MPM.add(createBBVectorizePass()); MPM.add(createInstructionCombiningPass()); if (OptLevel > 1 && UseGVNAfterVectorization) @@ -321,6 +330,14 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM, PM.add(createGlobalDCEPass()); } +inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) { + return reinterpret_cast<PassManagerBuilder*>(P); +} + +inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) { + return reinterpret_cast<LLVMPassManagerBuilderRef>(P); +} + LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate() { PassManagerBuilder *PMB = new PassManagerBuilder(); return wrap(PMB); diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp index 5f8681f..3396f79 100644 --- a/lib/Transforms/IPO/StripSymbols.cpp +++ b/lib/Transforms/IPO/StripSymbols.cpp @@ -195,10 +195,9 @@ static void findUsedValues(GlobalVariable *LLVMUsed, SmallPtrSet<const GlobalValue*, 8> &UsedValues) { if (LLVMUsed == 0) return; UsedValues.insert(LLVMUsed); - - ConstantArray *Inits = dyn_cast<ConstantArray>(LLVMUsed->getInitializer()); - if (Inits == 0) return; - + + ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer()); + for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) if (GlobalValue *GV = dyn_cast<GlobalValue>(Inits->getOperand(i)->stripPointerCasts())) diff --git a/lib/Transforms/InstCombine/CMakeLists.txt b/lib/Transforms/InstCombine/CMakeLists.txt index 72cfe2c..a25696e 100644 --- a/lib/Transforms/InstCombine/CMakeLists.txt +++ b/lib/Transforms/InstCombine/CMakeLists.txt @@ -9,7 +9,7 @@ add_llvm_library(LLVMInstCombine InstCombineMulDivRem.cpp InstCombinePHI.cpp InstCombineSelect.cpp - InstCombineShifts.cpp + InstCombineShifts.cpp InstCombineSimplifyDemanded.cpp InstCombineVectorOps.cpp ) diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h index 1f6a3a5e..2a36074 100644 --- a/lib/Transforms/InstCombine/InstCombine.h +++ b/lib/Transforms/InstCombine/InstCombine.h @@ -233,6 +233,7 @@ private: Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI); bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS); Value *EmitGEPOffset(User *GEP); + Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN); public: // InsertNewInstBefore - insert an instruction New before instruction Old diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp index 3c5781c..b96eb51 100644 --- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -24,9 +24,9 @@ namespace { /// Class representing coefficient of floating-point addend. /// This class needs to be highly efficient, which is especially true for /// the constructor. As of I write this comment, the cost of the default - /// constructor is merely 4-byte-store-zero (Assuming compiler is able to + /// constructor is merely 4-byte-store-zero (Assuming compiler is able to /// perform write-merging). - /// + /// class FAddendCoef { public: // The constructor has to initialize a APFloat, which is uncessary for @@ -37,31 +37,31 @@ namespace { // FAddendCoef() : IsFp(false), BufHasFpVal(false), IntVal(0) {} ~FAddendCoef(); - + void set(short C) { assert(!insaneIntVal(C) && "Insane coefficient"); IsFp = false; IntVal = C; } - + void set(const APFloat& C); - + void negate(); - + bool isZero() const { return isInt() ? 
!IntVal : getFpVal().isZero(); } Value *getValue(Type *) const; - + // If possible, don't define operator+/operator- etc because these // operators inevitably call FAddendCoef's constructor which is not cheap. void operator=(const FAddendCoef &A); void operator+=(const FAddendCoef &A); void operator-=(const FAddendCoef &A); void operator*=(const FAddendCoef &S); - + bool isOne() const { return isInt() && IntVal == 1; } bool isTwo() const { return isInt() && IntVal == 2; } bool isMinusOne() const { return isInt() && IntVal == -1; } bool isMinusTwo() const { return isInt() && IntVal == -2; } - + private: bool insaneIntVal(int V) { return V > 4 || V < -4; } APFloat *getFpValPtr(void) @@ -74,18 +74,28 @@ namespace { return *getFpValPtr(); } - APFloat &getFpVal(void) - { assert(IsFp && BufHasFpVal && "Incorret state"); return *getFpValPtr(); } - + APFloat &getFpVal(void) { + assert(IsFp && BufHasFpVal && "Incorret state"); + return *getFpValPtr(); + } + bool isInt() const { return !IsFp; } + // If the coefficient is represented by an integer, promote it to a + // floating point. + void convertToFpType(const fltSemantics &Sem); + + // Construct an APFloat from a signed integer. + // TODO: We should get rid of this function when APFloat can be constructed + // from an *SIGNED* integer. + APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val); private: bool IsFp; - + // True iff FpValBuf contains an instance of APFloat. bool BufHasFpVal; - + // The integer coefficient of an individual addend is either 1 or -1, // and we try to simplify at most 4 addends from neighboring at most // two instructions. So the range of <IntVal> falls in [-4, 4]. APInt @@ -94,7 +104,7 @@ namespace { AlignedCharArrayUnion<APFloat> FpValBuf; }; - + /// FAddend is used to represent floating-point addend. An addend is /// represented as <C, V>, where the V is a symbolic value, and C is a /// constant coefficient. A constant addend is represented as <C, 0>. @@ -102,10 +112,10 @@ namespace { class FAddend { public: FAddend() { Val = 0; } - + Value *getSymVal (void) const { return Val; } const FAddendCoef &getCoef(void) const { return Coeff; } - + bool isConstant() const { return Val == 0; } bool isZero() const { return Coeff.isZero(); } @@ -114,17 +124,17 @@ namespace { { Coeff.set(Coefficient); Val = V; } void set(const ConstantFP* Coefficient, Value *V) { Coeff.set(Coefficient->getValueAPF()); Val = V; } - + void negate() { Coeff.negate(); } - + /// Drill down the U-D chain one step to find the definition of V, and /// try to break the definition into one or two addends. static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1); - + /// Similar to FAddend::drillDownOneStep() except that the value being /// splitted is the addend itself. unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const; - + void operator+=(const FAddend &T) { assert((Val == T.Val) && "Symbolic-values disagree"); Coeff += T.Coeff; @@ -132,12 +142,12 @@ namespace { private: void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; } - + // This addend has the value of "Coeff * Val". Value *Val; FAddendCoef Coeff; }; - + /// FAddCombine is the class for optimizing an unsafe fadd/fsub along /// with its neighboring at most two instructions. 
/// @@ -145,17 +155,17 @@ namespace { public: FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(0) {} Value *simplify(Instruction *FAdd); - + private: typedef SmallVector<const FAddend*, 4> AddendVect; - + Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota); Value *performFactorization(Instruction *I); /// Convert given addend to a Value Value *createAddendVal(const FAddend &A, bool& NeedNeg); - + /// Return the number of instructions needed to emit the N-ary addition. unsigned calcInstrNumber(const AddendVect& Vect); Value *createFSub(Value *Opnd0, Value *Opnd1); @@ -165,10 +175,10 @@ namespace { Value *createFNeg(Value *V); Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota); void createInstPostProc(Instruction *NewInst); - + InstCombiner::BuilderTy *Builder; Instruction *Instr; - + private: // Debugging stuff are clustered here. #ifndef NDEBUG @@ -180,7 +190,7 @@ namespace { void incCreateInstNum() {} #endif }; -} +} //===----------------------------------------------------------------------===// // @@ -203,10 +213,34 @@ void FAddendCoef::set(const APFloat& C) { } else *P = C; - IsFp = BufHasFpVal = true; + IsFp = BufHasFpVal = true; } -void FAddendCoef::operator=(const FAddendCoef& That) { +void FAddendCoef::convertToFpType(const fltSemantics &Sem) { + if (!isInt()) + return; + + APFloat *P = getFpValPtr(); + if (IntVal > 0) + new(P) APFloat(Sem, IntVal); + else { + new(P) APFloat(Sem, 0 - IntVal); + P->changeSign(); + } + IsFp = BufHasFpVal = true; +} + +APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) { + if (Val >= 0) + return APFloat(Sem, Val); + + APFloat T(Sem, 0 - Val); + T.changeSign(); + + return T; +} + +void FAddendCoef::operator=(const FAddendCoef &That) { if (That.isInt()) set(That.IntVal); else @@ -222,16 +256,16 @@ void FAddendCoef::operator+=(const FAddendCoef &That) { getFpVal().add(That.getFpVal(), RndMode); return; } - + if (isInt()) { const APFloat &T = That.getFpVal(); - set(T); - getFpVal().add(APFloat(T.getSemantics(), IntVal), RndMode); + convertToFpType(T.getSemantics()); + getFpVal().add(T, RndMode); return; } - + APFloat &T = getFpVal(); - T.add(APFloat(T.getSemantics(), That.IntVal), RndMode); + T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode); } void FAddendCoef::operator-=(const FAddendCoef &That) { @@ -243,16 +277,16 @@ void FAddendCoef::operator-=(const FAddendCoef &That) { getFpVal().subtract(That.getFpVal(), RndMode); return; } - + if (isInt()) { const APFloat &T = That.getFpVal(); - set(T); - getFpVal().subtract(APFloat(T.getSemantics(), IntVal), RndMode); + convertToFpType(T.getSemantics()); + getFpVal().subtract(T, RndMode); return; } APFloat &T = getFpVal(); - T.subtract(APFloat(T.getSemantics(), IntVal), RndMode); + T.subtract(createAPFloatFromInt(T.getSemantics(), IntVal), RndMode); } void FAddendCoef::operator*=(const FAddendCoef &That) { @@ -271,15 +305,16 @@ void FAddendCoef::operator*=(const FAddendCoef &That) { return; } - const fltSemantics &Semantic = + const fltSemantics &Semantic = isInt() ? 
That.getFpVal().getSemantics() : getFpVal().getSemantics(); if (isInt()) - set(APFloat(Semantic, IntVal)); + convertToFpType(Semantic); APFloat &F0 = getFpVal(); if (That.isInt()) - F0.multiply(APFloat(Semantic, That.IntVal), APFloat::rmNearestTiesToEven); + F0.multiply(createAPFloatFromInt(Semantic, That.IntVal), + APFloat::rmNearestTiesToEven); else F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven); @@ -305,11 +340,11 @@ Value *FAddendCoef::getValue(Type *Ty) const { // A - B <1, A>, <1,B> // 0 - B <-1, B> // C * A, <C, A> -// A + C <1, A> <C, NULL> +// A + C <1, A> <C, NULL> // 0 +/- 0 <0, NULL> (corner case) // // Legend: A and B are not constant, C is constant -// +// unsigned FAddend::drillValueDownOneStep (Value *Val, FAddend &Addend0, FAddend &Addend1) { Instruction *I = 0; @@ -380,7 +415,7 @@ unsigned FAddend::drillAddendDownOneStep return 0; unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1); - if (!BreakNum || Coeff.isOne()) + if (!BreakNum || Coeff.isOne()) return BreakNum; Addend0.Scale(Coeff); @@ -402,10 +437,10 @@ unsigned FAddend::drillAddendDownOneStep Value *FAddCombine::performFactorization(Instruction *I) { assert((I->getOpcode() == Instruction::FAdd || I->getOpcode() == Instruction::FSub) && "Expect add/sub"); - + Instruction *I0 = dyn_cast<Instruction>(I->getOperand(0)); Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1)); - + if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode()) return 0; @@ -420,14 +455,14 @@ Value *FAddCombine::performFactorization(Instruction *I) { Value *Opnd1_0 = I1->getOperand(0); Value *Opnd1_1 = I1->getOperand(1); - // Input Instr I Factor AddSub0 AddSub1 + // Input Instr I Factor AddSub0 AddSub1 // ---------------------------------------------- // (x*y) +/- (x*z) x y z // (y/x) +/- (z/x) x y z // Value *Factor = 0; Value *AddSub0 = 0, *AddSub1 = 0; - + if (isMpy) { if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1) Factor = Opnd0_0; @@ -459,7 +494,7 @@ Value *FAddCombine::performFactorization(Instruction *I) { if (isMpy) return createFMul(Factor, NewAddSub); - + return createFDiv(NewAddSub, Factor); } @@ -473,7 +508,7 @@ Value *FAddCombine::simplify(Instruction *I) { assert((I->getOpcode() == Instruction::FAdd || I->getOpcode() == Instruction::FSub) && "Expect add/sub"); - // Save the instruction before calling other member-functions. + // Save the instruction before calling other member-functions. Instr = I; FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1; @@ -484,7 +519,7 @@ Value *FAddCombine::simplify(Instruction *I) { unsigned Opnd0_ExpNum = 0; unsigned Opnd1_ExpNum = 0; - if (!Opnd0.isConstant()) + if (!Opnd0.isConstant()) Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1); // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1. @@ -506,7 +541,7 @@ Value *FAddCombine::simplify(Instruction *I) { Value *V0 = I->getOperand(0); Value *V1 = I->getOperand(1); - InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) && + InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) && (!isa<Constant>(V1) && V1->hasOneUse())) ? 
2 : 1; if (Value *R = simplifyFAdd(AllOpnds, InstQuota)) @@ -546,7 +581,7 @@ Value *FAddCombine::simplify(Instruction *I) { return R; } - // step 6: Try factorization as the last resort, + // step 6: Try factorization as the last resort, return performFactorization(I); } @@ -555,7 +590,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) { unsigned AddendNum = Addends.size(); assert(AddendNum <= 4 && "Too many addends"); - // For saving intermediate results; + // For saving intermediate results; unsigned NextTmpIdx = 0; FAddend TmpResult[3]; @@ -571,7 +606,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) { AddendVect SimpVect; // The outer loop works on one symbolic-value at a time. Suppose the input - // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ... + // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ... // The symbolic-values will be processed in this order: x, y, z. // for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) { @@ -598,7 +633,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) { if (T && T->getSymVal() == Val) { // Set null such that next iteration of the outer loop will not process // this addend again. - Addends[SameSymIdx] = 0; + Addends[SameSymIdx] = 0; SimpVect.push_back(T); } } @@ -611,7 +646,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) { R += *SimpVect[Idx]; // Pop all addends being folded and push the resulting folded addend. - SimpVect.resize(StartIdx); + SimpVect.resize(StartIdx); if (Val != 0) { if (!R.isZero()) { SimpVect.push_back(&R); @@ -624,7 +659,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) { } } - assert((NextTmpIdx <= sizeof(TmpResult)/sizeof(TmpResult[0]) + 1) && + assert((NextTmpIdx <= sizeof(TmpResult)/sizeof(TmpResult[0]) + 1) && "out-of-bound access"); if (ConstAdd) @@ -646,7 +681,7 @@ Value *FAddCombine::createNaryFAdd assert(!Opnds.empty() && "Expect at least one addend"); // Step 1: Check if the # of instructions needed exceeds the quota. - // + // unsigned InstrNeeded = calcInstrNumber(Opnds); if (InstrNeeded > InstrQuota) return 0; @@ -667,7 +702,7 @@ Value *FAddCombine::createNaryFAdd // Iterate the addends, creating fadd/fsub using adjacent two addends. for (AddendVect::const_iterator I = Opnds.begin(), E = Opnds.end(); I != E; I++) { - bool NeedNeg; + bool NeedNeg; Value *V = createAddendVal(**I, NeedNeg); if (!LastVal) { LastVal = V; @@ -693,7 +728,7 @@ Value *FAddCombine::createNaryFAdd } #ifndef NDEBUG - assert(CreateInstrNum == InstrNeeded && + assert(CreateInstrNum == InstrNeeded && "Inconsistent in instruction numbers"); #endif @@ -751,8 +786,8 @@ unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) { unsigned OpndNum = Opnds.size(); unsigned InstrNeeded = OpndNum - 1; - // The number of addends in the form of "(-1)*x". - unsigned NegOpndNum = 0; + // The number of addends in the form of "(-1)*x". + unsigned NegOpndNum = 0; // Adjust the number of instructions needed to emit the N-ary add. 
for (AddendVect::const_iterator I = Opnds.begin(), E = Opnds.end(); diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index 990cbc3..ec75dd2 100644 --- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -266,9 +266,8 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op, return 0; } - -/// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is -/// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient +/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise +/// (V < Lo || V >= Hi). In practice, we emit the more efficient /// (V-Lo) \<u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates /// whether to treat the V, Lo and HI as signed or not. IB is the location to /// insert new instructions. @@ -935,6 +934,9 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) { Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) { if (LHS->getPredicate() == FCmpInst::FCMP_ORD && RHS->getPredicate() == FCmpInst::FCMP_ORD) { + if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) + return 0; + // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y) if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1))) if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) { @@ -1545,14 +1547,6 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) { switch (RHSCC) { default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: - if (LHSCst == SubOne(RHSCst)) { - // (X == 13 | X == 14) -> X-13 <u 2 - Constant *AddCST = ConstantExpr::getNeg(LHSCst); - Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off"); - AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst); - return Builder->CreateICmpULT(Add, AddCST); - } - if (LHS->getOperand(0) == RHS->getOperand(0)) { // if LHSCst and RHSCst differ only by one bit: // (A == C1 || A == C2) -> (A & ~(C1 ^ C2)) == C1 @@ -1566,6 +1560,14 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) { } } + if (LHSCst == SubOne(RHSCst)) { + // (X == 13 | X == 14) -> X-13 <u 2 + Constant *AddCST = ConstantExpr::getNeg(LHSCst); + Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off"); + AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst); + return Builder->CreateICmpULT(Add, AddCST); + } + break; // (X == 13 | X == 15) -> no change case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp index 64cd1bd..78b4a2c 100644 --- a/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -1372,7 +1372,8 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS, NestF->getType() == PointerType::getUnqual(NewFTy) ? 
NestF : ConstantExpr::getBitCast(NestF, PointerType::getUnqual(NewFTy)); - const AttributeSet &NewPAL = AttributeSet::get(FTy->getContext(), NewAttrs); + const AttributeSet &NewPAL = + AttributeSet::get(FTy->getContext(), NewAttrs); Instruction *NewCaller; if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp index d162223..2ee1278 100644 --- a/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -1610,6 +1610,9 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI, /// OptimizeIntToFloatBitCast - See if we can optimize an integer->float/double /// bitcast. The various long double bitcasts can't get in here. static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){ + // We need to know the target byte order to perform this optimization. + if (!IC.getDataLayout()) return 0; + Value *Src = CI.getOperand(0); Type *DestTy = CI.getType(); @@ -1631,7 +1634,10 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){ VecInput = IC.Builder->CreateBitCast(VecInput, VecTy); } - return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(0)); + unsigned Elt = 0; + if (IC.getDataLayout()->isBigEndian()) + Elt = VecTy->getPrimitiveSizeInBits() / DestWidth - 1; + return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt)); } } @@ -1653,6 +1659,8 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){ } unsigned Elt = ShAmt->getZExtValue() / DestWidth; + if (IC.getDataLayout()->isBigEndian()) + Elt = VecTy->getPrimitiveSizeInBits() / DestWidth - 1 - Elt; return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt)); } } diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp index 32fdb9b..4c252c0 100644 --- a/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -139,6 +139,31 @@ static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS, } } +/// Returns true if the exploded icmp can be expressed as a signed comparison +/// to zero and updates the predicate accordingly. +/// The signedness of the comparison is preserved. +static bool isSignTest(ICmpInst::Predicate &pred, const ConstantInt *RHS) { + if (!ICmpInst::isSigned(pred)) + return false; + + if (RHS->isZero()) + return ICmpInst::isRelational(pred); + + if (RHS->isOne()) { + if (pred == ICmpInst::ICMP_SLT) { + pred = ICmpInst::ICMP_SLE; + return true; + } + } else if (RHS->isAllOnesValue()) { + if (pred == ICmpInst::ICMP_SGT) { + pred = ICmpInst::ICMP_SGE; + return true; + } + } + + return false; +} + // isHighOnes - Return true if the constant is of the form 1+0+. // This is the same as lowones(~X). static bool isHighOnes(const ConstantInt *CI) { @@ -207,7 +232,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV, Constant *Init = GV->getInitializer(); if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init)) return 0; - + uint64_t ArrayElementCount = Init->getType()->getArrayNumElements(); if (ArrayElementCount > 1024) return 0; // Don't blow up on huge arrays. 
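A rough illustration of what the new isSignTest helper above enables (example values, not taken from the patch): an 'slt 1' or 'sgt -1' signed compare is rewritten as a sign test against zero, so together with the Mul and Shl cases added in the next hunk a sign-preserving (nsw) multiply can be dropped entirely:
  (x * 3) < 1     becomes   x <= 0
  (x * -3) > 0    becomes   x < 0    (a negative factor swaps the predicate)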
@@ -443,20 +468,29 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV, } - // If a 32-bit or 64-bit magic bitvector captures the entire comparison state + // If a magic bitvector captures the entire comparison state // of this load, replace it with computation that does: // ((magic_cst >> i) & 1) != 0 - if (ArrayElementCount <= 32 || - (TD && ArrayElementCount <= 64 && TD->isLegalInteger(64))) { - Type *Ty; - if (ArrayElementCount <= 32) + { + Type *Ty = 0; + + // Look for an appropriate type: + // - The type of Idx if the magic fits + // - The smallest fitting legal type if we have a DataLayout + // - Default to i32 + if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth()) + Ty = Idx->getType(); + else if (TD) + Ty = TD->getSmallestLegalIntType(Init->getContext(), ArrayElementCount); + else if (ArrayElementCount <= 32) Ty = Type::getInt32Ty(Init->getContext()); - else - Ty = Type::getInt64Ty(Init->getContext()); - Value *V = Builder->CreateIntCast(Idx, Ty, false); - V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V); - V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V); - return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0)); + + if (Ty != 0) { + Value *V = Builder->CreateIntCast(Idx, Ty, false); + V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V); + V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V); + return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0)); + } } return 0; @@ -1273,6 +1307,23 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, break; } + case Instruction::Mul: { // (icmp pred (mul X, Val), CI) + ConstantInt *Val = dyn_cast<ConstantInt>(LHSI->getOperand(1)); + if (!Val) break; + + // If this is a signed comparison to 0 and the mul is sign preserving, + // use the mul LHS operand instead. + ICmpInst::Predicate pred = ICI.getPredicate(); + if (isSignTest(pred, RHS) && !Val->isZero() && + cast<BinaryOperator>(LHSI)->hasNoSignedWrap()) + return new ICmpInst(Val->isNegative() ? + ICmpInst::getSwappedPredicate(pred) : pred, + LHSI->getOperand(0), + Constant::getNullValue(RHS->getType())); + + break; + } + case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI) ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1)); if (!ShAmt) break; @@ -1304,6 +1355,12 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0), ConstantExpr::getLShr(RHS, ShAmt)); + // If the shift is NSW and we compare to 0, then it is just shifting out + // sign bits, no need for an AND either. + if (cast<BinaryOperator>(LHSI)->hasNoSignedWrap() && RHSV == 0) + return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0), + ConstantExpr::getLShr(RHS, ShAmt)); + if (LHSI->hasOneUse()) { // Otherwise strength reduce the shift into an and. uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits); @@ -1318,6 +1375,15 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, } } + // If this is a signed comparison to 0 and the shift is sign preserving, + // use the shift LHS operand instead. + ICmpInst::Predicate pred = ICI.getPredicate(); + if (isSignTest(pred, RHS) && + cast<BinaryOperator>(LHSI)->hasNoSignedWrap()) + return new ICmpInst(pred, + LHSI->getOperand(0), + Constant::getNullValue(RHS->getType())); + // Otherwise, if this is a comparison of the sign bit, simplify to and/test. 
bool TrueIfSigned = false; if (LHSI->hasOneUse() && @@ -1532,6 +1598,19 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, return new ICmpInst(pred, X, NegX); } } + break; + case Instruction::Mul: + if (RHSV == 0 && BO->hasNoSignedWrap()) { + if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) { + // The trivial case (mul X, 0) is handled by InstSimplify + // General case : (mul X, C) != 0 iff X != 0 + // (mul X, C) == 0 iff X == 0 + if (!BOC->isZero()) + return new ICmpInst(ICI.getPredicate(), BO->getOperand(0), + Constant::getNullValue(RHS->getType())); + } + } + break; default: break; } } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) { @@ -2408,6 +2487,55 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { return new ICmpInst(Pred, Y, Z); } + // icmp slt (X + -1), Y -> icmp sle X, Y + if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT && + match(B, m_AllOnes())) + return new ICmpInst(CmpInst::ICMP_SLE, A, Op1); + + // icmp sge (X + -1), Y -> icmp sgt X, Y + if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE && + match(B, m_AllOnes())) + return new ICmpInst(CmpInst::ICMP_SGT, A, Op1); + + // icmp sle (X + 1), Y -> icmp slt X, Y + if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && + match(B, m_One())) + return new ICmpInst(CmpInst::ICMP_SLT, A, Op1); + + // icmp sgt (X + 1), Y -> icmp sge X, Y + if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && + match(B, m_One())) + return new ICmpInst(CmpInst::ICMP_SGE, A, Op1); + + // if C1 has greater magnitude than C2: + // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y + // s.t. C3 = C1 - C2 + // + // if C2 has greater magnitude than C1: + // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3) + // s.t. C3 = C2 - C1 + if (A && C && NoOp0WrapProblem && NoOp1WrapProblem && + (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) + if (ConstantInt *C1 = dyn_cast<ConstantInt>(B)) + if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) { + const APInt &AP1 = C1->getValue(); + const APInt &AP2 = C2->getValue(); + if (AP1.isNegative() == AP2.isNegative()) { + APInt AP1Abs = C1->getValue().abs(); + APInt AP2Abs = C2->getValue().abs(); + if (AP1Abs.uge(AP2Abs)) { + ConstantInt *C3 = Builder->getInt(AP1 - AP2); + Value *NewAdd = Builder->CreateNSWAdd(A, C3); + return new ICmpInst(Pred, NewAdd, C); + } else { + ConstantInt *C3 = Builder->getInt(AP2 - AP1); + Value *NewAdd = Builder->CreateNSWAdd(C, C3); + return new ICmpInst(Pred, A, NewAdd); + } + } + } + + // Analyze the case when either Op0 or Op1 is a sub instruction. // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null). A = 0; B = 0; C = 0; D = 0; @@ -2541,6 +2669,15 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { } { Value *A, *B; + // Transform (A & ~B) == 0 --> (A & B) != 0 + // and (A & ~B) != 0 --> (A & B) == 0 + // if A is a power of 2. 
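// For instance (illustrative only): when A is a single set bit (1 << n),
//   ((1 << n) & ~mask) == 0   becomes   ((1 << n) & mask) != 0
// i.e. "bit n is set in mask", which avoids materializing the 'not'.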
+ if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) && + match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(A) && I.isEquality()) + return new ICmpInst(I.getInversePredicate(), + Builder->CreateAnd(A, B), + Op1); + // ~x < ~y --> y < x // ~x < cst --> ~cst < x if (match(Op0, m_Not(m_Value(A)))) { diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index 337cfe3..e2d7966 100644 --- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -69,8 +69,8 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy, if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { // If the GEP has all zero indices, it doesn't offset the pointer. If it // doesn't, it does. - if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, ToDelete, - IsOffset || !GEP->hasAllZeroIndices())) + if (!isOnlyCopiedFromConstantGlobal( + GEP, TheCopy, ToDelete, IsOffset || !GEP->hasAllZeroIndices())) return false; continue; } @@ -166,7 +166,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) { // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1 if (AI.isArrayAllocation()) { // Check C != 1 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) { - Type *NewTy = + Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue()); AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName()); New->setAlignment(AI.getAlignment()); @@ -294,7 +294,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, Type *SrcPTy = SrcTy->getElementType(); - if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() || + if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() || DestPTy->isVectorTy()) { // If the source is an array, the code below will not succeed. Check to // see if a trivial 'gep P, 0, 0' will help matters. Only do this for @@ -311,7 +311,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, } if (IC.getDataLayout() && - (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() || + (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() || SrcPTy->isVectorTy()) && // Do not allow turning this into a load of an integer, which is then // casted to a pointer, this pessimizes pointer analysis a lot. @@ -322,7 +322,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, // Okay, we are casting from one integer or pointer type to another of // the same size. Instead of casting the pointer before the load, cast // the result of the loaded value. - LoadInst *NewLoad = + LoadInst *NewLoad = IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName()); NewLoad->setAlignment(LI.getAlignment()); NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope()); @@ -359,7 +359,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { // None of the following transforms are legal for volatile/atomic loads. // FIXME: Some of it is okay for atomic loads; needs refactoring. if (!LI.isSimple()) return 0; - + // Do really simple store-to-load forwarding and load CSE, to catch cases // where there are several consecutive memory accesses to the same location, // separated by a few arithmetic operations. 
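// (For context, the forwarding mentioned above is of the form:
//   store i32 %x, i32* %p
//   %y = load i32* %p   ; %y is replaced directly with %x when nothing in
//                       ; between may write to %p
// so later visitLoadInst/visitStoreInst transforms see fewer redundant
// accesses.)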
@@ -380,7 +380,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { Constant::getNullValue(Op->getType()), &LI); return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType())); } - } + } // load null/undef -> unreachable // TODO: Consider a target hook for valid address spaces for this xform. @@ -399,7 +399,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { if (CE->isCast()) if (Instruction *Res = InstCombineLoadCast(*this, LI, TD)) return Res; - + if (Op->hasOneUse()) { // Change select and PHI nodes to select values instead of addresses: this // helps alias analysis out a lot, allows many others simplifications, and @@ -453,18 +453,18 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { Type *DestPTy = cast<PointerType>(CI->getType())->getElementType(); PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType()); if (SrcTy == 0) return 0; - + Type *SrcPTy = SrcTy->getElementType(); if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy()) return 0; - + /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep" /// to its first element. This allows us to handle things like: /// store i32 xxx, (bitcast {foo*, float}* %P to i32*) /// on 32-bit hosts. SmallVector<Value*, 4> NewGEPIndices; - + // If the source is an array, the code below will not succeed. Check to // see if a trivial 'gep P, 0, 0' will help matters. Only do this for // constants. @@ -472,7 +472,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { // Index through pointer. Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext())); NewGEPIndices.push_back(Zero); - + while (1) { if (StructType *STy = dyn_cast<StructType>(SrcPTy)) { if (!STy->getNumElements()) /* Struct can be empty {} */ @@ -486,24 +486,24 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { break; } } - + SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace()); } if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy()) return 0; - + // If the pointers point into different address spaces or if they point to // values with different sizes, we can't do the transformation. if (!IC.getDataLayout() || - SrcTy->getAddressSpace() != + SrcTy->getAddressSpace() != cast<PointerType>(CI->getType())->getAddressSpace() || IC.getDataLayout()->getTypeSizeInBits(SrcPTy) != IC.getDataLayout()->getTypeSizeInBits(DestPTy)) return 0; // Okay, we are casting from one integer or pointer type to another of - // the same size. Instead of casting the pointer before + // the same size. Instead of casting the pointer before // the store, cast the value to be stored. Value *NewCast; Value *SIOp0 = SI.getOperand(0); @@ -517,12 +517,12 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { if (SIOp0->getType()->isPointerTy()) opcode = Instruction::PtrToInt; } - + // SIOp0 is a pointer to aggregate and this is a store to the first field, // emit a GEP to index into its first field. if (!NewGEPIndices.empty()) CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices); - + NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy, SIOp0->getName()+".c"); SI.setOperand(0, NewCast); @@ -541,7 +541,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { static bool equivalentAddressValues(Value *A, Value *B) { // Test if the values are trivially equivalent. if (A == B) return true; - + // Test if the values come form identical arithmetic instructions. 
// This uses isIdenticalToWhenDefined instead of isIdenticalTo because // its only used to compare two uses within the same basic block, which @@ -554,7 +554,7 @@ static bool equivalentAddressValues(Value *A, Value *B) { if (Instruction *BI = dyn_cast<Instruction>(B)) if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI)) return true; - + // Otherwise they may not be equivalent. return false; } @@ -585,7 +585,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { // If the RHS is an alloca with a single use, zapify the store, making the // alloca dead. if (Ptr->hasOneUse()) { - if (isa<AllocaInst>(Ptr)) + if (isa<AllocaInst>(Ptr)) return EraseInstFromFunction(SI); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { if (isa<AllocaInst>(GEP->getOperand(0))) { @@ -608,8 +608,8 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) { ScanInsts++; continue; - } - + } + if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) { // Prev store isn't volatile, and stores to the same location? if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1), @@ -621,7 +621,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { } break; } - + // If this is a load, we have to stop. However, if the loaded value is from // the pointer we're loading and is producing the pointer we're storing, // then *this* store is dead (X = load P; store X -> P). @@ -629,12 +629,12 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) && LI->isSimple()) return EraseInstFromFunction(SI); - + // Otherwise, this is a load from some other location. Stores before it // may not be dead. break; } - + // Don't skip over loads or things that can modify memory. if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory()) break; @@ -664,11 +664,11 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { if (Instruction *Res = InstCombineStoreToCast(*this, SI)) return Res; - + // If this store is the last instruction in the basic block (possibly // excepting debug info instructions), and if the block ends with an // unconditional branch, try to move it to the successor block. - BBI = &SI; + BBI = &SI; do { ++BBI; } while (isa<DbgInfoIntrinsic>(BBI) || @@ -677,7 +677,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { if (BI->isUnconditional()) if (SimplifyStoreAtEndOfBlock(SI)) return 0; // xform done! - + return 0; } @@ -691,12 +691,12 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { /// bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { BasicBlock *StoreBB = SI.getParent(); - + // Check to see if the successor block has exactly two incoming edges. If // so, see if the other predecessor contains a store to the same location. // if so, insert a PHI node (if needed) and move the stores down. BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0); - + // Determine whether Dest has exactly two predecessors and, if so, compute // the other predecessor. 
pred_iterator PI = pred_begin(DestBB); @@ -708,7 +708,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { if (++PI == pred_end(DestBB)) return false; - + P = *PI; if (P != StoreBB) { if (OtherBB) @@ -728,7 +728,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { BranchInst *OtherBr = dyn_cast<BranchInst>(BBI); if (!OtherBr || BBI == OtherBB->begin()) return false; - + // If the other block ends in an unconditional branch, check for the 'if then // else' case. there is an instruction before the branch. StoreInst *OtherStore = 0; @@ -750,10 +750,10 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { } else { // Otherwise, the other block ended with a conditional branch. If one of the // destinations is StoreBB, then we have the if/then case. - if (OtherBr->getSuccessor(0) != StoreBB && + if (OtherBr->getSuccessor(0) != StoreBB && OtherBr->getSuccessor(1) != StoreBB) return false; - + // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an // if/then triangle. See if there is a store to the same ptr as SI that // lives in OtherBB. @@ -771,7 +771,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { BBI == OtherBB->begin()) return false; } - + // In order to eliminate the store in OtherBr, we have to // make sure nothing reads or overwrites the stored value in // StoreBB. @@ -781,7 +781,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { return false; } } - + // Insert a PHI node now if we need it. Value *MergedVal = OtherStore->getOperand(0); if (MergedVal != SI.getOperand(0)) { @@ -790,7 +790,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { PN->addIncoming(OtherStore->getOperand(0), OtherBB); MergedVal = InsertNewInstBefore(PN, DestBB->front()); } - + // Advance to a place where it is safe to insert the new store and // insert it. BBI = DestBB->getFirstInsertionPt(); @@ -800,7 +800,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { SI.getOrdering(), SI.getSynchScope()); InsertNewInstBefore(NewSI, *BBI); - NewSI->setDebugLoc(OtherStore->getDebugLoc()); + NewSI->setDebugLoc(OtherStore->getDebugLoc()); // If the two stores had the same TBAA tag, preserve it. if (MDNode *TBAATag = SI.getMetadata(LLVMContext::MD_tbaa)) @@ -808,7 +808,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) { OtherStore->getMetadata(LLVMContext::MD_tbaa)))) NewSI->setMetadata(LLVMContext::MD_tbaa, TBAATag); - + // Nuke the old stores. EraseInstFromFunction(SI); EraseInstFromFunction(*OtherStore); diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp index 173f2bf..df73906 100644 --- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -28,7 +28,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) { // if this is safe. For example, the use could be in dynamically unreached // code. if (!V->hasOneUse()) return 0; - + bool MadeChange = false; // ((1 << A) >>u B) --> (1 << (A-B)) @@ -41,7 +41,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) { A = IC.Builder->CreateSub(A, B); return IC.Builder->CreateShl(PowerOf2, A); } - + // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it // inexact. Similarly for <<. 
if (BinaryOperator *I = dyn_cast<BinaryOperator>(V)) @@ -52,12 +52,12 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) { I->setOperand(0, V2); MadeChange = true; } - + if (I->getOpcode() == Instruction::LShr && !I->isExact()) { I->setIsExact(); MadeChange = true; } - + if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) { I->setHasNoUnsignedWrap(); MadeChange = true; @@ -67,7 +67,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) { // TODO: Lots more we could do here: // If V is a phi node, we can call this on each of its operands. // "select cond, X, 0" can simplify to "X". - + return MadeChange ? V : 0; } @@ -84,12 +84,12 @@ static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) { LHSExt = LHSExt.zext(W * 2); RHSExt = RHSExt.zext(W * 2); } - + APInt MulExt = LHSExt * RHSExt; - + if (!sign) return MulExt.ugt(APInt::getLowBitsSet(W * 2, W)); - + APInt Min = APInt::getSignedMinValue(W).sext(W * 2); APInt Max = APInt::getSignedMaxValue(W).sext(W * 2); return MulExt.slt(Min) || MulExt.sgt(Max); @@ -107,16 +107,16 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { if (match(Op1, m_AllOnes())) // X * -1 == 0 - X return BinaryOperator::CreateNeg(Op0, I.getName()); - + if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) { - + // ((X << C1)*C2) == (X * (C2 << C1)) if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0)) if (SI->getOpcode() == Instruction::Shl) if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1))) return BinaryOperator::CreateMul(SI->getOperand(0), ConstantExpr::getShl(CI, ShOp)); - + const APInt &Val = CI->getValue(); if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C Constant *NewCst = ConstantInt::get(Op0->getType(), Val.logBase2()); @@ -125,7 +125,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { if (I.hasNoUnsignedWrap()) Shl->setHasNoUnsignedWrap(); return Shl; } - + // Canonicalize (X+C1)*CI -> X*CI+C1*CI. { Value *X; ConstantInt *C1; if (Op0->hasOneUse() && @@ -158,9 +158,9 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { } } } - + // Simplify mul instructions with a constant RHS. - if (isa<Constant>(Op1)) { + if (isa<Constant>(Op1)) { // Try to fold constant mul into select arguments. if (SelectInst *SI = dyn_cast<SelectInst>(Op0)) if (Instruction *R = FoldOpIntoSelect(I, SI)) @@ -181,7 +181,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { Value *Op1C = Op1; BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0); if (!BO || - (BO->getOpcode() != Instruction::UDiv && + (BO->getOpcode() != Instruction::UDiv && BO->getOpcode() != Instruction::SDiv)) { Op1C = Op0; BO = dyn_cast<BinaryOperator>(Op1); @@ -227,14 +227,14 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { if (match(Op1, m_Shl(m_One(), m_Value(Y)))) return BinaryOperator::CreateShl(Op0, Y); } - + // If one of the operands of the multiply is a cast from a boolean value, then // we know the bool is either zero or one, so this is a 'masking' multiply. // X * Y (where Y is 0 or 1) -> X & (0-Y) if (!I.getType()->isVectorTy()) { // -2 is "-1 << 1" so it is all bits set except the low one. 
APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true); - + Value *BoolCast = 0, *OtherOp = 0; if (MaskedValueIsZero(Op0, Negative2)) BoolCast = Op0, OtherOp = Op1; @@ -280,7 +280,7 @@ static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) { return; if (I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra()) return; - + ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(0)); if (CFP && CFP->isExactlyValue(0.5)) { Y = I->getOperand(1); @@ -289,14 +289,14 @@ static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) { CFP = dyn_cast<ConstantFP>(I->getOperand(1)); if (CFP && CFP->isExactlyValue(0.5)) Y = I->getOperand(0); -} +} /// Helper function of InstCombiner::visitFMul(BinaryOperator(). It returns /// true iff the given value is FMul or FDiv with one and only one operand /// being a normal constant (i.e. not Zero/NaN/Infinity). static bool isFMulOrFDivWithConstant(Value *V) { Instruction *I = dyn_cast<Instruction>(V); - if (!I || (I->getOpcode() != Instruction::FMul && + if (!I || (I->getOpcode() != Instruction::FMul && I->getOpcode() != Instruction::FDiv)) return false; @@ -318,10 +318,10 @@ static bool isNormalFp(const ConstantFP *C) { /// foldFMulConst() is a helper routine of InstCombiner::visitFMul(). /// The input \p FMulOrDiv is a FMul/FDiv with one and only one operand /// being a constant (i.e. isFMulOrFDivWithConstant(FMulOrDiv) == true). -/// This function is to simplify "FMulOrDiv * C" and returns the +/// This function is to simplify "FMulOrDiv * C" and returns the /// resulting expression. Note that this function could return NULL in /// case the constants cannot be folded into a normal floating-point. -/// +/// Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, ConstantFP *C, Instruction *InsertBefore) { assert(isFMulOrFDivWithConstant(FMulOrDiv) && "V is invalid"); @@ -351,7 +351,7 @@ Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, ConstantFP *C, if (isNormalFp(F)) { R = BinaryOperator::CreateFMul(Opnd0, F); } else { - // (X / C1) * C => X / (C1/C) + // (X / C1) * C => X / (C1/C) Constant *F = ConstantExpr::getFDiv(C1, C); if (isNormalFp(cast<ConstantFP>(F))) R = BinaryOperator::CreateFDiv(Opnd0, F); @@ -415,13 +415,13 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) { if (C0) { std::swap(C0, C1); std::swap(Opnd0, Opnd1); - Swap = true; + Swap = true; } if (C1 && C1->getValueAPF().isNormal() && isFMulOrFDivWithConstant(Opnd0)) { Value *M1 = ConstantExpr::getFMul(C1, C); - Value *M0 = isNormalFp(cast<ConstantFP>(M1)) ? + Value *M0 = isNormalFp(cast<ConstantFP>(M1)) ? foldFMulConst(cast<Instruction>(Opnd0), C, &I) : 0; if (M0 && M1) { @@ -495,7 +495,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) { } // (X*Y) * X => (X*X) * Y where Y != X - // The purpose is two-fold: + // The purpose is two-fold: // 1) to form a power expression (of X). // 2) potentially shorten the critical path: After transformation, the // latency of the instruction Y is amortized by the expression of X*X, @@ -537,7 +537,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) { /// instruction. bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) { SelectInst *SI = cast<SelectInst>(I.getOperand(1)); - + // div/rem X, (Cond ? 
0 : Y) -> div/rem X, Y int NonNullOperand = -1; if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1))) @@ -547,36 +547,36 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) { if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2))) if (ST->isNullValue()) NonNullOperand = 1; - + if (NonNullOperand == -1) return false; - + Value *SelectCond = SI->getOperand(0); - + // Change the div/rem to use 'Y' instead of the select. I.setOperand(1, SI->getOperand(NonNullOperand)); - + // Okay, we know we replace the operand of the div/rem with 'Y' with no // problem. However, the select, or the condition of the select may have // multiple uses. Based on our knowledge that the operand must be non-zero, // propagate the known value for the select into other uses of it, and // propagate a known value of the condition into its other users. - + // If the select and condition only have a single use, don't bother with this, // early exit. if (SI->use_empty() && SelectCond->hasOneUse()) return true; - + // Scan the current block backward, looking for other uses of SI. BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin(); - + while (BBI != BBFront) { --BBI; // If we found a call to a function, we can't assume it will return, so // information from below it cannot be propagated above it. if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI)) break; - + // Replace uses of the select or its condition with the known values. for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end(); I != E; ++I) { @@ -589,17 +589,17 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) { Worklist.Add(BBI); } } - + // If we past the instruction, quit looking for it. if (&*BBI == SI) SI = 0; if (&*BBI == SelectCond) SelectCond = 0; - + // If we ran out of things to eliminate, break out of the loop. if (SelectCond == 0 && SI == 0) break; - + } return true; } @@ -617,7 +617,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) { I.setOperand(1, V); return &I; } - + // Handle cases involving: [su]div X, (select Cond, Y, Z) // This does not apply for fdiv. if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I)) @@ -683,16 +683,16 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) { // Handle the integer div common cases if (Instruction *Common = commonIDivTransforms(I)) return Common; - - { + + { // X udiv 2^C -> X >> C // Check to see if this is an unsigned division with an exact power of 2, // if so, convert to a right shift. const APInt *C; if (match(Op1, m_Power2(C))) { BinaryOperator *LShr = - BinaryOperator::CreateLShr(Op0, - ConstantInt::get(Op0->getType(), + BinaryOperator::CreateLShr(Op0, + ConstantInt::get(Op0->getType(), C->logBase2())); if (I.isExact()) LShr->setIsExact(); return LShr; @@ -732,7 +732,7 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) { return BinaryOperator::CreateLShr(Op0, N); } } - + // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2) // where C1&C2 are powers of two. { Value *Cond; const APInt *C1, *C2; @@ -740,11 +740,11 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) { // Construct the "on true" case of the select Value *TSI = Builder->CreateLShr(Op0, C1->logBase2(), Op1->getName()+".t", I.isExact()); - + // Construct the "on false" case of the select Value *FSI = Builder->CreateLShr(Op0, C2->logBase2(), Op1->getName()+".f", I.isExact()); - + // construct the select instruction and return it. 
return SelectInst::Create(Cond, TSI, FSI); } @@ -799,7 +799,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) { // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set return BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); } - + if (match(Op1, m_Shl(m_Power2(), m_Value()))) { // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y) // Safe because the only negative value (1 << Y) can take on is @@ -809,13 +809,13 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) { } } } - + return 0; } /// CvtFDivConstToReciprocal tries to convert X/C into X*1/C if C not a special /// FP value and: -/// 1) 1/C is exact, or +/// 1) 1/C is exact, or /// 2) reciprocal is allowed. /// If the convertion was successful, the simplified expression "X * 1/C" is /// returned; otherwise, NULL is returned. @@ -826,7 +826,7 @@ static Instruction *CvtFDivConstToReciprocal(Value *Dividend, const APFloat &FpVal = Divisor->getValueAPF(); APFloat Reciprocal(FpVal.getSemantics()); bool Cvt = FpVal.getExactInverse(&Reciprocal); - + if (!Cvt && AllowReciprocal && FpVal.isNormal()) { Reciprocal = APFloat(FpVal.getSemantics(), 1.0f); (void)Reciprocal.divide(FpVal, APFloat::rmNearestTiesToEven); @@ -870,10 +870,10 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) { Constant *C = ConstantExpr::getFMul(C1, C2); const APFloat &F = cast<ConstantFP>(C)->getValueAPF(); if (F.isNormal() && !F.isDenormal()) { - Res = CvtFDivConstToReciprocal(X, cast<ConstantFP>(C), + Res = CvtFDivConstToReciprocal(X, cast<ConstantFP>(C), AllowReciprocal); if (!Res) - Res = BinaryOperator::CreateFDiv(X, C); + Res = BinaryOperator::CreateFDiv(X, C); } } @@ -911,7 +911,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) { if (Fold) { const APFloat &FoldC = cast<ConstantFP>(Fold)->getValueAPF(); if (FoldC.isNormal() && !FoldC.isDenormal()) { - Instruction *R = CreateDiv ? + Instruction *R = CreateDiv ? 
BinaryOperator::CreateFDiv(Fold, X) : BinaryOperator::CreateFMul(X, Fold); R->setFastMathFlags(I.getFastMathFlags()); @@ -997,7 +997,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) { if (Instruction *common = commonIRemTransforms(I)) return common; - + // X urem C^2 -> X and C-1 { const APInt *C; if (match(Op1, m_Power2(C))) @@ -1005,7 +1005,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) { ConstantInt::get(I.getType(), *C-1)); } - // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1) + // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1) if (match(Op1, m_Shl(m_Power2(), m_Value()))) { Constant *N1 = Constant::getAllOnesValue(I.getType()); Value *Add = Builder->CreateAdd(Op1, N1); @@ -1041,7 +1041,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) { // Handle the integer rem common cases if (Instruction *Common = commonIRemTransforms(I)) return Common; - + if (Value *RHSNeg = dyn_castNegVal(Op1)) if (!isa<Constant>(RHSNeg) || (isa<ConstantInt>(RHSNeg) && diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp index b0a998c..bd14e81 100644 --- a/lib/Transforms/InstCombine/InstCombinePHI.cpp +++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp @@ -27,10 +27,10 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { unsigned Opc = FirstInst->getOpcode(); Value *LHSVal = FirstInst->getOperand(0); Value *RHSVal = FirstInst->getOperand(1); - + Type *LHSType = LHSVal->getType(); Type *RHSType = RHSVal->getType(); - + bool isNUW = false, isNSW = false, isExact = false; if (OverflowingBinaryOperator *BO = dyn_cast<OverflowingBinaryOperator>(FirstInst)) { @@ -39,7 +39,7 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { } else if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(FirstInst)) isExact = PEO->isExact(); - + // Scan to see if all operands are the same opcode, and all have one use. for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) { Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i)); @@ -54,14 +54,14 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { if (CmpInst *CI = dyn_cast<CmpInst>(I)) if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate()) return 0; - + if (isNUW) isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap(); if (isNSW) isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); if (isExact) isExact = cast<PossiblyExactOperator>(I)->isExact(); - + // Keep track of which operand needs a phi node. if (I->getOperand(0) != LHSVal) LHSVal = 0; if (I->getOperand(1) != RHSVal) RHSVal = 0; @@ -73,9 +73,9 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { // bad when the PHIs are in the header of a loop. if (!LHSVal && !RHSVal) return 0; - + // Otherwise, this is safe to transform! - + Value *InLHS = FirstInst->getOperand(0); Value *InRHS = FirstInst->getOperand(1); PHINode *NewLHS = 0, *NewRHS = 0; @@ -86,7 +86,7 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { InsertNewInstBefore(NewLHS, PN); LHSVal = NewLHS; } - + if (RHSVal == 0) { NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(), FirstInst->getOperand(1)->getName() + ".pn"); @@ -94,7 +94,7 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { InsertNewInstBefore(NewRHS, PN); RHSVal = NewRHS; } - + // Add all operands to the new PHIs. 
if (NewLHS || NewRHS) { for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) { @@ -109,7 +109,7 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { } } } - + if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) { CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(), LHSVal, RHSVal); @@ -129,8 +129,8 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0)); - - SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(), + + SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(), FirstInst->op_end()); // This is true if all GEP bases are allocas and if all indices into them are // constants. @@ -140,9 +140,9 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { // more than one phi, which leads to higher register pressure. This is // especially bad when the PHIs are in the header of a loop. bool NeededPhi = false; - + bool AllInBounds = true; - + // Scan to see if all operands are the same opcode, and all have one use. for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) { GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i)); @@ -151,18 +151,18 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { return 0; AllInBounds &= GEP->isInBounds(); - + // Keep track of whether or not all GEPs are of alloca pointers. if (AllBasePointersAreAllocas && (!isa<AllocaInst>(GEP->getOperand(0)) || !GEP->hasAllConstantIndices())) AllBasePointersAreAllocas = false; - + // Compare the operand lists. for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) { if (FirstInst->getOperand(op) == GEP->getOperand(op)) continue; - + // Don't merge two GEPs when two operands differ (introducing phi nodes) // if one of the PHIs has a constant for the index. The index may be // substantially cheaper to compute for the constants, so making it a @@ -171,7 +171,7 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { if (isa<ConstantInt>(FirstInst->getOperand(op)) || isa<ConstantInt>(GEP->getOperand(op))) return 0; - + if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType()) return 0; @@ -186,7 +186,7 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { NeededPhi = true; } } - + // If all of the base pointers of the PHI'd GEPs are from allocas, don't // bother doing this transformation. At best, this will just save a bit of // offset calculation, but all the predecessors will have to materialize the @@ -195,11 +195,11 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { // which can usually all be folded into the load. if (AllBasePointersAreAllocas) return 0; - + // Otherwise, this is safe to transform. Insert PHI nodes for each operand // that is variable. SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size()); - + bool HasAnyPHIs = false; for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) { if (FixedOperands[i]) continue; // operand doesn't need a phi. @@ -207,28 +207,28 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { PHINode *NewPN = PHINode::Create(FirstOp->getType(), e, FirstOp->getName()+".pn"); InsertNewInstBefore(NewPN, PN); - + NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0)); OperandPhis[i] = NewPN; FixedOperands[i] = NewPN; HasAnyPHIs = true; } - + // Add all operands to the new PHIs. 
if (HasAnyPHIs) { for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) { GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i)); BasicBlock *InBB = PN.getIncomingBlock(i); - + for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op) if (PHINode *OpPhi = OperandPhis[op]) OpPhi->addIncoming(InGEP->getOperand(op), InBB); } } - + Value *Base = FixedOperands[0]; - GetElementPtrInst *NewGEP = + GetElementPtrInst *NewGEP = GetElementPtrInst::Create(Base, makeArrayRef(FixedOperands).slice(1)); if (AllInBounds) NewGEP->setIsInBounds(); NewGEP->setDebugLoc(FirstInst->getDebugLoc()); @@ -246,11 +246,11 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { /// to a register. static bool isSafeAndProfitableToSinkLoad(LoadInst *L) { BasicBlock::iterator BBI = L, E = L->getParent()->end(); - + for (++BBI; BBI != E; ++BBI) if (BBI->mayWriteToMemory()) return false; - + // Check for non-address taken alloca. If not address-taken already, it isn't // profitable to do this xform. if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) { @@ -266,11 +266,11 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) { isAddressTaken = true; break; } - + if (!isAddressTaken && AI->isStaticAlloca()) return false; } - + // If this load is a load from a GEP with a constant offset from an alloca, // then we don't want to sink it. In its present form, it will be // load [constant stack offset]. Sinking it will cause us to have to @@ -280,7 +280,7 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) { if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0))) if (AI->isStaticAlloca() && GEP->hasAllConstantIndices()) return false; - + return true; } @@ -300,41 +300,41 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) { bool isVolatile = FirstLI->isVolatile(); unsigned LoadAlignment = FirstLI->getAlignment(); unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace(); - + // We can't sink the load if the loaded value could be modified between the // load and the PHI. if (FirstLI->getParent() != PN.getIncomingBlock(0) || !isSafeAndProfitableToSinkLoad(FirstLI)) return 0; - + // If the PHI is of volatile loads and the load block has multiple // successors, sinking it would remove a load of the volatile value from // the path through the other successor. - if (isVolatile && + if (isVolatile && FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1) return 0; - + // Check to see if all arguments are the same operation. for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) { LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i)); if (!LI || !LI->hasOneUse()) return 0; - - // We can't sink the load if the loaded value could be modified between + + // We can't sink the load if the loaded value could be modified between // the load and the PHI. if (LI->isVolatile() != isVolatile || LI->getParent() != PN.getIncomingBlock(i) || LI->getPointerAddressSpace() != LoadAddrSpace || !isSafeAndProfitableToSinkLoad(LI)) return 0; - + // If some of the loads have an alignment specified but not all of them, // we can't do the transformation. if ((LoadAlignment != 0) != (LI->getAlignment() != 0)) return 0; - + LoadAlignment = std::min(LoadAlignment, LI->getAlignment()); - + // If the PHI is of volatile loads and the load block has multiple // successors, sinking it would remove a load of the volatile value from // the path through the other successor. 
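The hunks above touch InstCombiner::FoldPHIArgLoadIntoPHI, which sinks a load below a PHI when every incoming value is a one-use load that is safe to move. A rough source-level sketch of the shape it targets follows; this is an illustration only, with hypothetical names, not code from the patch.

// Illustration only: each predecessor loads a value and the PHI merges the
// loaded results. The transform instead builds a PHI of the pointers and
// emits a single load after the merge, provided nothing can modify memory
// between the original loads and the PHI (and subject to the volatile and
// alignment restrictions noted in the comments above).
int phi_of_loads(int cond, int *p, int *q) {
  int x;
  if (cond)
    x = *p;   // load in the "then" predecessor
  else
    x = *q;   // load in the "else" predecessor
  return x;   // after the transform: int *m = cond ? p : q; x = *m;
}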
@@ -342,16 +342,16 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) { LI->getParent()->getTerminator()->getNumSuccessors() != 1) return 0; } - + // Okay, they are all the same operation. Create a new PHI node of the // correct type, and PHI together all of the LHS's of the instructions. PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(), PN.getNumIncomingValues(), PN.getName()+".in"); - + Value *InVal = FirstLI->getOperand(0); NewPN->addIncoming(InVal, PN.getIncomingBlock(0)); - + // Add all operands to the new PHI. for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) { Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0); @@ -359,7 +359,7 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) { InVal = 0; NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i)); } - + Value *PhiVal; if (InVal) { // The new PHI unions all of the same values together. This is really @@ -370,14 +370,14 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) { InsertNewInstBefore(NewPN, PN); PhiVal = NewPN; } - + // If this was a volatile load that we are merging, make sure to loop through // and mark all the input loads as non-volatile. If we don't do this, we will // insert a new volatile load and the old ones will not be deletable. if (isVolatile) for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false); - + LoadInst *NewLI = new LoadInst(PhiVal, "", isVolatile, LoadAlignment); NewLI->setDebugLoc(FirstLI->getDebugLoc()); return NewLI; @@ -395,7 +395,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) { return FoldPHIArgGEPIntoPHI(PN); if (isa<LoadInst>(FirstInst)) return FoldPHIArgLoadIntoPHI(PN); - + // Scan the instruction, looking for input operations that can be folded away. // If all input operands to the phi are the same instruction (e.g. a cast from // the same type or "+42") we can pull the operation through the PHI, reducing @@ -403,7 +403,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) { Constant *ConstantOp = 0; Type *CastSrcTy = 0; bool isNUW = false, isNSW = false, isExact = false; - + if (isa<CastInst>(FirstInst)) { CastSrcTy = FirstInst->getOperand(0)->getType(); @@ -414,12 +414,12 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) { return 0; } } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) { - // Can fold binop, compare or shift here if the RHS is a constant, + // Can fold binop, compare or shift here if the RHS is a constant, // otherwise call FoldPHIArgBinOpIntoPHI. 
ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1)); if (ConstantOp == 0) return FoldPHIArgBinOpIntoPHI(PN); - + if (OverflowingBinaryOperator *BO = dyn_cast<OverflowingBinaryOperator>(FirstInst)) { isNUW = BO->hasNoUnsignedWrap(); @@ -442,7 +442,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) { } else if (I->getOperand(1) != ConstantOp) { return 0; } - + if (isNUW) isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap(); if (isNSW) @@ -486,7 +486,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) { NewCI->setDebugLoc(FirstInst->getDebugLoc()); return NewCI; } - + if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) { BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp); if (isNUW) BinOp->setHasNoUnsignedWrap(); @@ -495,7 +495,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) { BinOp->setDebugLoc(FirstInst->getDebugLoc()); return BinOp; } - + CmpInst *CIOp = cast<CmpInst>(FirstInst); CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(), PhiVal, ConstantOp); @@ -513,7 +513,7 @@ static bool DeadPHICycle(PHINode *PN, // Remember this node, and if we find the cycle, return. if (!PotentiallyDeadPHIs.insert(PN)) return true; - + // Don't scan crazily complex things. if (PotentiallyDeadPHIs.size() == 16) return false; @@ -527,16 +527,16 @@ static bool DeadPHICycle(PHINode *PN, /// PHIsEqualValue - Return true if this phi node is always equal to /// NonPhiInVal. This happens with mutually cyclic phi nodes like: /// z = some value; x = phi (y, z); y = phi (x, z) -static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal, +static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal, SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) { // See if we already saw this PHI node. if (!ValueEqualPHIs.insert(PN)) return true; - + // Don't scan crazily complex things. if (ValueEqualPHIs.size() == 16) return false; - + // Scan the operands to see if they are either phi nodes or are equal to // the value. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { @@ -547,7 +547,7 @@ static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal, } else if (Op != NonPhiInVal) return false; } - + return true; } @@ -557,10 +557,10 @@ struct PHIUsageRecord { unsigned PHIId; // The ID # of the PHI (something determinstic to sort on) unsigned Shift; // The amount shifted. Instruction *Inst; // The trunc instruction. - + PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User) : PHIId(pn), Shift(Sh), Inst(User) {} - + bool operator<(const PHIUsageRecord &RHS) const { if (PHIId < RHS.PHIId) return true; if (PHIId > RHS.PHIId) return false; @@ -570,15 +570,15 @@ struct PHIUsageRecord { RHS.Inst->getType()->getPrimitiveSizeInBits(); } }; - + struct LoweredPHIRecord { PHINode *PN; // The PHI that was lowered. unsigned Shift; // The amount shifted. unsigned Width; // The width extracted. - + LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty) : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {} - + // Ctor form used by DenseMap. LoweredPHIRecord(PHINode *pn, unsigned Sh) : PN(pn), Shift(Sh), Width(0) {} @@ -621,20 +621,20 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { // PHIUsers - Keep track of all of the truncated values extracted from a set // of PHIs, along with their offset. These are the things we want to rewrite. SmallVector<PHIUsageRecord, 16> PHIUsers; - + // PHIs are often mutually cyclic, so we keep track of a whole set of PHI // nodes which are extracted from. 
PHIsToSlice is a set we use to avoid // revisiting PHIs, PHIsInspected is a ordered list of PHIs that we need to // check the uses of (to ensure they are all extracts). SmallVector<PHINode*, 8> PHIsToSlice; SmallPtrSet<PHINode*, 8> PHIsInspected; - + PHIsToSlice.push_back(&FirstPhi); PHIsInspected.insert(&FirstPhi); - + for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) { PHINode *PN = PHIsToSlice[PHIId]; - + // Scan the input list of the PHI. If any input is an invoke, and if the // input is defined in the predecessor, then we won't be split the critical // edge which is required to insert a truncate. Because of this, we have to @@ -644,85 +644,85 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { if (II == 0) continue; if (II->getParent() != PN->getIncomingBlock(i)) continue; - + // If we have a phi, and if it's directly in the predecessor, then we have // a critical edge where we need to put the truncate. Since we can't // split the edge in instcombine, we have to bail out. return 0; } - - + + for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ++UI) { Instruction *User = cast<Instruction>(*UI); - + // If the user is a PHI, inspect its uses recursively. if (PHINode *UserPN = dyn_cast<PHINode>(User)) { if (PHIsInspected.insert(UserPN)) PHIsToSlice.push_back(UserPN); continue; } - + // Truncates are always ok. if (isa<TruncInst>(User)) { PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User)); continue; } - + // Otherwise it must be a lshr which can only be used by one trunc. if (User->getOpcode() != Instruction::LShr || !User->hasOneUse() || !isa<TruncInst>(User->use_back()) || !isa<ConstantInt>(User->getOperand(1))) return 0; - + unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue(); PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back())); } } - + // If we have no users, they must be all self uses, just nuke the PHI. if (PHIUsers.empty()) return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType())); - + // If this phi node is transformable, create new PHIs for all the pieces // extracted out of it. First, sort the users by their offset and size. array_pod_sort(PHIUsers.begin(), PHIUsers.end()); - + DEBUG(errs() << "SLICING UP PHI: " << FirstPhi << '\n'; for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i) errs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] <<'\n'; ); - + // PredValues - This is a temporary used when rewriting PHI nodes. It is // hoisted out here to avoid construction/destruction thrashing. DenseMap<BasicBlock*, Value*> PredValues; - + // ExtractedVals - Each new PHI we introduce is saved here so we don't // introduce redundant PHIs. DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals; - + for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) { unsigned PHIId = PHIUsers[UserI].PHIId; PHINode *PN = PHIsToSlice[PHIId]; unsigned Offset = PHIUsers[UserI].Shift; Type *Ty = PHIUsers[UserI].Inst->getType(); - + PHINode *EltPHI; - + // If we've already lowered a user like this, reuse the previously lowered // value. if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) { - + // Otherwise, Create the new PHI node for this user. 
EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(), PN->getName()+".off"+Twine(Offset), PN); assert(EltPHI->getType() != PN->getType() && "Truncate didn't shrink phi?"); - + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { BasicBlock *Pred = PN->getIncomingBlock(i); Value *&PredVal = PredValues[Pred]; - + // If we already have a value for this predecessor, reuse it. if (PredVal) { EltPHI->addIncoming(PredVal, Pred); @@ -736,7 +736,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { EltPHI->addIncoming(PredVal, Pred); continue; } - + if (PHINode *InPHI = dyn_cast<PHINode>(PN)) { // If the incoming value was a PHI, and if it was one of the PHIs we // already rewrote it, just use the lowered value. @@ -746,7 +746,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { continue; } } - + // Otherwise, do an extract in the predecessor. Builder->SetInsertPoint(Pred, Pred->getTerminator()); Value *Res = InVal; @@ -756,7 +756,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { Res = Builder->CreateTrunc(Res, Ty, "extract.t"); PredVal = Res; EltPHI->addIncoming(Res, Pred); - + // If the incoming value was a PHI, and if it was one of the PHIs we are // rewriting, we will ultimately delete the code we inserted. This // means we need to revisit that PHI to make sure we extract out the @@ -765,22 +765,22 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { if (PHIsInspected.count(OldInVal)) { unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(), OldInVal)-PHIsToSlice.begin(); - PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset, + PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res))); ++UserE; } } PredValues.clear(); - + DEBUG(errs() << " Made element PHI for offset " << Offset << ": " << *EltPHI << '\n'); ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI; } - + // Replace the use of this piece with the PHI node. ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI); } - + // Replace all the remaining uses of the PHI nodes (self uses and the lshrs) // with undefs. Value *Undef = UndefValue::get(FirstPhi.getType()); @@ -818,7 +818,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { if (DeadPHICycle(PU, PotentiallyDeadPHIs)) return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType())); } - + // If this phi has a single use, and if that use just computes a value for // the next iteration of a loop, delete the phi. This occurs with unused // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this @@ -847,7 +847,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { if (InValNo != NumIncomingVals) { Value *NonPhiInVal = PN.getIncomingValue(InValNo); - + // Scan the rest of the operands to see if there are any conflicts, if so // there is no need to recursively scan other phis. for (++InValNo; InValNo != NumIncomingVals; ++InValNo) { @@ -855,7 +855,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal)) break; } - + // If we scanned over all operands, then we have one unique value plus // phi values. Scan PHI nodes to see if they all merge in each other or // the value. 
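The scan described above looks for a PHI whose incoming values are either other PHIs in the same cycle or one unique non-PHI value; PHIsEqualValue then checks that the whole cycle collapses to that value. A small hypothetical illustration of such a cycle, not taken from the patch:

// Illustration only: x and y are only ever assigned from each other or
// from z, so every value in the cycle is provably equal to z and the
// corresponding PHIs can be replaced by z.
int phi_cycle_example(int z, int n) {
  int x = z, y = z;
  for (int i = 0; i < n; ++i) {
    int t = x;   // roughly x = phi(y, z) in IR terms
    x = y;       // roughly y = phi(x, z)
    y = t;
  }
  return x;      // always equal to z
}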
@@ -899,6 +899,6 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits())) if (Instruction *Res = SliceUpIllegalIntegerPHI(PN)) return Res; - + return 0; } diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp index a262d71..2defe63 100644 --- a/lib/Transforms/InstCombine/InstCombineSelect.cpp +++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -127,13 +127,14 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI, // If this is a non-volatile load or a cast from the same type, // merge. if (TI->isCast()) { - if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType()) + Type *FIOpndTy = FI->getOperand(0)->getType(); + if (TI->getOperand(0)->getType() != FIOpndTy) return 0; // The select condition may be a vector. We may only change the operand // type if the vector width remains the same (and matches the condition). Type *CondTy = SI.getCondition()->getType(); - if (CondTy->isVectorTy() && CondTy->getVectorNumElements() != - FI->getOperand(0)->getType()->getVectorNumElements()) + if (CondTy->isVectorTy() && (!FIOpndTy->isVectorTy() || + CondTy->getVectorNumElements() != FIOpndTy->getVectorNumElements())) return 0; } else { return 0; // unknown unary op. @@ -349,6 +350,68 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, return 0; } +/// foldSelectICmpAndOr - We want to turn: +/// (select (icmp eq (and X, C1), 0), Y, (or Y, C2)) +/// into: +/// (or (shl (and X, C1), C3), y) +/// iff: +/// C1 and C2 are both powers of 2 +/// where: +/// C3 = Log(C2) - Log(C1) +/// +/// This transform handles cases where: +/// 1. The icmp predicate is inverted +/// 2. The select operands are reversed +/// 3. The magnitude of C2 and C1 are flipped +static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal, + Value *FalseVal, + InstCombiner::BuilderTy *Builder) { + const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition()); + if (!IC || !IC->isEquality()) + return 0; + + Value *CmpLHS = IC->getOperand(0); + Value *CmpRHS = IC->getOperand(1); + + if (!match(CmpRHS, m_Zero())) + return 0; + + Value *X; + const APInt *C1; + if (!match(CmpLHS, m_And(m_Value(X), m_Power2(C1)))) + return 0; + + const APInt *C2; + bool OrOnTrueVal = false; + bool OrOnFalseVal = match(FalseVal, m_Or(m_Specific(TrueVal), m_Power2(C2))); + if (!OrOnFalseVal) + OrOnTrueVal = match(TrueVal, m_Or(m_Specific(FalseVal), m_Power2(C2))); + + if (!OrOnFalseVal && !OrOnTrueVal) + return 0; + + Value *V = CmpLHS; + Value *Y = OrOnFalseVal ? TrueVal : FalseVal; + + unsigned C1Log = C1->logBase2(); + unsigned C2Log = C2->logBase2(); + if (C2Log > C1Log) { + V = Builder->CreateZExtOrTrunc(V, Y->getType()); + V = Builder->CreateShl(V, C2Log - C1Log); + } else if (C1Log > C2Log) { + V = Builder->CreateLShr(V, C1Log - C2Log); + V = Builder->CreateZExtOrTrunc(V, Y->getType()); + } else + V = Builder->CreateZExtOrTrunc(V, Y->getType()); + + ICmpInst::Predicate Pred = IC->getPredicate(); + if ((Pred == ICmpInst::ICMP_NE && OrOnFalseVal) || + (Pred == ICmpInst::ICMP_EQ && OrOnTrueVal)) + V = Builder->CreateXor(V, *C2); + + return Builder->CreateOr(V, Y); +} + /// visitSelectInstWithICmp - Visit a SelectInst that has an /// ICmpInst as its first operand. 
/// @@ -520,6 +583,9 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI, } } + if (Value *V = foldSelectICmpAndOr(SI, TrueVal, FalseVal, Builder)) + return ReplaceInstUsesWith(SI, V); + return Changed ? &SI : 0; } @@ -675,7 +741,8 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { // Change: A = select B, false, C --> A = and !B, C Value *NotCond = Builder->CreateNot(CondVal, "not."+CondVal->getName()); return BinaryOperator::CreateAnd(NotCond, FalseVal); - } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) { + } + if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) { if (C->getZExtValue() == false) { // Change: A = select B, C, false --> A = and B, C return BinaryOperator::CreateAnd(CondVal, TrueVal); @@ -689,14 +756,14 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { // select a, a, b -> a|b if (CondVal == TrueVal) return BinaryOperator::CreateOr(CondVal, FalseVal); - else if (CondVal == FalseVal) + if (CondVal == FalseVal) return BinaryOperator::CreateAnd(CondVal, TrueVal); // select a, ~a, b -> (~a)&b // select a, b, ~a -> (~a)|b if (match(TrueVal, m_Not(m_Specific(CondVal)))) return BinaryOperator::CreateAnd(TrueVal, FalseVal); - else if (match(FalseVal, m_Not(m_Specific(CondVal)))) + if (match(FalseVal, m_Not(m_Specific(CondVal)))) return BinaryOperator::CreateOr(TrueVal, FalseVal); } @@ -837,7 +904,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { Value *NewFalseOp = NegVal; if (AddOp != TI) std::swap(NewTrueOp, NewFalseOp); - Value *NewSel = + Value *NewSel = Builder->CreateSelect(CondVal, NewTrueOp, NewFalseOp, SI.getName() + ".p"); @@ -861,7 +928,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { Value *LHS, *RHS, *LHS2, *RHS2; if (SelectPatternFlavor SPF = MatchSelectPattern(&SI, LHS, RHS)) { if (SelectPatternFlavor SPF2 = MatchSelectPattern(LHS, LHS2, RHS2)) - if (Instruction *R = FoldSPFofSPF(cast<Instruction>(LHS),SPF2,LHS2,RHS2, + if (Instruction *R = FoldSPFofSPF(cast<Instruction>(LHS),SPF2,LHS2,RHS2, SI, SPF, RHS)) return R; if (SelectPatternFlavor SPF2 = MatchSelectPattern(RHS, LHS2, RHS2)) diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index 4f71db1..de8a3ac 100644 --- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -105,6 +105,75 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) { return 0; } +// If we have a PHI node with a vector type that has only 2 uses: feed +// itself and be an operand of extractelemnt at a constant location, +// try to replace the PHI of the vector type with a PHI of a scalar type +Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) { + // Verify that the PHI node has exactly 2 uses. Otherwise return NULL. + if (!PN->hasNUses(2)) + return NULL; + + // If so, it's known at this point that one operand is PHI and the other is + // an extractelement node. Find the PHI user that is not the extractelement + // node. + Value::use_iterator iu = PN->use_begin(); + Instruction *PHIUser = dyn_cast<Instruction>(*iu); + if (PHIUser == cast<Instruction>(&EI)) + PHIUser = cast<Instruction>(*(++iu)); + + // Verify that this PHI user has one use, which is the PHI itself, + // and that it is a binary operation which is cheap to scalarize. + // otherwise return NULL. 
+ if (!PHIUser->hasOneUse() || !(PHIUser->use_back() == PN) || + !(isa<BinaryOperator>(PHIUser)) || + !CheapToScalarize(PHIUser, true)) + return NULL; + + // Create a scalar PHI node that will replace the vector PHI node + // just before the current PHI node. + PHINode * scalarPHI = cast<PHINode>( + InsertNewInstWith(PHINode::Create(EI.getType(), + PN->getNumIncomingValues(), ""), *PN)); + // Scalarize each PHI operand. + for (unsigned i=0; i < PN->getNumIncomingValues(); i++) { + Value *PHIInVal = PN->getIncomingValue(i); + BasicBlock *inBB = PN->getIncomingBlock(i); + Value *Elt = EI.getIndexOperand(); + // If the operand is the PHI induction variable: + if (PHIInVal == PHIUser) { + // Scalarize the binary operation. Its first operand is the + // scalar PHI and the second operand is extracted from the other + // vector operand. + BinaryOperator *B0 = cast<BinaryOperator>(PHIUser); + unsigned opId = (B0->getOperand(0) == PN) ? 1: 0; + Value *Op = Builder->CreateExtractElement( + B0->getOperand(opId), Elt, B0->getOperand(opId)->getName()+".Elt"); + Value *newPHIUser = InsertNewInstWith( + BinaryOperator::Create(B0->getOpcode(), scalarPHI,Op), + *B0); + scalarPHI->addIncoming(newPHIUser, inBB); + } else { + // Scalarize PHI input: + Instruction *newEI = + ExtractElementInst::Create(PHIInVal, Elt, ""); + // Insert the new instruction into the predecessor basic block. + Instruction *pos = dyn_cast<Instruction>(PHIInVal); + BasicBlock::iterator InsertPos; + if (pos && !isa<PHINode>(pos)) { + InsertPos = pos; + ++InsertPos; + } else { + InsertPos = inBB->getFirstInsertionPt(); + } + + InsertNewInstWith(newEI, *InsertPos); + + scalarPHI->addIncoming(newEI, inBB); + } + } + return ReplaceInstUsesWith(EI, scalarPHI); +} + Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) { // If vector val is constant with all elements the same, replace EI with // that element. We handle a known element # below. @@ -149,6 +218,14 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) { if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal)) return new BitCastInst(Elt, EI.getType()); } + + // If there's a vector PHI feeding a scalar use through this extractelement + // instruction, try to scalarize the PHI. + if (PHINode *PN = dyn_cast<PHINode>(EI.getOperand(0))) { + Instruction *scalarPHI = scalarizePHI(EI, PN); + if (scalarPHI) + return (scalarPHI); + } } if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) { @@ -201,10 +278,10 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) { } else if (CastInst *CI = dyn_cast<CastInst>(I)) { // Canonicalize extractelement(cast) -> cast(extractelement) // bitcasts can change the number of vector elements and they cost nothing - if (CI->hasOneUse() && EI.hasOneUse() && - (CI->getOpcode() != Instruction::BitCast)) { + if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) { Value *EE = Builder->CreateExtractElement(CI->getOperand(0), EI.getIndexOperand()); + Worklist.AddValue(EE); return CastInst::Create(CI->getOpcode(), EE, EI.getType()); } } @@ -336,6 +413,10 @@ static Value *CollectShuffleElements(Value *V, SmallVectorImpl<Constant*> &Mask, if (VecOp == RHS) { Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS); + // Update Mask to reflect that `ScalarOp' has been inserted at + // position `InsertedIdx' within the vector returned by IEI. + Mask[InsertedIdx % NumElts] = Mask[ExtractedIdx]; + // Everything but the extracted element is replaced with the RHS. 
for (unsigned i = 0; i != NumElts; ++i) { if (i != InsertedIdx) diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 92b42ee..623c470 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -71,7 +71,7 @@ static const char *kAsanRegisterGlobalsName = "__asan_register_globals"; static const char *kAsanUnregisterGlobalsName = "__asan_unregister_globals"; static const char *kAsanPoisonGlobalsName = "__asan_before_dynamic_init"; static const char *kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init"; -static const char *kAsanInitName = "__asan_init_v2"; +static const char *kAsanInitName = "__asan_init_v3"; static const char *kAsanHandleNoReturnName = "__asan_handle_no_return"; static const char *kAsanMappingOffsetName = "__asan_mapping_offset"; static const char *kAsanMappingScaleName = "__asan_mapping_scale"; @@ -274,8 +274,6 @@ struct AddressSanitizer : public FunctionPass { Instruction *InsertBefore, bool IsWrite); Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); bool runOnFunction(Function &F); - void createInitializerPoisonCalls(Module &M, - Value *FirstAddr, Value *LastAddr); bool maybeInsertAsanInitAtFunctionEntry(Function &F); void emitShadowMapping(Module &M, IRBuilder<> &IRB) const; virtual bool doInitialization(Module &M); @@ -333,8 +331,7 @@ class AddressSanitizerModule : public ModulePass { void initializeCallbacks(Module &M); bool ShouldInstrumentGlobal(GlobalVariable *G); - void createInitializerPoisonCalls(Module &M, Value *FirstAddr, - Value *LastAddr); + void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); size_t RedzoneSize() const { return RedzoneSizeForScale(Mapping.Scale); } @@ -753,7 +750,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns, } void AddressSanitizerModule::createInitializerPoisonCalls( - Module &M, Value *FirstAddr, Value *LastAddr) { + Module &M, GlobalValue *ModuleName) { // We do all of our poisoning and unpoisoning within _GLOBAL__I_a. Function *GlobalInit = M.getFunction("_GLOBAL__I_a"); // If that function is not present, this TU contains no globals, or they have @@ -765,7 +762,8 @@ void AddressSanitizerModule::createInitializerPoisonCalls( IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt()); // Add a call to poison all external globals before the given function starts. - IRB.CreateCall2(AsanPoisonGlobals, FirstAddr, LastAddr); + Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); + IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); // Add calls to unpoison all globals before each return instruction. for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end(); @@ -839,7 +837,7 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) { IRBuilder<> IRB(*C); // Declare our poisoning and unpoisoning functions. AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, NULL)); AsanPoisonGlobals->setLinkage(Function::ExternalLinkage); AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL)); @@ -901,12 +899,13 @@ bool AddressSanitizerModule::runOnModule(Module &M) { assert(CtorFunc); IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator()); - // The addresses of the first and last dynamically initialized globals in - // this TU. 
Used in initialization order checking. - Value *FirstDynamic = 0, *LastDynamic = 0; + bool HasDynamicallyInitializedGlobals = false; GlobalVariable *ModuleName = createPrivateGlobalForString( M, M.getModuleIdentifier()); + // We shouldn't merge same module names, as this string serves as unique + // module ID in runtime. + ModuleName->setUnnamedAddr(false); for (size_t i = 0; i < n; i++) { static const uint64_t kMaxGlobalRedzone = 1 << 18; @@ -966,11 +965,8 @@ bool AddressSanitizerModule::runOnModule(Module &M) { NULL); // Populate the first and last globals declared in this TU. - if (CheckInitOrder && GlobalHasDynamicInitializer) { - LastDynamic = ConstantExpr::getPointerCast(NewGlobal, IntptrTy); - if (FirstDynamic == 0) - FirstDynamic = LastDynamic; - } + if (CheckInitOrder && GlobalHasDynamicInitializer) + HasDynamicallyInitializedGlobals = true; DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); } @@ -981,8 +977,8 @@ bool AddressSanitizerModule::runOnModule(Module &M) { ConstantArray::get(ArrayOfGlobalStructTy, Initializers), ""); // Create calls for poisoning before initializers run and unpoisoning after. - if (CheckInitOrder && FirstDynamic && LastDynamic) - createInitializerPoisonCalls(M, FirstDynamic, LastDynamic); + if (CheckInitOrder && HasDynamicallyInitializedGlobals) + createInitializerPoisonCalls(M, ModuleName); IRB.CreateCall2(AsanRegisterGlobals, IRB.CreatePointerCast(AllGlobals, IntptrTy), ConstantInt::get(IntptrTy, n)); @@ -1317,10 +1313,10 @@ void FunctionStackPoisoner::poisonStack() { ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase); } - // This string will be parsed by the run-time (DescribeStackAddress). + // This string will be parsed by the run-time (DescribeAddressIfStack). SmallString<2048> StackDescriptionStorage; raw_svector_ostream StackDescription(StackDescriptionStorage); - StackDescription << F.getName() << " " << AllocaVec.size() << " "; + StackDescription << AllocaVec.size() << " "; // Insert poison calls for lifetime intrinsics for alloca. bool HavePoisonedAllocas = false; @@ -1353,19 +1349,26 @@ void FunctionStackPoisoner::poisonStack() { } assert(Pos == LocalStackSize); - // Write the Magic value and the frame description constant to the redzone. + // The left-most redzone has enough space for at least 4 pointers. + // Write the Magic value to redzone[0]. Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy); IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic), BasePlus0); - Value *BasePlus1 = IRB.CreateAdd(LocalStackBase, - ConstantInt::get(IntptrTy, - ASan.LongSize/8)); - BasePlus1 = IRB.CreateIntToPtr(BasePlus1, IntptrPtrTy); + // Write the frame description constant to redzone[1]. + Value *BasePlus1 = IRB.CreateIntToPtr( + IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)), + IntptrPtrTy); GlobalVariable *StackDescriptionGlobal = createPrivateGlobalForString(*F.getParent(), StackDescription.str()); Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy); IRB.CreateStore(Description, BasePlus1); + // Write the PC to redzone[2]. + Value *BasePlus2 = IRB.CreateIntToPtr( + IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, + 2 * ASan.LongSize/8)), + IntptrPtrTy); + IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2); // Poison the stack redzones at the entry. 
Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB); diff --git a/lib/Transforms/Instrumentation/BlackList.cpp b/lib/Transforms/Instrumentation/BlackList.cpp index 927982d..39de4b0 100644 --- a/lib/Transforms/Instrumentation/BlackList.cpp +++ b/lib/Transforms/Instrumentation/BlackList.cpp @@ -110,7 +110,8 @@ static StringRef GetGVTypeString(const GlobalVariable &G) { bool BlackList::isInInit(const GlobalVariable &G) const { return (isIn(*G.getParent()) || inSection("global-init", G.getName()) || - inSection("global-init-type", GetGVTypeString(G))); + inSection("global-init-type", GetGVTypeString(G)) || + inSection("global-init-src", G.getParent()->getModuleIdentifier())); } bool BlackList::inSection(const StringRef Section, diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp index e8d4ac8..2edd151 100644 --- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -32,6 +32,7 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/DebugLoc.h" +#include "llvm/Support/FileSystem.h" #include "llvm/Support/InstIterator.h" #include "llvm/Support/PathV2.h" #include "llvm/Support/raw_ostream.h" @@ -101,6 +102,7 @@ namespace { Constant *getIncrementIndirectCounterFunc(); Constant *getEmitFunctionFunc(); Constant *getEmitArcsFunc(); + Constant *getDeleteWriteoutFunctionListFunc(); Constant *getDeleteFlushFunctionListFunc(); Constant *getEndFileFunc(); @@ -142,6 +144,12 @@ ModulePass *llvm::createGCOVProfilerPass(const GCOVOptions &Options) { return new GCOVProfiler(Options); } +static std::string getFunctionName(DISubprogram SP) { + if (!SP.getLinkageName().empty()) + return SP.getLinkageName(); + return SP.getName(); +} + namespace { class GCOVRecord { protected: @@ -290,7 +298,7 @@ namespace { ReturnBlock = new GCOVBlock(i++, os); writeBytes(FunctionTag, 4); - uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(SP.getName()) + + uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(getFunctionName(SP)) + 1 + lengthOfGCOVString(SP.getFilename()) + 1; if (UseCfgChecksum) ++BlockLen; @@ -299,7 +307,7 @@ namespace { write(0); // lineno checksum if (UseCfgChecksum) write(0); // cfg checksum - writeGCOVString(SP.getName()); + writeGCOVString(getFunctionName(SP)); writeGCOVString(SP.getFilename()); write(SP.getLineNumber()); } @@ -374,7 +382,11 @@ std::string GCOVProfiler::mangleName(DICompileUnit CU, const char *NewStem) { SmallString<128> Filename = CU.getFilename(); sys::path::replace_extension(Filename, NewStem); - return sys::path::filename(Filename.str()); + StringRef FName = sys::path::filename(Filename); + SmallString<128> CurPath; + if (sys::fs::current_path(CurPath)) return FName; + sys::path::append(CurPath, FName.str()); + return CurPath.str(); } bool GCOVProfiler::runOnModule(Module &M) { @@ -544,8 +556,8 @@ bool GCOVProfiler::emitProfileArcs() { Function *FlushF = insertFlush(CountersBySP); // Create a small bit of code that registers the "__llvm_gcov_writeout" to - // be executed at exit and the "__llvm_gcov_flush" function to be executed - // when "__gcov_flush" is called. + // be executed at exit and the "__llvm_gcov_flush" function to be executed + // when "__gcov_flush" is called. 
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false); Function *F = Function::Create(FTy, GlobalValue::InternalLinkage, "__llvm_gcov_init", M); @@ -558,21 +570,17 @@ bool GCOVProfiler::emitProfileArcs() { BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F); IRBuilder<> Builder(BB); - FTy = FunctionType::get(Builder.getInt32Ty(), - PointerType::get(FTy, 0), false); - Constant *AtExitFn = M->getOrInsertFunction("atexit", FTy); - Builder.CreateCall(AtExitFn, WriteoutF); - - // Register the local flush function. FTy = FunctionType::get(Type::getVoidTy(*Ctx), false); - FTy = FunctionType::get(Builder.getVoidTy(), - PointerType::get(FTy, 0), false); - Constant *RegFlush = - M->getOrInsertFunction("llvm_register_flush_function", FTy); - Builder.CreateCall(RegFlush, FlushF); - - // Make sure that all the flush function list is deleted. - Builder.CreateCall(AtExitFn, getDeleteFlushFunctionListFunc()); + Type *Params[] = { + PointerType::get(FTy, 0), + PointerType::get(FTy, 0) + }; + FTy = FunctionType::get(Builder.getVoidTy(), Params, false); + + // Inialize the environment and register the local writeout and flush + // functions. + Constant *GCOVInit = M->getOrInsertFunction("llvm_gcov_init", FTy); + Builder.CreateCall2(GCOVInit, WriteoutF, FlushF); Builder.CreateRetVoid(); appendToGlobalCtors(*M, F, 0); @@ -671,6 +679,11 @@ Constant *GCOVProfiler::getEmitArcsFunc() { return M->getOrInsertFunction("llvm_gcda_emit_arcs", FTy); } +Constant *GCOVProfiler::getDeleteWriteoutFunctionListFunc() { + FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false); + return M->getOrInsertFunction("llvm_delete_writeout_function_list", FTy); +} + Constant *GCOVProfiler::getDeleteFlushFunctionListFunc() { FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false); return M->getOrInsertFunction("llvm_delete_flush_function_list", FTy); @@ -724,12 +737,12 @@ Function *GCOVProfiler::insertCounterWriteout( Builder.CreateGlobalStringPtr(ReversedVersion)); for (unsigned j = 0, e = CountersBySP.size(); j != e; ++j) { DISubprogram SP(CountersBySP[j].second); - Builder.CreateCall3(EmitFunction, - Builder.getInt32(j), - Options.FunctionNamesInData ? - Builder.CreateGlobalStringPtr(SP.getName()) : - Constant::getNullValue(Builder.getInt8PtrTy()), - Builder.getInt8(Options.UseCfgChecksum)); + Builder.CreateCall3( + EmitFunction, Builder.getInt32(j), + Options.FunctionNamesInData ? 
+ Builder.CreateGlobalStringPtr(getFunctionName(SP)) : + Constant::getNullValue(Builder.getInt8PtrTy()), + Builder.getInt8(Options.UseCfgChecksum)); GlobalVariable *GV = CountersBySP[j].first; unsigned Arcs = diff --git a/lib/Transforms/Instrumentation/Instrumentation.cpp b/lib/Transforms/Instrumentation/Instrumentation.cpp index 8ba1025..9f35396 100644 --- a/lib/Transforms/Instrumentation/Instrumentation.cpp +++ b/lib/Transforms/Instrumentation/Instrumentation.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "llvm/InitializePasses.h" +#include "llvm/PassRegistry.h" #include "llvm-c/Initialization.h" using namespace llvm; diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp index fce6513..4e75904 100644 --- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -122,6 +122,9 @@ static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call", static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given patter"), cl::Hidden, cl::init(0xff)); +static cl::opt<bool> ClPoisonUndef("msan-poison-undef", + cl::desc("poison undef temps"), + cl::Hidden, cl::init(true)); static cl::opt<bool> ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), @@ -690,7 +693,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { /// /// Clean shadow (all zeroes) means all bits of the value are defined /// (initialized). - Value *getCleanShadow(Value *V) { + Constant *getCleanShadow(Value *V) { Type *ShadowTy = getShadowTy(V); if (!ShadowTy) return 0; @@ -709,6 +712,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return ConstantStruct::get(ST, Vals); } + /// \brief Create a dirty shadow for a given value. + Constant *getPoisonedShadow(Value *V) { + Type *ShadowTy = getShadowTy(V); + if (!ShadowTy) + return 0; + return getPoisonedShadow(ShadowTy); + } + /// \brief Create a clean (zero) origin. Value *getCleanOrigin() { return Constant::getNullValue(MS.OriginTy); @@ -730,7 +741,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return Shadow; } if (UndefValue *U = dyn_cast<UndefValue>(V)) { - Value *AllOnes = getPoisonedShadow(getShadowTy(V)); + Value *AllOnes = ClPoisonUndef ? 
getPoisonedShadow(V) : getCleanShadow(V); DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n"); (void)U; return AllOnes; diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index f93c5ab..299060a 100644 --- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -30,6 +30,7 @@ #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" +#include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" @@ -56,6 +57,9 @@ static cl::opt<bool> ClInstrumentFuncEntryExit( static cl::opt<bool> ClInstrumentAtomics( "tsan-instrument-atomics", cl::init(true), cl::desc("Instrument atomics"), cl::Hidden); +static cl::opt<bool> ClInstrumentMemIntrinsics( + "tsan-instrument-memintrinsics", cl::init(true), + cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden); STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); @@ -63,6 +67,7 @@ STATISTIC(NumOmittedReadsBeforeWrite, "Number of reads ignored due to following writes"); STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size"); STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes"); +STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads"); STATISTIC(NumOmittedReadsFromConstantGlobals, "Number of reads from constant globals"); STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads"); @@ -85,12 +90,14 @@ struct ThreadSanitizer : public FunctionPass { void initializeCallbacks(Module &M); bool instrumentLoadOrStore(Instruction *I); bool instrumentAtomic(Instruction *I); + bool instrumentMemIntrinsic(Instruction *I); void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local, SmallVectorImpl<Instruction*> &All); bool addrPointsToConstantData(Value *Addr); int getMemoryAccessFuncIndex(Value *Addr); DataLayout *TD; + Type *IntptrTy; SmallString<64> BlacklistFile; OwningPtr<BlackList> BL; IntegerType *OrdTy; @@ -108,6 +115,8 @@ struct ThreadSanitizer : public FunctionPass { Function *TsanAtomicThreadFence; Function *TsanAtomicSignalFence; Function *TsanVptrUpdate; + Function *TsanVptrLoad; + Function *MemmoveFn, *MemcpyFn, *MemsetFn; }; } // namespace @@ -196,10 +205,22 @@ void ThreadSanitizer::initializeCallbacks(Module &M) { TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction( "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), NULL)); + TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction( + "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL)); TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction( "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL)); TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction( "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL)); + + MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction( + "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), + IRB.getInt8PtrTy(), IntptrTy, NULL)); + MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction( + "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), + IntptrTy, NULL)); + MemsetFn = checkInterfaceFunction(M.getOrInsertFunction( + "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), + IntptrTy, NULL)); } bool ThreadSanitizer::doInitialization(Module &M) { @@ -210,6 +231,7 @@ 
bool ThreadSanitizer::doInitialization(Module &M) { // Always insert a call to __tsan_init into the module's CTORs. IRBuilder<> IRB(M.getContext()); + IntptrTy = IRB.getIntPtrTy(TD); Value *TsanInit = M.getOrInsertFunction("__tsan_init", IRB.getVoidTy(), NULL); appendToGlobalCtors(M, cast<Function>(TsanInit), 0); @@ -309,6 +331,7 @@ bool ThreadSanitizer::runOnFunction(Function &F) { SmallVector<Instruction*, 8> AllLoadsAndStores; SmallVector<Instruction*, 8> LocalLoadsAndStores; SmallVector<Instruction*, 8> AtomicAccesses; + SmallVector<Instruction*, 8> MemIntrinCalls; bool Res = false; bool HasCalls = false; @@ -325,6 +348,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) { else if (isa<ReturnInst>(BI)) RetVec.push_back(BI); else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) { + if (isa<MemIntrinsic>(BI)) + MemIntrinCalls.push_back(BI); HasCalls = true; chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores); } @@ -348,6 +373,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) { Res |= instrumentAtomic(AtomicAccesses[i]); } + if (ClInstrumentMemIntrinsics) + for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) { + Res |= instrumentMemIntrinsic(MemIntrinCalls[i]); + } + // Instrument function entry/exit points if there were instrumented accesses. if ((Res || HasCalls) && ClInstrumentFuncEntryExit) { IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); @@ -386,6 +416,12 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) { NumInstrumentedVtableWrites++; return true; } + if (!IsWrite && isVtableAccess(I)) { + IRB.CreateCall(TsanVptrLoad, + IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy())); + NumInstrumentedVtableReads++; + return true; + } Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx]; IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy())); if (IsWrite) NumInstrumentedWrites++; @@ -423,6 +459,32 @@ static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) { return IRB->getInt32(v); } +// If a memset intrinsic gets inlined by the code gen, we will miss races on it. +// So, we either need to ensure the intrinsic is not inlined, or instrument it. +// We do not instrument memset/memmove/memcpy intrinsics (too complicated), +// instead we simply replace them with regular function calls, which are then +// intercepted by the run-time. +// Since tsan is running after everyone else, the calls should not be +// replaced back with intrinsics. If that becomes wrong at some point, +// we will need to call e.g. __tsan_memset to avoid the intrinsics. +bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) { + IRBuilder<> IRB(I); + if (MemSetInst *M = dyn_cast<MemSetInst>(I)) { + IRB.CreateCall3(MemsetFn, + IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()), + IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false), + IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)); + I->eraseFromParent(); + } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) { + IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn, + IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()), + IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()), + IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)); + I->eraseFromParent(); + } + return false; +} + // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x // standards. For background see C++11 standard. 
A slightly older, publically // available draft of the standard (not entirely up-to-date, but close enough diff --git a/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/lib/Transforms/ObjCARC/DependencyAnalysis.cpp index 5aada9c..8f917ae 100644 --- a/lib/Transforms/ObjCARC/DependencyAnalysis.cpp +++ b/lib/Transforms/ObjCARC/DependencyAnalysis.cpp @@ -38,6 +38,7 @@ llvm::objcarc::CanAlterRefCount(const Instruction *Inst, const Value *Ptr, switch (Class) { case IC_Autorelease: case IC_AutoreleaseRV: + case IC_IntrinsicUser: case IC_User: // These operations never directly modify a reference count. return false; diff --git a/lib/Transforms/ObjCARC/ObjCARC.cpp b/lib/Transforms/ObjCARC/ObjCARC.cpp index 53a31b0..373168e 100644 --- a/lib/Transforms/ObjCARC/ObjCARC.cpp +++ b/lib/Transforms/ObjCARC/ObjCARC.cpp @@ -30,6 +30,7 @@ using namespace llvm::objcarc; bool llvm::objcarc::EnableARCOpts; static cl::opt<bool, true> EnableARCOptimizations("enable-objc-arc-opts", + cl::desc("enable/disable all ARC Optimizations"), cl::location(EnableARCOpts), cl::init(true)); diff --git a/lib/Transforms/ObjCARC/ObjCARC.h b/lib/Transforms/ObjCARC/ObjCARC.h index e062b66..39670f3 100644 --- a/lib/Transforms/ObjCARC/ObjCARC.h +++ b/lib/Transforms/ObjCARC/ObjCARC.h @@ -64,7 +64,8 @@ static inline bool ModuleHasARC(const Module &M) { M.getNamedValue("objc_copyWeak") || M.getNamedValue("objc_retainedObject") || M.getNamedValue("objc_unretainedObject") || - M.getNamedValue("objc_unretainedPointer"); + M.getNamedValue("objc_unretainedPointer") || + M.getNamedValue("clang.arc.use"); } /// \enum InstructionClass @@ -89,6 +90,7 @@ enum InstructionClass { IC_CopyWeak, ///< objc_copyWeak (derived) IC_DestroyWeak, ///< objc_destroyWeak (derived) IC_StoreStrong, ///< objc_storeStrong (derived) + IC_IntrinsicUser, ///< clang.arc.use IC_CallOrUser, ///< could call objc_release and/or "use" pointers IC_Call, ///< could call objc_release IC_User, ///< could "use" a pointer @@ -97,6 +99,13 @@ enum InstructionClass { raw_ostream &operator<<(raw_ostream &OS, const InstructionClass Class); +/// \brief Test if the given class is a kind of user. +inline static bool IsUser(InstructionClass Class) { + return Class == IC_User || + Class == IC_CallOrUser || + Class == IC_IntrinsicUser; +} + /// \brief Test if the given class is objc_retain or equivalent. static inline bool IsRetain(InstructionClass Class) { return Class == IC_Retain || @@ -112,13 +121,10 @@ static inline bool IsAutorelease(InstructionClass Class) { /// \brief Test if the given class represents instructions which return their /// argument verbatim. static inline bool IsForwarding(InstructionClass Class) { - // objc_retainBlock technically doesn't always return its argument - // verbatim, but it doesn't matter for our purposes here. 
return Class == IC_Retain || Class == IC_RetainRV || Class == IC_Autorelease || Class == IC_AutoreleaseRV || - Class == IC_RetainBlock || Class == IC_NoopCast; } @@ -256,11 +262,11 @@ static inline Value *GetObjCArg(Value *Inst) { return StripPointerCastsAndObjCCalls(cast<CallInst>(Inst)->getArgOperand(0)); } -static inline bool isNullOrUndef(const Value *V) { +static inline bool IsNullOrUndef(const Value *V) { return isa<ConstantPointerNull>(V) || isa<UndefValue>(V); } -static inline bool isNoopInstruction(const Instruction *I) { +static inline bool IsNoopInstruction(const Instruction *I) { return isa<BitCastInst>(I) || (isa<GetElementPtrInst>(I) && cast<GetElementPtrInst>(I)->hasAllZeroIndices()); diff --git a/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/lib/Transforms/ObjCARC/ObjCARCContract.cpp index 1c13d1c..c43f4f4 100644 --- a/lib/Transforms/ObjCARC/ObjCARCContract.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCContract.cpp @@ -66,6 +66,8 @@ namespace { Constant *RetainAutoreleaseCallee; /// Declaration for objc_retainAutoreleaseReturnValue(). Constant *RetainAutoreleaseRVCallee; + /// Declaration for objc_retainAutoreleasedReturnValue(). + Constant *RetainRVCallee; /// The inline asm string to insert between calls and RetainRV calls to make /// the optimization work on targets which need it. @@ -77,9 +79,12 @@ namespace { SmallPtrSet<CallInst *, 8> StoreStrongCalls; Constant *getStoreStrongCallee(Module *M); + Constant *getRetainRVCallee(Module *M); Constant *getRetainAutoreleaseCallee(Module *M); Constant *getRetainAutoreleaseRVCallee(Module *M); + bool OptimizeRetainCall(Function &F, Instruction *Retain); + bool ContractAutorelease(Function &F, Instruction *Autorelease, InstructionClass Class, SmallPtrSet<Instruction *, 4> @@ -172,6 +177,57 @@ Constant *ObjCARCContract::getRetainAutoreleaseRVCallee(Module *M) { return RetainAutoreleaseRVCallee; } +Constant *ObjCARCContract::getRetainRVCallee(Module *M) { + if (!RetainRVCallee) { + LLVMContext &C = M->getContext(); + Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); + Type *Params[] = { I8X }; + FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false); + AttributeSet Attribute = + AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex, + Attribute::NoUnwind); + RetainRVCallee = + M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy, + Attribute); + } + return RetainRVCallee; +} + +/// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is a +/// return value. We do this late so we do not disrupt the dataflow analysis in +/// ObjCARCOpt. +bool +ObjCARCContract::OptimizeRetainCall(Function &F, Instruction *Retain) { + ImmutableCallSite CS(GetObjCArg(Retain)); + const Instruction *Call = CS.getInstruction(); + if (!Call) + return false; + if (Call->getParent() != Retain->getParent()) + return false; + + // Check that the call is next to the retain. + BasicBlock::const_iterator I = Call; + ++I; + while (IsNoopInstruction(I)) ++I; + if (&*I != Retain) + return false; + + // Turn it to an objc_retainAutoreleasedReturnValue. + Changed = true; + ++NumPeeps; + + DEBUG(dbgs() << "Transforming objc_retain => " + "objc_retainAutoreleasedReturnValue since the operand is a " + "return value.\nOld: "<< *Retain << "\n"); + + // We do not have to worry about tail calls/does not throw since + // retain/retainRV have the same properties. 
+ cast<CallInst>(Retain)->setCalledFunction(getRetainRVCallee(F.getParent())); + + DEBUG(dbgs() << "New: " << *Retain << "\n"); + return true; +} + /// Merge an autorelease with a retain into a fused call. bool ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease, @@ -329,6 +385,7 @@ bool ObjCARCContract::doInitialization(Module &M) { StoreStrongCallee = 0; RetainAutoreleaseCallee = 0; RetainAutoreleaseRVCallee = 0; + RetainRVCallee = 0; // Initialize RetainRVMarker. RetainRVMarker = 0; @@ -380,7 +437,6 @@ bool ObjCARCContract::runOnFunction(Function &F) { // objc_retainBlock does not necessarily return its argument. InstructionClass Class = GetBasicInstructionClass(Inst); switch (Class) { - case IC_Retain: case IC_FusedRetainAutorelease: case IC_FusedRetainAutoreleaseRV: break; @@ -389,6 +445,13 @@ bool ObjCARCContract::runOnFunction(Function &F) { if (ContractAutorelease(F, Inst, Class, DependingInstructions, Visited)) continue; break; + case IC_Retain: + // Attempt to convert retains to retainrvs if they are next to function + // calls. + if (!OptimizeRetainCall(F, Inst)) + break; + // If we succeed in our optimization, fall through. + // FALLTHROUGH case IC_RetainRV: { // If we're compiling for a target which needs a special inline-asm // marker to do the retainAutoreleasedReturnValue optimization, @@ -410,7 +473,7 @@ bool ObjCARCContract::runOnFunction(Function &F) { break; } --BBI; - } while (isNoopInstruction(BBI)); + } while (IsNoopInstruction(BBI)); if (&*BBI == GetObjCArg(Inst)) { DEBUG(dbgs() << "ObjCARCContract: Adding inline asm marker for " @@ -429,7 +492,7 @@ bool ObjCARCContract::runOnFunction(Function &F) { case IC_InitWeak: { // objc_initWeak(p, null) => *p = null CallInst *CI = cast<CallInst>(Inst); - if (isNullOrUndef(CI->getArgOperand(1))) { + if (IsNullOrUndef(CI->getArgOperand(1))) { Value *Null = ConstantPointerNull::get(cast<PointerType>(CI->getType())); Changed = true; @@ -453,6 +516,10 @@ bool ObjCARCContract::runOnFunction(Function &F) { if (isa<AllocaInst>(Inst)) TailOkForStoreStrongs = false; continue; + case IC_IntrinsicUser: + // Remove calls to @clang.arc.use(...). + Inst->eraseFromParent(); + continue; default: continue; } diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index 9c14949..e3d51d5 100644 --- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -33,6 +33,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/CFG.h" #include "llvm/Support/Debug.h" @@ -190,13 +191,13 @@ static bool DoesRetainableObjPtrEscape(const User *Ptr) { do { const Value *V = Worklist.pop_back_val(); - DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Visiting: " << *V << "\n"); + DEBUG(dbgs() << "Visiting: " << *V << "\n"); for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE; ++UI) { const User *UUser = *UI; - DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User: " << *UUser << "\n"); + DEBUG(dbgs() << "User: " << *UUser << "\n"); // Special - Use by a call (callee or argument) is not considered // to be an escape. @@ -206,11 +207,13 @@ static bool DoesRetainableObjPtrEscape(const User *Ptr) { case IC_StoreStrong: case IC_Autorelease: case IC_AutoreleaseRV: { - DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User copies pointer " - "arguments. Pointer Escapes!\n"); + DEBUG(dbgs() << "User copies pointer arguments. 
Pointer Escapes!\n"); // These special functions make copies of their pointer arguments. return true; } + case IC_IntrinsicUser: + // Use by the use intrinsic is not an escape. + continue; case IC_User: case IC_None: // Use by an instruction which copies the value is an escape if the @@ -219,12 +222,11 @@ static bool DoesRetainableObjPtrEscape(const User *Ptr) { isa<PHINode>(UUser) || isa<SelectInst>(UUser)) { if (VisitedSet.insert(UUser)) { - DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User copies value. " - "Ptr escapes if result escapes. Adding to list.\n"); + DEBUG(dbgs() << "User copies value. Ptr escapes if result escapes." + " Adding to list.\n"); Worklist.push_back(UUser); } else { - DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Already visited node." - "\n"); + DEBUG(dbgs() << "Already visited node.\n"); } continue; } @@ -241,13 +243,13 @@ static bool DoesRetainableObjPtrEscape(const User *Ptr) { continue; } // Otherwise, conservatively assume an escape. - DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Assuming ptr escapes.\n"); + DEBUG(dbgs() << "Assuming ptr escapes.\n"); return true; } } while (!Worklist.empty()); // No escapes found. - DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Ptr does not escape.\n"); + DEBUG(dbgs() << "Ptr does not escape.\n"); return false; } @@ -301,6 +303,16 @@ STATISTIC(NumRets, "Number of return value forwarding " "retain+autoreleaes eliminated"); STATISTIC(NumRRs, "Number of retain+release paths eliminated"); STATISTIC(NumPeeps, "Number of calls peephole-optimized"); +STATISTIC(NumRetainsBeforeOpt, + "Number of retains before optimization."); +STATISTIC(NumReleasesBeforeOpt, + "Number of releases before optimization."); +#ifndef NDEBUG +STATISTIC(NumRetainsAfterOpt, + "Number of retains after optimization."); +STATISTIC(NumReleasesAfterOpt, + "Number of releases after optimization."); +#endif namespace { /// \enum Sequence @@ -371,7 +383,7 @@ static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) { namespace { /// \brief Unidirectional information about either a /// retain-decrement-use-release sequence or release-use-decrement-retain - /// reverese sequence. + /// reverse sequence. struct RRInfo { /// After an objc_retain, the reference count of the referenced /// object is known to be positive. Similarly, before an objc_release, the @@ -387,10 +399,6 @@ namespace { /// KnownSafe is true when either of these conditions is satisfied. bool KnownSafe; - /// True if the Calls are objc_retainBlock calls (as opposed to objc_retain - /// calls). - bool IsRetainBlock; - /// True of the objc_release calls are all marked with the "tail" keyword. bool IsTailCallRelease; @@ -407,17 +415,18 @@ namespace { SmallPtrSet<Instruction *, 2> ReverseInsertPts; RRInfo() : - KnownSafe(false), IsRetainBlock(false), - IsTailCallRelease(false), - ReleaseMetadata(0) {} + KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0) {} void clear(); + + bool IsTrackingImpreciseReleases() { + return ReleaseMetadata != 0; + } }; } void RRInfo::clear() { KnownSafe = false; - IsRetainBlock = false; IsTailCallRelease = false; ReleaseMetadata = 0; Calls.clear(); @@ -431,7 +440,7 @@ namespace { /// True if the reference count is known to be incremented. bool KnownPositiveRefCount; - /// True of we've seen an opportunity for partial RR elimination, such as + /// True if we've seen an opportunity for partial RR elimination, such as /// pushing calls into a CFG triangle or into one side of a CFG diamond. 
bool Partial; @@ -451,15 +460,16 @@ namespace { KnownPositiveRefCount = true; } - void ClearRefCount() { + void ClearKnownPositiveRefCount() { KnownPositiveRefCount = false; } - bool IsKnownIncremented() const { + bool HasKnownPositiveRefCount() const { return KnownPositiveRefCount; } void SetSeq(Sequence NewSeq) { + DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n"); Seq = NewSeq; } @@ -472,7 +482,8 @@ namespace { } void ResetSequenceProgress(Sequence NewSeq) { - Seq = NewSeq; + DEBUG(dbgs() << "Resetting sequence progress.\n"); + SetSeq(NewSeq); Partial = false; RRI.clear(); } @@ -486,10 +497,6 @@ PtrState::Merge(const PtrState &Other, bool TopDown) { Seq = MergeSeqs(Seq, Other.Seq, TopDown); KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount; - // We can't merge a plain objc_retain with an objc_retainBlock. - if (RRI.IsRetainBlock != Other.RRI.IsRetainBlock) - Seq = S_None; - // If we're not in a sequence (anymore), drop all associated state. if (Seq == S_None) { Partial = false; @@ -698,6 +705,287 @@ void BBState::MergeSucc(const BBState &Other) { MI->second.Merge(PtrState(), /*TopDown=*/false); } +// Only enable ARC Annotations if we are building a debug version of +// libObjCARCOpts. +#ifndef NDEBUG +#define ARC_ANNOTATIONS +#endif + +// Define some macros along the lines of DEBUG and some helper functions to make +// it cleaner to create annotations in the source code and to no-op when not +// building in debug mode. +#ifdef ARC_ANNOTATIONS + +#include "llvm/Support/CommandLine.h" + +/// Enable/disable ARC sequence annotations. +static cl::opt<bool> +EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false), + cl::desc("Enable emission of arc data flow analysis " + "annotations")); +static cl::opt<bool> +DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false), + cl::desc("Disable check for cfg hazards when " + "annotating")); +static cl::opt<std::string> +ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier", + cl::init(""), + cl::desc("filter out all data flow annotations " + "but those that apply to the given " + "target llvm identifier.")); + +/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an +/// instruction so that we can track backwards when post processing via the llvm +/// arc annotation processor tool. If the function is an +static MDString *AppendMDNodeToSourcePtr(unsigned NodeId, + Value *Ptr) { + MDString *Hash = 0; + + // If pointer is a result of an instruction and it does not have a source + // MDNode it, attach a new MDNode onto it. If pointer is a result of + // an instruction and does have a source MDNode attached to it, return a + // reference to said Node. Otherwise just return 0. + if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) { + MDNode *Node; + if (!(Node = Inst->getMetadata(NodeId))) { + // We do not have any node. Generate and attatch the hash MDString to the + // instruction. + + // We just use an MDString to ensure that this metadata gets written out + // of line at the module level and to provide a very simple format + // encoding the information herein. Both of these makes it simpler to + // parse the annotations by a simple external program. 
+ std::string Str; + raw_string_ostream os(Str); + os << "(" << Inst->getParent()->getParent()->getName() << ",%" + << Inst->getName() << ")"; + + Hash = MDString::get(Inst->getContext(), os.str()); + Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(),Hash)); + } else { + // We have a node. Grab its hash and return it. + assert(Node->getNumOperands() == 1 && + "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand."); + Hash = cast<MDString>(Node->getOperand(0)); + } + } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) { + std::string str; + raw_string_ostream os(str); + os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName() + << ")"; + Hash = MDString::get(Arg->getContext(), os.str()); + } + + return Hash; +} + +static std::string SequenceToString(Sequence A) { + std::string str; + raw_string_ostream os(str); + os << A; + return os.str(); +} + +/// Helper function to change a Sequence into a String object using our overload +/// for raw_ostream so we only have printing code in one location. +static MDString *SequenceToMDString(LLVMContext &Context, + Sequence A) { + return MDString::get(Context, SequenceToString(A)); +} + +/// A simple function to generate a MDNode which describes the change in state +/// for Value *Ptr caused by Instruction *Inst. +static void AppendMDNodeToInstForPtr(unsigned NodeId, + Instruction *Inst, + Value *Ptr, + MDString *PtrSourceMDNodeID, + Sequence OldSeq, + Sequence NewSeq) { + MDNode *Node = 0; + Value *tmp[3] = {PtrSourceMDNodeID, + SequenceToMDString(Inst->getContext(), + OldSeq), + SequenceToMDString(Inst->getContext(), + NewSeq)}; + Node = MDNode::get(Inst->getContext(), + ArrayRef<Value*>(tmp, 3)); + + Inst->setMetadata(NodeId, Node); +} + +/// Add to the beginning of the basic block llvm.ptr.annotations which show the +/// state of a pointer at the entrance to a basic block. +static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB, + Value *Ptr, Sequence Seq) { + // If we have a target identifier, make sure that we match it before + // continuing. + if(!ARCAnnotationTargetIdentifier.empty() && + !Ptr->getName().equals(ARCAnnotationTargetIdentifier)) + return; + + Module *M = BB->getParent()->getParent(); + LLVMContext &C = M->getContext(); + Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); + Type *I8XX = PointerType::getUnqual(I8X); + Type *Params[] = {I8XX, I8XX}; + FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), + ArrayRef<Type*>(Params, 2), + /*isVarArg=*/false); + Constant *Callee = M->getOrInsertFunction(Name, FTy); + + IRBuilder<> Builder(BB, BB->getFirstInsertionPt()); + + Value *PtrName; + StringRef Tmp = Ptr->getName(); + if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) { + Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp, + Tmp + "_STR"); + PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage, + cast<Constant>(ActualPtrName), Tmp); + } + + Value *S; + std::string SeqStr = SequenceToString(Seq); + if (0 == (S = M->getGlobalVariable(SeqStr, true))) { + Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr, + SeqStr + "_STR"); + S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage, + cast<Constant>(ActualPtrName), SeqStr); + } + + Builder.CreateCall2(Callee, PtrName, S); +} + +/// Add to the end of the basic block llvm.ptr.annotations which show the state +/// of the pointer at the bottom of the basic block. 
+static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB, + Value *Ptr, Sequence Seq) { + // If we have a target identifier, make sure that we match it before emitting + // an annotation. + if(!ARCAnnotationTargetIdentifier.empty() && + !Ptr->getName().equals(ARCAnnotationTargetIdentifier)) + return; + + Module *M = BB->getParent()->getParent(); + LLVMContext &C = M->getContext(); + Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); + Type *I8XX = PointerType::getUnqual(I8X); + Type *Params[] = {I8XX, I8XX}; + FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), + ArrayRef<Type*>(Params, 2), + /*isVarArg=*/false); + Constant *Callee = M->getOrInsertFunction(Name, FTy); + + IRBuilder<> Builder(BB, llvm::prior(BB->end())); + + Value *PtrName; + StringRef Tmp = Ptr->getName(); + if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) { + Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp, + Tmp + "_STR"); + PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage, + cast<Constant>(ActualPtrName), Tmp); + } + + Value *S; + std::string SeqStr = SequenceToString(Seq); + if (0 == (S = M->getGlobalVariable(SeqStr, true))) { + Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr, + SeqStr + "_STR"); + S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage, + cast<Constant>(ActualPtrName), SeqStr); + } + Builder.CreateCall2(Callee, PtrName, S); +} + +/// Adds a source annotation to pointer and a state change annotation to Inst +/// referencing the source annotation and the old/new state of pointer. +static void GenerateARCAnnotation(unsigned InstMDId, + unsigned PtrMDId, + Instruction *Inst, + Value *Ptr, + Sequence OldSeq, + Sequence NewSeq) { + if (EnableARCAnnotations) { + // If we have a target identifier, make sure that we match it before + // emitting an annotation. + if(!ARCAnnotationTargetIdentifier.empty() && + !Ptr->getName().equals(ARCAnnotationTargetIdentifier)) + return; + + // First generate the source annotation on our pointer. This will return an + // MDString* if Ptr actually comes from an instruction implying we can put + // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL), + // then we know that our pointer is from an Argument so we put a reference + // to the argument number. + // + // The point of this is to make it easy for the + // llvm-arc-annotation-processor tool to cross reference where the source + // pointer is in the LLVM IR since the LLVM IR parser does not submit such + // information via debug info for backends to use (since why would anyone + // need such a thing from LLVM IR besides in non standard cases + // [i.e. this]). + MDString *SourcePtrMDNode = + AppendMDNodeToSourcePtr(PtrMDId, Ptr); + AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq, + NewSeq); + } +} + +// The actual interface for accessing the above functionality is defined via +// some simple macros which are defined below. We do this so that the user does +// not need to pass in what metadata id is needed resulting in cleaner code and +// additionally since it provides an easy way to conditionally no-op all +// annotation support in a non-debug build. 
+ +/// Use this macro to annotate a sequence state change when processing +/// instructions bottom up, +#define ANNOTATE_BOTTOMUP(inst, ptr, old, new) \ + GenerateARCAnnotation(ARCAnnotationBottomUpMDKind, \ + ARCAnnotationProvenanceSourceMDKind, (inst), \ + const_cast<Value*>(ptr), (old), (new)) +/// Use this macro to annotate a sequence state change when processing +/// instructions top down. +#define ANNOTATE_TOPDOWN(inst, ptr, old, new) \ + GenerateARCAnnotation(ARCAnnotationTopDownMDKind, \ + ARCAnnotationProvenanceSourceMDKind, (inst), \ + const_cast<Value*>(ptr), (old), (new)) + +#define ANNOTATE_BB(_states, _bb, _name, _type, _direction) \ + do { \ + if (EnableARCAnnotations) { \ + for(BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(), \ + E = (_states)._direction##_ptr_end(); I != E; ++I) { \ + Value *Ptr = const_cast<Value*>(I->first); \ + Sequence Seq = I->second.GetSeq(); \ + GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq); \ + } \ + } \ + } while (0) + +#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock) \ + ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart", \ + Entrance, bottom_up) +#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock) \ + ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend", \ + Terminator, bottom_up) +#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock) \ + ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart", \ + Entrance, top_down) +#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock) \ + ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend", \ + Terminator, top_down) + +#else // !ARC_ANNOTATION +// If annotations are off, noop. +#define ANNOTATE_BOTTOMUP(inst, ptr, old, new) +#define ANNOTATE_TOPDOWN(inst, ptr, old, new) +#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock) +#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock) +#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock) +#define ANNOTATE_TOPDOWN_BBEND(states, basicblock) +#endif // !ARC_ANNOTATION + namespace { /// \brief The main ARC optimization pass. class ObjCARCOpt : public FunctionPass { @@ -711,9 +999,6 @@ namespace { /// them. These are initialized lazily to avoid cluttering up the Module /// with unused declarations. - /// Declaration for ObjC runtime function - /// objc_retainAutoreleasedReturnValue. - Constant *RetainRVCallee; /// Declaration for ObjC runtime function objc_autoreleaseReturnValue. Constant *AutoreleaseRVCallee; /// Declaration for ObjC runtime function objc_release. @@ -738,7 +1023,15 @@ namespace { /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata. unsigned NoObjCARCExceptionsMDKind; - Constant *getRetainRVCallee(Module *M); +#ifdef ARC_ANNOTATIONS + /// The Metadata Kind for llvm.arc.annotation.bottomup metadata. + unsigned ARCAnnotationBottomUpMDKind; + /// The Metadata Kind for llvm.arc.annotation.topdown metadata. + unsigned ARCAnnotationTopDownMDKind; + /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata. 
+ unsigned ARCAnnotationProvenanceSourceMDKind; +#endif // ARC_ANNOATIONS + Constant *getAutoreleaseRVCallee(Module *M); Constant *getReleaseCallee(Module *M); Constant *getRetainCallee(Module *M); @@ -747,10 +1040,11 @@ namespace { bool IsRetainBlockOptimizable(const Instruction *Inst); - void OptimizeRetainCall(Function &F, Instruction *Retain); bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV); void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV, InstructionClass &Class); + bool OptimizeRetainBlockCall(Function &F, Instruction *RetainBlock, + InstructionClass &Class); void OptimizeIndividualCalls(Function &F); void CheckForCFGHazards(const BasicBlock *BB, @@ -804,6 +1098,10 @@ namespace { void OptimizeReturns(Function &F); +#ifndef NDEBUG + void GatherStatistics(Function &F, bool AfterOptimization = false); +#endif + virtual void getAnalysisUsage(AnalysisUsage &AU) const; virtual bool doInitialization(Module &M); virtual bool runOnFunction(Function &F); @@ -851,22 +1149,6 @@ bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) { return true; } -Constant *ObjCARCOpt::getRetainRVCallee(Module *M) { - if (!RetainRVCallee) { - LLVMContext &C = M->getContext(); - Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); - Type *Params[] = { I8X }; - FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false); - AttributeSet Attribute = - AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex, - Attribute::NoUnwind); - RetainRVCallee = - M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy, - Attribute); - } - return RetainRVCallee; -} - Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) { if (!AutoreleaseRVCallee) { LLVMContext &C = M->getContext(); @@ -946,38 +1228,6 @@ Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) { return AutoreleaseCallee; } -/// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is a -/// return value. -void -ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) { - ImmutableCallSite CS(GetObjCArg(Retain)); - const Instruction *Call = CS.getInstruction(); - if (!Call) return; - if (Call->getParent() != Retain->getParent()) return; - - // Check that the call is next to the retain. - BasicBlock::const_iterator I = Call; - ++I; - while (isNoopInstruction(I)) ++I; - if (&*I != Retain) - return; - - // Turn it to an objc_retainAutoreleasedReturnValue.. - Changed = true; - ++NumPeeps; - - DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainCall: Transforming " - "objc_retain => objc_retainAutoreleasedReturnValue" - " since the operand is a return value.\n" - " Old: " - << *Retain << "\n"); - - cast<CallInst>(Retain)->setCalledFunction(getRetainRVCallee(F.getParent())); - - DEBUG(dbgs() << " New: " - << *Retain << "\n"); -} - /// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is /// not a return value. Or, if it can be paired with an /// objc_autoreleaseReturnValue, delete the pair and return true. 
@@ -990,14 +1240,14 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) { if (Call->getParent() == RetainRV->getParent()) { BasicBlock::const_iterator I = Call; ++I; - while (isNoopInstruction(I)) ++I; + while (IsNoopInstruction(I)) ++I; if (&*I == RetainRV) return false; } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) { BasicBlock *RetainRVParent = RetainRV->getParent(); if (II->getNormalDest() == RetainRVParent) { BasicBlock::const_iterator I = RetainRVParent->begin(); - while (isNoopInstruction(I)) ++I; + while (IsNoopInstruction(I)) ++I; if (&*I == RetainRV) return false; } @@ -1008,15 +1258,14 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) { // pointer. In this case, we can delete the pair. BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin(); if (I != Begin) { - do --I; while (I != Begin && isNoopInstruction(I)); + do --I; while (I != Begin && IsNoopInstruction(I)); if (GetBasicInstructionClass(I) == IC_AutoreleaseRV && GetObjCArg(I) == Arg) { Changed = true; ++NumPeeps; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Erasing " << *I << "\n" - << " Erasing " << *RetainRV - << "\n"); + DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n" + << "Erasing " << *RetainRV << "\n"); EraseInstruction(I); EraseInstruction(RetainRV); @@ -1028,16 +1277,13 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) { Changed = true; ++NumPeeps; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Transforming " - "objc_retainAutoreleasedReturnValue => " + DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => " "objc_retain since the operand is not a return value.\n" - " Old: " - << *RetainRV << "\n"); + "Old = " << *RetainRV << "\n"); cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent())); - DEBUG(dbgs() << " New: " - << *RetainRV << "\n"); + DEBUG(dbgs() << "New = " << *RetainRV << "\n"); return false; } @@ -1066,12 +1312,10 @@ ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV, Changed = true; ++NumPeeps; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeAutoreleaseRVCall: Transforming " - "objc_autoreleaseReturnValue => " + DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => " "objc_autorelease since its operand is not used as a return " "value.\n" - " Old: " - << *AutoreleaseRV << "\n"); + "Old = " << *AutoreleaseRV << "\n"); CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV); AutoreleaseRVCI-> @@ -1079,14 +1323,48 @@ ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV, AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease. Class = IC_Autorelease; - DEBUG(dbgs() << " New: " - << *AutoreleaseRV << "\n"); + DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n"); + +} + +// \brief Attempt to strength reduce objc_retainBlock calls to objc_retain +// calls. +// +// Specifically: If an objc_retainBlock call has the copy_on_escape metadata and +// does not escape (following the rules of block escaping), strength reduce the +// objc_retainBlock to an objc_retain. +// +// TODO: If an objc_retainBlock call is dominated period by a previous +// objc_retainBlock call, strength reduce the objc_retainBlock to an +// objc_retain. +bool +ObjCARCOpt::OptimizeRetainBlockCall(Function &F, Instruction *Inst, + InstructionClass &Class) { + assert(GetBasicInstructionClass(Inst) == Class); + assert(IC_RetainBlock == Class); + + // If we can not optimize Inst, return false. 
+ if (!IsRetainBlockOptimizable(Inst)) + return false; + Changed = true; + ++NumPeeps; + + DEBUG(dbgs() << "Strength reduced retainBlock => retain.\n"); + DEBUG(dbgs() << "Old: " << *Inst << "\n"); + CallInst *RetainBlock = cast<CallInst>(Inst); + RetainBlock->setCalledFunction(getRetainCallee(F.getParent())); + // Remove copy_on_escape metadata. + RetainBlock->setMetadata(CopyOnEscapeMDKind, 0); + Class = IC_Retain; + DEBUG(dbgs() << "New: " << *Inst << "\n"); + return true; } /// Visit each call, one at a time, and make simplifications without doing any /// additional analysis. void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { + DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n"); // Reset all the flags in preparation for recomputing them. UsedInThisFunction = 0; @@ -1096,8 +1374,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { InstructionClass Class = GetBasicInstructionClass(Inst); - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Visiting: Class: " - << Class << "; " << *Inst << "\n"); + DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n"); switch (Class) { default: break; @@ -1113,8 +1390,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { case IC_NoopCast: Changed = true; ++NumNoops; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Erasing no-op cast:" - " " << *Inst << "\n"); + DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n"); EraseInstruction(Inst); continue; @@ -1125,18 +1401,15 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { case IC_InitWeak: case IC_DestroyWeak: { CallInst *CI = cast<CallInst>(Inst); - if (isNullOrUndef(CI->getArgOperand(0))) { + if (IsNullOrUndef(CI->getArgOperand(0))) { Changed = true; Type *Ty = CI->getArgOperand(0)->getType(); new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()), Constant::getNullValue(Ty), CI); llvm::Value *NewValue = UndefValue::get(CI->getType()); - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: A null " - "pointer-to-weak-pointer is undefined behavior.\n" - " Old = " << *CI << - "\n New = " << - *NewValue << "\n"); + DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior." + "\nOld = " << *CI << "\nNew = " << *NewValue << "\n"); CI->replaceAllUsesWith(NewValue); CI->eraseFromParent(); continue; @@ -1146,8 +1419,8 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { case IC_CopyWeak: case IC_MoveWeak: { CallInst *CI = cast<CallInst>(Inst); - if (isNullOrUndef(CI->getArgOperand(0)) || - isNullOrUndef(CI->getArgOperand(1))) { + if (IsNullOrUndef(CI->getArgOperand(0)) || + IsNullOrUndef(CI->getArgOperand(1))) { Changed = true; Type *Ty = CI->getArgOperand(0)->getType(); new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()), @@ -1155,11 +1428,8 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { CI); llvm::Value *NewValue = UndefValue::get(CI->getType()); - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: A null " - "pointer-to-weak-pointer is undefined behavior.\n" - " Old = " << *CI << - "\n New = " << - *NewValue << "\n"); + DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior." + "\nOld = " << *CI << "\nNew = " << *NewValue << "\n"); CI->replaceAllUsesWith(NewValue); CI->eraseFromParent(); @@ -1167,8 +1437,14 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { } break; } + case IC_RetainBlock: + // If we strength reduce an objc_retainBlock to an objc_retain, continue + // onto the objc_retain peephole optimizations. Otherwise break. 
+ if (!OptimizeRetainBlockCall(F, Inst, Class)) + break; + // FALLTHROUGH case IC_Retain: - OptimizeRetainCall(F, Inst); + ++NumRetainsBeforeOpt; break; case IC_RetainRV: if (OptimizeRetainRVCall(F, Inst)) @@ -1177,6 +1453,9 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { case IC_AutoreleaseRV: OptimizeAutoreleaseRVCall(F, Inst, Class); break; + case IC_Release: + ++NumReleasesBeforeOpt; + break; } // objc_autorelease(x) -> objc_release(x) if x is otherwise unused. @@ -1196,12 +1475,9 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, ArrayRef<Value *>())); - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Replacing " - "objc_autorelease(x) with objc_release(x) since x is " - "otherwise unused.\n" - " Old: " << *Call << - "\n New: " << - *NewCall << "\n"); + DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) " + "since x is otherwise unused.\nOld: " << *Call << "\nNew: " + << *NewCall << "\n"); EraseInstruction(Call); Inst = NewCall; @@ -1213,9 +1489,8 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { // a tail keyword. if (IsAlwaysTail(Class)) { Changed = true; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Adding tail keyword" - " to function since it can never be passed stack args: " << *Inst << - "\n"); + DEBUG(dbgs() << "Adding tail keyword to function since it can never be " + "passed stack args: " << *Inst << "\n"); cast<CallInst>(Inst)->setTailCall(); } @@ -1223,8 +1498,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { // semantics of ARC truly do not do so. if (IsNeverTail(Class)) { Changed = true; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Removing tail " - "keyword from function: " << *Inst << + DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst << "\n"); cast<CallInst>(Inst)->setTailCall(false); } @@ -1232,8 +1506,8 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { // Set nounwind as needed. if (IsNoThrow(Class)) { Changed = true; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Found no throw" - " class. Setting nounwind on: " << *Inst << "\n"); + DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst + << "\n"); cast<CallInst>(Inst)->setDoesNotThrow(); } @@ -1245,11 +1519,11 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { const Value *Arg = GetObjCArg(Inst); // ARC calls with null are no-ops. Delete them. - if (isNullOrUndef(Arg)) { + if (IsNullOrUndef(Arg)) { Changed = true; ++NumNoops; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: ARC calls with " - " null are no-ops. Erasing: " << *Inst << "\n"); + DEBUG(dbgs() << "ARC calls with null are no-ops. 
Erasing: " << *Inst + << "\n"); EraseInstruction(Inst); continue; } @@ -1280,7 +1554,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { Value *Incoming = StripPointerCastsAndObjCCalls(PN->getIncomingValue(i)); - if (isNullOrUndef(Incoming)) + if (IsNullOrUndef(Incoming)) HasNull = true; else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back()) .getNumSuccessors() != 1) { @@ -1334,7 +1608,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { Value *Incoming = StripPointerCastsAndObjCCalls(PN->getIncomingValue(i)); - if (!isNullOrUndef(Incoming)) { + if (!IsNullOrUndef(Incoming)) { CallInst *Clone = cast<CallInst>(CInst->clone()); Value *Op = PN->getIncomingValue(i); Instruction *InsertPos = &PN->getIncomingBlock(i)->back(); @@ -1343,10 +1617,9 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { Clone->setArgOperand(0, Op); Clone->insertBefore(InsertPos); - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Cloning " + DEBUG(dbgs() << "Cloning " << *CInst << "\n" - " And inserting " - "clone at " << *InsertPos << "\n"); + "And inserting clone at " << *InsertPos << "\n"); Worklist.push_back(std::make_pair(Clone, Incoming)); } } @@ -1358,7 +1631,65 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { } } while (!Worklist.empty()); } - DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Finished List.\n"); +} + +/// If we have a top down pointer in the S_Use state, make sure that there are +/// no CFG hazards by checking the states of various bottom up pointers. +static void CheckForUseCFGHazard(const Sequence SuccSSeq, + const bool SuccSRRIKnownSafe, + PtrState &S, + bool &SomeSuccHasSame, + bool &AllSuccsHaveSame, + bool &ShouldContinue) { + switch (SuccSSeq) { + case S_CanRelease: { + if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) { + S.ClearSequenceProgress(); + break; + } + ShouldContinue = true; + break; + } + case S_Use: + SomeSuccHasSame = true; + break; + case S_Stop: + case S_Release: + case S_MovableRelease: + if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) + AllSuccsHaveSame = false; + break; + case S_Retain: + llvm_unreachable("bottom-up pointer in retain state!"); + case S_None: + llvm_unreachable("This should have been handled earlier."); + } +} + +/// If we have a Top Down pointer in the S_CanRelease state, make sure that +/// there are no CFG hazards by checking the states of various bottom up +/// pointers. +static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq, + const bool SuccSRRIKnownSafe, + PtrState &S, + bool &SomeSuccHasSame, + bool &AllSuccsHaveSame) { + switch (SuccSSeq) { + case S_CanRelease: + SomeSuccHasSame = true; + break; + case S_Stop: + case S_Release: + case S_MovableRelease: + case S_Use: + if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) + AllSuccsHaveSame = false; + break; + case S_Retain: + llvm_unreachable("bottom-up pointer in retain state!"); + case S_None: + llvm_unreachable("This should have been handled earlier."); + } } /// Check for critical edges, loop boundaries, irreducible control flow, or @@ -1371,106 +1702,82 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB, // If any top-down local-use or possible-dec has a succ which is earlier in // the sequence, forget it. 
for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(), - E = MyStates.top_down_ptr_end(); I != E; ++I) - switch (I->second.GetSeq()) { - default: break; - case S_Use: { - const Value *Arg = I->first; - const TerminatorInst *TI = cast<TerminatorInst>(&BB->back()); - bool SomeSuccHasSame = false; - bool AllSuccsHaveSame = true; - PtrState &S = I->second; - succ_const_iterator SI(TI), SE(TI, false); - - for (; SI != SE; ++SI) { - Sequence SuccSSeq = S_None; - bool SuccSRRIKnownSafe = false; - // If VisitBottomUp has pointer information for this successor, take - // what we know about it. - DenseMap<const BasicBlock *, BBState>::iterator BBI = - BBStates.find(*SI); - assert(BBI != BBStates.end()); - const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg); - SuccSSeq = SuccS.GetSeq(); - SuccSRRIKnownSafe = SuccS.RRI.KnownSafe; - switch (SuccSSeq) { - case S_None: - case S_CanRelease: { - if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) { - S.ClearSequenceProgress(); - break; - } - continue; - } - case S_Use: - SomeSuccHasSame = true; - break; - case S_Stop: - case S_Release: - case S_MovableRelease: - if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) - AllSuccsHaveSame = false; - break; - case S_Retain: - llvm_unreachable("bottom-up pointer in retain state!"); - } - } - // If the state at the other end of any of the successor edges - // matches the current state, require all edges to match. This - // guards against loops in the middle of a sequence. - if (SomeSuccHasSame && !AllSuccsHaveSame) + E = MyStates.top_down_ptr_end(); I != E; ++I) { + PtrState &S = I->second; + const Sequence Seq = I->second.GetSeq(); + + // We only care about S_Retain, S_CanRelease, and S_Use. + if (Seq == S_None) + continue; + + // Make sure that if extra top down states are added in the future that this + // code is updated to handle it. + assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) && + "Unknown top down sequence state."); + + const Value *Arg = I->first; + const TerminatorInst *TI = cast<TerminatorInst>(&BB->back()); + bool SomeSuccHasSame = false; + bool AllSuccsHaveSame = true; + + succ_const_iterator SI(TI), SE(TI, false); + + for (; SI != SE; ++SI) { + // If VisitBottomUp has pointer information for this successor, take + // what we know about it. + const DenseMap<const BasicBlock *, BBState>::iterator BBI = + BBStates.find(*SI); + assert(BBI != BBStates.end()); + const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg); + const Sequence SuccSSeq = SuccS.GetSeq(); + + // If bottom up, the pointer is in an S_None state, clear the sequence + // progress since the sequence in the bottom up state finished + // suggesting a mismatch in between retains/releases. This is true for + // all three cases that we are handling here: S_Retain, S_Use, and + // S_CanRelease. + if (SuccSSeq == S_None) { S.ClearSequenceProgress(); - break; - } - case S_CanRelease: { - const Value *Arg = I->first; - const TerminatorInst *TI = cast<TerminatorInst>(&BB->back()); - bool SomeSuccHasSame = false; - bool AllSuccsHaveSame = true; - PtrState &S = I->second; - succ_const_iterator SI(TI), SE(TI, false); - - for (; SI != SE; ++SI) { - Sequence SuccSSeq = S_None; - bool SuccSRRIKnownSafe = false; - // If VisitBottomUp has pointer information for this successor, take - // what we know about it. 
- DenseMap<const BasicBlock *, BBState>::iterator BBI = - BBStates.find(*SI); - assert(BBI != BBStates.end()); - const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg); - SuccSSeq = SuccS.GetSeq(); - SuccSRRIKnownSafe = SuccS.RRI.KnownSafe; - switch (SuccSSeq) { - case S_None: { - if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) { - S.ClearSequenceProgress(); - break; - } + continue; + } + + // If we have S_Use or S_CanRelease, perform our check for cfg hazard + // checks. + const bool SuccSRRIKnownSafe = SuccS.RRI.KnownSafe; + + // *NOTE* We do not use Seq from above here since we are allowing for + // S.GetSeq() to change while we are visiting basic blocks. + switch(S.GetSeq()) { + case S_Use: { + bool ShouldContinue = false; + CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, + SomeSuccHasSame, AllSuccsHaveSame, + ShouldContinue); + if (ShouldContinue) continue; - } - case S_CanRelease: - SomeSuccHasSame = true; - break; - case S_Stop: - case S_Release: - case S_MovableRelease: - case S_Use: - if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) - AllSuccsHaveSame = false; - break; - case S_Retain: - llvm_unreachable("bottom-up pointer in retain state!"); - } + break; + } + case S_CanRelease: { + CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, + S, SomeSuccHasSame, + AllSuccsHaveSame); + break; + } + case S_Retain: + case S_None: + case S_Stop: + case S_Release: + case S_MovableRelease: + break; } - // If the state at the other end of any of the successor edges - // matches the current state, require all edges to match. This - // guards against loops in the middle of a sequence. - if (SomeSuccHasSame && !AllSuccsHaveSame) - S.ClearSequenceProgress(); - break; - } } + + // If the state at the other end of any of the successor edges + // matches the current state, require all edges to match. This + // guards against loops in the middle of a sequence. + if (SomeSuccHasSame && !AllSuccsHaveSame) + S.ClearSequenceProgress(); + } } bool @@ -1482,6 +1789,8 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, InstructionClass Class = GetInstructionClass(Inst); const Value *Arg = 0; + DEBUG(dbgs() << "Class: " << Class << "\n"); + switch (Class) { case IC_Release: { Arg = GetObjCArg(Inst); @@ -1496,27 +1805,26 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, // pairs by making PtrState hold a stack of states, but this is // simple and avoids adding overhead for the non-nested case. if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) { - DEBUG(dbgs() << "ObjCARCOpt::VisitInstructionBottomUp: Found nested " - "releases (i.e. a release pair)\n"); + DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n"); NestingDetected = true; } MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind); - S.ResetSequenceProgress(ReleaseMetadata ? S_MovableRelease : S_Release); + Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release; + ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq); + S.ResetSequenceProgress(NewSeq); S.RRI.ReleaseMetadata = ReleaseMetadata; - S.RRI.KnownSafe = S.IsKnownIncremented(); + S.RRI.KnownSafe = S.HasKnownPositiveRefCount(); S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall(); S.RRI.Calls.insert(Inst); - S.SetKnownPositiveRefCount(); break; } case IC_RetainBlock: - // An objc_retainBlock call with just a use may need to be kept, - // because it may be copying a block from the stack to the heap. 
- if (!IsRetainBlockOptimizable(Inst)) - break; - // FALLTHROUGH + // In OptimizeIndividualCalls, we have strength reduced all optimizable + // objc_retainBlocks to objc_retains. Thus at this point any + // objc_retainBlocks that we see are not optimizable. + break; case IC_Retain: case IC_RetainRV: { Arg = GetObjCArg(Inst); @@ -1524,20 +1832,22 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, PtrState &S = MyStates.getPtrBottomUpState(Arg); S.SetKnownPositiveRefCount(); - switch (S.GetSeq()) { + Sequence OldSeq = S.GetSeq(); + switch (OldSeq) { case S_Stop: case S_Release: case S_MovableRelease: case S_Use: - S.RRI.ReverseInsertPts.clear(); + // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an + // imprecise release, clear our reverse insertion points. + if (OldSeq != S_Use || S.RRI.IsTrackingImpreciseReleases()) + S.RRI.ReverseInsertPts.clear(); // FALL THROUGH case S_CanRelease: // Don't do retain+release tracking for IC_RetainRV, because it's // better to let it remain as the first instruction after a call. - if (Class != IC_RetainRV) { - S.RRI.IsRetainBlock = Class == IC_RetainBlock; + if (Class != IC_RetainRV) Retains[Inst] = S.RRI; - } S.ClearSequenceProgress(); break; case S_None: @@ -1545,7 +1855,9 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, case S_Retain: llvm_unreachable("bottom-up pointer in retain state!"); } - return NestingDetected; + ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq()); + // A retain moving bottom up can be a use. + break; } case IC_AutoreleasepoolPop: // Conservatively, clear MyStates for all known pointers. @@ -1571,10 +1883,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, // Check for possible releases. if (CanAlterRefCount(Inst, Ptr, PA, Class)) { - S.ClearRefCount(); + DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr + << "\n"); + S.ClearKnownPositiveRefCount(); switch (Seq) { case S_Use: S.SetSeq(S_CanRelease); + ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq()); continue; case S_CanRelease: case S_Release: @@ -1592,6 +1907,8 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, case S_Release: case S_MovableRelease: if (CanUse(Inst, Ptr, PA, Class)) { + DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr + << "\n"); assert(S.RRI.ReverseInsertPts.empty()); // If this is an invoke instruction, we're scanning it as part of // one of its successor blocks, since we can't insert code after it @@ -1601,10 +1918,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, else S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst))); S.SetSeq(S_Use); - } else if (Seq == S_Release && - (Class == IC_User || Class == IC_CallOrUser)) { + ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use); + } else if (Seq == S_Release && IsUser(Class)) { + DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr + << "\n"); // Non-movable releases depend on any possible objc pointer use. S.SetSeq(S_Stop); + ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop); assert(S.RRI.ReverseInsertPts.empty()); // As above; handle invoke specially. 
if (isa<InvokeInst>(Inst)) @@ -1614,8 +1934,12 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, } break; case S_Stop: - if (CanUse(Inst, Ptr, PA, Class)) + if (CanUse(Inst, Ptr, PA, Class)) { + DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr + << "\n"); S.SetSeq(S_Use); + ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use); + } break; case S_CanRelease: case S_Use: @@ -1633,6 +1957,9 @@ bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates, MapVector<Value *, RRInfo> &Retains) { + + DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n"); + bool NestingDetected = false; BBState &MyStates = BBStates[BB]; @@ -1654,6 +1981,10 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB, } } + // If ARC Annotations are enabled, output the current state of pointers at the + // bottom of the basic block. + ANNOTATE_BOTTOMUP_BBEND(MyStates, BB); + // Visit all the instructions, bottom-up. for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) { Instruction *Inst = llvm::prior(I); @@ -1662,7 +1993,7 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB, if (isa<InvokeInst>(Inst)) continue; - DEBUG(dbgs() << "ObjCARCOpt::VisitButtonUp: Visiting " << *Inst << "\n"); + DEBUG(dbgs() << "Visiting " << *Inst << "\n"); NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates); } @@ -1677,6 +2008,10 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB, NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates); } + // If ARC Annotations are enabled, output the current state of pointers at the + // top of the basic block. + ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB); + return NestingDetected; } @@ -1690,11 +2025,10 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, switch (Class) { case IC_RetainBlock: - // An objc_retainBlock call with just a use may need to be kept, - // because it may be copying a block from the stack to the heap. - if (!IsRetainBlockOptimizable(Inst)) - break; - // FALLTHROUGH + // In OptimizeIndividualCalls, we have strength reduced all optimizable + // objc_retainBlocks to objc_retains. Thus at this point any + // objc_retainBlocks that we see are not optimizable. 
+ break; case IC_Retain: case IC_RetainRV: { Arg = GetObjCArg(Inst); @@ -1714,9 +2048,9 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, if (S.GetSeq() == S_Retain) NestingDetected = true; + ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain); S.ResetSequenceProgress(S_Retain); - S.RRI.IsRetainBlock = Class == IC_RetainBlock; - S.RRI.KnownSafe = S.IsKnownIncremented(); + S.RRI.KnownSafe = S.HasKnownPositiveRefCount(); S.RRI.Calls.insert(Inst); } @@ -1730,17 +2064,23 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, Arg = GetObjCArg(Inst); PtrState &S = MyStates.getPtrTopDownState(Arg); - S.ClearRefCount(); + S.ClearKnownPositiveRefCount(); + + Sequence OldSeq = S.GetSeq(); - switch (S.GetSeq()) { + MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind); + + switch (OldSeq) { case S_Retain: case S_CanRelease: - S.RRI.ReverseInsertPts.clear(); + if (OldSeq == S_Retain || ReleaseMetadata != 0) + S.RRI.ReverseInsertPts.clear(); // FALL THROUGH case S_Use: - S.RRI.ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind); + S.RRI.ReleaseMetadata = ReleaseMetadata; S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall(); Releases[Inst] = S.RRI; + ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None); S.ClearSequenceProgress(); break; case S_None: @@ -1776,10 +2116,13 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, // Check for possible releases. if (CanAlterRefCount(Inst, Ptr, PA, Class)) { - S.ClearRefCount(); + DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr + << "\n"); + S.ClearKnownPositiveRefCount(); switch (Seq) { case S_Retain: S.SetSeq(S_CanRelease); + ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease); assert(S.RRI.ReverseInsertPts.empty()); S.RRI.ReverseInsertPts.insert(Inst); @@ -1801,8 +2144,12 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, // Check for possible direct uses. switch (Seq) { case S_CanRelease: - if (CanUse(Inst, Ptr, PA, Class)) + if (CanUse(Inst, Ptr, PA, Class)) { + DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr + << "\n"); S.SetSeq(S_Use); + ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use); + } break; case S_Retain: case S_Use: @@ -1822,6 +2169,7 @@ bool ObjCARCOpt::VisitTopDown(BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates, DenseMap<Value *, RRInfo> &Releases) { + DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n"); bool NestingDetected = false; BBState &MyStates = BBStates[BB]; @@ -1843,15 +2191,26 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB, } } + // If ARC Annotations are enabled, output the current state of pointers at the + // top of the basic block. + ANNOTATE_TOPDOWN_BBSTART(MyStates, BB); + // Visit all the instructions, top-down. for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { Instruction *Inst = I; - DEBUG(dbgs() << "ObjCARCOpt::VisitTopDown: Visiting " << *Inst << "\n"); + DEBUG(dbgs() << "Visiting " << *Inst << "\n"); NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates); } + // If ARC Annotations are enabled, output the current state of pointers at the + // bottom of the basic block. 
+ ANNOTATE_TOPDOWN_BBEND(MyStates, BB); + +#ifdef ARC_ANNOTATIONS + if (!(EnableARCAnnotations && DisableCheckForCFGHazards)) +#endif CheckForCFGHazards(BB, BBStates, MyStates); return NestingDetected; } @@ -1983,6 +2342,8 @@ void ObjCARCOpt::MoveCalls(Value *Arg, Type *ArgTy = Arg->getType(); Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext())); + DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n"); + // Insert the new retain and release calls. for (SmallPtrSet<Instruction *, 2>::const_iterator PI = ReleasesToMove.ReverseInsertPts.begin(), @@ -1991,20 +2352,12 @@ void ObjCARCOpt::MoveCalls(Value *Arg, Value *MyArg = ArgTy == ParamTy ? Arg : new BitCastInst(Arg, ParamTy, "", InsertPt); CallInst *Call = - CallInst::Create(RetainsToMove.IsRetainBlock ? - getRetainBlockCallee(M) : getRetainCallee(M), - MyArg, "", InsertPt); + CallInst::Create(getRetainCallee(M), MyArg, "", InsertPt); Call->setDoesNotThrow(); - if (RetainsToMove.IsRetainBlock) - Call->setMetadata(CopyOnEscapeMDKind, - MDNode::get(M->getContext(), ArrayRef<Value *>())); - else - Call->setTailCall(); + Call->setTailCall(); - DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Inserting new Release: " << *Call - << "\n" - " At insertion point: " << *InsertPt - << "\n"); + DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n" + "At insertion point: " << *InsertPt << "\n"); } for (SmallPtrSet<Instruction *, 2>::const_iterator PI = RetainsToMove.ReverseInsertPts.begin(), @@ -2021,10 +2374,8 @@ void ObjCARCOpt::MoveCalls(Value *Arg, if (ReleasesToMove.IsTailCallRelease) Call->setTailCall(); - DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Inserting new Retain: " << *Call - << "\n" - " At insertion point: " << *InsertPt - << "\n"); + DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n" + "At insertion point: " << *InsertPt << "\n"); } // Delete the original retain and release calls. @@ -2034,8 +2385,7 @@ void ObjCARCOpt::MoveCalls(Value *Arg, Instruction *OrigRetain = *AI; Retains.blot(OrigRetain); DeadInsts.push_back(OrigRetain); - DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Deleting retain: " << *OrigRetain << - "\n"); + DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n"); } for (SmallPtrSet<Instruction *, 2>::const_iterator AI = ReleasesToMove.Calls.begin(), @@ -2043,9 +2393,9 @@ void ObjCARCOpt::MoveCalls(Value *Arg, Instruction *OrigRelease = *AI; Releases.erase(OrigRelease); DeadInsts.push_back(OrigRelease); - DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Deleting release: " << *OrigRelease - << "\n"); + DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n"); } + } bool @@ -2075,7 +2425,6 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> unsigned OldCount = 0; unsigned NewCount = 0; bool FirstRelease = true; - bool FirstRetain = true; for (;;) { for (SmallVectorImpl<Instruction *>::const_iterator NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) { @@ -2156,16 +2505,6 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> OldDelta += PathCount; OldCount += PathCount; - // Merge the IsRetainBlock values. - if (FirstRetain) { - RetainsToMove.IsRetainBlock = NewReleaseRetainRRI.IsRetainBlock; - FirstRetain = false; - } else if (ReleasesToMove.IsRetainBlock != - NewReleaseRetainRRI.IsRetainBlock) - // It's not possible to merge the sequences if one uses - // objc_retain and the other uses objc_retainBlock. - return false; - // Collect the optimal insertion points. 
if (!KnownSafe) for (SmallPtrSet<Instruction *, 2>::const_iterator @@ -2210,6 +2549,12 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> if (OldDelta != 0) return false; +#ifdef ARC_ANNOTATIONS + // Do not move calls if ARC annotations are requested. + if (EnableARCAnnotations) + return false; +#endif // ARC_ANNOTATIONS + Changed = true; assert(OldCount != 0 && "Unreachable code?"); NumRRs += OldCount - NewCount; @@ -2228,6 +2573,8 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState> MapVector<Value *, RRInfo> &Retains, DenseMap<Value *, RRInfo> &Releases, Module *M) { + DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n"); + bool AnyPairsCompletelyEliminated = false; RRInfo RetainsToMove; RRInfo ReleasesToMove; @@ -2243,8 +2590,7 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState> Instruction *Retain = cast<Instruction>(V); - DEBUG(dbgs() << "ObjCARCOpt::PerformCodePlacement: Visiting: " << *Retain - << "\n"); + DEBUG(dbgs() << "Visiting: " << *Retain << "\n"); Value *Arg = GetObjCArg(Retain); @@ -2295,14 +2641,15 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState> /// Weak pointer optimizations. void ObjCARCOpt::OptimizeWeakCalls(Function &F) { + DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n"); + // First, do memdep-style RLE and S2L optimizations. We can't use memdep // itself because it uses AliasAnalysis and we need to do provenance // queries instead. for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) { Instruction *Inst = &*I++; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Visiting: " << *Inst << - "\n"); + DEBUG(dbgs() << "Visiting: " << *Inst << "\n"); InstructionClass Class = GetBasicInstructionClass(Inst); if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained) @@ -2392,6 +2739,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) { goto clobbered; case IC_AutoreleasepoolPush: case IC_None: + case IC_IntrinsicUser: case IC_User: // Weak pointers are only modified through the weak entry points // (and arbitrary calls, which could call the weak entry points). @@ -2449,9 +2797,6 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) { done:; } } - - DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Finished List.\n\n"); - } /// Identify program paths which execute sequences of retains and releases which @@ -2476,6 +2821,88 @@ bool ObjCARCOpt::OptimizeSequences(Function &F) { NestingDetected; } +/// Check if there is a dependent call earlier that does not have anything in +/// between the Retain and the call that can affect the reference count of their +/// shared pointer argument. Note that Retain need not be in BB. +static bool +HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain, + SmallPtrSet<Instruction *, 4> &DepInsts, + SmallPtrSet<const BasicBlock *, 4> &Visited, + ProvenanceAnalysis &PA) { + FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain, + DepInsts, Visited, PA); + if (DepInsts.size() != 1) + return false; + + CallInst *Call = + dyn_cast_or_null<CallInst>(*DepInsts.begin()); + + // Check that the pointer is the return value of the call. + if (!Call || Arg != Call) + return false; + + // Check that the call is a regular call. 
+ InstructionClass Class = GetBasicInstructionClass(Call); + if (Class != IC_CallOrUser && Class != IC_Call) + return false; + + return true; +} + +/// Find a dependent retain that precedes the given autorelease for which there +/// is nothing in between the two instructions that can affect the ref count of +/// Arg. +static CallInst * +FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB, + Instruction *Autorelease, + SmallPtrSet<Instruction *, 4> &DepInsts, + SmallPtrSet<const BasicBlock *, 4> &Visited, + ProvenanceAnalysis &PA) { + FindDependencies(CanChangeRetainCount, Arg, + BB, Autorelease, DepInsts, Visited, PA); + if (DepInsts.size() != 1) + return 0; + + CallInst *Retain = + dyn_cast_or_null<CallInst>(*DepInsts.begin()); + + // Check that we found a retain with the same argument. + if (!Retain || + !IsRetain(GetBasicInstructionClass(Retain)) || + GetObjCArg(Retain) != Arg) { + return 0; + } + + return Retain; +} + +/// Look for an ``autorelease'' instruction dependent on Arg such that there are +/// no instructions dependent on Arg that need a positive ref count in between +/// the autorelease and the ret. +static CallInst * +FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB, + ReturnInst *Ret, + SmallPtrSet<Instruction *, 4> &DepInsts, + SmallPtrSet<const BasicBlock *, 4> &V, + ProvenanceAnalysis &PA) { + FindDependencies(NeedsPositiveRetainCount, Arg, + BB, Ret, DepInsts, V, PA); + if (DepInsts.size() != 1) + return 0; + + CallInst *Autorelease = + dyn_cast_or_null<CallInst>(*DepInsts.begin()); + if (!Autorelease) + return 0; + InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease); + if (!IsAutorelease(AutoreleaseClass)) + return 0; + if (GetObjCArg(Autorelease) != Arg) + return 0; + + return Autorelease; +} + /// Look for this pattern: /// \code /// %call = call i8* @something(...) @@ -2484,122 +2911,91 @@ bool ObjCARCOpt::OptimizeSequences(Function &F) { /// ret i8* %3 /// \endcode /// And delete the retain and autorelease. -/// -/// Otherwise if it's just this: -/// \code -/// %3 = call i8* @objc_autorelease(i8* %2) -/// ret i8* %3 -/// \endcode -/// convert the autorelease to autoreleaseRV. void ObjCARCOpt::OptimizeReturns(Function &F) { if (!F.getReturnType()->isPointerTy()) return; + DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n"); + SmallPtrSet<Instruction *, 4> DependingInstructions; SmallPtrSet<const BasicBlock *, 4> Visited; for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) { BasicBlock *BB = FI; ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back()); - DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Visiting: " << *Ret << "\n"); + DEBUG(dbgs() << "Visiting: " << *Ret << "\n"); - if (!Ret) continue; + if (!Ret) + continue; const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0)); - FindDependencies(NeedsPositiveRetainCount, Arg, - BB, Ret, DependingInstructions, Visited, PA); - if (DependingInstructions.size() != 1) - goto next_block; - - { - CallInst *Autorelease = - dyn_cast_or_null<CallInst>(*DependingInstructions.begin()); - if (!Autorelease) - goto next_block; - InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease); - if (!IsAutorelease(AutoreleaseClass)) - goto next_block; - if (GetObjCArg(Autorelease) != Arg) - goto next_block; - - DependingInstructions.clear(); - Visited.clear(); - - // Check that there is nothing that can affect the reference - // count between the autorelease and the retain. 
- FindDependencies(CanChangeRetainCount, Arg, - BB, Autorelease, DependingInstructions, Visited, PA); - if (DependingInstructions.size() != 1) - goto next_block; - - { - CallInst *Retain = - dyn_cast_or_null<CallInst>(*DependingInstructions.begin()); - - // Check that we found a retain with the same argument. - if (!Retain || - !IsRetain(GetBasicInstructionClass(Retain)) || - GetObjCArg(Retain) != Arg) - goto next_block; - - DependingInstructions.clear(); - Visited.clear(); - - // Convert the autorelease to an autoreleaseRV, since it's - // returning the value. - if (AutoreleaseClass == IC_Autorelease) { - DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Converting autorelease " - "=> autoreleaseRV since it's returning a value.\n" - " In: " << *Autorelease - << "\n"); - Autorelease->setCalledFunction(getAutoreleaseRVCallee(F.getParent())); - DEBUG(dbgs() << " Out: " << *Autorelease - << "\n"); - Autorelease->setTailCall(); // Always tail call autoreleaseRV. - AutoreleaseClass = IC_AutoreleaseRV; - } - - // Check that there is nothing that can affect the reference - // count between the retain and the call. - // Note that Retain need not be in BB. - FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain, - DependingInstructions, Visited, PA); - if (DependingInstructions.size() != 1) - goto next_block; - { - CallInst *Call = - dyn_cast_or_null<CallInst>(*DependingInstructions.begin()); + // Look for an ``autorelease'' instruction that is a predecessor of Ret and + // dependent on Arg such that there are no instructions dependent on Arg + // that need a positive ref count in between the autorelease and Ret. + CallInst *Autorelease = + FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret, + DependingInstructions, Visited, + PA); + DependingInstructions.clear(); + Visited.clear(); - // Check that the pointer is the return value of the call. - if (!Call || Arg != Call) - goto next_block; + if (!Autorelease) + continue; - // Check that the call is a regular call. - InstructionClass Class = GetBasicInstructionClass(Call); - if (Class != IC_CallOrUser && Class != IC_Call) - goto next_block; + CallInst *Retain = + FindPredecessorRetainWithSafePath(Arg, BB, Autorelease, + DependingInstructions, Visited, PA); + DependingInstructions.clear(); + Visited.clear(); - // If so, we can zap the retain and autorelease. - Changed = true; - ++NumRets; - DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Erasing: " << *Retain - << "\n Erasing: " - << *Autorelease << "\n"); - EraseInstruction(Retain); - EraseInstruction(Autorelease); - } - } - } + if (!Retain) + continue; - next_block: + // Check that there is nothing that can affect the reference count + // between the retain and the call. Note that Retain need not be in BB. + bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain, + DependingInstructions, + Visited, PA); DependingInstructions.clear(); Visited.clear(); + + if (!HasSafePathToCall) + continue; + + // If so, we can zap the retain and autorelease. + Changed = true; + ++NumRets; + DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " + << *Autorelease << "\n"); + EraseInstruction(Retain); + EraseInstruction(Autorelease); } +} - DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Finished List.\n\n"); +#ifndef NDEBUG +void +ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) { + llvm::Statistic &NumRetains = + AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt; + llvm::Statistic &NumReleases = + AfterOptimization? 
NumReleasesAfterOpt : NumReleasesBeforeOpt; + for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) { + Instruction *Inst = &*I++; + switch (GetBasicInstructionClass(Inst)) { + default: + break; + case IC_Retain: + ++NumRetains; + break; + case IC_Release: + ++NumReleases; + break; + } + } } +#endif bool ObjCARCOpt::doInitialization(Module &M) { if (!EnableARCOpts) @@ -2617,13 +3013,20 @@ bool ObjCARCOpt::doInitialization(Module &M) { M.getContext().getMDKindID("clang.arc.copy_on_escape"); NoObjCARCExceptionsMDKind = M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions"); +#ifdef ARC_ANNOTATIONS + ARCAnnotationBottomUpMDKind = + M.getContext().getMDKindID("llvm.arc.annotation.bottomup"); + ARCAnnotationTopDownMDKind = + M.getContext().getMDKindID("llvm.arc.annotation.topdown"); + ARCAnnotationProvenanceSourceMDKind = + M.getContext().getMDKindID("llvm.arc.annotation.provenancesource"); +#endif // ARC_ANNOTATIONS // Intuitively, objc_retain and others are nocapture, however in practice // they are not, because they return their argument value. And objc_release // calls finalizers which can have arbitrary side effects. // These are initialized lazily. - RetainRVCallee = 0; AutoreleaseRVCallee = 0; ReleaseCallee = 0; RetainCallee = 0; @@ -2643,7 +3046,8 @@ bool ObjCARCOpt::runOnFunction(Function &F) { Changed = false; - DEBUG(dbgs() << "ObjCARCOpt: Visiting Function: " << F.getName() << "\n"); + DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>" + "\n"); PA.setAA(&getAnalysis<AliasAnalysis>()); @@ -2651,7 +3055,7 @@ bool ObjCARCOpt::runOnFunction(Function &F) { // when compiling code that isn't ObjC, skip these if the relevant ObjC // library functions aren't declared. - // Preliminary optimizations. This also computs UsedInThisFunction. + // Preliminary optimizations. This also computes UsedInThisFunction. OptimizeIndividualCalls(F); // Optimizations for weak pointers. @@ -2678,6 +3082,13 @@ bool ObjCARCOpt::runOnFunction(Function &F) { (1 << IC_AutoreleaseRV))) OptimizeReturns(F); + // Gather statistics after optimization. +#ifndef NDEBUG + if (AreStatisticsEnabled()) { + GatherStatistics(F, true); + } +#endif + DEBUG(dbgs() << "\n"); return Changed; diff --git a/lib/Transforms/ObjCARC/ObjCARCUtil.cpp b/lib/Transforms/ObjCARC/ObjCARCUtil.cpp index a841c64..03e12d4 100644 --- a/lib/Transforms/ObjCARC/ObjCARCUtil.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCUtil.cpp @@ -72,6 +72,8 @@ raw_ostream &llvm::objcarc::operator<<(raw_ostream &OS, return OS << "IC_Call"; case IC_User: return OS << "IC_User"; + case IC_IntrinsicUser: + return OS << "IC_IntrinsicUser"; case IC_None: return OS << "IC_None"; } @@ -81,10 +83,11 @@ raw_ostream &llvm::objcarc::operator<<(raw_ostream &OS, InstructionClass llvm::objcarc::GetFunctionClass(const Function *F) { Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end(); - // No arguments. + // No (mandatory) arguments. if (AI == AE) return StringSwitch<InstructionClass>(F->getName()) .Case("objc_autoreleasePoolPush", IC_AutoreleasepoolPush) + .Case("clang.arc.use", IC_IntrinsicUser) .Default(IC_CallOrUser); // One argument. @@ -142,6 +145,14 @@ InstructionClass llvm::objcarc::GetFunctionClass(const Function *F) { return StringSwitch<InstructionClass>(F->getName()) .Case("objc_moveWeak", IC_MoveWeak) .Case("objc_copyWeak", IC_CopyWeak) + // Ignore annotation calls. 
This is important to stop the + // optimizer from treating annotations as uses which would + // make the state of the pointers they are attempting to + // elucidate to be incorrect. + .Case("llvm.arc.annotation.topdown.bbstart", IC_None) + .Case("llvm.arc.annotation.topdown.bbend", IC_None) + .Case("llvm.arc.annotation.bottomup.bbstart", IC_None) + .Case("llvm.arc.annotation.bottomup.bbend", IC_None) .Default(IC_CallOrUser); } diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp index 015fd2e..615c517 100644 --- a/lib/Transforms/Scalar/CodeGenPrepare.cpp +++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp @@ -1761,7 +1761,7 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) { if (!DefIsLiveOut) return false; - // Make sure non of the uses are PHI nodes. + // Make sure none of the uses are PHI nodes. for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end(); UI != E; ++UI) { Instruction *User = cast<Instruction>(*UI); diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp index 14e463a..4796eb2 100644 --- a/lib/Transforms/Scalar/GlobalMerge.cpp +++ b/lib/Transforms/Scalar/GlobalMerge.cpp @@ -61,10 +61,8 @@ #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" -#include "llvm/IR/InlineAsm.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" -#include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" @@ -101,7 +99,7 @@ namespace { /// Collect every variables marked as "used" void collectUsedGlobalVariables(Module &M); - /// Keep track of the GlobalVariable that are marked as "used" + /// Keep track of the GlobalVariable that must not be merged away SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables; public: @@ -113,6 +111,7 @@ namespace { virtual bool doInitialization(Module &M); virtual bool runOnFunction(Function &F); + virtual bool doFinalization(Module &M); const char *getPassName() const { return "Merge internal globals"; @@ -201,9 +200,8 @@ void GlobalMerge::collectUsedGlobalVariables(Module &M) { if (!GV || !GV->hasInitializer()) return; // Should be an array of 'i8*'. - const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer()); - if (InitList == 0) return; - + const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer()); + for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) if (const GlobalVariable *G = dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts())) @@ -211,9 +209,6 @@ void GlobalMerge::collectUsedGlobalVariables(Module &M) { } void GlobalMerge::setMustKeepGlobalVariables(Module &M) { - // If we already processed a Module, UsedGlobalVariables may have been - // populated. Reset the information for this module. 
- MustKeepGlobalVariables.clear(); collectUsedGlobalVariables(M); for (Module::iterator IFn = M.begin(), IEndFn = M.end(); IFn != IEndFn; @@ -268,8 +263,6 @@ bool GlobalMerge::doInitialization(Module &M) { continue; // Ignore all "required" globals: - // - the ones used for EH - // - the ones marked with "used" attribute if (isMustKeepGlobalVariable(I)) continue; @@ -307,6 +300,11 @@ bool GlobalMerge::runOnFunction(Function &F) { return false; } +bool GlobalMerge::doFinalization(Module &M) { + MustKeepGlobalVariables.clear(); + return false; +} + Pass *llvm::createGlobalMergePass(const TargetLowering *tli) { return new GlobalMerge(tli); } diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index 97fff7e..8e76c78 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -535,6 +535,45 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { if (!SE->isLoopInvariant(ExitValue, L)) continue; + // Computing the value outside of the loop brings no benefit if : + // - it is definitely used inside the loop in a way which can not be + // optimized away. + // - no use outside of the loop can take advantage of hoisting the + // computation out of the loop + if (ExitValue->getSCEVType()>=scMulExpr) { + unsigned NumHardInternalUses = 0; + unsigned NumSoftExternalUses = 0; + unsigned NumUses = 0; + for (Value::use_iterator IB=Inst->use_begin(), IE=Inst->use_end(); + IB!=IE && NumUses<=6 ; ++IB) { + Instruction *UseInstr = cast<Instruction>(*IB); + unsigned Opc = UseInstr->getOpcode(); + NumUses++; + if (L->contains(UseInstr)) { + if (Opc == Instruction::Call || Opc == Instruction::Ret) + NumHardInternalUses++; + } else { + if (Opc == Instruction::PHI) { + // Do not count the Phi as a use. LCSSA may have inserted + // plenty of trivial ones. + NumUses--; + for (Value::use_iterator PB=UseInstr->use_begin(), + PE=UseInstr->use_end(); + PB!=PE && NumUses<=6 ; ++PB, ++NumUses) { + unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode(); + if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret) + NumSoftExternalUses++; + } + continue; + } + if (Opc != Instruction::Call && Opc != Instruction::Ret) + NumSoftExternalUses++; + } + } + if (NumUses <= 6 && NumHardInternalUses && !NumSoftExternalUses) + continue; + } + Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst); DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n' diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp index 9c67e32..0b62050 100644 --- a/lib/Transforms/Scalar/LoopDeletion.cpp +++ b/lib/Transforms/Scalar/LoopDeletion.cpp @@ -34,13 +34,9 @@ namespace { } // Possibly eliminate loop L if it is dead. 
- bool runOnLoop(Loop* L, LPPassManager& LPM); + bool runOnLoop(Loop *L, LPPassManager &LPM); - bool IsLoopDead(Loop* L, SmallVector<BasicBlock*, 4>& exitingBlocks, - SmallVector<BasicBlock*, 4>& exitBlocks, - bool &Changed, BasicBlock *Preheader); - - virtual void getAnalysisUsage(AnalysisUsage& AU) const { + virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<DominatorTree>(); AU.addRequired<LoopInfo>(); AU.addRequired<ScalarEvolution>(); @@ -53,6 +49,12 @@ namespace { AU.addPreservedID(LoopSimplifyID); AU.addPreservedID(LCSSAID); } + + private: + bool isLoopDead(Loop *L, SmallVector<BasicBlock*, 4> &exitingBlocks, + SmallVector<BasicBlock*, 4> &exitBlocks, + bool &Changed, BasicBlock *Preheader); + }; } @@ -67,18 +69,18 @@ INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_END(LoopDeletion, "loop-deletion", "Delete dead loops", false, false) -Pass* llvm::createLoopDeletionPass() { +Pass *llvm::createLoopDeletionPass() { return new LoopDeletion(); } -/// IsLoopDead - Determined if a loop is dead. This assumes that we've already +/// isLoopDead - Determined if a loop is dead. This assumes that we've already /// checked for unique exit and exiting blocks, and that the code is in LCSSA /// form. -bool LoopDeletion::IsLoopDead(Loop* L, - SmallVector<BasicBlock*, 4>& exitingBlocks, - SmallVector<BasicBlock*, 4>& exitBlocks, +bool LoopDeletion::isLoopDead(Loop *L, + SmallVector<BasicBlock*, 4> &exitingBlocks, + SmallVector<BasicBlock*, 4> &exitBlocks, bool &Changed, BasicBlock *Preheader) { - BasicBlock* exitBlock = exitBlocks[0]; + BasicBlock *exitBlock = exitBlocks[0]; // Make sure that all PHI entries coming from the loop are loop invariant. // Because the code is in LCSSA form, any values used outside of the loop @@ -86,19 +88,19 @@ bool LoopDeletion::IsLoopDead(Loop* L, // sufficient to guarantee that no loop-variant values are used outside // of the loop. BasicBlock::iterator BI = exitBlock->begin(); - while (PHINode* P = dyn_cast<PHINode>(BI)) { - Value* incoming = P->getIncomingValueForBlock(exitingBlocks[0]); + while (PHINode *P = dyn_cast<PHINode>(BI)) { + Value *incoming = P->getIncomingValueForBlock(exitingBlocks[0]); // Make sure all exiting blocks produce the same incoming value for the exit // block. If there are different incoming values for different exiting // blocks, then it is impossible to statically determine which value should // be used. - for (unsigned i = 1; i < exitingBlocks.size(); ++i) { + for (unsigned i = 1, e = exitingBlocks.size(); i < e; ++i) { if (incoming != P->getIncomingValueForBlock(exitingBlocks[i])) return false; } - if (Instruction* I = dyn_cast<Instruction>(incoming)) + if (Instruction *I = dyn_cast<Instruction>(incoming)) if (!L->makeLoopInvariant(I, Changed, Preheader->getTerminator())) return false; @@ -127,10 +129,10 @@ bool LoopDeletion::IsLoopDead(Loop* L, /// so could change the halting/non-halting nature of a program. /// NOTE: This entire process relies pretty heavily on LoopSimplify and LCSSA /// in order to make various safety checks work. -bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) { +bool LoopDeletion::runOnLoop(Loop *L, LPPassManager &LPM) { // We can only remove the loop if there is a preheader that we can // branch from after removing it. - BasicBlock* preheader = L->getLoopPreheader(); + BasicBlock *preheader = L->getLoopPreheader(); if (!preheader) return false; @@ -158,19 +160,19 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) { // Finally, we have to check that the loop really is dead. 
bool Changed = false; - if (!IsLoopDead(L, exitingBlocks, exitBlocks, Changed, preheader)) + if (!isLoopDead(L, exitingBlocks, exitBlocks, Changed, preheader)) return Changed; // Don't remove loops for which we can't solve the trip count. // They could be infinite, in which case we'd be changing program behavior. - ScalarEvolution& SE = getAnalysis<ScalarEvolution>(); + ScalarEvolution &SE = getAnalysis<ScalarEvolution>(); const SCEV *S = SE.getMaxBackedgeTakenCount(L); if (isa<SCEVCouldNotCompute>(S)) return Changed; // Now that we know the removal is safe, remove the loop by changing the // branch from the preheader to go to the single exit block. - BasicBlock* exitBlock = exitBlocks[0]; + BasicBlock *exitBlock = exitBlocks[0]; // Because we're deleting a large chunk of code at once, the sequence in which // we remove things is very important to avoid invalidation issues. Don't @@ -182,14 +184,14 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) { SE.forgetLoop(L); // Connect the preheader directly to the exit block. - TerminatorInst* TI = preheader->getTerminator(); + TerminatorInst *TI = preheader->getTerminator(); TI->replaceUsesOfWith(L->getHeader(), exitBlock); // Rewrite phis in the exit block to get their inputs from // the preheader instead of the exiting block. - BasicBlock* exitingBlock = exitingBlocks[0]; + BasicBlock *exitingBlock = exitingBlocks[0]; BasicBlock::iterator BI = exitBlock->begin(); - while (PHINode* P = dyn_cast<PHINode>(BI)) { + while (PHINode *P = dyn_cast<PHINode>(BI)) { int j = P->getBasicBlockIndex(exitingBlock); assert(j >= 0 && "Can't find exiting block in exit block's phi node!"); P->setIncomingBlock(j, preheader); @@ -200,7 +202,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) { // Update the dominator tree and remove the instructions and blocks that will // be deleted from the reference counting scheme. - DominatorTree& DT = getAnalysis<DominatorTree>(); + DominatorTree &DT = getAnalysis<DominatorTree>(); SmallVector<DomTreeNode*, 8> ChildNodes; for (Loop::block_iterator LI = L->block_begin(), LE = L->block_end(); LI != LE; ++LI) { @@ -230,7 +232,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) { // Finally, the blocks from loopinfo. This has to happen late because // otherwise our loop iterators won't work. - LoopInfo& loopInfo = getAnalysis<LoopInfo>(); + LoopInfo &loopInfo = getAnalysis<LoopInfo>(); SmallPtrSet<BasicBlock*, 8> blocks; blocks.insert(L->block_begin(), L->block_end()); for (SmallPtrSet<BasicBlock*,8>::iterator I = blocks.begin(), diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 4e4cb86..73e44d7 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -895,7 +895,7 @@ void Cost::RatePrimaryRegister(const SCEV *Reg, } if (Regs.insert(Reg)) { RateRegister(Reg, Regs, L, SE, DT); - if (isLoser()) + if (LoserRegs && isLoser()) LoserRegs->insert(Reg); } } @@ -1895,15 +1895,13 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { if (ICmpInst::isTrueWhenEqual(Pred)) { // Look for n+1, and grab n. 
if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) - if (isa<ConstantInt>(BO->getOperand(1)) && - cast<ConstantInt>(BO->getOperand(1))->isOne() && - SE.getSCEV(BO->getOperand(0)) == MaxRHS) - NewRHS = BO->getOperand(0); + if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) + if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) + NewRHS = BO->getOperand(0); if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) - if (isa<ConstantInt>(BO->getOperand(1)) && - cast<ConstantInt>(BO->getOperand(1))->isOne() && - SE.getSCEV(BO->getOperand(0)) == MaxRHS) - NewRHS = BO->getOperand(0); + if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) + if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) + NewRHS = BO->getOperand(0); if (!NewRHS) return Cond; } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) @@ -2716,6 +2714,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, // by LSR. const IVInc &Head = Chain.Incs[0]; User::op_iterator IVOpEnd = Head.UserInst->op_end(); + // findIVOperand returns IVOpEnd if it can no longer find a valid IV user. User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(), IVOpEnd, L, SE); Value *IVSrc = 0; diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp index 0da3746..a3c241d 100644 --- a/lib/Transforms/Scalar/Reassociate.cpp +++ b/lib/Transforms/Scalar/Reassociate.cpp @@ -110,6 +110,51 @@ namespace { } }; }; + + /// Utility class representing a non-constant Xor-operand. We classify + /// non-constant Xor-Operands into two categories: + /// C1) The operand is in the form "X & C", where C is a constant and C != ~0 + /// C2) + /// C2.1) The operand is in the form of "X | C", where C is a non-zero + /// constant. + /// C2.2) Any operand E which doesn't fall into C1 and C2.1, we view this + /// operand as "E | 0" + class XorOpnd { + public: + XorOpnd(Value *V); + const XorOpnd &operator=(const XorOpnd &That); + + bool isInvalid() const { return SymbolicPart == 0; } + bool isOrExpr() const { return isOr; } + Value *getValue() const { return OrigVal; } + Value *getSymbolicPart() const { return SymbolicPart; } + unsigned getSymbolicRank() const { return SymbolicRank; } + const APInt &getConstPart() const { return ConstPart; } + + void Invalidate() { SymbolicPart = OrigVal = 0; } + void setSymbolicRank(unsigned R) { SymbolicRank = R; } + + // Sort the XorOpnd-Pointer in ascending order of symbolic-value-rank. + // The purpose is twofold: + // 1) Cluster together the operands sharing the same symbolic-value. + // 2) Operand having smaller symbolic-value-rank is permuted earlier, which + // could potentially shorten crital path, and expose more loop-invariants. + // Note that values' rank are basically defined in RPO order (FIXME). + // So, if Rank(X) < Rank(Y) < Rank(Z), it means X is defined earlier + // than Y which is defined earlier than Z. Permute "x | 1", "Y & 2", + // "z" in the order of X-Y-Z is better than any other orders. 
+ struct PtrSortFunctor { + bool operator()(XorOpnd * const &LHS, XorOpnd * const &RHS) { + return LHS->getSymbolicRank() < RHS->getSymbolicRank(); + } + }; + private: + Value *OrigVal; + Value *SymbolicPart; + APInt ConstPart; + unsigned SymbolicRank; + bool isOr; + }; } namespace { @@ -137,6 +182,11 @@ namespace { Value *OptimizeExpression(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops); Value *OptimizeAdd(Instruction *I, SmallVectorImpl<ValueEntry> &Ops); + Value *OptimizeXor(Instruction *I, SmallVectorImpl<ValueEntry> &Ops); + bool CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, APInt &ConstOpnd, + Value *&Res); + bool CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2, + APInt &ConstOpnd, Value *&Res); bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops, SmallVectorImpl<Factor> &Factors); Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder, @@ -148,6 +198,42 @@ namespace { }; } +XorOpnd::XorOpnd(Value *V) { + assert(!isa<ConstantInt>(V) && "No ConstantInt"); + OrigVal = V; + Instruction *I = dyn_cast<Instruction>(V); + SymbolicRank = 0; + + if (I && (I->getOpcode() == Instruction::Or || + I->getOpcode() == Instruction::And)) { + Value *V0 = I->getOperand(0); + Value *V1 = I->getOperand(1); + if (isa<ConstantInt>(V0)) + std::swap(V0, V1); + + if (ConstantInt *C = dyn_cast<ConstantInt>(V1)) { + ConstPart = C->getValue(); + SymbolicPart = V0; + isOr = (I->getOpcode() == Instruction::Or); + return; + } + } + + // view the operand as "V | 0" + SymbolicPart = V; + ConstPart = APInt::getNullValue(V->getType()->getIntegerBitWidth()); + isOr = true; +} + +const XorOpnd &XorOpnd::operator=(const XorOpnd &That) { + OrigVal = That.OrigVal; + SymbolicPart = That.SymbolicPart; + ConstPart = That.ConstPart; + SymbolicRank = That.SymbolicRank; + isOr = That.isOr; + return *this; +} + char Reassociate::ID = 0; INITIALIZE_PASS(Reassociate, "reassociate", "Reassociate expressions", false, false) @@ -1040,6 +1126,250 @@ static Value *OptimizeAndOrXor(unsigned Opcode, return 0; } +/// Helper funciton of CombineXorOpnd(). It creates a bitwise-and +/// instruction with the given two operands, and return the resulting +/// instruction. There are two special cases: 1) if the constant operand is 0, +/// it will return NULL. 2) if the constant is ~0, the symbolic operand will +/// be returned. +static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd, + const APInt &ConstOpnd) { + if (ConstOpnd != 0) { + if (!ConstOpnd.isAllOnesValue()) { + LLVMContext &Ctx = Opnd->getType()->getContext(); + Instruction *I; + I = BinaryOperator::CreateAnd(Opnd, ConstantInt::get(Ctx, ConstOpnd), + "and.ra", InsertBefore); + I->setDebugLoc(InsertBefore->getDebugLoc()); + return I; + } + return Opnd; + } + return 0; +} + +// Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd" +// into "R ^ C", where C would be 0, and R is a symbolic value. +// +// If it was successful, true is returned, and the "R" and "C" is returned +// via "Res" and "ConstOpnd", respectively; otherwise, false is returned, +// and both "Res" and "ConstOpnd" remain unchanged. +// +bool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, + APInt &ConstOpnd, Value *&Res) { + // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2 + // = ((x | c1) ^ c1) ^ (c1 ^ c2) + // = (x & ~c1) ^ (c1 ^ c2) + // It is useful only when c1 == c2. 
+ if (Opnd1->isOrExpr() && Opnd1->getConstPart() != 0) { + if (!Opnd1->getValue()->hasOneUse()) + return false; + + const APInt &C1 = Opnd1->getConstPart(); + if (C1 != ConstOpnd) + return false; + + Value *X = Opnd1->getSymbolicPart(); + Res = createAndInstr(I, X, ~C1); + // ConstOpnd was C2, now C1 ^ C2. + ConstOpnd ^= C1; + + if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue())) + RedoInsts.insert(T); + return true; + } + return false; +} + + +// Helper function of OptimizeXor(). It tries to simplify +// "Opnd1 ^ Opnd2 ^ ConstOpnd" into "R ^ C", where C would be 0, and R is a +// symbolic value. +// +// If it was successful, true is returned, and the "R" and "C" is returned +// via "Res" and "ConstOpnd", respectively (If the entire expression is +// evaluated to a constant, the Res is set to NULL); otherwise, false is +// returned, and both "Res" and "ConstOpnd" remain unchanged. +bool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2, + APInt &ConstOpnd, Value *&Res) { + Value *X = Opnd1->getSymbolicPart(); + if (X != Opnd2->getSymbolicPart()) + return false; + + // This many instruction become dead.(At least "Opnd1 ^ Opnd2" will die.) + int DeadInstNum = 1; + if (Opnd1->getValue()->hasOneUse()) + DeadInstNum++; + if (Opnd2->getValue()->hasOneUse()) + DeadInstNum++; + + // Xor-Rule 2: + // (x | c1) ^ (x & c2) + // = (x|c1) ^ (x&c2) ^ (c1 ^ c1) = ((x|c1) ^ c1) ^ (x & c2) ^ c1 + // = (x & ~c1) ^ (x & c2) ^ c1 // Xor-Rule 1 + // = (x & c3) ^ c1, where c3 = ~c1 ^ c2 // Xor-rule 3 + // + if (Opnd1->isOrExpr() != Opnd2->isOrExpr()) { + if (Opnd2->isOrExpr()) + std::swap(Opnd1, Opnd2); + + const APInt &C1 = Opnd1->getConstPart(); + const APInt &C2 = Opnd2->getConstPart(); + APInt C3((~C1) ^ C2); + + // Do not increase code size! + if (C3 != 0 && !C3.isAllOnesValue()) { + int NewInstNum = ConstOpnd != 0 ? 1 : 2; + if (NewInstNum > DeadInstNum) + return false; + } + + Res = createAndInstr(I, X, C3); + ConstOpnd ^= C1; + + } else if (Opnd1->isOrExpr()) { + // Xor-Rule 3: (x | c1) ^ (x | c2) = (x & c3) ^ c3 where c3 = c1 ^ c2 + // + const APInt &C1 = Opnd1->getConstPart(); + const APInt &C2 = Opnd2->getConstPart(); + APInt C3 = C1 ^ C2; + + // Do not increase code size + if (C3 != 0 && !C3.isAllOnesValue()) { + int NewInstNum = ConstOpnd != 0 ? 1 : 2; + if (NewInstNum > DeadInstNum) + return false; + } + + Res = createAndInstr(I, X, C3); + ConstOpnd ^= C3; + } else { + // Xor-Rule 4: (x & c1) ^ (x & c2) = (x & (c1^c2)) + // + const APInt &C1 = Opnd1->getConstPart(); + const APInt &C2 = Opnd2->getConstPart(); + APInt C3 = C1 ^ C2; + Res = createAndInstr(I, X, C3); + } + + // Put the original operands in the Redo list; hope they will be deleted + // as dead code. + if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue())) + RedoInsts.insert(T); + if (Instruction *T = dyn_cast<Instruction>(Opnd2->getValue())) + RedoInsts.insert(T); + + return true; +} + +/// Optimize a series of operands to an 'xor' instruction. If it can be reduced +/// to a single Value, it is returned, otherwise the Ops list is mutated as +/// necessary. 
+Value *Reassociate::OptimizeXor(Instruction *I, + SmallVectorImpl<ValueEntry> &Ops) { + if (Value *V = OptimizeAndOrXor(Instruction::Xor, Ops)) + return V; + + if (Ops.size() == 1) + return 0; + + SmallVector<XorOpnd, 8> Opnds; + SmallVector<XorOpnd*, 8> OpndPtrs; + Type *Ty = Ops[0].Op->getType(); + APInt ConstOpnd(Ty->getIntegerBitWidth(), 0); + + // Step 1: Convert ValueEntry to XorOpnd + for (unsigned i = 0, e = Ops.size(); i != e; ++i) { + Value *V = Ops[i].Op; + if (!isa<ConstantInt>(V)) { + XorOpnd O(V); + O.setSymbolicRank(getRank(O.getSymbolicPart())); + Opnds.push_back(O); + } else + ConstOpnd ^= cast<ConstantInt>(V)->getValue(); + } + + // NOTE: From this point on, do *NOT* add/delete element to/from "Opnds". + // It would otherwise invalidate the "Opnds"'s iterator, and hence invalidate + // the "OpndPtrs" as well. For the similar reason, do not fuse this loop + // with the previous loop --- the iterator of the "Opnds" may be invalidated + // when new elements are added to the vector. + for (unsigned i = 0, e = Opnds.size(); i != e; ++i) + OpndPtrs.push_back(&Opnds[i]); + + // Step 2: Sort the Xor-Operands in a way such that the operands containing + // the same symbolic value cluster together. For instance, the input operand + // sequence ("x | 123", "y & 456", "x & 789") will be sorted into: + // ("x | 123", "x & 789", "y & 456"). + std::sort(OpndPtrs.begin(), OpndPtrs.end(), XorOpnd::PtrSortFunctor()); + + // Step 3: Combine adjacent operands + XorOpnd *PrevOpnd = 0; + bool Changed = false; + for (unsigned i = 0, e = Opnds.size(); i < e; i++) { + XorOpnd *CurrOpnd = OpndPtrs[i]; + // The combined value + Value *CV; + + // Step 3.1: Try simplifying "CurrOpnd ^ ConstOpnd" + if (ConstOpnd != 0 && CombineXorOpnd(I, CurrOpnd, ConstOpnd, CV)) { + Changed = true; + if (CV) + *CurrOpnd = XorOpnd(CV); + else { + CurrOpnd->Invalidate(); + continue; + } + } + + if (!PrevOpnd || CurrOpnd->getSymbolicPart() != PrevOpnd->getSymbolicPart()) { + PrevOpnd = CurrOpnd; + continue; + } + + // step 3.2: When previous and current operands share the same symbolic + // value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd" + // + if (CombineXorOpnd(I, CurrOpnd, PrevOpnd, ConstOpnd, CV)) { + // Remove previous operand + PrevOpnd->Invalidate(); + if (CV) { + *CurrOpnd = XorOpnd(CV); + PrevOpnd = CurrOpnd; + } else { + CurrOpnd->Invalidate(); + PrevOpnd = 0; + } + Changed = true; + } + } + + // Step 4: Reassemble the Ops + if (Changed) { + Ops.clear(); + for (unsigned int i = 0, e = Opnds.size(); i < e; i++) { + XorOpnd &O = Opnds[i]; + if (O.isInvalid()) + continue; + ValueEntry VE(getRank(O.getValue()), O.getValue()); + Ops.push_back(VE); + } + if (ConstOpnd != 0) { + Value *C = ConstantInt::get(Ty->getContext(), ConstOpnd); + ValueEntry VE(getRank(C), C); + Ops.push_back(VE); + } + int Sz = Ops.size(); + if (Sz == 1) + return Ops.back().Op; + else if (Sz == 0) { + assert(ConstOpnd == 0); + return ConstantInt::get(Ty->getContext(), ConstOpnd); + } + } + + return 0; +} + /// OptimizeAdd - Optimize a series of operands to an 'add' instruction. This /// optimizes based on identities. If it can be reduced to a single Value, it /// is returned, otherwise the Ops list is mutated as necessary. 
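The new OptimizeXor/CombineXorOpnd logic above relies on the four algebraic Xor-Rules quoted in its comments. A minimal standalone sanity check of those identities (illustrative only, not part of the patch; X, C1 and C2 are arbitrary test values):

// Not part of the patch: a quick check of the Xor-Rules used by OptimizeXor.
// X, C1 and C2 are arbitrary 32-bit values chosen purely for illustration.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Xs[] = {0u, 0xDEADBEEFu, 0x12345678u, ~0u};
  const uint32_t C1 = 0x00FF00FFu, C2 = 0x0F0F0F0Fu;
  for (uint32_t X : Xs) {
    // Xor-Rule 1 (with c1 == c2): (x | c1) ^ c1 == x & ~c1
    assert(((X | C1) ^ C1) == (X & ~C1));
    // Xor-Rule 2: (x | c1) ^ (x & c2) == (x & (~c1 ^ c2)) ^ c1
    assert(((X | C1) ^ (X & C2)) == ((X & (~C1 ^ C2)) ^ C1));
    // Xor-Rule 3: (x | c1) ^ (x | c2) == (x & (c1 ^ c2)) ^ (c1 ^ c2)
    assert(((X | C1) ^ (X | C2)) == ((X & (C1 ^ C2)) ^ (C1 ^ C2)));
    // Xor-Rule 4: (x & c1) ^ (x & c2) == x & (c1 ^ c2)
    assert(((X & C1) ^ (X & C2)) == (X & (C1 ^ C2)));
  }
  return 0;
}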
@@ -1431,11 +1761,15 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, default: break; case Instruction::And: case Instruction::Or: - case Instruction::Xor: if (Value *Result = OptimizeAndOrXor(Opcode, Ops)) return Result; break; + case Instruction::Xor: + if (Value *Result = OptimizeXor(I, Ops)) + return Result; + break; + case Instruction::Add: if (Value *Result = OptimizeAdd(I, Ops)) return Result; diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp index 0e57e5c..d073e78 100644 --- a/lib/Transforms/Scalar/SROA.cpp +++ b/lib/Transforms/Scalar/SROA.cpp @@ -57,11 +57,15 @@ using namespace llvm; STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); -STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); -STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); +STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); +STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions"); +STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses found"); +STATISTIC(MaxPartitionUsesPerAlloca, "Maximum number of partition uses"); +STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); +STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); -STATISTIC(NumDeleted, "Number of instructions deleted"); -STATISTIC(NumVectorized, "Number of vectorized aggregates"); +STATISTIC(NumDeleted, "Number of instructions deleted"); +STATISTIC(NumVectorized, "Number of vectorized aggregates"); /// Hidden option to force the pass to not use DomTree and mem2reg, instead /// forming SSA values through the SSAUpdater infrastructure. @@ -69,6 +73,43 @@ static cl::opt<bool> ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden); namespace { +/// \brief A custom IRBuilder inserter which prefixes all names if they are +/// preserved. +template <bool preserveNames = true> +class IRBuilderPrefixedInserter : + public IRBuilderDefaultInserter<preserveNames> { + std::string Prefix; + +public: + void SetNamePrefix(const Twine &P) { Prefix = P.str(); } + +protected: + void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, + BasicBlock::iterator InsertPt) const { + IRBuilderDefaultInserter<preserveNames>::InsertHelper( + I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt); + } +}; + +// Specialization for not preserving the name is trivial. +template <> +class IRBuilderPrefixedInserter<false> : + public IRBuilderDefaultInserter<false> { +public: + void SetNamePrefix(const Twine &P) {} +}; + +/// \brief Provide a typedef for IRBuilder that drops names in release builds. +#ifndef NDEBUG +typedef llvm::IRBuilder<true, ConstantFolder, + IRBuilderPrefixedInserter<true> > IRBuilderTy; +#else +typedef llvm::IRBuilder<false, ConstantFolder, + IRBuilderPrefixedInserter<false> > IRBuilderTy; +#endif +} + +namespace { /// \brief A common base class for representing a half-open byte range. struct ByteRange { /// \brief The beginning offset of the range. @@ -888,7 +929,7 @@ private: uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - Offset.getLimitedValue(); - MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; + const MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd && Offsets.DestBegin == Offsets.SourceBegin) return markAsDead(II); // Skip identity transfers without side-effects. 
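The IRBuilderPrefixedInserter/IRBuilderTy change above is an instance of a plain C++ idiom: a boolean template parameter selects between a name-prefixing implementation and a do-nothing specialization, and an NDEBUG-guarded typedef picks one per build so release builds skip the string work entirely. A self-contained sketch of the same pattern, with the LLVM types replaced by hypothetical stand-ins:

    #include <iostream>
    #include <string>

    template <bool PreserveNames = true>
    struct PrefixedNamer {
      std::string Prefix;
      void setPrefix(const std::string &P) { Prefix = P; }
      std::string name(const std::string &N) const {
        return N.empty() ? N : Prefix + N;  // prefix only non-empty names
      }
    };

    // Trivial specialization: names are dropped entirely.
    template <>
    struct PrefixedNamer<false> {
      void setPrefix(const std::string &) {}
      std::string name(const std::string &) const { return std::string(); }
    };

    // Debug builds keep (prefixed) value names; release builds drop them.
    #ifndef NDEBUG
    typedef PrefixedNamer<true> NamerTy;
    #else
    typedef PrefixedNamer<false> NamerTy;
    #endif

    int main() {
      NamerTy Namer;
      Namer.setPrefix("alloca.16.");
      std::cout << Namer.name("load") << "\n"; // "alloca.16.load" in debug builds
      return 0;
    }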
@@ -1097,6 +1138,10 @@ AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI) splitAndMergePartitions(); } + // Record how many partitions we end up with. + NumAllocaPartitions += Partitions.size(); + MaxPartitionsPerAlloca = std::max<unsigned>(Partitions.size(), MaxPartitionsPerAlloca); + // Now build up the user lists for each of these disjoint partitions by // re-walking the recursive users of the alloca. Uses.resize(Partitions.size()); @@ -1104,6 +1149,14 @@ AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI) PtrI = UB.visitPtr(AI); assert(!PtrI.isEscaped() && "Previously analyzed pointer now escapes!"); assert(!PtrI.isAborted() && "Early aborted the visit of the pointer."); + + unsigned NumUses = 0; +#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) + for (unsigned Idx = 0, Size = Uses.size(); Idx != Size; ++Idx) + NumUses += Uses[Idx].size(); +#endif + NumAllocaPartitionUses += NumUses; + MaxPartitionUsesPerAlloca = std::max<unsigned>(NumUses, MaxPartitionUsesPerAlloca); } Type *AllocaPartitioning::getCommonType(iterator I) const { @@ -1265,12 +1318,12 @@ public: // may be zapped by an optimization pass in future. if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0))) Arg = dyn_cast<Argument>(ZExt->getOperand(0)); - if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0))) + else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0))) Arg = dyn_cast<Argument>(SExt->getOperand(0)); if (!Arg) - Arg = SI->getOperand(0); + Arg = SI->getValueOperand(); } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { - Arg = LI->getOperand(0); + Arg = LI->getPointerOperand(); } else { continue; } @@ -1494,7 +1547,7 @@ private: assert(!Loads.empty()); Type *LoadTy = cast<PointerType>(PN.getType())->getElementType(); - IRBuilder<> PHIBuilder(&PN); + IRBuilderTy PHIBuilder(&PN); PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), PN.getName() + ".sroa.speculated"); @@ -1517,7 +1570,7 @@ private: TerminatorInst *TI = Pred->getTerminator(); Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx)); Value *InVal = PN.getIncomingValue(Idx); - IRBuilder<> PredBuilder(TI); + IRBuilderTy PredBuilder(TI); LoadInst *Load = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." + @@ -1598,7 +1651,7 @@ private: if (!isSafeSelectToSpeculate(SI, Loads)) return; - IRBuilder<> IRB(&SI); + IRBuilderTy IRB(&SI); Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) }; AllocaPartitioning::iterator PIs[2]; PartitionUse PUs[2]; @@ -1662,9 +1715,8 @@ private: /// /// This will return the BasePtr if that is valid, or build a new GEP /// instruction using the IRBuilder if GEP-ing is needed. -static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, - SmallVectorImpl<Value *> &Indices, - const Twine &Prefix) { +static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, + SmallVectorImpl<Value *> &Indices) { if (Indices.empty()) return BasePtr; @@ -1673,7 +1725,7 @@ static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero()) return BasePtr; - return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx"); + return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx"); } /// \brief Get a natural GEP off of the BasePtr walking through Ty toward @@ -1685,12 +1737,11 @@ static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, /// TargetTy. If we can't find one with the same type, we at least try to use /// one with the same size. 
If none of that works, we just produce the GEP as /// indicated by Indices to have the correct offset. -static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &TD, Value *BasePtr, Type *Ty, Type *TargetTy, - SmallVectorImpl<Value *> &Indices, - const Twine &Prefix) { + SmallVectorImpl<Value *> &Indices) { if (Ty == TargetTy) - return buildGEP(IRB, BasePtr, Indices, Prefix); + return buildGEP(IRB, BasePtr, Indices); // See if we can descend into a struct and locate a field with the correct // type. @@ -1717,20 +1768,19 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, if (ElementTy != TargetTy) Indices.erase(Indices.end() - NumLayers, Indices.end()); - return buildGEP(IRB, BasePtr, Indices, Prefix); + return buildGEP(IRB, BasePtr, Indices); } /// \brief Recursively compute indices for a natural GEP. /// /// This is the recursive step for getNaturalGEPWithOffset that walks down the /// element types adding appropriate indices for the GEP. -static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &TD, Value *Ptr, Type *Ty, APInt &Offset, Type *TargetTy, - SmallVectorImpl<Value *> &Indices, - const Twine &Prefix) { + SmallVectorImpl<Value *> &Indices) { if (Offset == 0) - return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix); + return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices); // We can't recurse through pointer types. if (Ty->isPointerTy()) @@ -1750,7 +1800,7 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(), - Offset, TargetTy, Indices, Prefix); + Offset, TargetTy, Indices); } if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { @@ -1763,7 +1813,7 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + Indices); } StructType *STy = dyn_cast<StructType>(Ty); @@ -1782,7 +1832,7 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, Indices.push_back(IRB.getInt32(Index)); return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + Indices); } /// \brief Get a natural GEP from a base pointer to a particular offset and @@ -1795,10 +1845,9 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, /// Indices, and setting Ty to the result subtype. /// /// If no natural GEP can be constructed, this function returns null. 
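The natural-GEP helpers above turn a byte offset into a chain of type-derived indices by repeatedly dividing by element sizes. A hedged, non-LLVM sketch of that arithmetic for one hypothetical nested array type (the leading pointer index that getNaturalGEPWithOffset computes from the pointee size is omitted here):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Decompose a byte offset into indices for a hypothetical type
    // [4 x [8 x i32]]: each i32 is 4 bytes, each inner array 32 bytes.
    std::vector<uint64_t> naturalIndices(uint64_t Offset) {
      const uint64_t InnerArraySize = 32, ElementSize = 4;
      std::vector<uint64_t> Indices;
      Indices.push_back(Offset / InnerArraySize); // index into the outer array
      Offset %= InnerArraySize;
      Indices.push_back(Offset / ElementSize);    // index into the inner array
      Offset %= ElementSize;
      assert(Offset == 0 && "offset does not land on an i32 boundary");
      return Indices;
    }

    int main() {
      std::vector<uint64_t> Idx = naturalIndices(100); // 100 = 3*32 + 1*4
      assert(Idx[0] == 3 && Idx[1] == 1);
      return 0;
    }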
-static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, +static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &TD, Value *Ptr, APInt Offset, Type *TargetTy, - SmallVectorImpl<Value *> &Indices, - const Twine &Prefix) { + SmallVectorImpl<Value *> &Indices) { PointerType *Ty = cast<PointerType>(Ptr->getType()); // Don't consider any GEPs through an i8* as natural unless the TargetTy is @@ -1817,7 +1866,7 @@ static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, - Indices, Prefix); + Indices); } /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the @@ -1835,9 +1884,8 @@ static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, /// properties. The algorithm tries to fold as many constant indices into /// a single GEP as possible, thus making each GEP more independent of the /// surrounding code. -static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, - Value *Ptr, APInt Offset, Type *PointerTy, - const Twine &Prefix) { +static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &TD, + Value *Ptr, APInt Offset, Type *PointerTy) { // Even though we don't look through PHI nodes, we could be called on an // instruction in an unreachable block, which may be on a cycle. SmallPtrSet<Value *, 4> Visited; @@ -1871,7 +1919,7 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, // See if we can perform a natural GEP here. Indices.clear(); if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy, - Indices, Prefix)) { + Indices)) { if (P->getType() == PointerTy) { // Zap any offset pointer that we ended up computing in previous rounds. if (OffsetPtr && OffsetPtr->use_empty()) @@ -1906,19 +1954,19 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, if (!OffsetPtr) { if (!Int8Ptr) { Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(), - Prefix + ".raw_cast"); + "raw_cast"); Int8PtrOffset = Offset; } OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr : IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset), - Prefix + ".raw_idx"); + "raw_idx"); } Ptr = OffsetPtr; // On the off chance we were targeting i8*, guard the bitcast here. if (Ptr->getType() != PointerTy) - Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast"); + Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast"); return Ptr; } @@ -1958,7 +2006,7 @@ static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { /// This will try various different casting techniques, such as bitcasts, /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test /// two types for viability with this routine. 
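When no natural GEP reaches the requested offset, getAdjustedPtr above falls back to raw byte arithmetic through an i8* followed by a cast to the expected pointer type. In plain C++ terms the fallback amounts to roughly this (hypothetical helper, not LLVM API):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Byte-offset fallback: reinterpret as a byte pointer, step by Offset
    // bytes, then cast to the pointer type the caller expects.
    template <typename T>
    T *adjustedPtr(void *Base, std::ptrdiff_t Offset) {
      char *Raw = reinterpret_cast<char *>(Base); // the "raw_cast"
      char *Stepped = Raw + Offset;               // the "raw_idx" GEP
      return reinterpret_cast<T *>(Stepped);      // the final "cast"
    }

    int main() {
      uint32_t Buffer[8] = {};
      uint32_t *P = adjustedPtr<uint32_t>(Buffer, 12); // 12 bytes in == &Buffer[3]
      *P = 42;
      assert(Buffer[3] == 42);
      return 0;
    }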
-static Value *convertValue(const DataLayout &DL, IRBuilder<> &IRB, Value *V, +static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, Type *Ty) { assert(canConvertValue(DL, V->getType(), Ty) && "Value not convertable to type"); @@ -2156,7 +2204,7 @@ static bool isIntegerWideningViable(const DataLayout &TD, return WholeAllocaOp; } -static Value *extractInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *V, +static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, IntegerType *Ty, uint64_t Offset, const Twine &Name) { DEBUG(dbgs() << " start: " << *V << "\n"); @@ -2179,7 +2227,7 @@ static Value *extractInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *V, return V; } -static Value *insertInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *Old, +static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, Value *V, uint64_t Offset, const Twine &Name) { IntegerType *IntTy = cast<IntegerType>(Old->getType()); IntegerType *Ty = cast<IntegerType>(V->getType()); @@ -2210,7 +2258,7 @@ static Value *insertInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *Old, return V; } -static Value *extractVector(IRBuilder<> &IRB, Value *V, +static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, unsigned EndIndex, const Twine &Name) { VectorType *VecTy = cast<VectorType>(V->getType()); @@ -2238,7 +2286,7 @@ static Value *extractVector(IRBuilder<> &IRB, Value *V, return V; } -static Value *insertVector(IRBuilder<> &IRB, Value *Old, Value *V, +static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, unsigned BeginIndex, const Twine &Name) { VectorType *VecTy = cast<VectorType>(Old->getType()); assert(VecTy && "Can only insert a vector into a vector"); @@ -2274,17 +2322,15 @@ static Value *insertVector(IRBuilder<> &IRB, Value *Old, Value *V, V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), ConstantVector::get(Mask), Name + ".expand"); - DEBUG(dbgs() << " shuffle1: " << *V << "\n"); + DEBUG(dbgs() << " shuffle: " << *V << "\n"); Mask.clear(); for (unsigned i = 0; i != VecTy->getNumElements(); ++i) - if (i >= BeginIndex && i < EndIndex) - Mask.push_back(IRB.getInt32(i)); - else - Mask.push_back(IRB.getInt32(i + VecTy->getNumElements())); - V = IRB.CreateShuffleVector(V, Old, ConstantVector::get(Mask), - Name + "insert"); - DEBUG(dbgs() << " shuffle2: " << *V << "\n"); + Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex)); + + V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend"); + + DEBUG(dbgs() << " blend: " << *V << "\n"); return V; } @@ -2332,8 +2378,9 @@ class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter, Use *OldUse; Instruction *OldPtr; - // The name prefix to use when rewriting instructions for this alloca. - std::string NamePrefix; + // Utility IR builder, whose name prefix is setup for each visited use, and + // the insertion point is set to point to the user. + IRBuilderTy IRB; public: AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P, @@ -2346,7 +2393,8 @@ public: NewAllocaEndOffset(NewEndOffset), NewAllocaTy(NewAI.getAllocatedType()), VecTy(), ElementTy(), ElementSize(), IntTy(), - BeginOffset(), EndOffset(), IsSplit(), OldUse(), OldPtr() { + BeginOffset(), EndOffset(), IsSplit(), OldUse(), OldPtr(), + IRB(NewAI.getContext(), ConstantFolder()) { } /// \brief Visit the users of the alloca partition and rewrite them. 
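extractInteger and insertInteger above are, at bottom, shift-and-mask operations on an alloca-wide integer. A little-endian-flavoured sketch with fixed widths (the real code also handles big-endian layouts and arbitrary APInt widths, and trades in IR values rather than host integers):

    #include <cassert>
    #include <cstdint>

    // Extract a 16-bit field that lives ByteOffset bytes into a 64-bit value.
    uint16_t extractField(uint64_t V, unsigned ByteOffset) {
      return static_cast<uint16_t>(V >> (8 * ByteOffset));
    }

    // Insert a 16-bit field at ByteOffset, preserving the surrounding bytes.
    uint64_t insertField(uint64_t Old, uint16_t Field, unsigned ByteOffset) {
      uint64_t Mask = uint64_t(0xFFFF) << (8 * ByteOffset);
      return (Old & ~Mask) | (uint64_t(Field) << (8 * ByteOffset));
    }

    int main() {
      uint64_t V = 0x1122334455667788ULL;
      assert(extractField(V, 2) == 0x5566);
      assert(insertField(V, 0xABCD, 2) == 0x11223344ABCD7788ULL);
      return 0;
    }

The insertVector change in the same hunk swaps the second shufflevector for a select on a constant i1 mask, which expresses the same per-element blend more directly.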
@@ -2375,7 +2423,13 @@ public: IsSplit = I->isSplit(); OldUse = I->getUse(); OldPtr = cast<Instruction>(OldUse->get()); - NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str(); + + Instruction *OldUserI = cast<Instruction>(OldUse->getUser()); + IRB.SetInsertPoint(OldUserI); + IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); + IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + + "."); + CanSROA &= visit(cast<Instruction>(OldUse->getUser())); } if (VecTy) { @@ -2398,14 +2452,10 @@ private: llvm_unreachable("No rewrite rule for this instruction!"); } - Twine getName(const Twine &Suffix) { - return NamePrefix + Suffix; - } - - Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) { + Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, Type *PointerTy) { assert(BeginOffset >= NewAllocaBeginOffset); APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset); - return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName("")); + return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy); } /// \brief Compute suitable alignment to access an offset into the new alloca. @@ -2455,27 +2505,27 @@ private: Pass.DeadInsts.insert(I); } - Value *rewriteVectorizedLoadInst(IRBuilder<> &IRB) { + Value *rewriteVectorizedLoadInst() { unsigned BeginIndex = getIndex(BeginOffset); unsigned EndIndex = getIndex(EndOffset); assert(EndIndex > BeginIndex && "Empty vector!"); Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - return extractVector(IRB, V, BeginIndex, EndIndex, getName(".vec")); + "load"); + return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); } - Value *rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) { + Value *rewriteIntegerLoad(LoadInst &LI) { assert(IntTy && "We cannot insert an integer to the alloca"); assert(!LI.isVolatile()); Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); + "load"); V = convertValue(TD, IRB, V, IntTy); assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = BeginOffset - NewAllocaBeginOffset; if (Offset > 0 || EndOffset < NewAllocaEndOffset) V = extractInteger(TD, IRB, V, cast<IntegerType>(LI.getType()), Offset, - getName(".extract")); + "extract"); return V; } @@ -2486,24 +2536,23 @@ private: uint64_t Size = EndOffset - BeginOffset; - IRBuilder<> IRB(&LI); Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), Size * 8) : LI.getType(); bool IsPtrAdjusted = false; Value *V; if (VecTy) { - V = rewriteVectorizedLoadInst(IRB); + V = rewriteVectorizedLoadInst(); } else if (IntTy && LI.getType()->isIntegerTy()) { - V = rewriteIntegerLoad(IRB, LI); + V = rewriteIntegerLoad(LI); } else if (BeginOffset == NewAllocaBeginOffset && canConvertValue(TD, NewAllocaTy, LI.getType())) { V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - LI.isVolatile(), getName(".load")); + LI.isVolatile(), "load"); } else { Type *LTy = TargetTy->getPointerTo(); V = IRB.CreateAlignedLoad(getAdjustedAllocaPtr(IRB, LTy), getPartitionTypeAlign(TargetTy), - LI.isVolatile(), getName(".load")); + LI.isVolatile(), "load"); IsPtrAdjusted = true; } V = convertValue(TD, IRB, V, TargetTy); @@ -2526,7 +2575,7 @@ private: Value *Placeholder = new LoadInst(UndefValue::get(LI.getType()->getPointerTo())); V = insertInteger(TD, IRB, Placeholder, V, BeginOffset, - getName(".insert")); + "insert"); LI.replaceAllUsesWith(V); Placeholder->replaceAllUsesWith(&LI); delete Placeholder; @@ -2540,7 +2589,7 @@ private: return !LI.isVolatile() && !IsPtrAdjusted; } - bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, Value *V, + bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) { unsigned BeginIndex = getIndex(BeginOffset); unsigned EndIndex = getIndex(EndOffset); @@ -2555,8 +2604,8 @@ private: // Mix in the existing elements. Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - V = insertVector(IRB, Old, V, BeginIndex, getName(".vec")); + "load"); + V = insertVector(IRB, Old, V, BeginIndex, "vec"); StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); Pass.DeadInsts.insert(&SI); @@ -2566,17 +2615,17 @@ private: return true; } - bool rewriteIntegerStore(IRBuilder<> &IRB, Value *V, StoreInst &SI) { + bool rewriteIntegerStore(Value *V, StoreInst &SI) { assert(IntTy && "We cannot extract an integer from the alloca"); assert(!SI.isVolatile()); if (TD.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); + "oldload"); Old = convertValue(TD, IRB, Old, IntTy); assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = BeginOffset - NewAllocaBeginOffset; V = insertInteger(TD, IRB, Old, SI.getValueOperand(), Offset, - getName(".insert")); + "insert"); } V = convertValue(TD, IRB, V, NewAllocaTy); StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); @@ -2590,7 +2639,6 @@ private: DEBUG(dbgs() << " original: " << SI << "\n"); Value *OldOp = SI.getOperand(1); assert(OldOp == OldPtr); - IRBuilder<> IRB(&SI); Value *V = SI.getValueOperand(); @@ -2611,16 +2659,17 @@ private: "Non-byte-multiple bit width"); IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8); V = extractInteger(TD, IRB, V, NarrowTy, BeginOffset, - getName(".extract")); + "extract"); } if (VecTy) - return rewriteVectorizedStoreInst(IRB, V, SI, OldOp); + return rewriteVectorizedStoreInst(V, SI, OldOp); if (IntTy && V->getType()->isIntegerTy()) - return rewriteIntegerStore(IRB, V, SI); + return rewriteIntegerStore(V, SI); StoreInst *NewSI; if (BeginOffset == NewAllocaBeginOffset && + EndOffset == NewAllocaEndOffset && canConvertValue(TD, V->getType(), NewAllocaTy)) { V = convertValue(TD, IRB, V, NewAllocaTy); NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), @@ -2648,7 +2697,7 @@ private: /// /// \param V The i8 value to 
splat. /// \param Size The number of bytes in the output (assuming i8 is one byte) - Value *getIntegerSplat(IRBuilder<> &IRB, Value *V, unsigned Size) { + Value *getIntegerSplat(Value *V, unsigned Size) { assert(Size > 0 && "Expected a positive number of bytes."); IntegerType *VTy = cast<IntegerType>(V->getType()); assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); @@ -2656,26 +2705,25 @@ private: return V; Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8); - V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, getName(".zext")), + V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"), ConstantExpr::getUDiv( Constant::getAllOnesValue(SplatIntTy), ConstantExpr::getZExt( Constant::getAllOnesValue(V->getType()), SplatIntTy)), - getName(".isplat")); + "isplat"); return V; } /// \brief Compute a vector splat for a given element value. - Value *getVectorSplat(IRBuilder<> &IRB, Value *V, unsigned NumElements) { - V = IRB.CreateVectorSplat(NumElements, V, NamePrefix); + Value *getVectorSplat(Value *V, unsigned NumElements) { + V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); DEBUG(dbgs() << " splat: " << *V << "\n"); return V; } bool visitMemSetInst(MemSetInst &II) { DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); assert(II.getRawDest() == OldPtr); // If the memset has a variable size, it cannot be split, just adjust the @@ -2732,31 +2780,31 @@ private: unsigned NumElements = EndIndex - BeginIndex; assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); - Value *Splat = getIntegerSplat(IRB, II.getValue(), - TD.getTypeSizeInBits(ElementTy)/8); + Value *Splat = + getIntegerSplat(II.getValue(), TD.getTypeSizeInBits(ElementTy) / 8); Splat = convertValue(TD, IRB, Splat, ElementTy); if (NumElements > 1) - Splat = getVectorSplat(IRB, Splat, NumElements); + Splat = getVectorSplat(Splat, NumElements); Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); - V = insertVector(IRB, Old, Splat, BeginIndex, getName(".vec")); + "oldload"); + V = insertVector(IRB, Old, Splat, BeginIndex, "vec"); } else if (IntTy) { // If this is a memset on an alloca where we can widen stores, insert the // set integer. assert(!II.isVolatile()); uint64_t Size = EndOffset - BeginOffset; - V = getIntegerSplat(IRB, II.getValue(), Size); + V = getIntegerSplat(II.getValue(), Size); if (IntTy && (BeginOffset != NewAllocaBeginOffset || EndOffset != NewAllocaBeginOffset)) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); + "oldload"); Old = convertValue(TD, IRB, Old, IntTy); assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - V = insertInteger(TD, IRB, Old, V, Offset, getName(".insert")); + V = insertInteger(TD, IRB, Old, V, Offset, "insert"); } else { assert(V->getType() == IntTy && "Wrong type for an alloca wide integer!"); @@ -2767,10 +2815,9 @@ private: assert(BeginOffset == NewAllocaBeginOffset); assert(EndOffset == NewAllocaEndOffset); - V = getIntegerSplat(IRB, II.getValue(), - TD.getTypeSizeInBits(ScalarTy)/8); + V = getIntegerSplat(II.getValue(), TD.getTypeSizeInBits(ScalarTy) / 8); if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy)) - V = getVectorSplat(IRB, V, AllocaVecTy->getNumElements()); + V = getVectorSplat(V, AllocaVecTy->getNumElements()); V = convertValue(TD, IRB, V, AllocaTy); } @@ -2787,7 +2834,6 @@ private: // them into two categories: split intrinsics and unsplit intrinsics. 
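getIntegerSplat above builds a memset fill value by multiplying the i8 by the constant 0x0101...01, obtained as allOnes(Size*8) udiv allOnes(8). The same trick on a fixed 64-bit width:

    #include <cassert>
    #include <cstdint>

    // Replicate one byte across all eight bytes of a 64-bit integer.
    uint64_t splatByte(uint8_t B) {
      const uint64_t Ones = ~uint64_t(0) / 0xFF; // 0x0101010101010101
      return uint64_t(B) * Ones;
    }

    int main() {
      assert(splatByte(0xAB) == 0xABABABABABABABABULL);
      assert(splatByte(0x00) == 0);
      return 0;
    }

Dividing the all-ones value by 0xFF yields a 0x01 in every byte position, so the multiply places a copy of the byte at each position without any shifts or loops.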
DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr); bool IsDest = II.getRawDest() == OldPtr; @@ -2871,8 +2917,7 @@ private: // Compute the other pointer, folding as much as possible to produce // a single, simple GEP in most cases. - OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy, - getName("." + OtherPtr->getName())); + OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy); Value *OurPtr = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType() @@ -2915,8 +2960,7 @@ private: OtherPtrTy = SubIntTy->getPointerTo(); } - Value *SrcPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy, - getName("." + OtherPtr->getName())); + Value *SrcPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy); Value *DstPtr = &NewAI; if (!IsDest) std::swap(SrcPtr, DstPtr); @@ -2924,31 +2968,31 @@ private: Value *Src; if (VecTy && !IsWholeAlloca && !IsDest) { Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); - Src = extractVector(IRB, Src, BeginIndex, EndIndex, getName(".vec")); + "load"); + Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); } else if (IntTy && !IsWholeAlloca && !IsDest) { Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".load")); + "load"); Src = convertValue(TD, IRB, Src, IntTy); assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, getName(".extract")); + Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, "extract"); } else { Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(), - getName(".copyload")); + "copyload"); } if (VecTy && !IsWholeAlloca && IsDest) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); - Src = insertVector(IRB, Old, Src, BeginIndex, getName(".vec")); + "oldload"); + Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); } else if (IntTy && !IsWholeAlloca && IsDest) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), - getName(".oldload")); + "oldload"); Old = convertValue(TD, IRB, Old, IntTy); assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = BeginOffset - NewAllocaBeginOffset; - Src = insertInteger(TD, IRB, Old, Src, Offset, getName(".insert")); + Src = insertInteger(TD, IRB, Old, Src, Offset, "insert"); Src = convertValue(TD, IRB, Src, NewAllocaTy); } @@ -2963,7 +3007,6 @@ private: assert(II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end); DEBUG(dbgs() << " original: " << II << "\n"); - IRBuilder<> IRB(&II); assert(II.getArgOperand(1) == OldPtr); // Record this instruction for deletion. @@ -2991,7 +3034,9 @@ private: // as local as possible to the PHI. To do that, we re-use the location of // the old pointer, which necessarily must be in the right position to // dominate the PHI. - IRBuilder<> PtrBuilder(cast<Instruction>(OldPtr)); + IRBuilderTy PtrBuilder(cast<Instruction>(OldPtr)); + PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + + "."); Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType()); // Replace the operands which were using the old pointer. @@ -3004,17 +3049,16 @@ private: bool visitSelectInst(SelectInst &SI) { DEBUG(dbgs() << " original: " << SI << "\n"); - IRBuilder<> IRB(&SI); - - // Find the operand we need to rewrite here. 
- bool IsTrueVal = SI.getTrueValue() == OldPtr; - if (IsTrueVal) - assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!"); - else - assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!"); + assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && + "Pointer isn't an operand!"); Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType()); - SI.setOperand(IsTrueVal ? 1 : 2, NewPtr); + // Replace the operands which were using the old pointer. + if (SI.getOperand(1) == OldPtr) + SI.setOperand(1, NewPtr); + if (SI.getOperand(2) == OldPtr) + SI.setOperand(2, NewPtr); + DEBUG(dbgs() << " to: " << SI << "\n"); deleteIfTriviallyDead(OldPtr); return false; @@ -3079,7 +3123,7 @@ private: class OpSplitter { protected: /// The builder used to form new instructions. - IRBuilder<> IRB; + IRBuilderTy IRB; /// The indices which to be used with insert- or extractvalue to select the /// appropriate value within the aggregate. SmallVector<unsigned, 4> Indices; @@ -3291,12 +3335,13 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty, Type *ElementTy = SeqTy->getElementType(); uint64_t ElementSize = TD.getTypeAllocSize(ElementTy); uint64_t NumSkippedElements = Offset / ElementSize; - if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) + if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) { if (NumSkippedElements >= ArrTy->getNumElements()) return 0; - if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) + } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) { if (NumSkippedElements >= VecTy->getNumElements()) return 0; + } Offset -= NumSkippedElements * ElementSize; // First check if we need to recurse. diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp index e590a37..bfde334 100644 --- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -1462,8 +1462,8 @@ bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) { } // performScalarRepl - This algorithm is a simple worklist driven algorithm, -// which runs on all of the alloca instructions in the function, removing them -// if they are only used by getelementptr instructions. +// which runs on all of the alloca instructions in the entry block, removing +// them if they are only used by getelementptr instructions. // bool SROA::performScalarRepl(Function &F) { std::vector<AllocaInst*> WorkList; @@ -1724,17 +1724,8 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI, continue; ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); - if (!IdxVal) { - // Non constant GEPs are only a problem on arrays, structs, and pointers - // Vectors can be dynamically indexed. - // FIXME: Add support for dynamic indexing on arrays. This should be - // ok on any subarrays of the alloca array, eg, a[0][i] is ok, but a[i][0] - // isn't. 
- if (!(*GEPIt)->isVectorTy()) - return MarkUnsafe(Info, GEPI); - NonConstant = true; - NonConstantIdxSize = TD->getTypeAllocSize(*GEPIt); - } + if (!IdxVal) + return MarkUnsafe(Info, GEPI); } // Compute the offset due to this GEP and check if the alloca has a diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp index 916b37d..3514e6c 100644 --- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp +++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp @@ -19,7 +19,6 @@ #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringMap.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Config/config.h" // FIXME: Shouldn't depend on host! @@ -35,7 +34,6 @@ #include "llvm/Transforms/Utils/BuildLibCalls.h" using namespace llvm; -STATISTIC(NumAnnotated, "Number of attributes added to library functions"); //===----------------------------------------------------------------------===// // Optimizer Base Class @@ -91,8 +89,6 @@ namespace { TargetLibraryInfo *TLI; StringMap<LibCallOptimization*> Optimizations; - - bool Modified; // This is only used by doInitialization. public: static char ID; // Pass identification SimplifyLibCalls() : FunctionPass(ID) { @@ -104,14 +100,6 @@ namespace { void InitOptimizations(); bool runOnFunction(Function &F); - void setDoesNotAccessMemory(Function &F); - void setOnlyReadsMemory(Function &F); - void setDoesNotThrow(Function &F); - void setDoesNotCapture(Function &F, unsigned n); - void setDoesNotAlias(Function &F, unsigned n); - bool doInitialization(Module &M); - - void inferPrototypeAttributes(Function &F); virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<TargetLibraryInfo>(); } @@ -208,697 +196,6 @@ bool SimplifyLibCalls::runOnFunction(Function &F) { return Changed; } -// Utility methods for doInitialization. 
- -void SimplifyLibCalls::setDoesNotAccessMemory(Function &F) { - if (!F.doesNotAccessMemory()) { - F.setDoesNotAccessMemory(); - ++NumAnnotated; - Modified = true; - } -} -void SimplifyLibCalls::setOnlyReadsMemory(Function &F) { - if (!F.onlyReadsMemory()) { - F.setOnlyReadsMemory(); - ++NumAnnotated; - Modified = true; - } -} -void SimplifyLibCalls::setDoesNotThrow(Function &F) { - if (!F.doesNotThrow()) { - F.setDoesNotThrow(); - ++NumAnnotated; - Modified = true; - } -} -void SimplifyLibCalls::setDoesNotCapture(Function &F, unsigned n) { - if (!F.doesNotCapture(n)) { - F.setDoesNotCapture(n); - ++NumAnnotated; - Modified = true; - } -} -void SimplifyLibCalls::setDoesNotAlias(Function &F, unsigned n) { - if (!F.doesNotAlias(n)) { - F.setDoesNotAlias(n); - ++NumAnnotated; - Modified = true; - } -} - - -void SimplifyLibCalls::inferPrototypeAttributes(Function &F) { - FunctionType *FTy = F.getFunctionType(); - - StringRef Name = F.getName(); - switch (Name[0]) { - case 's': - if (Name == "strlen") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setOnlyReadsMemory(F); - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "strchr" || - Name == "strrchr") { - if (FTy->getNumParams() != 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isIntegerTy()) - return; - setOnlyReadsMemory(F); - setDoesNotThrow(F); - } else if (Name == "strcpy" || - Name == "stpcpy" || - Name == "strcat" || - Name == "strtol" || - Name == "strtod" || - Name == "strtof" || - Name == "strtoul" || - Name == "strtoll" || - Name == "strtold" || - Name == "strncat" || - Name == "strncpy" || - Name == "stpncpy" || - Name == "strtoull") { - if (FTy->getNumParams() < 2 || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "strxfrm") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "strcmp" || - Name == "strspn" || - Name == "strncmp" || - Name == "strcspn" || - Name == "strcoll" || - Name == "strcasecmp" || - Name == "strncasecmp") { - if (FTy->getNumParams() < 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setOnlyReadsMemory(F); - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "strstr" || - Name == "strpbrk") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setOnlyReadsMemory(F); - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "strtok" || - Name == "strtok_r") { - if (FTy->getNumParams() < 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "scanf" || - Name == "setbuf" || - Name == "setvbuf") { - if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "strdup" || - Name == "strndup") { - if (FTy->getNumParams() < 1 || !FTy->getReturnType()->isPointerTy() || - !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 1); - } else if (Name == "stat" || - Name == "sscanf" || - Name == "sprintf" || - Name == "statvfs") { - if (FTy->getNumParams() < 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - 
return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "snprintf") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(2)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 3); - } else if (Name == "setitimer") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(1)->isPointerTy() || - !FTy->getParamType(2)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - setDoesNotCapture(F, 3); - } else if (Name == "system") { - if (FTy->getNumParams() != 1 || - !FTy->getParamType(0)->isPointerTy()) - return; - // May throw; "system" is a valid pthread cancellation point. - setDoesNotCapture(F, 1); - } - break; - case 'm': - if (Name == "malloc") { - if (FTy->getNumParams() != 1 || - !FTy->getReturnType()->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - } else if (Name == "memcmp") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setOnlyReadsMemory(F); - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "memchr" || - Name == "memrchr") { - if (FTy->getNumParams() != 3) - return; - setOnlyReadsMemory(F); - setDoesNotThrow(F); - } else if (Name == "modf" || - Name == "modff" || - Name == "modfl" || - Name == "memcpy" || - Name == "memccpy" || - Name == "memmove") { - if (FTy->getNumParams() < 2 || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "memalign") { - if (!FTy->getReturnType()->isPointerTy()) - return; - setDoesNotAlias(F, 0); - } else if (Name == "mkdir" || - Name == "mktime") { - if (FTy->getNumParams() == 0 || - !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'r': - if (Name == "realloc") { - if (FTy->getNumParams() != 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getReturnType()->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 1); - } else if (Name == "read") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(1)->isPointerTy()) - return; - // May throw; "read" is a valid pthread cancellation point. - setDoesNotCapture(F, 2); - } else if (Name == "rmdir" || - Name == "rewind" || - Name == "remove" || - Name == "realpath") { - if (FTy->getNumParams() < 1 || - !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "rename" || - Name == "readlink") { - if (FTy->getNumParams() < 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } - break; - case 'w': - if (Name == "write") { - if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy()) - return; - // May throw; "write" is a valid pthread cancellation point. 
- setDoesNotCapture(F, 2); - } - break; - case 'b': - if (Name == "bcopy") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "bcmp") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setOnlyReadsMemory(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "bzero") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'c': - if (Name == "calloc") { - if (FTy->getNumParams() != 2 || - !FTy->getReturnType()->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - } else if (Name == "chmod" || - Name == "chown" || - Name == "ctermid" || - Name == "clearerr" || - Name == "closedir") { - if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'a': - if (Name == "atoi" || - Name == "atol" || - Name == "atof" || - Name == "atoll") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setOnlyReadsMemory(F); - setDoesNotCapture(F, 1); - } else if (Name == "access") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'f': - if (Name == "fopen") { - if (FTy->getNumParams() != 2 || - !FTy->getReturnType()->isPointerTy() || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "fdopen") { - if (FTy->getNumParams() != 2 || - !FTy->getReturnType()->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 2); - } else if (Name == "feof" || - Name == "free" || - Name == "fseek" || - Name == "ftell" || - Name == "fgetc" || - Name == "fseeko" || - Name == "ftello" || - Name == "fileno" || - Name == "fflush" || - Name == "fclose" || - Name == "fsetpos" || - Name == "flockfile" || - Name == "funlockfile" || - Name == "ftrylockfile") { - if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "ferror") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setOnlyReadsMemory(F); - } else if (Name == "fputc" || - Name == "fstat" || - Name == "frexp" || - Name == "frexpf" || - Name == "frexpl" || - Name == "fstatvfs") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "fgets") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(2)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 3); - } else if (Name == "fread" || - Name == "fwrite") { - if (FTy->getNumParams() != 4 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(3)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 4); - } else if (Name == "fputs" || - Name == "fscanf" 
|| - Name == "fprintf" || - Name == "fgetpos") { - if (FTy->getNumParams() < 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } - break; - case 'g': - if (Name == "getc" || - Name == "getlogin_r" || - Name == "getc_unlocked") { - if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "getenv") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setOnlyReadsMemory(F); - setDoesNotCapture(F, 1); - } else if (Name == "gets" || - Name == "getchar") { - setDoesNotThrow(F); - } else if (Name == "getitimer") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "getpwnam") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'u': - if (Name == "ungetc") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "uname" || - Name == "unlink" || - Name == "unsetenv") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "utime" || - Name == "utimes") { - if (FTy->getNumParams() != 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } - break; - case 'p': - if (Name == "putc") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "puts" || - Name == "printf" || - Name == "perror") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "pread" || - Name == "pwrite") { - if (FTy->getNumParams() != 4 || !FTy->getParamType(1)->isPointerTy()) - return; - // May throw; these are valid pthread cancellation points. 
- setDoesNotCapture(F, 2); - } else if (Name == "putchar") { - setDoesNotThrow(F); - } else if (Name == "popen") { - if (FTy->getNumParams() != 2 || - !FTy->getReturnType()->isPointerTy() || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "pclose") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'v': - if (Name == "vscanf") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "vsscanf" || - Name == "vfscanf") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(1)->isPointerTy() || - !FTy->getParamType(2)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "valloc") { - if (!FTy->getReturnType()->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - } else if (Name == "vprintf") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "vfprintf" || - Name == "vsprintf") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "vsnprintf") { - if (FTy->getNumParams() != 4 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(2)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 3); - } - break; - case 'o': - if (Name == "open") { - if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy()) - return; - // May throw; "open" is a valid pthread cancellation point. - setDoesNotCapture(F, 1); - } else if (Name == "opendir") { - if (FTy->getNumParams() != 1 || - !FTy->getReturnType()->isPointerTy() || - !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 1); - } - break; - case 't': - if (Name == "tmpfile") { - if (!FTy->getReturnType()->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - } else if (Name == "times") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'h': - if (Name == "htonl" || - Name == "htons") { - setDoesNotThrow(F); - setDoesNotAccessMemory(F); - } - break; - case 'n': - if (Name == "ntohl" || - Name == "ntohs") { - setDoesNotThrow(F); - setDoesNotAccessMemory(F); - } - break; - case 'l': - if (Name == "lstat") { - if (FTy->getNumParams() != 2 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "lchown") { - if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } - break; - case 'q': - if (Name == "qsort") { - if (FTy->getNumParams() != 4 || !FTy->getParamType(3)->isPointerTy()) - return; - // May throw; places call through function pointer. 
- setDoesNotCapture(F, 4); - } - break; - case '_': - if (Name == "__strdup" || - Name == "__strndup") { - if (FTy->getNumParams() < 1 || - !FTy->getReturnType()->isPointerTy() || - !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 1); - } else if (Name == "__strtok_r") { - if (FTy->getNumParams() != 3 || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "_IO_getc") { - if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "_IO_putc") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } - break; - case 1: - if (Name == "\1__isoc99_scanf") { - if (FTy->getNumParams() < 1 || - !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "\1stat64" || - Name == "\1lstat64" || - Name == "\1statvfs64" || - Name == "\1__isoc99_sscanf") { - if (FTy->getNumParams() < 1 || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "\1fopen64") { - if (FTy->getNumParams() != 2 || - !FTy->getReturnType()->isPointerTy() || - !FTy->getParamType(0)->isPointerTy() || - !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - setDoesNotCapture(F, 1); - setDoesNotCapture(F, 2); - } else if (Name == "\1fseeko64" || - Name == "\1ftello64") { - if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 1); - } else if (Name == "\1tmpfile64") { - if (!FTy->getReturnType()->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotAlias(F, 0); - } else if (Name == "\1fstat64" || - Name == "\1fstatvfs64") { - if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) - return; - setDoesNotThrow(F); - setDoesNotCapture(F, 2); - } else if (Name == "\1open64") { - if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy()) - return; - // May throw; "open" is a valid pthread cancellation point. - setDoesNotCapture(F, 1); - } - break; - } -} - -/// doInitialization - Add attributes to well-known functions. -/// -bool SimplifyLibCalls::doInitialization(Module &M) { - Modified = false; - for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { - Function &F = *I; - if (F.isDeclaration() && F.hasName()) - inferPrototypeAttributes(F); - } - return Modified; -} - // TODO: // Additional cases that we need to add to this file: // diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp index 0d2598a..dabb67b 100644 --- a/lib/Transforms/Utils/InlineFunction.cpp +++ b/lib/Transforms/Utils/InlineFunction.cpp @@ -82,7 +82,8 @@ namespace { /// a simple branch. When there is more than one predecessor, we need to /// split the landing pad block after the landingpad instruction and jump /// to there. - void forwardResume(ResumeInst *RI); + void forwardResume(ResumeInst *RI, + SmallPtrSet<LandingPadInst*, 16> &InlinedLPads); /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind /// destination block for the given basic block, using the values for the @@ -140,8 +141,10 @@ BasicBlock *InvokeInliningInfo::getInnerResumeDest() { /// block. 
When the landing pad block has only one predecessor, this is a simple /// branch. When there is more than one predecessor, we need to split the /// landing pad block after the landingpad instruction and jump to there. -void InvokeInliningInfo::forwardResume(ResumeInst *RI) { +void InvokeInliningInfo::forwardResume(ResumeInst *RI, + SmallPtrSet<LandingPadInst*, 16> &InlinedLPads) { BasicBlock *Dest = getInnerResumeDest(); + LandingPadInst *OuterLPad = getLandingPadInst(); BasicBlock *Src = RI->getParent(); BranchInst::Create(Dest, Src); @@ -152,6 +155,16 @@ void InvokeInliningInfo::forwardResume(ResumeInst *RI) { InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src); RI->eraseFromParent(); + + // Append the clauses from the outer landing pad instruction into the inlined + // landing pad instructions. + for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(), + E = InlinedLPads.end(); I != E; ++I) { + LandingPadInst *InlinedLPad = *I; + for (unsigned OuterIdx = 0, OuterNum = OuterLPad->getNumClauses(); + OuterIdx != OuterNum; ++OuterIdx) + InlinedLPad->addClause(OuterLPad->getClause(OuterIdx)); + } } /// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into @@ -229,19 +242,15 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock, // The inlined code is currently at the end of the function, scan from the // start of the inlined code to its end, checking for stuff we need to - // rewrite. If the code doesn't have calls or unwinds, we know there is - // nothing to rewrite. - if (!InlinedCodeInfo.ContainsCalls) { - // Now that everything is happy, we have one final detail. The PHI nodes in - // the exception destination block still have entries due to the original - // invoke instruction. Eliminate these entries (which might even delete the - // PHI node) now. - InvokeDest->removePredecessor(II->getParent()); - return; - } - + // rewrite. InvokeInliningInfo Invoke(II); - + + // Get all of the inlined landing pad instructions. + SmallPtrSet<LandingPadInst*, 16> InlinedLPads; + for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I) + if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) + InlinedLPads.insert(II->getLandingPadInst()); + for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){ if (InlinedCodeInfo.ContainsCalls) if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) { @@ -250,13 +259,14 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock, continue; } + // Forward any resumes that are remaining here. if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) - Invoke.forwardResume(RI); + Invoke.forwardResume(RI, InlinedLPads); } // Now that everything is happy, we have one final detail. The PHI nodes in // the exception destination block still have entries due to the original - // invoke instruction. Eliminate these entries (which might even delete the + // invoke instruction. Eliminate these entries (which might even delete the // PHI node) now. InvokeDest->removePredecessor(II->getParent()); } @@ -748,8 +758,10 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI, // If the call site was an invoke instruction, add a branch to the normal // destination. 
- if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) - BranchInst::Create(II->getNormalDest(), TheCall); + if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { + BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall); + NewBr->setDebugLoc(Returns[0]->getDebugLoc()); + } // If the return instruction returned a value, replace uses of the call with // uses of the returned value. @@ -777,15 +789,16 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI, // "starter" and "ender" blocks. How we accomplish this depends on whether // this is an invoke instruction or a call instruction. BasicBlock *AfterCallBB; + BranchInst *CreatedBranchToNormalDest = NULL; if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { // Add an unconditional branch to make this look like the CallInst case... - BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall); + CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall); // Split the basic block. This guarantees that no PHI nodes will have to be // updated due to new incoming edges, and make the invoke case more // symmetric to the call case. - AfterCallBB = OrigBB->splitBasicBlock(NewBr, + AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest, CalledFunc->getName()+".exit"); } else { // It's a call @@ -840,11 +853,20 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI, // Add a branch to the merge points and remove return instructions. + DebugLoc Loc; for (unsigned i = 0, e = Returns.size(); i != e; ++i) { ReturnInst *RI = Returns[i]; - BranchInst::Create(AfterCallBB, RI); + BranchInst* BI = BranchInst::Create(AfterCallBB, RI); + Loc = RI->getDebugLoc(); + BI->setDebugLoc(Loc); RI->eraseFromParent(); } + // We need to set the debug location to *somewhere* inside the + // inlined function. The line number may be nonsensical, but the + // instruction will at least be associated with the right + // function. + if (CreatedBranchToNormalDest) + CreatedBranchToNormalDest->setDebugLoc(Loc); } else if (!Returns.empty()) { // Otherwise, if there is exactly one return value, just replace anything // using the return value of the call with the computed value. @@ -864,6 +886,9 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI, AfterCallBB->getInstList().splice(AfterCallBB->begin(), ReturnBB->getInstList()); + if (CreatedBranchToNormalDest) + CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc()); + // Delete the return instruction now and empty ReturnBB now. Returns[0]->eraseFromParent(); ReturnBB->eraseFromParent(); diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp index a54ee08..12e5b3e 100644 --- a/lib/Transforms/Utils/Local.cpp +++ b/lib/Transforms/Utils/Local.cpp @@ -832,7 +832,24 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, /// Dbg Intrinsic utilities /// -/// Inserts a llvm.dbg.value instrinsic before the stores to an alloca'd value +/// See if there is a dbg.value intrinsic for DIVar before I. +static bool LdStHasDebugValue(DIVariable &DIVar, Instruction *I) { + // Since we can't guarantee that the original dbg.declare instrinsic + // is removed by LowerDbgDeclare(), we need to make sure that we are + // not inserting the same dbg.value intrinsic over and over. 
+ llvm::BasicBlock::InstListType::iterator PrevI(I); + if (PrevI != I->getParent()->getInstList().begin()) { + --PrevI; + if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI)) + if (DVI->getValue() == I->getOperand(0) && + DVI->getOffset() == 0 && + DVI->getVariable() == DIVar) + return true; + } + return false; +} + +/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value /// that has an associated llvm.dbg.decl intrinsic. bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, StoreInst *SI, DIBuilder &Builder) { @@ -840,6 +857,9 @@ bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, if (!DIVar.Verify()) return false; + if (LdStHasDebugValue(DIVar, SI)) + return true; + Instruction *DbgVal = NULL; // If an argument is zero extended then use argument directly. The ZExt // may be zapped by an optimization pass in future. @@ -863,7 +883,7 @@ bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, return true; } -/// Inserts a llvm.dbg.value instrinsic before the stores to an alloca'd value +/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value /// that has an associated llvm.dbg.decl intrinsic. bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, LoadInst *LI, DIBuilder &Builder) { @@ -871,6 +891,9 @@ bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, if (!DIVar.Verify()) return false; + if (LdStHasDebugValue(DIVar, LI)) + return true; + Instruction *DbgVal = Builder.insertDbgValueIntrinsic(LI->getOperand(0), 0, DIVar, LI); @@ -902,6 +925,8 @@ bool llvm::LowerDbgDeclare(Function &F) { E = Dbgs.end(); I != E; ++I) { DbgDeclareInst *DDI = *I; if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress())) { + // We only remove the dbg.declare intrinsic if all uses are + // converted to dbg.value intrinsics. bool RemoveDDI = true; for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E; ++UI) @@ -985,22 +1010,17 @@ bool llvm::removeUnreachableBlocks(Function &F) { if (Reachable.count(I)) continue; - // Remove the block as predecessor of all its reachable successors. - // Unreachable successors don't matter as they'll soon be removed, too. for (succ_iterator SI = succ_begin(I), SE = succ_end(I); SI != SE; ++SI) if (Reachable.count(*SI)) (*SI)->removePredecessor(I); + I->dropAllReferences(); + } - // Zap all instructions in this basic block. 
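This rewrite of removeUnreachableBlocks works in two phases: unreachable blocks first drop all the references they hold, and only afterwards are the blocks erased, so no block is deleted while another dead block still points into it. A small standalone sketch of the same two-phase idea on a toy graph (invented names, not LLVM API):

#include <memory>
#include <set>
#include <vector>

struct Node {
  std::vector<Node *> Succs; // outgoing edges (references to other nodes)
};

// Remove every node that is not in Reachable. Phase 1 drops the references
// held by doomed nodes (the dropAllReferences analog), phase 2 erases them,
// so no node is destroyed while another dead node still points at it.
void removeUnreachable(std::vector<std::unique_ptr<Node>> &Nodes,
                       const std::set<Node *> &Reachable) {
  // Phase 1: break all edges leaving unreachable nodes.
  for (std::unique_ptr<Node> &N : Nodes)
    if (!Reachable.count(N.get()))
      N->Succs.clear();

  // Phase 2: erase the unreachable nodes themselves.
  for (std::vector<std::unique_ptr<Node>>::iterator It = Nodes.begin();
       It != Nodes.end();) {
    if (!Reachable.count(It->get()))
      It = Nodes.erase(It);
    else
      ++It;
  }
}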
- while (!I->empty()) { - Instruction &Inst = I->back(); - if (!Inst.use_empty()) - Inst.replaceAllUsesWith(UndefValue::get(Inst.getType())); - I->getInstList().pop_back(); - } + for (Function::iterator I = llvm::next(F.begin()), E=F.end(); I != E;) + if (!Reachable.count(I)) + I = F.getBasicBlockList().erase(I); + else + ++I; - --I; - llvm::next(I)->eraseFromParent(); - } return true; } diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp index 681bf9c..052ad85 100644 --- a/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/lib/Transforms/Utils/SimplifyCFG.cpp @@ -59,6 +59,10 @@ static cl::opt<bool> SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true), cl::desc("Sink common instructions down to the end block")); +static cl::opt<bool> +HoistCondStores("simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true), + cl::desc("Hoist conditional stores if an unconditional store preceeds")); + STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps"); STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables"); STATISTIC(NumSinkCommons, "Number of common instructions sunk down to the end block"); @@ -1332,6 +1336,66 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) { return Changed; } +/// \brief Determine if we can hoist sink a sole store instruction out of a +/// conditional block. +/// +/// We are looking for code like the following: +/// BrBB: +/// store i32 %add, i32* %arrayidx2 +/// ... // No other stores or function calls (we could be calling a memory +/// ... // function). +/// %cmp = icmp ult %x, %y +/// br i1 %cmp, label %EndBB, label %ThenBB +/// ThenBB: +/// store i32 %add5, i32* %arrayidx2 +/// br label EndBB +/// EndBB: +/// ... +/// We are going to transform this into: +/// BrBB: +/// store i32 %add, i32* %arrayidx2 +/// ... // +/// %cmp = icmp ult %x, %y +/// %add.add5 = select i1 %cmp, i32 %add, %add5 +/// store i32 %add.add5, i32* %arrayidx2 +/// ... +/// +/// \return The pointer to the value of the previous store if the store can be +/// hoisted into the predecessor block. 0 otherwise. +Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB, + BasicBlock *StoreBB, BasicBlock *EndBB) { + StoreInst *StoreToHoist = dyn_cast<StoreInst>(I); + if (!StoreToHoist) + return 0; + + // Volatile or atomic. + if (!StoreToHoist->isSimple()) + return 0; + + Value *StorePtr = StoreToHoist->getPointerOperand(); + + // Look for a store to the same pointer in BrBB. + unsigned MaxNumInstToLookAt = 10; + for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), + RE = BrBB->rend(); RI != RE && (--MaxNumInstToLookAt); ++RI) { + Instruction *CurI = &*RI; + + // Could be calling an instruction that effects memory like free(). + if (CurI->mayHaveSideEffects() && !isa<StoreInst>(CurI)) + return 0; + + StoreInst *SI = dyn_cast<StoreInst>(CurI); + // Found the previous store make sure it stores to the same location. + if (SI && SI->getPointerOperand() == StorePtr) + // Found the previous store, return its value operand. + return SI->getValueOperand(); + else if (SI) + return 0; // Unknown store. + } + + return 0; +} + /// \brief Speculate a conditional basic block flattening the CFG. /// /// Note that this is a very risky transform currently. 
Speculating @@ -1395,6 +1459,8 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) { SmallDenseMap<Instruction *, unsigned, 4> SinkCandidateUseCounts; unsigned SpeculationCost = 0; + Value *SpeculatedStoreValue = 0; + StoreInst *SpeculatedStore = 0; for (BasicBlock::iterator BBI = ThenBB->begin(), BBE = llvm::prior(ThenBB->end()); BBI != BBE; ++BBI) { @@ -1410,13 +1476,21 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) { return false; // Don't hoist the instruction if it's unsafe or expensive. - if (!isSafeToSpeculativelyExecute(I)) + if (!isSafeToSpeculativelyExecute(I) && + !(HoistCondStores && + (SpeculatedStoreValue = isSafeToSpeculateStore(I, BB, ThenBB, + EndBB)))) return false; - if (ComputeSpeculationCost(I) > PHINodeFoldingThreshold) + if (!SpeculatedStoreValue && + ComputeSpeculationCost(I) > PHINodeFoldingThreshold) return false; + // Store the store speculation candidate. + if (SpeculatedStoreValue) + SpeculatedStore = cast<StoreInst>(I); + // Do not hoist the instruction if any of its operands are defined but not - // used in this BB. The transformation will prevent the operand from + // used in BB. The transformation will prevent the operand from // being sunk into the use block. for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) { @@ -1473,12 +1547,24 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) { // If there are no PHIs to process, bail early. This helps ensure idempotence // as well. - if (!HaveRewritablePHIs) + if (!HaveRewritablePHIs && !(HoistCondStores && SpeculatedStoreValue)) return false; // If we get here, we can hoist the instruction and if-convert. DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";); + // Insert a select of the value of the speculated store. + if (SpeculatedStoreValue) { + IRBuilder<true, NoFolder> Builder(BI); + Value *TrueV = SpeculatedStore->getValueOperand(); + Value *FalseV = SpeculatedStoreValue; + if (Invert) + std::swap(TrueV, FalseV); + Value *S = Builder.CreateSelect(BrCond, TrueV, FalseV, TrueV->getName() + + "." + FalseV->getName()); + SpeculatedStore->setOperand(0, S); + } + // Hoist the instructions. BB->getInstList().splice(BI, ThenBB->getInstList(), ThenBB->begin(), llvm::prior(ThenBB->end())); @@ -3073,7 +3159,12 @@ static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) { Value *Sub = SI->getCondition(); if (!Offset->isNullValue()) Sub = Builder.CreateAdd(Sub, Offset, Sub->getName()+".off"); - Value *Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch"); + Value *Cmp; + // If NumCases overflowed, then all possible values jump to the successor. + if (NumCases->isNullValue() && SI->getNumCases() != 0) + Cmp = ConstantInt::getTrue(SI->getContext()); + else + Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch"); BranchInst *NewBI = Builder.CreateCondBr( Cmp, SI->case_begin().getCaseSuccessor(), SI->getDefaultDest()); diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp index c231704..6bea2dd 100644 --- a/lib/Transforms/Utils/SimplifyLibCalls.cpp +++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -1518,6 +1518,12 @@ struct FPrintFOpt : public LibCallOptimization { if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr)) return 0; + // Do not do any of the following transformations if the fprintf return + // value is used, in general the fprintf return value is not compatible + // with fwrite(), fputc() or fputs(). 
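The incompatibility mentioned above is easy to see with a short example: fprintf returns the number of characters printed, while the replacement calls return something else entirely. A small C++ sketch of the mismatch (fputs is only guaranteed to return a non-negative value on success, so it is left out):

#include <cstdio>

int main() {
  std::FILE *F = std::tmpfile();
  if (!F)
    return 1;

  // fprintf returns the number of characters written: here 3.
  int FromFprintf = std::fprintf(F, "foo");

  // The fwrite form used by the transform writes one item of size 3 and so
  // returns 1, not 3.
  std::size_t FromFwrite = std::fwrite("foo", 3, 1, F);

  // fputc returns the character written ('c' == 99); fprintf(F, "%c", 'c')
  // would have returned 1.
  int FromFputc = std::fputc('c', F);

  std::printf("fprintf=%d fwrite=%zu fputc=%d\n",
              FromFprintf, FromFwrite, FromFputc);
  std::fclose(F);
  return 0;
}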
+ if (!CI->use_empty()) + return 0; + // fprintf(F, "foo") --> fwrite("foo", 3, 1, F) if (CI->getNumArgOperands() == 2) { for (unsigned i = 0, e = FormatStr.size(); i != e; ++i) @@ -1527,11 +1533,10 @@ struct FPrintFOpt : public LibCallOptimization { // These optimizations require DataLayout. if (!TD) return 0; - Value *NewCI = EmitFWrite(CI->getArgOperand(1), - ConstantInt::get(TD->getIntPtrType(*Context), - FormatStr.size()), - CI->getArgOperand(0), B, TD, TLI); - return NewCI ? ConstantInt::get(CI->getType(), FormatStr.size()) : 0; + return EmitFWrite(CI->getArgOperand(1), + ConstantInt::get(TD->getIntPtrType(*Context), + FormatStr.size()), + CI->getArgOperand(0), B, TD, TLI); } // The remaining optimizations require the format string to be "%s" or "%c" @@ -1544,14 +1549,12 @@ struct FPrintFOpt : public LibCallOptimization { if (FormatStr[1] == 'c') { // fprintf(F, "%c", chr) --> fputc(chr, F) if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0; - Value *NewCI = EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, - TD, TLI); - return NewCI ? ConstantInt::get(CI->getType(), 1) : 0; + return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI); } if (FormatStr[1] == 's') { // fprintf(F, "%s", str) --> fputs(str, F) - if (!CI->getArgOperand(2)->getType()->isPointerTy() || !CI->use_empty()) + if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0; return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI); } diff --git a/lib/Transforms/Utils/Utils.cpp b/lib/Transforms/Utils/Utils.cpp index 5812d46..c3df215 100644 --- a/lib/Transforms/Utils/Utils.cpp +++ b/lib/Transforms/Utils/Utils.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "llvm/InitializePasses.h" +#include "llvm/PassRegistry.h" #include "llvm-c/Initialization.h" using namespace llvm; diff --git a/lib/Transforms/Vectorize/CMakeLists.txt b/lib/Transforms/Vectorize/CMakeLists.txt index e64034a..7ae082f 100644 --- a/lib/Transforms/Vectorize/CMakeLists.txt +++ b/lib/Transforms/Vectorize/CMakeLists.txt @@ -2,6 +2,8 @@ add_llvm_library(LLVMVectorize BBVectorize.cpp Vectorize.cpp LoopVectorize.cpp + SLPVectorizer.cpp + VecUtils.cpp ) add_dependencies(LLVMVectorize intrinsics_gen) diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index 930d9c4..9a832f7 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -78,6 +78,7 @@ #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/PatternMatch.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Scalar.h" @@ -87,6 +88,7 @@ #include <map> using namespace llvm; +using namespace llvm::PatternMatch; static cl::opt<unsigned> VectorizationFactor("force-vector-width", cl::init(0), cl::Hidden, @@ -112,9 +114,9 @@ TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), /// We don't unroll loops with a known constant trip count below this number. static const unsigned TinyTripCountUnrollThreshold = 128; -/// When performing a runtime memory check, do not check more than this -/// number of pointers. Notice that the check is quadratic! -static const unsigned RuntimeMemoryCheckThreshold = 4; +/// When performing memory disambiguation checks at runtime do not make more +/// than this number of comparisons. 
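What this threshold bounds is the number of pointer-pair overlap comparisons the runtime check has to emit; pairs in which both pointers are only read can be skipped, which is what the addRuntimeCheck loop further down does. A standalone sketch of that counting (illustrative names only):

#include <cstdio>
#include <vector>

// Count the overlap comparisons a runtime check must emit for a set of
// pointers: every unordered pair is a candidate, but pairs where neither
// pointer is written can never carry a dependence and are skipped.
unsigned countRuntimeChecks(const std::vector<bool> &IsWritePtr) {
  unsigned Checks = 0;
  for (unsigned I = 0, N = IsWritePtr.size(); I < N; ++I)
    for (unsigned J = I + 1; J < N; ++J) {
      if (!IsWritePtr[I] && !IsWritePtr[J])
        continue; // read-read pair: no check needed
      ++Checks;
    }
  return Checks;
}

int main() {
  // Two written pointers and two read-only pointers: 6 pairs in total, one
  // of them read-read, so 5 comparisons are emitted.
  std::vector<bool> Ptrs = {true, true, false, false};
  std::printf("checks = %u\n", countRuntimeChecks(Ptrs));
  return 0;
}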
+static const unsigned RuntimeMemoryCheckThreshold = 8; /// We use a metadata with this name to indicate that a scalar loop was /// vectorized and that we don't need to re-vectorize it if we run into it @@ -343,6 +345,7 @@ public: RK_IntegerOr, ///< Bitwise or logical OR of numbers. RK_IntegerAnd, ///< Bitwise or logical AND of numbers. RK_IntegerXor, ///< Bitwise or logical XOR of numbers. + RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()). RK_FloatAdd, ///< Sum of floats. RK_FloatMult ///< Product of floats. }; @@ -356,13 +359,23 @@ public: IK_ReversePtrInduction ///< Reverse ptr indvar. Step = - sizeof(elem). }; + // This enum represents the kind of minmax reduction. + enum MinMaxReductionKind { + MRK_Invalid, + MRK_UIntMin, + MRK_UIntMax, + MRK_SIntMin, + MRK_SIntMax + }; + /// This POD struct holds information about reduction variables. struct ReductionDescriptor { ReductionDescriptor() : StartValue(0), LoopExitInstr(0), - Kind(RK_NoReduction) {} + Kind(RK_NoReduction), MinMaxKind(MRK_Invalid) {} - ReductionDescriptor(Value *Start, Instruction *Exit, ReductionKind K) - : StartValue(Start), LoopExitInstr(Exit), Kind(K) {} + ReductionDescriptor(Value *Start, Instruction *Exit, ReductionKind K, + MinMaxReductionKind MK) + : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK) {} // The starting value of the reduction. // It does not have to be zero! @@ -371,6 +384,25 @@ public: Instruction *LoopExitInstr; // The kind of the reduction. ReductionKind Kind; + // If this a min/max reduction the kind of reduction. + MinMaxReductionKind MinMaxKind; + }; + + /// This POD struct holds information about a potential reduction operation. + struct ReductionInstDesc { + ReductionInstDesc(bool IsRedux, Instruction *I) : + IsReduction(IsRedux), PatternLastInst(I), MinMaxKind(MRK_Invalid) {} + + ReductionInstDesc(Instruction *I, MinMaxReductionKind K) : + IsReduction(true), PatternLastInst(I), MinMaxKind(K) {} + + // Is this instruction a reduction candidate. + bool IsReduction; + // The last instruction in a min/max pattern (select of the select(icmp()) + // pattern), or the current reduction instruction otherwise. + Instruction *PatternLastInst; + // If this is a min/max pattern the comparison predicate. + MinMaxReductionKind MinMaxKind; }; // This POD struct holds information about the memory runtime legality @@ -387,7 +419,7 @@ public: } /// Insert a pointer and calculate the start and end SCEVs. - void insert(ScalarEvolution *SE, Loop *Lp, Value *Ptr); + void insert(ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr); /// This flag indicates if we need to add the runtime check. bool Need; @@ -397,6 +429,8 @@ public: SmallVector<const SCEV*, 2> Starts; /// Holds the pointer value at the end of the loop. SmallVector<const SCEV*, 2> Ends; + /// Holds the information if this pointer is used for writing to memory. + SmallVector<bool, 2> IsWritePtr; }; /// A POD for saving information about induction variables. @@ -461,6 +495,11 @@ public: /// Returns the information that we collected about runtime memory check. RuntimePointerCheck *getRuntimePointerCheck() { return &PtrRtCheck; } + + /// This function returns the identity element (or neutral element) for + /// the operation K. + static Constant *getReductionIdentity(ReductionKind K, Type *Tp, + MinMaxReductionKind MinMaxK); private: /// Check if a single basic block loop is vectorizable. 
/// At this point we know that this is a loop with a constant trip count @@ -487,9 +526,17 @@ private: /// Returns True, if 'Phi' is the kind of reduction variable for type /// 'Kind'. If this is a reduction variable, it adds it to ReductionList. bool AddReductionVar(PHINode *Phi, ReductionKind Kind); - /// Returns true if the instruction I can be a reduction variable of type - /// 'Kind'. - bool isReductionInstr(Instruction *I, ReductionKind Kind); + /// Returns a struct describing if the instruction 'I' can be a reduction + /// variable of type 'Kind'. If the reduction is a min/max pattern of + /// select(icmp()) this function advances the instruction pointer 'I' from the + /// compare instruction to the select instruction and stores this pointer in + /// 'PatternLastInst' member of the returned struct. + ReductionInstDesc isReductionInstr(Instruction *I, ReductionKind Kind, + ReductionInstDesc &Desc); + /// Returns true if the instruction is a Select(ICmp(X, Y), X, Y) instruction + /// pattern corresponding to a min(X, Y) or max(X, Y). + static ReductionInstDesc isMinMaxSelectCmpPattern(Instruction *I, + ReductionInstDesc &Prev); /// Returns the induction kind of Phi. This function may return NoInduction /// if the PHI is not an induction variable. InductionKind isInductionVariable(PHINode *Phi); @@ -662,6 +709,11 @@ struct LoopVectorize : public LoopPass { AA = getAnalysisIfAvailable<AliasAnalysis>(); TLI = getAnalysisIfAvailable<TargetLibraryInfo>(); + if (DL == NULL) { + DEBUG(dbgs() << "LV: Not vectorizing because of missing data layout"); + return false; + } + DEBUG(dbgs() << "LV: Checking a loop in \"" << L->getHeader()->getParent()->getName() << "\"\n"); @@ -737,7 +789,8 @@ struct LoopVectorize : public LoopPass { void LoopVectorizationLegality::RuntimePointerCheck::insert(ScalarEvolution *SE, - Loop *Lp, Value *Ptr) { + Loop *Lp, Value *Ptr, + bool WritePtr) { const SCEV *Sc = SE->getSCEV(Ptr); const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); assert(AR && "Invalid addrec expression"); @@ -746,6 +799,7 @@ LoopVectorizationLegality::RuntimePointerCheck::insert(ScalarEvolution *SE, Pointers.push_back(Ptr); Starts.push_back(AR->getStart()); Ends.push_back(ScEnd); + IsWritePtr.push_back(WritePtr); } Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { @@ -906,12 +960,18 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment(); + unsigned ScalarAllocatedSize = DL->getTypeAllocSize(ScalarDataTy); + unsigned VectorElementSize = DL->getTypeStoreSize(DataTy)/VF; + + if (ScalarAllocatedSize != VectorElementSize) + return scalarizeInstruction(Instr); + // If the pointer is loop invariant or if it is non consecutive, // scalarize the load. - int Stride = Legal->isConsecutivePtr(Ptr); - bool Reverse = Stride < 0; + int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); + bool Reverse = ConsecutiveStride < 0; bool UniformLoad = LI && Legal->isUniform(Ptr); - if (Stride == 0 || UniformLoad) + if (!ConsecutiveStride || UniformLoad) return scalarizeInstruction(Instr); Constant *Zero = Builder.getInt32(0); @@ -1040,10 +1100,10 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr) { // Create a new entry in the WidenMap and initialize it to Undef or Null. 
VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); - // For each scalar that we create: - for (unsigned Width = 0; Width < VF; ++Width) { - // For each vector unroll 'part': - for (unsigned Part = 0; Part < UF; ++Part) { + // For each vector unroll 'part': + for (unsigned Part = 0; Part < UF; ++Part) { + // For each scalar that we create: + for (unsigned Width = 0; Width < VF; ++Width) { Instruction *Cloned = Instr->clone(); if (!IsVoidRetTy) Cloned->setName(Instr->getName() + ".cloned"); @@ -1110,6 +1170,10 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal, for (unsigned i = 0; i < NumPointers; ++i) { for (unsigned j = i+1; j < NumPointers; ++j) { + // No need to check if two readonly pointers intersect. + if (!PtrRtCheck->IsWritePtr[i] && !PtrRtCheck->IsWritePtr[j]) + continue; + Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy, "bc"); Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy, "bc"); Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy, "bc"); @@ -1436,26 +1500,45 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) { /// This function returns the identity element (or neutral element) for /// the operation K. -static Constant* -getReductionIdentity(LoopVectorizationLegality::ReductionKind K, Type *Tp) { +Constant* +LoopVectorizationLegality::getReductionIdentity(ReductionKind K, Type *Tp, + MinMaxReductionKind MinMaxK) { switch (K) { - case LoopVectorizationLegality:: RK_IntegerXor: - case LoopVectorizationLegality:: RK_IntegerAdd: - case LoopVectorizationLegality:: RK_IntegerOr: + case RK_IntegerXor: + case RK_IntegerAdd: + case RK_IntegerOr: // Adding, Xoring, Oring zero to a number does not change it. return ConstantInt::get(Tp, 0); - case LoopVectorizationLegality:: RK_IntegerMult: + case RK_IntegerMult: // Multiplying a number by 1 does not change it. return ConstantInt::get(Tp, 1); - case LoopVectorizationLegality:: RK_IntegerAnd: + case RK_IntegerAnd: // AND-ing a number with an all-1 value does not change it. return ConstantInt::get(Tp, -1, true); - case LoopVectorizationLegality:: RK_FloatMult: + case RK_FloatMult: // Multiplying a number by 1 does not change it. return ConstantFP::get(Tp, 1.0L); - case LoopVectorizationLegality:: RK_FloatAdd: + case RK_FloatAdd: // Adding zero to a number does not change it. return ConstantFP::get(Tp, 0.0L); + case RK_IntegerMinMax: + switch(MinMaxK) { + default: llvm_unreachable("Unknown min/max predicate"); + case MRK_UIntMin: + return ConstantInt::getAllOnesValue(Tp); + case MRK_UIntMax: + return ConstantInt::get(Tp, 0); + case MRK_SIntMin: { + unsigned BitWidth = Tp->getPrimitiveSizeInBits(); + return ConstantInt::get(Tp->getContext(), + APInt::getSignedMaxValue(BitWidth)); + } + case LoopVectorizationLegality::MRK_SIntMax: { + unsigned BitWidth = Tp->getPrimitiveSizeInBits(); + return ConstantInt::get(Tp->getContext(), + APInt::getSignedMinValue(BitWidth)); + } + } default: llvm_unreachable("Unknown reduction kind"); } @@ -1566,7 +1649,7 @@ getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI) { } /// This function translates the reduction kind to an LLVM binary operator. 
-static Instruction::BinaryOps +static unsigned getReductionBinOp(LoopVectorizationLegality::ReductionKind Kind) { switch (Kind) { case LoopVectorizationLegality::RK_IntegerAdd: @@ -1583,11 +1666,38 @@ getReductionBinOp(LoopVectorizationLegality::ReductionKind Kind) { return Instruction::FMul; case LoopVectorizationLegality::RK_FloatAdd: return Instruction::FAdd; + case LoopVectorizationLegality::RK_IntegerMinMax: + return Instruction::ICmp; default: llvm_unreachable("Unknown reduction operation"); } } +Value *createMinMaxOp(IRBuilder<> &Builder, + LoopVectorizationLegality::MinMaxReductionKind RK, + Value *Left, + Value *Right) { + CmpInst::Predicate P = CmpInst::ICMP_NE; + switch (RK) { + default: + llvm_unreachable("Unknown min/max reduction kind"); + case LoopVectorizationLegality::MRK_UIntMin: + P = CmpInst::ICMP_ULT; + break; + case LoopVectorizationLegality::MRK_UIntMax: + P = CmpInst::ICMP_UGT; + break; + case LoopVectorizationLegality::MRK_SIntMin: + P = CmpInst::ICMP_SLT; + break; + case LoopVectorizationLegality::MRK_SIntMax: + P = CmpInst::ICMP_SGT; + } + Value *Cmp = Builder.CreateICmp(P, Left, Right, "rdx.minmax.cmp"); + Value *Select = Builder.CreateSelect(Cmp, Left, Right, "rdx.minmax.select"); + return Select; +} + void InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) { //===------------------------------------------------===// @@ -1651,7 +1761,10 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) { // Find the reduction identity variable. Zero for addition, or, xor, // one for multiplication, -1 for And. - Constant *Iden = getReductionIdentity(RdxDesc.Kind, VecTy->getScalarType()); + Constant *Iden = + LoopVectorizationLegality::getReductionIdentity(RdxDesc.Kind, + VecTy->getScalarType(), + RdxDesc.MinMaxKind); Constant *Identity = ConstantVector::getSplat(VF, Iden); // This vector is the Identity vector where the first element is the @@ -1699,10 +1812,15 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) { // Reduce all of the unrolled parts into a single vector. Value *ReducedPartRdx = RdxParts[0]; + unsigned Op = getReductionBinOp(RdxDesc.Kind); for (unsigned part = 1; part < UF; ++part) { - Instruction::BinaryOps Op = getReductionBinOp(RdxDesc.Kind); - ReducedPartRdx = Builder.CreateBinOp(Op, RdxParts[part], ReducedPartRdx, - "bin.rdx"); + if (Op != Instruction::ICmp) + ReducedPartRdx = Builder.CreateBinOp((Instruction::BinaryOps)Op, + RdxParts[part], ReducedPartRdx, + "bin.rdx"); + else + ReducedPartRdx = createMinMaxOp(Builder, RdxDesc.MinMaxKind, + ReducedPartRdx, RdxParts[part]); } // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles @@ -1727,8 +1845,11 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) { ConstantVector::get(ShuffleMask), "rdx.shuf"); - Instruction::BinaryOps Op = getReductionBinOp(RdxDesc.Kind); - TmpVec = Builder.CreateBinOp(Op, TmpVec, Shuf, "bin.rdx"); + if (Op != Instruction::ICmp) + TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf, + "bin.rdx"); + else + TmpVec = createMinMaxOp(Builder, RdxDesc.MinMaxKind, TmpVec, Shuf); } // The result is in the first element of the vector. 
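The epilogue above folds the vector down by repeatedly combining its upper half into its lower half, log2(VF) times, using either the reduction's binary operator or, for the new min/max kind, a compare followed by a select. A scalar model of that halving scheme for a signed max, assuming a power-of-two VF (illustrative only, not generated code):

#include <cstdio>
#include <vector>

// Model of the log2(VF) reduction epilogue: at every step the upper half of
// the "vector" is combined into the lower half with select(icmp sgt), i.e. a
// signed max. Assumes a power-of-two number of elements, as VF always is.
int reduceMax(std::vector<int> V) {
  for (std::size_t Half = V.size() / 2; Half >= 1; Half /= 2)
    for (std::size_t I = 0; I < Half; ++I)
      V[I] = V[I] > V[I + Half] ? V[I] : V[I + Half]; // cmp + select
  return V[0]; // the result ends up in element 0
}

int main() {
  std::vector<int> V = {3, -7, 42, 5, 0, 11, -2, 9}; // VF = 8
  std::printf("max = %d\n", reduceMax(V));           // prints 42
  return 0;
}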
@@ -2315,6 +2436,10 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { DEBUG(dbgs() << "LV: Found a XOR reduction PHI."<< *Phi <<"\n"); continue; } + if (AddReductionVar(Phi, RK_IntegerMinMax)) { + DEBUG(dbgs() << "LV: Found a MINMAX reduction PHI."<< *Phi <<"\n"); + continue; + } if (AddReductionVar(Phi, RK_FloatMult)) { DEBUG(dbgs() << "LV: Found an FMult reduction PHI."<< *Phi <<"\n"); continue; @@ -2442,13 +2567,6 @@ LoopVectorizationLegality::hasPossibleGlobalWriteReorder( bool LoopVectorizationLegality::canVectorizeMemory() { - if (TheLoop->isAnnotatedParallel()) { - DEBUG(dbgs() - << "LV: A loop annotated parallel, ignore memory dependency " - << "checks.\n"); - return true; - } - typedef SmallVector<Value*, 16> ValueVector; typedef SmallPtrSet<Value*, 16> ValueSet; // Holds the Load and Store *instructions*. @@ -2457,6 +2575,8 @@ bool LoopVectorizationLegality::canVectorizeMemory() { PtrRtCheck.Pointers.clear(); PtrRtCheck.Need = false; + const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); + // For each block. for (Loop::block_iterator bb = TheLoop->block_begin(), be = TheLoop->block_end(); bb != be; ++bb) { @@ -2471,7 +2591,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() { if (it->mayReadFromMemory()) { LoadInst *Ld = dyn_cast<LoadInst>(it); if (!Ld) return false; - if (!Ld->isSimple()) { + if (!Ld->isSimple() && !IsAnnotatedParallel) { DEBUG(dbgs() << "LV: Found a non-simple load.\n"); return false; } @@ -2483,7 +2603,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() { if (it->mayWriteToMemory()) { StoreInst *St = dyn_cast<StoreInst>(it); if (!St) return false; - if (!St->isSimple()) { + if (!St->isSimple() && !IsAnnotatedParallel) { DEBUG(dbgs() << "LV: Found a non-simple store.\n"); return false; } @@ -2530,6 +2650,13 @@ bool LoopVectorizationLegality::canVectorizeMemory() { ReadWrites.insert(std::make_pair(Ptr, ST)); } + if (IsAnnotatedParallel) { + DEBUG(dbgs() + << "LV: A loop annotated parallel, ignore memory dependency " + << "checks.\n"); + return true; + } + for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) { LoadInst *LD = cast<LoadInst>(*I); Value* Ptr = LD->getPointerOperand(); @@ -2552,6 +2679,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() { return true; } + unsigned NumReadPtrs = 0; + unsigned NumWritePtrs = 0; + // Find pointers with computable bounds. We are going to use this information // to place a runtime bound check. bool CanDoRT = true; @@ -2559,7 +2689,8 @@ bool LoopVectorizationLegality::canVectorizeMemory() { for (MI = ReadWrites.begin(), ME = ReadWrites.end(); MI != ME; ++MI) { Value *V = (*MI).first; if (hasComputableBounds(V)) { - PtrRtCheck.insert(SE, TheLoop, V); + PtrRtCheck.insert(SE, TheLoop, V, true); + NumWritePtrs++; DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *V <<"\n"); } else { CanDoRT = false; @@ -2569,7 +2700,8 @@ bool LoopVectorizationLegality::canVectorizeMemory() { for (MI = Reads.begin(), ME = Reads.end(); MI != ME; ++MI) { Value *V = (*MI).first; if (hasComputableBounds(V)) { - PtrRtCheck.insert(SE, TheLoop, V); + PtrRtCheck.insert(SE, TheLoop, V, false); + NumReadPtrs++; DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *V <<"\n"); } else { CanDoRT = false; @@ -2579,7 +2711,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() { // Check that we did not collect too many pointers or found a // unsizeable pointer. 
- if (!CanDoRT || PtrRtCheck.Pointers.size() > RuntimeMemoryCheckThreshold) { + unsigned NumComparisons = (NumWritePtrs * (NumReadPtrs + NumWritePtrs - 1)); + DEBUG(dbgs() << "LV: We need to compare " << NumComparisons << " ptrs.\n"); + if (!CanDoRT || NumComparisons > RuntimeMemoryCheckThreshold) { PtrRtCheck.reset(); CanDoRT = false; } @@ -2733,7 +2867,18 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi, // used as reduction variables (such as ADD). We may have a single // out-of-block user. The cycle must end with the original PHI. Instruction *Iter = Phi; - while (true) { + + // To recognize min/max patterns formed by a icmp select sequence, we store + // the number of instruction we saw from the recognized min/max pattern, + // such that we don't stop when we see the phi has two uses (one by the select + // and one by the icmp) and to make sure we only see exactly the two + // instructions. + unsigned NumICmpSelectPatternInst = 0; + ReductionInstDesc ReduxDesc(false, 0); + + // Avoid cycles in the chain. + SmallPtrSet<Instruction *, 8> VisitedInsts; + while (VisitedInsts.insert(Iter)) { // If the instruction has no users then this is a broken // chain and can't be a reduction variable. if (Iter->use_empty()) @@ -2747,9 +2892,6 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi, // Is this a bin op ? FoundBinOp |= !isa<PHINode>(Iter); - // Remember the current instruction. - Instruction *OldIter = Iter; - // For each of the *users* of iter. for (Value::use_iterator it = Iter->use_begin(), e = Iter->use_end(); it != e; ++it) { @@ -2778,25 +2920,33 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi, Iter->hasNUsesOrMore(2)) continue; - // We can't have multiple inside users. - if (FoundInBlockUser) + // We can't have multiple inside users except for a combination of + // icmp/select both using the phi. + if (FoundInBlockUser && !NumICmpSelectPatternInst) return false; FoundInBlockUser = true; // Any reduction instr must be of one of the allowed kinds. - if (!isReductionInstr(U, Kind)) + ReduxDesc = isReductionInstr(U, Kind, ReduxDesc); + if (!ReduxDesc.IsReduction) return false; + if (Kind == RK_IntegerMinMax && (isa<ICmpInst>(U) || + isa<SelectInst>(U))) + ++NumICmpSelectPatternInst; + // Reductions of instructions such as Div, and Sub is only // possible if the LHS is the reduction variable. - if (!U->isCommutative() && !isa<PHINode>(U) && U->getOperand(0) != Iter) + if (!U->isCommutative() && !isa<PHINode>(U) && !isa<SelectInst>(U) && + !isa<ICmpInst>(U) && U->getOperand(0) != Iter) return false; - Iter = U; + Iter = ReduxDesc.PatternLastInst; } - // If all uses were skipped this can't be a reduction variable. - if (Iter == OldIter) + // This means we have seen one but not the other instruction of the + // pattern or more than just a select and cmp. + if (Kind == RK_IntegerMinMax && NumICmpSelectPatternInst != 2) return false; // We found a reduction var if we have reached the original @@ -2807,47 +2957,94 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi, AllowedExit.insert(ExitInstruction); // Save the description of this reduction variable. - ReductionDescriptor RD(RdxStart, ExitInstruction, Kind); + ReductionDescriptor RD(RdxStart, ExitInstruction, Kind, + ReduxDesc.MinMaxKind); Reductions[Phi] = RD; // We've ended the cycle. This is a reduction variable if we have an // outside user and it has a binary op. 
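For reference, the select(icmp()) chains that AddReductionVar now accepts come from ordinary min/max reduction loops. A hypothetical C++ source example of the unsigned-min case; with optimization the conditional update typically lowers to an icmp ult plus a select feeding the loop-carried phi, which is the RK_IntegerMinMax / MRK_UIntMin pattern described above:

#include <cstdio>

// An unsigned-min reduction: the conditional update below becomes a compare
// plus a select feeding the loop-carried value. Note that the start value
// ~0u is exactly the identity getReductionIdentity returns for unsigned min
// (all ones).
unsigned minOfArray(const unsigned *A, unsigned N) {
  unsigned Min = ~0u;
  for (unsigned I = 0; I < N; ++I)
    Min = A[I] < Min ? A[I] : Min; // select(icmp ult A[I], Min)
  return Min;
}

int main() {
  unsigned Data[] = {8, 3, 19, 3, 7};
  std::printf("min = %u\n", minOfArray(Data, 5)); // prints 3
  return 0;
}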
return FoundBinOp && ExitInstruction; } } + + return false; } -bool +/// Returns true if the instruction is a Select(ICmp(X, Y), X, Y) instruction +/// pattern corresponding to a min(X, Y) or max(X, Y). +LoopVectorizationLegality::ReductionInstDesc +LoopVectorizationLegality::isMinMaxSelectCmpPattern(Instruction *I, ReductionInstDesc &Prev) { + + assert((isa<ICmpInst>(I) || isa<SelectInst>(I)) && + "Expect a select instruction"); + ICmpInst *Cmp = 0; + SelectInst *Select = 0; + + // We must handle the select(cmp()) as a single instruction. Advance to the + // select. + if ((Cmp = dyn_cast<ICmpInst>(I))) { + if (!Cmp->hasOneUse() || !(Select = dyn_cast<SelectInst>(*I->use_begin()))) + return ReductionInstDesc(false, I); + return ReductionInstDesc(Select, Prev.MinMaxKind); + } + + // Only handle single use cases for now. + if (!(Select = dyn_cast<SelectInst>(I))) + return ReductionInstDesc(false, I); + if (!(Cmp = dyn_cast<ICmpInst>(I->getOperand(0)))) + return ReductionInstDesc(false, I); + if (!Cmp->hasOneUse()) + return ReductionInstDesc(false, I); + + Value *CmpLeft = Cmp->getOperand(0); + Value *CmpRight = Cmp->getOperand(1); + + // Look for a min/max pattern. + if (m_UMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select)) + return ReductionInstDesc(Select, MRK_UIntMin); + else if (m_UMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select)) + return ReductionInstDesc(Select, MRK_UIntMax); + else if (m_SMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select)) + return ReductionInstDesc(Select, MRK_SIntMax); + else if (m_SMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select)) + return ReductionInstDesc(Select, MRK_SIntMin); + + return ReductionInstDesc(false, I); +} + +LoopVectorizationLegality::ReductionInstDesc LoopVectorizationLegality::isReductionInstr(Instruction *I, - ReductionKind Kind) { + ReductionKind Kind, + ReductionInstDesc &Prev) { bool FP = I->getType()->isFloatingPointTy(); bool FastMath = (FP && I->isCommutative() && I->isAssociative()); - switch (I->getOpcode()) { default: - return false; + return ReductionInstDesc(false, I); case Instruction::PHI: if (FP && (Kind != RK_FloatMult && Kind != RK_FloatAdd)) - return false; - // possibly. 
- return true; + return ReductionInstDesc(false, I); + return ReductionInstDesc(I, Prev.MinMaxKind); case Instruction::Sub: case Instruction::Add: - return Kind == RK_IntegerAdd; - case Instruction::SDiv: - case Instruction::UDiv: + return ReductionInstDesc(Kind == RK_IntegerAdd, I); case Instruction::Mul: - return Kind == RK_IntegerMult; + return ReductionInstDesc(Kind == RK_IntegerMult, I); case Instruction::And: - return Kind == RK_IntegerAnd; + return ReductionInstDesc(Kind == RK_IntegerAnd, I); case Instruction::Or: - return Kind == RK_IntegerOr; + return ReductionInstDesc(Kind == RK_IntegerOr, I); case Instruction::Xor: - return Kind == RK_IntegerXor; + return ReductionInstDesc(Kind == RK_IntegerXor, I); case Instruction::FMul: - return Kind == RK_FloatMult && FastMath; + return ReductionInstDesc(Kind == RK_FloatMult && FastMath, I); case Instruction::FAdd: - return Kind == RK_FloatAdd && FastMath; - } + return ReductionInstDesc(Kind == RK_FloatAdd && FastMath, I); + case Instruction::ICmp: + case Instruction::Select: + if (Kind != RK_IntegerMinMax) + return ReductionInstDesc(false, I); + return isMinMaxSelectCmpPattern(I, Prev); + } } LoopVectorizationLegality::InductionKind @@ -3331,8 +3528,19 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { case Instruction::AShr: case Instruction::And: case Instruction::Or: - case Instruction::Xor: - return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy); + case Instruction::Xor: { + // Certain instructions can be cheaper to vectorize if they have a constant + // second vector operand. One example of this are shifts on x86. + TargetTransformInfo::OperandValueKind Op1VK = + TargetTransformInfo::OK_AnyValue; + TargetTransformInfo::OperandValueKind Op2VK = + TargetTransformInfo::OK_AnyValue; + + if (isa<ConstantInt>(I->getOperand(1))) + Op2VK = TargetTransformInfo::OK_UniformConstantValue; + + return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK); + } case Instruction::Select: { SelectInst *SI = cast<SelectInst>(I); const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); @@ -3369,9 +3577,11 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); // Scalarized loads/stores. - int Stride = Legal->isConsecutivePtr(Ptr); - bool Reverse = Stride < 0; - if (0 == Stride) { + int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); + bool Reverse = ConsecutiveStride < 0; + unsigned ScalarAllocatedSize = DL->getTypeAllocSize(ValTy); + unsigned VectorElementSize = DL->getTypeStoreSize(VectorTy)/VF; + if (!ConsecutiveStride || ScalarAllocatedSize != VectorElementSize) { unsigned Cost = 0; // The cost of extracting from the value vector and pointer vector. Type *PtrTy = ToVectorTy(Ptr->getType(), VF); diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp new file mode 100644 index 0000000..cc30cc9 --- /dev/null +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -0,0 +1,348 @@ +//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// This pass implements the Bottom Up SLP vectorizer. It detects consecutive +// stores that can be put together into vector-stores. 
Next, it attempts to +// construct vectorizable tree using the use-def chains. If a profitable tree +// was found, the SLP vectorizer performs vectorization on the tree. +// +// The pass is inspired by the work described in the paper: +// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. +// +//===----------------------------------------------------------------------===// +#define SV_NAME "slp-vectorizer" +#define DEBUG_TYPE SV_NAME + +#include "VecUtils.h" +#include "llvm/Transforms/Vectorize.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/Analysis/Verifier.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Value.h" +#include "llvm/Pass.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include <map> + +using namespace llvm; + +static cl::opt<int> +SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, + cl::desc("Only vectorize trees if the gain is above this " + "number. (gain = -cost of vectorization)")); +namespace { + +/// The SLPVectorizer Pass. +struct SLPVectorizer : public FunctionPass { + typedef std::map<Value*, BoUpSLP::StoreList> StoreListMap; + + /// Pass identification, replacement for typeid + static char ID; + + explicit SLPVectorizer() : FunctionPass(ID) { + initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); + } + + ScalarEvolution *SE; + DataLayout *DL; + TargetTransformInfo *TTI; + AliasAnalysis *AA; + LoopInfo *LI; + + virtual bool runOnFunction(Function &F) { + SE = &getAnalysis<ScalarEvolution>(); + DL = getAnalysisIfAvailable<DataLayout>(); + TTI = &getAnalysis<TargetTransformInfo>(); + AA = &getAnalysis<AliasAnalysis>(); + LI = &getAnalysis<LoopInfo>(); + + StoreRefs.clear(); + bool Changed = false; + + // Must have DataLayout. We can't require it because some tests run w/o + // triple. + if (!DL) + return false; + + for (Function::iterator it = F.begin(), e = F.end(); it != e; ++it) { + BasicBlock *BB = it; + bool BBChanged = false; + + // Use the bollom up slp vectorizer to construct chains that start with + // he store instructions. + BoUpSLP R(BB, SE, DL, TTI, AA, LI->getLoopFor(BB)); + + // Vectorize trees that end at reductions. + BBChanged |= vectorizeReductions(BB, R); + + // Vectorize trees that end at stores. + if (unsigned count = collectStores(BB, R)) { + (void)count; + DEBUG(dbgs()<<"SLP: Found " << count << " stores to vectorize.\n"); + BBChanged |= vectorizeStoreChains(R); + } + + // Try to hoist some of the scalarization code to the preheader. + if (BBChanged) hoistGatherSequence(LI, BB, R); + + Changed |= BBChanged; + } + + if (Changed) { + DEBUG(dbgs()<<"SLP: vectorized \""<<F.getName()<<"\"\n"); + DEBUG(verifyFunction(F)); + } + return Changed; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + FunctionPass::getAnalysisUsage(AU); + AU.addRequired<ScalarEvolution>(); + AU.addRequired<AliasAnalysis>(); + AU.addRequired<TargetTransformInfo>(); + AU.addRequired<LoopInfo>(); + } + +private: + + /// \brief Collect memory references and sort them according to their base + /// object. We sort the stores to their base objects to reduce the cost of the + /// quadratic search on the stores. 
TODO: We can further reduce this cost + /// if we flush the chain creation every time we run into a memory barrier. + unsigned collectStores(BasicBlock *BB, BoUpSLP &R); + + /// \brief Try to vectorize a chain that starts at two arithmetic instrs. + bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R); + + /// \brief Try to vectorize a list of operands. + bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R); + + /// \brief Try to vectorize a chain that may start at the operands of \V; + bool tryToVectorize(BinaryOperator *V, BoUpSLP &R); + + /// \brief Vectorize the stores that were collected in StoreRefs. + bool vectorizeStoreChains(BoUpSLP &R); + + /// \brief Try to hoist gather sequences outside of the loop in cases where + /// all of the sources are loop invariant. + void hoistGatherSequence(LoopInfo *LI, BasicBlock *BB, BoUpSLP &R); + + /// \brief Scan the basic block and look for reductions that may start a + /// vectorization chain. + bool vectorizeReductions(BasicBlock *BB, BoUpSLP &R); + +private: + StoreListMap StoreRefs; +}; + +unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) { + unsigned count = 0; + StoreRefs.clear(); + for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { + StoreInst *SI = dyn_cast<StoreInst>(it); + if (!SI) + continue; + + // Check that the pointer points to scalars. + Type *Ty = SI->getValueOperand()->getType(); + if (Ty->isAggregateType() || Ty->isVectorTy()) + return 0; + + // Find the base of the GEP. + Value *Ptr = SI->getPointerOperand(); + if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) + Ptr = GEP->getPointerOperand(); + + // Save the store locations. + StoreRefs[Ptr].push_back(SI); + count++; + } + return count; +} + +bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { + if (!A || !B) return false; + Value *VL[] = { A, B }; + return tryToVectorizeList(VL, R); +} + +bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) { + DEBUG(dbgs()<<"SLP: Vectorizing a list of length = " << VL.size() << ".\n"); + + // Check that all of the parts are scalar. + for (int i = 0, e = VL.size(); i < e; ++i) { + Type *Ty = VL[i]->getType(); + if (Ty->isAggregateType() || Ty->isVectorTy()) + return 0; + } + + int Cost = R.getTreeCost(VL); + int ExtrCost = R.getScalarizationCost(VL); + DEBUG(dbgs()<<"SLP: Cost of pair:" << Cost << + " Cost of extract:" << ExtrCost << ".\n"); + if ((Cost+ExtrCost) >= -SLPCostThreshold) return false; + DEBUG(dbgs()<<"SLP: Vectorizing pair.\n"); + R.vectorizeArith(VL); + return true; +} + +bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) { + if (!V) return false; + // Try to vectorize V. + if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R)) + return true; + + BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0)); + BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1)); + // Try to skip B. + if (B && B->hasOneUse()) { + BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); + BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); + if (tryToVectorizePair(A, B0, R)) { + B->moveBefore(V); + return true; + } + if (tryToVectorizePair(A, B1, R)) { + B->moveBefore(V); + return true; + } + } + + // Try to skip A. 
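The pair-vectorization attempts above (tryToVectorizePair / tryToVectorizeList) target straight-line code in which two isomorphic scalar operations can become one two-wide vector operation. A hypothetical source-level example of the shape being looked for:

// Two isomorphic scalar operations feeding adjacent stores: the kind of
// straight-line sequence the SLP vectorizer can turn into one <2 x double>
// add plus one vector store (a hypothetical source example, not generated
// code).
void addPair(double *Out, const double *A, const double *B) {
  Out[0] = A[0] + B[0];
  Out[1] = A[1] + B[1];
}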
+ if (A && A->hasOneUse()) { + BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); + BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); + if (tryToVectorizePair(A0, B, R)) { + A->moveBefore(V); + return true; + } + if (tryToVectorizePair(A1, B, R)) { + A->moveBefore(V); + return true; + } + } + return 0; +} + +bool SLPVectorizer::vectorizeReductions(BasicBlock *BB, BoUpSLP &R) { + bool Changed = false; + for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { + if (isa<DbgInfoIntrinsic>(it)) continue; + + // Try to vectorize reductions that use PHINodes. + if (PHINode *P = dyn_cast<PHINode>(it)) { + // Check that the PHI is a reduction PHI. + if (P->getNumIncomingValues() != 2) return Changed; + Value *Rdx = (P->getIncomingBlock(0) == BB ? P->getIncomingValue(0) : + (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : + 0)); + // Check if this is a Binary Operator. + BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx); + if (!BI) + continue; + + Value *Inst = BI->getOperand(0); + if (Inst == P) Inst = BI->getOperand(1); + Changed |= tryToVectorize(dyn_cast<BinaryOperator>(Inst), R); + continue; + } + + // Try to vectorize trees that start at compare instructions. + if (CmpInst *CI = dyn_cast<CmpInst>(it)) { + if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) { + Changed |= true; + continue; + } + for (int i = 0; i < 2; ++i) + if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) + Changed |= tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R); + continue; + } + } + + return Changed; +} + +bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) { + bool Changed = false; + // Attempt to sort and vectorize each of the store-groups. + for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end(); + it != e; ++it) { + if (it->second.size() < 2) + continue; + + DEBUG(dbgs()<<"SLP: Analyzing a store chain of length " << + it->second.size() << ".\n"); + + Changed |= R.vectorizeStores(it->second, -SLPCostThreshold); + } + return Changed; +} + +void SLPVectorizer::hoistGatherSequence(LoopInfo *LI, BasicBlock *BB, + BoUpSLP &R) { + // Check if this block is inside a loop. + Loop *L = LI->getLoopFor(BB); + if (!L) + return; + + // Check if it has a preheader. + BasicBlock *PreHeader = L->getLoopPreheader(); + if (!PreHeader) + return; + + // Mark the insertion point for the block. + Instruction *Location = PreHeader->getTerminator(); + + BoUpSLP::ValueList &Gathers = R.getGatherSeqInstructions(); + for (BoUpSLP::ValueList::iterator it = Gathers.begin(), e = Gathers.end(); + it != e; ++it) { + InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it); + + // The InsertElement sequence can be simplified into a constant. + if (!Insert) + continue; + + // If the vector or the element that we insert into it are + // instructions that are defined in this basic block then we can't + // hoist this instruction. + Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); + Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); + if (CurrVec && L->contains(CurrVec)) continue; + if (NewElem && L->contains(NewElem)) continue; + + // We can hoist this instruction. Move it to the pre-header. 
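The hoisting done by hoistGatherSequence moves insertelement ("gather") sequences whose operands are loop-invariant out to the loop preheader. A scalar before/after sketch of the same idea, with invented names:

#include <array>

// Before: the invariant "gather" (building Vec from W and X) is redone on
// every iteration even though neither operand changes inside the loop.
int sumBefore(const int *A, int N, int W, int X) {
  int Sum = 0;
  for (int I = 0; I < N; ++I) {
    std::array<int, 2> Vec = {W, X}; // invariant build inside the loop
    Sum += A[I] * Vec[I & 1];
  }
  return Sum;
}

// After: the build is hoisted to the block before the loop (the preheader),
// which is what hoistGatherSequence does for insertelement chains whose
// operands are defined outside the loop.
int sumAfter(const int *A, int N, int W, int X) {
  std::array<int, 2> Vec = {W, X}; // hoisted: built once
  int Sum = 0;
  for (int I = 0; I < N; ++I)
    Sum += A[I] * Vec[I & 1];
  return Sum;
}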
+ Insert->moveBefore(Location); + } +} + +} // end anonymous namespace + +char SLPVectorizer::ID = 0; +static const char lv_name[] = "SLP Vectorizer"; +INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) +INITIALIZE_AG_DEPENDENCY(AliasAnalysis) +INITIALIZE_AG_DEPENDENCY(TargetTransformInfo) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(LoopSimplify) +INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) + +namespace llvm { + Pass *createSLPVectorizerPass() { + return new SLPVectorizer(); + } +} + diff --git a/lib/Transforms/Vectorize/VecUtils.cpp b/lib/Transforms/Vectorize/VecUtils.cpp new file mode 100644 index 0000000..9b94366 --- /dev/null +++ b/lib/Transforms/Vectorize/VecUtils.cpp @@ -0,0 +1,730 @@ +//===- VecUtils.cpp --- Vectorization Utilities ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +#define DEBUG_TYPE "SLP" + +#include "VecUtils.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/ScalarEvolutionExpressions.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/Analysis/Verifier.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Value.h" +#include "llvm/Pass.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetLibraryInfo.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Utils/Local.h" +#include <algorithm> +#include <map> + +using namespace llvm; + +static const unsigned MinVecRegSize = 128; + +static const unsigned RecursionMaxDepth = 6; + +namespace llvm { + +BoUpSLP::BoUpSLP(BasicBlock *Bb, ScalarEvolution *S, DataLayout *Dl, + TargetTransformInfo *Tti, AliasAnalysis *Aa, Loop *Lp) : + BB(Bb), SE(S), DL(Dl), TTI(Tti), AA(Aa), L(Lp) { + numberInstructions(); +} + +void BoUpSLP::numberInstructions() { + int Loc = 0; + InstrIdx.clear(); + InstrVec.clear(); + // Number the instructions in the block. + for (BasicBlock::iterator it=BB->begin(), e=BB->end(); it != e; ++it) { + InstrIdx[it] = Loc++; + InstrVec.push_back(it); + assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation"); + } +} + +Value *BoUpSLP::getPointerOperand(Value *I) { + if (LoadInst *LI = dyn_cast<LoadInst>(I)) return LI->getPointerOperand(); + if (StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->getPointerOperand(); + return 0; +} + +unsigned BoUpSLP::getAddressSpaceOperand(Value *I) { + if (LoadInst *L=dyn_cast<LoadInst>(I)) return L->getPointerAddressSpace(); + if (StoreInst *S=dyn_cast<StoreInst>(I)) return S->getPointerAddressSpace(); + return -1; +} + +bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) { + Value *PtrA = getPointerOperand(A); + Value *PtrB = getPointerOperand(B); + unsigned ASA = getAddressSpaceOperand(A); + unsigned ASB = getAddressSpaceOperand(B); + + // Check that the address spaces match and that the pointers are valid. 
+ if (!PtrA || !PtrB || (ASA != ASB)) return false; + + // Check that A and B are of the same type. + if (PtrA->getType() != PtrB->getType()) return false; + + // Calculate the distance. + const SCEV *PtrSCEVA = SE->getSCEV(PtrA); + const SCEV *PtrSCEVB = SE->getSCEV(PtrB); + const SCEV *OffsetSCEV = SE->getMinusSCEV(PtrSCEVA, PtrSCEVB); + const SCEVConstant *ConstOffSCEV = dyn_cast<SCEVConstant>(OffsetSCEV); + + // Non constant distance. + if (!ConstOffSCEV) return false; + + int64_t Offset = ConstOffSCEV->getValue()->getSExtValue(); + Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); + // The Instructions are connsecutive if the size of the first load/store is + // the same as the offset. + int64_t Sz = DL->getTypeStoreSize(Ty); + return ((-Offset) == Sz); +} + +bool BoUpSLP::vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold) { + Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType(); + unsigned Sz = DL->getTypeSizeInBits(StoreTy); + unsigned VF = MinVecRegSize / Sz; + + if (!isPowerOf2_32(Sz) || VF < 2) return false; + + bool Changed = false; + // Look for profitable vectorizable trees at all offsets, starting at zero. + for (unsigned i = 0, e = Chain.size(); i < e; ++i) { + if (i + VF > e) return Changed; + DEBUG(dbgs()<<"SLP: Analyzing " << VF << " stores at offset "<< i << "\n"); + ArrayRef<Value *> Operands = Chain.slice(i, VF); + + int Cost = getTreeCost(Operands); + DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); + if (Cost < CostThreshold) { + DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); + vectorizeTree(Operands, VF); + i += VF - 1; + Changed = true; + } + } + + return Changed; +} + +bool BoUpSLP::vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold) { + ValueSet Heads, Tails; + SmallDenseMap<Value*, Value*> ConsecutiveChain; + + // We may run into multiple chains that merge into a single chain. We mark the + // stores that we vectorized so that we don't visit the same store twice. + ValueSet VectorizedStores; + bool Changed = false; + + // Do a quadratic search on all of the given stores and find + // all of the pairs of loads that follow each other. + for (unsigned i = 0, e = Stores.size(); i < e; ++i) + for (unsigned j = 0; j < e; ++j) { + if (i == j) continue; + if (isConsecutiveAccess(Stores[i], Stores[j])) { + Tails.insert(Stores[j]); + Heads.insert(Stores[i]); + ConsecutiveChain[Stores[i]] = Stores[j]; + } + } + + // For stores that start but don't end a link in the chain: + for (ValueSet::iterator it = Heads.begin(), e = Heads.end();it != e; ++it) { + if (Tails.count(*it)) continue; + + // We found a store instr that starts a chain. Now follow the chain and try + // to vectorize it. + ValueList Operands; + Value *I = *it; + // Collect the chain into a list. + while (Tails.count(I) || Heads.count(I)) { + if (VectorizedStores.count(I)) break; + Operands.push_back(I); + // Move to the next value in the chain. + I = ConsecutiveChain[I]; + } + + bool Vectorized = vectorizeStoreChain(Operands, costThreshold); + + // Mark the vectorized stores so that we don't vectorize them again. + if (Vectorized) + VectorizedStores.insert(Operands.begin(), Operands.end()); + Changed |= Vectorized; + } + + return Changed; +} + +int BoUpSLP::getScalarizationCost(ArrayRef<Value *> VL) { + // Find the type of the operands in VL. 
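vectorizeStores above first records, for each store, the store that lies exactly one element after it in memory, and then follows those links from every chain head to build maximal chains. A standalone sketch of that chain building, using integer element offsets in place of the SCEV-based distance check (names are made up):

#include <cstdio>
#include <map>
#include <set>
#include <vector>

// Link every store to the store exactly one element after it, then follow
// the links from each chain head to collect maximal chains of consecutive
// stores.
std::vector<std::vector<int>> buildChains(const std::vector<int> &Offsets) {
  std::map<int, int> Next; // the ConsecutiveChain analog
  std::set<int> Heads, Tails;
  for (std::size_t I = 0; I < Offsets.size(); ++I)
    for (std::size_t J = 0; J < Offsets.size(); ++J) {
      if (I == J)
        continue;
      if (Offsets[J] == Offsets[I] + 1) { // the isConsecutiveAccess analog
        Heads.insert(Offsets[I]);
        Tails.insert(Offsets[J]);
        Next[Offsets[I]] = Offsets[J];
      }
    }

  std::vector<std::vector<int>> Chains;
  for (int H : Heads) {
    if (Tails.count(H))
      continue; // H continues some other chain; not a true head
    std::vector<int> Chain;
    int Cur = H;
    Chain.push_back(Cur);
    while (Next.count(Cur)) { // follow the consecutive links
      Cur = Next[Cur];
      Chain.push_back(Cur);
    }
    Chains.push_back(Chain);
  }
  return Chains;
}

int main() {
  // Stores at offsets 4,5,6 form one chain and 10,11 another; offset 20 has
  // no consecutive neighbour and is left alone.
  std::vector<int> Offsets = {5, 10, 4, 20, 6, 11};
  for (const std::vector<int> &C : buildChains(Offsets)) {
    for (int O : C)
      std::printf("%d ", O);
    std::printf("\n");
  }
  return 0;
}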
+ Type *ScalarTy = VL[0]->getType(); + if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) + ScalarTy = SI->getValueOperand()->getType(); + VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); + // Find the cost of inserting/extracting values from the vector. + return getScalarizationCost(VecTy); +} + +int BoUpSLP::getScalarizationCost(Type *Ty) { + int Cost = 0; + for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) + Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); + return Cost; +} + +AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) { + if (StoreInst *SI = dyn_cast<StoreInst>(I)) return AA->getLocation(SI); + if (LoadInst *LI = dyn_cast<LoadInst>(I)) return AA->getLocation(LI); + return AliasAnalysis::Location(); +} + +Value *BoUpSLP::isUnsafeToSink(Instruction *Src, Instruction *Dst) { + assert(Src->getParent() == Dst->getParent() && "Not the same BB"); + BasicBlock::iterator I = Src, E = Dst; + /// Scan all of the instruction from SRC to DST and check if + /// the source may alias. + for (++I; I != E; ++I) { + // Ignore store instructions that are marked as 'ignore'. + if (MemBarrierIgnoreList.count(I)) continue; + if (Src->mayWriteToMemory()) /* Write */ { + if (!I->mayReadOrWriteMemory()) continue; + } else /* Read */ { + if (!I->mayWriteToMemory()) continue; + } + AliasAnalysis::Location A = getLocation(&*I); + AliasAnalysis::Location B = getLocation(Src); + + if (!A.Ptr || !B.Ptr || AA->alias(A, B)) + return I; + } + return 0; +} + +void BoUpSLP::vectorizeArith(ArrayRef<Value *> Operands) { + Value *Vec = vectorizeTree(Operands, Operands.size()); + BasicBlock::iterator Loc = cast<Instruction>(Vec); + IRBuilder<> Builder(++Loc); + // After vectorizing the operands we need to generate extractelement + // instructions and replace all of the uses of the scalar values with + // the values that we extracted from the vectorized tree. + for (unsigned i = 0, e = Operands.size(); i != e; ++i) { + Value *S = Builder.CreateExtractElement(Vec, Builder.getInt32(i)); + Operands[i]->replaceAllUsesWith(S); + } +} + +int BoUpSLP::getTreeCost(ArrayRef<Value *> VL) { + // Get rid of the list of stores that were removed, and from the + // lists of instructions with multiple users. + MemBarrierIgnoreList.clear(); + LaneMap.clear(); + MultiUserVals.clear(); + MustScalarize.clear(); + + // Scan the tree and find which value is used by which lane, and which values + // must be scalarized. + getTreeUses_rec(VL, 0); + + // Check that instructions with multiple users can be vectorized. Mark unsafe + // instructions. + for (ValueSet::iterator it = MultiUserVals.begin(), + e = MultiUserVals.end(); it != e; ++it) { + // Check that all of the users of this instr are within the tree + // and that they are all from the same lane. + int Lane = -1; + for (Value::use_iterator I = (*it)->use_begin(), E = (*it)->use_end(); + I != E; ++I) { + if (LaneMap.find(*I) == LaneMap.end()) { + MustScalarize.insert(*it); + DEBUG(dbgs()<<"SLP: Adding " << **it << + " to MustScalarize because of an out of tree usage.\n"); + break; + } + if (Lane == -1) Lane = LaneMap[*I]; + if (Lane != LaneMap[*I]) { + MustScalarize.insert(*it); + DEBUG(dbgs()<<"Adding " << **it << + " to MustScalarize because multiple lane use it: " + << Lane << " and " << LaneMap[*I] << ".\n"); + break; + } + } + } + + // Now calculate the cost of vectorizing the tree. 
+  return getTreeCost_rec(VL, 0);
+}
+
+void BoUpSLP::getTreeUses_rec(ArrayRef<Value *> VL, unsigned Depth) {
+  if (Depth == RecursionMaxDepth) return;
+
+  // Don't handle vectors.
+  if (VL[0]->getType()->isVectorTy()) return;
+  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
+    if (SI->getValueOperand()->getType()->isVectorTy()) return;
+
+  // Check if all of the operands are constants.
+  bool AllConst = true;
+  bool AllSameScalar = true;
+  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+    AllConst &= isa<Constant>(VL[i]);
+    AllSameScalar &= (VL[0] == VL[i]);
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    // If one of the instructions is out of this BB, we need to scalarize all.
+    if (I && I->getParent() != BB) return;
+  }
+
+  // If all of the operands are identical or constant we have a simple solution.
+  if (AllConst || AllSameScalar) return;
+
+  // Scalarize unknown structures.
+  Instruction *VL0 = dyn_cast<Instruction>(VL[0]);
+  if (!VL0) return;
+
+  unsigned Opcode = VL0->getOpcode();
+  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    // If not all of the instructions are identical then we have to scalarize.
+    if (!I || Opcode != I->getOpcode()) return;
+  }
+
+  // Mark instructions with multiple users.
+  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    // Remember to check if all of the users of this instruction are vectorized
+    // within our tree.
+    if (I && I->getNumUses() > 1) MultiUserVals.insert(I);
+  }
+
+  for (int i = 0, e = VL.size(); i < e; ++i) {
+    // Check that the instruction is only used within
+    // one lane.
+    if (LaneMap.count(VL[i]) && LaneMap[VL[i]] != i) return;
+    // Mark this instruction as 'seen' and remember the lane.
+    LaneMap[VL[i]] = i;
+  }
+
+  switch (Opcode) {
+  case Instruction::ZExt:
+  case Instruction::SExt:
+  case Instruction::FPToUI:
+  case Instruction::FPToSI:
+  case Instruction::FPExt:
+  case Instruction::PtrToInt:
+  case Instruction::IntToPtr:
+  case Instruction::SIToFP:
+  case Instruction::UIToFP:
+  case Instruction::Trunc:
+  case Instruction::FPTrunc:
+  case Instruction::BitCast:
+  case Instruction::Add:
+  case Instruction::FAdd:
+  case Instruction::Sub:
+  case Instruction::FSub:
+  case Instruction::Mul:
+  case Instruction::FMul:
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::FDiv:
+  case Instruction::URem:
+  case Instruction::SRem:
+  case Instruction::FRem:
+  case Instruction::Shl:
+  case Instruction::LShr:
+  case Instruction::AShr:
+  case Instruction::And:
+  case Instruction::Or:
+  case Instruction::Xor: {
+    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
+      ValueList Operands;
+      // Prepare the operand vector.
+      for (unsigned j = 0; j < VL.size(); ++j)
+        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+
+      getTreeUses_rec(Operands, Depth+1);
+    }
+    return;
+  }
+  case Instruction::Store: {
+    ValueList Operands;
+    for (unsigned j = 0; j < VL.size(); ++j)
+      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));
+    getTreeUses_rec(Operands, Depth+1);
+    return;
+  }
+  default:
+    return;
+  }
+}
+
+int BoUpSLP::getTreeCost_rec(ArrayRef<Value *> VL, unsigned Depth) {
+  Type *ScalarTy = VL[0]->getType();
+
+  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
+    ScalarTy = SI->getValueOperand()->getType();
+
+  // Don't mess with vectors.
+  if (ScalarTy->isVectorTy()) return max_cost;
+  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
+
+  if (Depth == RecursionMaxDepth) return getScalarizationCost(VecTy);
+
+  // Check if all of the operands are constants.
+  bool AllConst = true;
+  bool AllSameScalar = true;
+  bool MustScalarizeFlag = false;
+  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+    AllConst &= isa<Constant>(VL[i]);
+    AllSameScalar &= (VL[0] == VL[i]);
+    // Must have a single use.
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    MustScalarizeFlag |= MustScalarize.count(VL[i]);
+    // This instruction is outside the basic block.
+    if (I && I->getParent() != BB)
+      return getScalarizationCost(VecTy);
+  }
+
+  // Is this a simple vector constant?
+  if (AllConst) return 0;
+
+  // If all of the operands are identical we can broadcast them.
+  Instruction *VL0 = dyn_cast<Instruction>(VL[0]);
+  if (AllSameScalar) {
+    // If we are in a loop, and this is not an instruction (e.g. constant or
+    // argument) or the instruction is defined outside the loop then assume
+    // that the cost is zero.
+    if (L && (!VL0 || !L->contains(VL0)))
+      return 0;
+
+    // We need to broadcast the scalar.
+    return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
+  }
+
+  // If one of the values was marked for scalarization (used by multiple lanes
+  // or by users outside the tree) then we need to scalarize the whole group.
+  if (MustScalarizeFlag)
+    return getScalarizationCost(VecTy);
+
+  if (!VL0) return getScalarizationCost(VecTy);
+  assert(VL0->getParent() == BB && "Wrong BB");
+
+  unsigned Opcode = VL0->getOpcode();
+  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    // If not all of the instructions are identical then we have to scalarize.
+    if (!I || Opcode != I->getOpcode()) return getScalarizationCost(VecTy);
+  }
+
+  // Check if it is safe to sink the loads or the stores.
+  if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
+    int MaxIdx = InstrIdx[VL0];
+    for (unsigned i = 1, e = VL.size(); i < e; ++i)
+      MaxIdx = std::max(MaxIdx, InstrIdx[VL[i]]);
+
+    Instruction *Last = InstrVec[MaxIdx];
+    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+      if (VL[i] == Last) continue;
+      Value *Barrier = isUnsafeToSink(cast<Instruction>(VL[i]), Last);
+      if (Barrier) {
+        DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " <<
+              *Last << "\n because of " << *Barrier << "\n");
+        return max_cost;
+      }
+    }
+  }
+
+  switch (Opcode) {
+  case Instruction::ZExt:
+  case Instruction::SExt:
+  case Instruction::FPToUI:
+  case Instruction::FPToSI:
+  case Instruction::FPExt:
+  case Instruction::PtrToInt:
+  case Instruction::IntToPtr:
+  case Instruction::SIToFP:
+  case Instruction::UIToFP:
+  case Instruction::Trunc:
+  case Instruction::FPTrunc:
+  case Instruction::BitCast: {
+    int Cost = 0;
+    ValueList Operands;
+    Type *SrcTy = VL0->getOperand(0)->getType();
+    // Prepare the operand vector.
+    for (unsigned j = 0; j < VL.size(); ++j) {
+      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));
+      // Check that the casted type is the same for all users.
+      if (cast<Instruction>(VL[j])->getOperand(0)->getType() != SrcTy)
+        return getScalarizationCost(VecTy);
+    }
+
+    Cost += getTreeCost_rec(Operands, Depth+1);
+    if (Cost >= max_cost) return max_cost;
+
+    // Calculate the cost of this instruction.
+    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
+                                                       VL0->getType(), SrcTy);
+
+    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
+    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
+    Cost += (VecCost - ScalarCost);
+    return Cost;
+  }
+  case Instruction::Add:
+  case Instruction::FAdd:
+  case Instruction::Sub:
+  case Instruction::FSub:
+  case Instruction::Mul:
+  case Instruction::FMul:
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::FDiv:
+  case Instruction::URem:
+  case Instruction::SRem:
+  case Instruction::FRem:
+  case Instruction::Shl:
+  case Instruction::LShr:
+  case Instruction::AShr:
+  case Instruction::And:
+  case Instruction::Or:
+  case Instruction::Xor: {
+    int Cost = 0;
+    // Calculate the cost of all of the operands.
+    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
+      ValueList Operands;
+      // Prepare the operand vector.
+      for (unsigned j = 0; j < VL.size(); ++j)
+        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+
+      Cost += getTreeCost_rec(Operands, Depth+1);
+      if (Cost >= max_cost) return max_cost;
+    }
+
+    // Calculate the cost of this instruction.
+    int ScalarCost = VecTy->getNumElements() *
+      TTI->getArithmeticInstrCost(Opcode, ScalarTy);
+
+    int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy);
+    Cost += (VecCost - ScalarCost);
+    return Cost;
+  }
+  case Instruction::Load: {
+    // If the loads are not consecutive then we have to scalarize them and pay
+    // the cost of gathering the loaded values into a vector.
+    for (unsigned i = 0, e = VL.size()-1; i < e; ++i)
+      if (!isConsecutiveAccess(VL[i], VL[i+1]))
+        return getScalarizationCost(VecTy);
+
+    // Cost of wide load - cost of scalar loads.
+    int ScalarLdCost = VecTy->getNumElements() *
+      TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
+    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
+    return VecLdCost - ScalarLdCost;
+  }
+  case Instruction::Store: {
+    // We know that we can merge the stores. Calculate the cost.
+    int ScalarStCost = VecTy->getNumElements() *
+      TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
+    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
+    int StoreCost = VecStCost - ScalarStCost;
+
+    ValueList Operands;
+    for (unsigned j = 0; j < VL.size(); ++j) {
+      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));
+      MemBarrierIgnoreList.insert(VL[j]);
+    }
+
+    int TotalCost = StoreCost + getTreeCost_rec(Operands, Depth + 1);
+    return TotalCost;
+  }
+  default:
+    // Unable to vectorize unknown instructions.
+    return getScalarizationCost(VecTy);
+  }
+}
+
+Instruction *BoUpSLP::GetLastInstr(ArrayRef<Value *> VL, unsigned VF) {
+  int MaxIdx = InstrIdx[BB->getFirstNonPHI()];
+  for (unsigned i = 0; i < VF; ++i)
+    MaxIdx = std::max(MaxIdx, InstrIdx[VL[i]]);
+  return InstrVec[MaxIdx + 1];
+}
+
+Value *BoUpSLP::Scalarize(ArrayRef<Value *> VL, VectorType *Ty) {
+  IRBuilder<> Builder(GetLastInstr(VL, Ty->getNumElements()));
+  Value *Vec = UndefValue::get(Ty);
+  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
+    // Generate the 'InsertElement' instruction.
+    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
+    // Remember that this instruction is used as part of a 'gather' sequence.
+    // The caller of the bottom-up SLP vectorizer can try to hoist the sequence
+    // if the users are outside of the basic block.
+    GatherInstructions.push_back(Vec);
+  }
+
+  return Vec;
+}
+
+Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, int VF) {
+  Value *V = vectorizeTree_rec(VL, VF);
+  // We moved some instructions around. We have to number them again
+  // before we can do any analysis.
+  numberInstructions();
+  MustScalarize.clear();
+  return V;
+}
+
+Value *BoUpSLP::vectorizeTree_rec(ArrayRef<Value *> VL, int VF) {
+  Type *ScalarTy = VL[0]->getType();
+  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
+    ScalarTy = SI->getValueOperand()->getType();
+  VectorType *VecTy = VectorType::get(ScalarTy, VF);
+
+  // Check if all of the operands are constants or identical.
+  bool AllConst = true;
+  bool AllSameScalar = true;
+  for (unsigned i = 0, e = VF; i < e; ++i) {
+    AllConst &= isa<Constant>(VL[i]);
+    AllSameScalar &= (VL[0] == VL[i]);
+    // The instruction must be in the same BB, and it must be vectorizable.
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    if (MustScalarize.count(VL[i]) || (I && I->getParent() != BB))
+      return Scalarize(VL, VecTy);
+  }
+
+  // Check that this is a simple vector constant.
+  if (AllConst || AllSameScalar) return Scalarize(VL, VecTy);
+
+  // Scalarize unknown structures.
+  Instruction *VL0 = dyn_cast<Instruction>(VL[0]);
+  if (!VL0) return Scalarize(VL, VecTy);
+
+  if (VectorizedValues.count(VL0)) return VectorizedValues[VL0];
+
+  unsigned Opcode = VL0->getOpcode();
+  for (unsigned i = 0, e = VF; i < e; ++i) {
+    Instruction *I = dyn_cast<Instruction>(VL[i]);
+    // If not all of the instructions are identical then we have to scalarize.
+    if (!I || Opcode != I->getOpcode()) return Scalarize(VL, VecTy);
+  }
+
+  switch (Opcode) {
+  case Instruction::ZExt:
+  case Instruction::SExt:
+  case Instruction::FPToUI:
+  case Instruction::FPToSI:
+  case Instruction::FPExt:
+  case Instruction::PtrToInt:
+  case Instruction::IntToPtr:
+  case Instruction::SIToFP:
+  case Instruction::UIToFP:
+  case Instruction::Trunc:
+  case Instruction::FPTrunc:
+  case Instruction::BitCast: {
+    ValueList INVL;
+    for (int i = 0; i < VF; ++i)
+      INVL.push_back(cast<Instruction>(VL[i])->getOperand(0));
+    Value *InVec = vectorizeTree_rec(INVL, VF);
+    IRBuilder<> Builder(GetLastInstr(VL, VF));
+    CastInst *CI = dyn_cast<CastInst>(VL0);
+    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
+    VectorizedValues[VL0] = V;
+    return V;
+  }
+  case Instruction::Add:
+  case Instruction::FAdd:
+  case Instruction::Sub:
+  case Instruction::FSub:
+  case Instruction::Mul:
+  case Instruction::FMul:
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::FDiv:
+  case Instruction::URem:
+  case Instruction::SRem:
+  case Instruction::FRem:
+  case Instruction::Shl:
+  case Instruction::LShr:
+  case Instruction::AShr:
+  case Instruction::And:
+  case Instruction::Or:
+  case Instruction::Xor: {
+    ValueList LHSVL, RHSVL;
+    for (int i = 0; i < VF; ++i) {
+      RHSVL.push_back(cast<Instruction>(VL[i])->getOperand(0));
+      LHSVL.push_back(cast<Instruction>(VL[i])->getOperand(1));
+    }
+
+    Value *RHS = vectorizeTree_rec(RHSVL, VF);
+    Value *LHS = vectorizeTree_rec(LHSVL, VF);
+    IRBuilder<> Builder(GetLastInstr(VL, VF));
+    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
+    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), RHS, LHS);
+    VectorizedValues[VL0] = V;
+    return V;
+  }
+  case Instruction::Load: {
+    LoadInst *LI = cast<LoadInst>(VL0);
+    unsigned Alignment = LI->getAlignment();
+
+    // Check if all of the loads are consecutive.
+    for (unsigned i = 1, e = VF; i < e; ++i)
+      if (!isConsecutiveAccess(VL[i-1], VL[i]))
+        return Scalarize(VL, VecTy);
+
+    IRBuilder<> Builder(GetLastInstr(VL, VF));
+    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
+                                          VecTy->getPointerTo());
+    LI = Builder.CreateLoad(VecPtr);
+    LI->setAlignment(Alignment);
+    VectorizedValues[VL0] = LI;
+    return LI;
+  }
+  case Instruction::Store: {
+    StoreInst *SI = cast<StoreInst>(VL0);
+    unsigned Alignment = SI->getAlignment();
+
+    ValueList ValueOp;
+    for (int i = 0; i < VF; ++i)
+      ValueOp.push_back(cast<StoreInst>(VL[i])->getValueOperand());
+
+    Value *VecValue = vectorizeTree_rec(ValueOp, VF);
+
+    IRBuilder<> Builder(GetLastInstr(VL, VF));
+    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
+                                          VecTy->getPointerTo());
+    Builder.CreateStore(VecValue, VecPtr)->setAlignment(Alignment);
+
+    for (int i = 0; i < VF; ++i)
+      cast<Instruction>(VL[i])->eraseFromParent();
+    return 0;
+  }
+  default:
+    Value *S = Scalarize(VL, VecTy);
+    VectorizedValues[VL0] = S;
+    return S;
+  }
+}
+
+} // end of namespace
diff --git a/lib/Transforms/Vectorize/VecUtils.h b/lib/Transforms/Vectorize/VecUtils.h
new file mode 100644
index 0000000..5456c6c
--- /dev/null
+++ b/lib/Transforms/Vectorize/VecUtils.h
@@ -0,0 +1,164 @@
+//===- VecUtils.h - Vectorization Utilities -------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of classes and functions manipulate vectors and chains of
+// vectors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_VECUTILS_H
+#define LLVM_TRANSFORMS_VECTORIZE_VECUTILS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock; class Instruction; class Type;
+class VectorType; class StoreInst; class Value;
+class ScalarEvolution; class DataLayout;
+class TargetTransformInfo; class AliasAnalysis;
+class Loop;
+
+/// Bottom Up SLP vectorization utility class.
+struct BoUpSLP {
+  typedef SmallVector<Value*, 8> ValueList;
+  typedef SmallPtrSet<Value*, 16> ValueSet;
+  typedef SmallVector<StoreInst*, 8> StoreList;
+  static const int max_cost = 1<<20;
+
+  // \brief C'tor.
+  BoUpSLP(BasicBlock *Bb, ScalarEvolution *Se, DataLayout *Dl,
+          TargetTransformInfo *Tti, AliasAnalysis *Aa, Loop *Lp);
+
+  /// \brief Take the pointer operand from the Load/Store instruction.
+  /// \returns NULL if this is not a valid Load/Store instruction.
+  static Value *getPointerOperand(Value *I);
+
+  /// \brief Take the address space operand from the Load/Store instruction.
+  /// \returns -1 if this is not a valid Load/Store instruction.
+  static unsigned getAddressSpaceOperand(Value *I);
+
+  /// \returns true if the memory operations A and B are consecutive.
+  bool isConsecutiveAccess(Value *A, Value *B);
+
+  /// \brief Vectorize the tree that starts with the elements in \p VL.
+  /// \returns the vectorized value.
+  Value *vectorizeTree(ArrayRef<Value *> VL, int VF);
+
+  /// \returns the vectorization cost of the subtree that starts at \p VL.
+  /// A negative number means that this is profitable.
+  int getTreeCost(ArrayRef<Value *> VL);
+
+  /// \returns the scalarization cost for this list of values. Assuming that
+  /// this subtree gets vectorized, we may need to extract the values from the
+  /// roots. This method calculates the cost of extracting the values.
+  int getScalarizationCost(ArrayRef<Value *> VL);
+
+  /// \brief Attempts to order and vectorize a sequence of stores. This
+  /// function does a quadratic scan of the given stores.
+  /// \returns true if the basic block was modified.
+  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold);
+
+  /// \brief Vectorize a group of scalars into a vector tree.
+  void vectorizeArith(ArrayRef<Value *> Operands);
+
+  /// \returns the list of new instructions that were added in order to collect
+  /// scalars into vectors. This list can be used to further optimize the gather
+  /// sequences.
+  ValueList &getGatherSeqInstructions() { return GatherInstructions; }
+
+private:
+  /// \brief This method contains the recursive part of getTreeCost.
+  int getTreeCost_rec(ArrayRef<Value *> VL, unsigned Depth);
+
+  /// \brief This recursive method looks for vectorization hazards such as
+  /// values that are used by multiple users and checks that values are used
+  /// by only one vector lane. It updates the variables LaneMap, MultiUserVals.
+  void getTreeUses_rec(ArrayRef<Value *> VL, unsigned Depth);
+
+  /// \brief This method contains the recursive part of vectorizeTree.
+  Value *vectorizeTree_rec(ArrayRef<Value *> VL, int VF);
+
+  /// \brief Number all of the instructions in the block.
+  void numberInstructions();
+
+  /// \brief Vectorize a sorted sequence of stores.
+  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold);
+
+  /// \returns the scalarization cost for this type. Scalarization in this
+  /// context means the creation of vectors from a group of scalars.
+  int getScalarizationCost(Type *Ty);
+
+  /// \returns the AA location that is being accessed by the instruction.
+  AliasAnalysis::Location getLocation(Instruction *I);
+
+  /// \brief Checks if it is possible to sink an instruction from
+  /// \p Src to \p Dst.
+  /// \returns the pointer to the barrier instruction if we can't sink.
+  Value *isUnsafeToSink(Instruction *Src, Instruction *Dst);
+
+  /// \returns the instruction that appears last in the BB from \p VL.
+  /// Only consider the first \p VF elements.
+  Instruction *GetLastInstr(ArrayRef<Value *> VL, unsigned VF);
+
+  /// \returns a vector from a collection of scalars in \p VL.
+  Value *Scalarize(ArrayRef<Value *> VL, VectorType *Ty);
+
+private:
+  /// Maps instructions to numbers and back.
+  SmallDenseMap<Value*, int> InstrIdx;
+  /// Maps integers to Instructions.
+  std::vector<Instruction*> InstrVec;
+
+  // -- Containers that are used during getTreeCost -- //
+
+  /// Contains values that must be scalarized because they are used
+  /// by multiple lanes, or by users outside the tree.
+  /// NOTICE: The vectorization methods also use this set.
+  ValueSet MustScalarize;
+
+  /// Contains values that have more than one user. We check that all of their
+  /// users are inside the tree and in the same lane. This set must be reset
+  /// between runs.
+  ValueSet MultiUserVals;
+  /// Maps values in the tree to the vector lanes that use them. This map must
+  /// be reset between runs of getCost.
+  std::map<Value*, int> LaneMap;
+  /// A list of instructions to ignore while sinking
+  /// memory instructions. This set must be reset between runs of getCost.
+  SmallPtrSet<Value *, 8> MemBarrierIgnoreList;
+
+  // -- Containers that are used during vectorizeTree -- //
+
+  /// Maps the first scalar of each vectorized group to the vector value. This
+  /// map must be reset between runs.
+  DenseMap<Value*, Value*> VectorizedValues;
+
+  // -- Containers that are used after vectorization by the caller -- //
+
+  /// A list of instructions that are used when gathering scalars into vectors.
+  /// In many cases these instructions can be hoisted outside of the BB.
+  /// Iterating over this list is faster than calling LICM.
+  ValueList GatherInstructions;
+
+  // Analysis and block reference.
+  BasicBlock *BB;
+  ScalarEvolution *SE;
+  DataLayout *DL;
+  TargetTransformInfo *TTI;
+  AliasAnalysis *AA;
+  Loop *L;
+};
+
+} // end of namespace
+
+#endif // LLVM_TRANSFORMS_VECTORIZE_VECUTILS_H
diff --git a/lib/Transforms/Vectorize/Vectorize.cpp b/lib/Transforms/Vectorize/Vectorize.cpp
index 19eefd2..a927fe1 100644
--- a/lib/Transforms/Vectorize/Vectorize.cpp
+++ b/lib/Transforms/Vectorize/Vectorize.cpp
@@ -1,4 +1,4 @@
- //===-- Vectorize.cpp -----------------------------------------------------===//
+//===-- Vectorize.cpp -----------------------------------------------------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -28,6 +28,7 @@ using namespace llvm;
 void llvm::initializeVectorization(PassRegistry &Registry) {
   initializeBBVectorizePass(Registry);
   initializeLoopVectorizePass(Registry);
+  initializeSLPVectorizerPass(Registry);
 }
 
 void LLVMInitializeVectorization(LLVMPassRegistryRef R) {
@@ -41,3 +42,7 @@ void LLVMAddBBVectorizePass(LLVMPassManagerRef PM) {
 void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createLoopVectorizePass());
 }
+
+void LLVMAddSLPVectorizePass(LLVMPassManagerRef PM) {
+  unwrap(PM)->add(createSLPVectorizerPass());
+}
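The last hunk exposes the new pass through the C API next to the existing LLVMAddBBVectorizePass and LLVMAddLoopVectorizePass hooks. The sketch below (not part of the patch) shows how a client might drive it; it assumes the LLVMAddSLPVectorizePass declaration is exported in llvm-c/Transforms/Vectorize.h alongside the other vectorizer bindings, since the include/ side of the change is outside this lib/-only diffstat, and the module name is arbitrary.

// Hypothetical client sketch: create an empty module and run the new SLP
// vectorizer through the C API entry point added above.
#include "llvm-c/Core.h"
#include "llvm-c/Transforms/Vectorize.h"  // assumed home of LLVMAddSLPVectorizePass

int main(void) {
  LLVMModuleRef M = LLVMModuleCreateWithName("slp_demo");  // name is arbitrary
  LLVMPassManagerRef PM = LLVMCreatePassManager();
  LLVMAddSLPVectorizePass(PM);   // entry point introduced by this patch
  LLVMRunPassManager(PM, M);     // returns true if the module was modified
  LLVMDisposePassManager(PM);
  LLVMDisposeModule(M);
  return 0;
}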
