From c6a4f5e819217e1e12c458aed8e7b122e23a3a58 Mon Sep 17 00:00:00 2001
From: Stephen Hines
Date: Mon, 21 Jul 2014 00:45:20 -0700
Subject: Update LLVM for rebase to r212749.

Includes a cherry-pick of:
r212948 - fixes a small issue with atomic calls

Change-Id: Ib97bd980b59f18142a69506400911a6009d9df18
---
 lib/Analysis/AliasAnalysis.cpp                |  40 ++-
 lib/Analysis/Analysis.cpp                     |   1 +
 lib/Analysis/Android.mk                       |   1 +
 lib/Analysis/BasicAliasAnalysis.cpp           | 220 ++++++++---------
 lib/Analysis/BlockFrequencyInfoImpl.cpp       | 337 ++------------------------
 lib/Analysis/CMakeLists.txt                   |   1 +
 lib/Analysis/ConstantFolding.cpp              |  43 +++-
 lib/Analysis/CostModel.cpp                    |  37 ++-
 lib/Analysis/IPA/CallGraphSCCPass.cpp         |   8 +-
 lib/Analysis/IPA/InlineCost.cpp               |  19 +-
 lib/Analysis/IVUsers.cpp                      |   5 +-
 lib/Analysis/InstructionSimplify.cpp          | 170 +++++--------
 lib/Analysis/JumpInstrTableInfo.cpp           |  40 +++
 lib/Analysis/LoopPass.cpp                     |   5 +-
 lib/Analysis/NoAliasAnalysis.cpp              |   8 +
 lib/Analysis/RegionPass.cpp                   |   8 +-
 lib/Analysis/ScalarEvolution.cpp              |  13 +-
 lib/Analysis/ScalarEvolutionExpander.cpp      |   3 +-
 lib/Analysis/ScalarEvolutionNormalization.cpp |   2 +-
 lib/Analysis/ValueTracking.cpp                |  23 +-
 20 files changed, 403 insertions(+), 581 deletions(-)
 create mode 100644 lib/Analysis/JumpInstrTableInfo.cpp

diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 57237e5..5cde979 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -60,6 +60,13 @@ bool AliasAnalysis::pointsToConstantMemory(const Location &Loc,
   return AA->pointsToConstantMemory(Loc, OrLocal);
 }
 
+AliasAnalysis::Location
+AliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+                              AliasAnalysis::ModRefResult &Mask) {
+  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
+  return AA->getArgLocation(CS, ArgIdx, Mask);
+}
+
 void AliasAnalysis::deleteValue(Value *V) {
   assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
   AA->deleteValue(V);
@@ -91,22 +98,26 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
 
   if (onlyAccessesArgPointees(MRB)) {
     bool doesAlias = false;
+    ModRefResult AllArgsMask = NoModRef;
     if (doesAccessArgPointees(MRB)) {
-      MDNode *CSTag = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
       for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
            AI != AE; ++AI) {
         const Value *Arg = *AI;
         if (!Arg->getType()->isPointerTy())
           continue;
-        Location CSLoc(Arg, UnknownSize, CSTag);
+        ModRefResult ArgMask;
+        Location CSLoc =
+          getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
+                         ArgMask);
         if (!isNoAlias(CSLoc, Loc)) {
           doesAlias = true;
-          break;
+          AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
         }
       }
     }
     if (!doesAlias)
       return NoModRef;
+    Mask = ModRefResult(Mask & AllArgsMask);
   }
 
   // If Loc is a constant memory location, the call definitely could not
@@ -150,14 +161,23 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
   if (onlyAccessesArgPointees(CS2B)) {
     AliasAnalysis::ModRefResult R = NoModRef;
     if (doesAccessArgPointees(CS2B)) {
-      MDNode *CS2Tag = CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
       for (ImmutableCallSite::arg_iterator
            I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
         const Value *Arg = *I;
         if (!Arg->getType()->isPointerTy())
           continue;
-        Location CS2Loc(Arg, UnknownSize, CS2Tag);
-        R = ModRefResult((R | getModRefInfo(CS1, CS2Loc)) & Mask);
+        ModRefResult ArgMask;
+        Location CS2Loc =
+          getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
+                         ArgMask);
+        // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
+        // CS1 on that location is the inverse.
+        if (ArgMask == Mod)
+          ArgMask = ModRef;
+        else if (ArgMask == Ref)
+          ArgMask = Mod;
+
+        R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
         if (R == Mask)
           break;
       }
@@ -170,14 +190,16 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
   if (onlyAccessesArgPointees(CS1B)) {
     AliasAnalysis::ModRefResult R = NoModRef;
     if (doesAccessArgPointees(CS1B)) {
-      MDNode *CS1Tag = CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
       for (ImmutableCallSite::arg_iterator
            I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
         const Value *Arg = *I;
         if (!Arg->getType()->isPointerTy())
           continue;
-        Location CS1Loc(Arg, UnknownSize, CS1Tag);
-        if (getModRefInfo(CS2, CS1Loc) != NoModRef) {
+        ModRefResult ArgMask;
+        Location CS1Loc =
+          getArgLocation(CS1, (unsigned) std::distance(CS1.arg_begin(), I),
+                         ArgMask);
+        if ((getModRefInfo(CS2, CS1Loc) & ArgMask) != NoModRef) {
           R = Mask;
           break;
         }
diff --git a/lib/Analysis/Analysis.cpp b/lib/Analysis/Analysis.cpp
index 01c1c7e..ade940a 100644
--- a/lib/Analysis/Analysis.cpp
+++ b/lib/Analysis/Analysis.cpp
@@ -48,6 +48,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
   initializeIVUsersPass(Registry);
   initializeInstCountPass(Registry);
   initializeIntervalPartitionPass(Registry);
+  initializeJumpInstrTableInfoPass(Registry);
   initializeLazyValueInfoPass(Registry);
   initializeLibCallAliasAnalysisPass(Registry);
   initializeLintPass(Registry);
diff --git a/lib/Analysis/Android.mk b/lib/Analysis/Android.mk
index bca673e..4e435a1 100644
--- a/lib/Analysis/Android.mk
+++ b/lib/Analysis/Android.mk
@@ -27,6 +27,7 @@ analysis_SRC_FILES := \
   InstructionSimplify.cpp \
   Interval.cpp \
   IntervalPartition.cpp \
+  JumpInstrTableInfo.cpp \
   LazyCallGraph.cpp \
   LazyValueInfo.cpp \
   LibCallAliasAnalysis.cpp \
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index fe90b84..c50dd4a 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -490,6 +490,10 @@ namespace {
     /// global) or not.
     bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
 
+    /// Get the location associated with a pointer argument of a callsite.
+    Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+                            ModRefResult &Mask) override;
+
     /// getModRefBehavior - Return the behavior when calling the given
     /// call site.
     ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
@@ -653,6 +657,21 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
   return Worklist.empty();
 }
 
+static bool isMemsetPattern16(const Function *MS,
+                              const TargetLibraryInfo &TLI) {
+  if (TLI.has(LibFunc::memset_pattern16) &&
+      MS->getName() == "memset_pattern16") {
+    FunctionType *MemsetType = MS->getFunctionType();
+    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
+        isa<PointerType>(MemsetType->getParamType(0)) &&
+        isa<PointerType>(MemsetType->getParamType(1)) &&
+        isa<IntegerType>(MemsetType->getParamType(2)))
+      return true;
+  }
+
+  return false;
+}
+
 /// getModRefBehavior - Return the behavior when calling the given call site.
 AliasAnalysis::ModRefBehavior
 BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
@@ -692,10 +711,93 @@ BasicAliasAnalysis::getModRefBehavior(const Function *F) {
   if (F->onlyReadsMemory())
     Min = OnlyReadsMemory;
 
+  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
+  if (isMemsetPattern16(F, TLI))
+    Min = OnlyAccessesArgumentPointees;
+
   // Otherwise be conservative.
   return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
 }
 
+AliasAnalysis::Location
+BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+                                   ModRefResult &Mask) {
+  Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
+  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
+  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+  if (II != nullptr)
+    switch (II->getIntrinsicID()) {
+    default: break;
+    case Intrinsic::memset:
+    case Intrinsic::memcpy:
+    case Intrinsic::memmove: {
+      assert((ArgIdx == 0 || ArgIdx == 1) &&
+             "Invalid argument index for memory intrinsic");
+      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+        Loc.Size = LenCI->getZExtValue();
+      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+             "Memory intrinsic location pointer not argument?");
+      Mask = ArgIdx ? Ref : Mod;
+      break;
+    }
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+    case Intrinsic::invariant_start: {
+      assert(ArgIdx == 1 && "Invalid argument index");
+      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+             "Intrinsic location pointer not argument?");
+      Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+      break;
+    }
+    case Intrinsic::invariant_end: {
+      assert(ArgIdx == 2 && "Invalid argument index");
+      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+             "Intrinsic location pointer not argument?");
+      Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+      break;
+    }
+    case Intrinsic::arm_neon_vld1: {
+      assert(ArgIdx == 0 && "Invalid argument index");
+      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+             "Intrinsic location pointer not argument?");
+      // LLVM's vld1 and vst1 intrinsics currently only support a single
+      // vector register.
+      if (DL)
+        Loc.Size = DL->getTypeStoreSize(II->getType());
+      break;
+    }
+    case Intrinsic::arm_neon_vst1: {
+      assert(ArgIdx == 0 && "Invalid argument index");
+      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+             "Intrinsic location pointer not argument?");
+      if (DL)
+        Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
+      break;
+    }
+    }
+
+  // We can bound the aliasing properties of memset_pattern16 just as we can
+  // for memcpy/memset.  This is particularly important because the
+  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
+  // whenever possible.
+  else if (CS.getCalledFunction() &&
+           isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+    assert((ArgIdx == 0 || ArgIdx == 1) &&
+           "Invalid argument index for memset_pattern16");
+    if (ArgIdx == 1)
+      Loc.Size = 16;
+    else if (const ConstantInt *LenCI =
+             dyn_cast<ConstantInt>(CS.getArgument(2)))
+      Loc.Size = LenCI->getZExtValue();
+    assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
+           "memset_pattern16 location pointer not argument?");
+    Mask = ArgIdx ? Ref : Mod;
+  }
+  // FIXME: Handle memset_pattern4 and memset_pattern8 also.
+
+  return Loc;
+}
+
 /// getModRefInfo - Check to see if the specified callsite can clobber the
 /// specified memory object.  Since we only look at local properties of this
 /// function, we really can't say much about this query.  We do, however, use
@@ -748,124 +850,8 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
       return NoModRef;
   }
 
-  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
-  ModRefResult Min = ModRef;
-
-  // Finally, handle specific knowledge of intrinsics.
-  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
-  if (II != nullptr)
-    switch (II->getIntrinsicID()) {
-    default: break;
-    case Intrinsic::memcpy:
-    case Intrinsic::memmove: {
-      uint64_t Len = UnknownSize;
-      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
-        Len = LenCI->getZExtValue();
-      Value *Dest = II->getArgOperand(0);
-      Value *Src = II->getArgOperand(1);
-      // If it can't overlap the source dest, then it doesn't modref the loc.
-      if (isNoAlias(Location(Dest, Len), Loc)) {
-        if (isNoAlias(Location(Src, Len), Loc))
-          return NoModRef;
-        // If it can't overlap the dest, then worst case it reads the loc.
-        Min = Ref;
-      } else if (isNoAlias(Location(Src, Len), Loc)) {
-        // If it can't overlap the source, then worst case it mutates the loc.
-        Min = Mod;
-      }
-      break;
-    }
-    case Intrinsic::memset:
-      // Since memset is 'accesses arguments' only, the AliasAnalysis base class
-      // will handle it for the variable length case.
-      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
-        uint64_t Len = LenCI->getZExtValue();
-        Value *Dest = II->getArgOperand(0);
-        if (isNoAlias(Location(Dest, Len), Loc))
-          return NoModRef;
-      }
-      // We know that memset doesn't load anything.
-      Min = Mod;
-      break;
-    case Intrinsic::lifetime_start:
-    case Intrinsic::lifetime_end:
-    case Intrinsic::invariant_start: {
-      uint64_t PtrSize =
-        cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
-      if (isNoAlias(Location(II->getArgOperand(1),
-                             PtrSize,
-                             II->getMetadata(LLVMContext::MD_tbaa)),
-                    Loc))
-        return NoModRef;
-      break;
-    }
-    case Intrinsic::invariant_end: {
-      uint64_t PtrSize =
-        cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
-      if (isNoAlias(Location(II->getArgOperand(2),
-                             PtrSize,
-                             II->getMetadata(LLVMContext::MD_tbaa)),
-                    Loc))
-        return NoModRef;
-      break;
-    }
-    case Intrinsic::arm_neon_vld1: {
-      // LLVM's vld1 and vst1 intrinsics currently only support a single
-      // vector register.
-      uint64_t Size =
-        DL ? DL->getTypeStoreSize(II->getType()) : UnknownSize;
-      if (isNoAlias(Location(II->getArgOperand(0), Size,
-                             II->getMetadata(LLVMContext::MD_tbaa)),
-                    Loc))
-        return NoModRef;
-      break;
-    }
-    case Intrinsic::arm_neon_vst1: {
-      uint64_t Size =
-        DL ? DL->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
-      if (isNoAlias(Location(II->getArgOperand(0), Size,
-                             II->getMetadata(LLVMContext::MD_tbaa)),
-                    Loc))
-        return NoModRef;
-      break;
-    }
-    }
-
-  // We can bound the aliasing properties of memset_pattern16 just as we can
-  // for memcpy/memset.  This is particularly important because the
-  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
-  // whenever possible.
-  else if (TLI.has(LibFunc::memset_pattern16) &&
-           CS.getCalledFunction() &&
-           CS.getCalledFunction()->getName() == "memset_pattern16") {
-    const Function *MS = CS.getCalledFunction();
-    FunctionType *MemsetType = MS->getFunctionType();
-    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
-        isa<PointerType>(MemsetType->getParamType(0)) &&
-        isa<PointerType>(MemsetType->getParamType(1)) &&
-        isa<IntegerType>(MemsetType->getParamType(2))) {
-      uint64_t Len = UnknownSize;
-      if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
-        Len = LenCI->getZExtValue();
-      const Value *Dest = CS.getArgument(0);
-      const Value *Src = CS.getArgument(1);
-      // If it can't overlap the source dest, then it doesn't modref the loc.
-      if (isNoAlias(Location(Dest, Len), Loc)) {
-        // Always reads 16 bytes of the source.
-        if (isNoAlias(Location(Src, 16), Loc))
-          return NoModRef;
-        // If it can't overlap the dest, then worst case it reads the loc.
-        Min = Ref;
-        // Always reads 16 bytes of the source.
-      } else if (isNoAlias(Location(Src, 16), Loc)) {
-        // If it can't overlap the source, then worst case it mutates the loc.
-        Min = Mod;
-      }
-    }
-  }
-
   // The AliasAnalysis base class has some smarts, lets use them.
-  return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
+  return AliasAnalysis::getModRefInfo(CS, Loc);
 }
 
 /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
diff --git a/lib/Analysis/BlockFrequencyInfoImpl.cpp b/lib/Analysis/BlockFrequencyInfoImpl.cpp
index 87d93a4..4fd2c11 100644
--- a/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -12,7 +12,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
-#include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/SCCIterator.h"
 #include "llvm/Support/raw_ostream.h"
 #include <deque>
@@ -24,298 +23,13 @@ using namespace llvm::bfi_detail;
 
 //===----------------------------------------------------------------------===//
 //
-// UnsignedFloat implementation.
-//
-//===----------------------------------------------------------------------===//
-#ifndef _MSC_VER
-const int32_t UnsignedFloatBase::MaxExponent;
-const int32_t UnsignedFloatBase::MinExponent;
-#endif
-
-static void appendDigit(std::string &Str, unsigned D) {
-  assert(D < 10);
-  Str += '0' + D % 10;
-}
-
-static void appendNumber(std::string &Str, uint64_t N) {
-  while (N) {
-    appendDigit(Str, N % 10);
-    N /= 10;
-  }
-}
-
-static bool doesRoundUp(char Digit) {
-  switch (Digit) {
-  case '5':
-  case '6':
-  case '7':
-  case '8':
-  case '9':
-    return true;
-  default:
-    return false;
-  }
-}
-
-static std::string toStringAPFloat(uint64_t D, int E, unsigned Precision) {
-  assert(E >= UnsignedFloatBase::MinExponent);
-  assert(E <= UnsignedFloatBase::MaxExponent);
-
-  // Find a new E, but don't let it increase past MaxExponent.
-  int LeadingZeros = UnsignedFloatBase::countLeadingZeros64(D);
-  int NewE = std::min(UnsignedFloatBase::MaxExponent, E + 63 - LeadingZeros);
-  int Shift = 63 - (NewE - E);
-  assert(Shift <= LeadingZeros);
-  assert(Shift == LeadingZeros || NewE == UnsignedFloatBase::MaxExponent);
-  D <<= Shift;
-  E = NewE;
-
-  // Check for a denormal.
-  unsigned AdjustedE = E + 16383;
-  if (!(D >> 63)) {
-    assert(E == UnsignedFloatBase::MaxExponent);
-    AdjustedE = 0;
-  }
-
-  // Build the float and print it.
-  uint64_t RawBits[2] = {D, AdjustedE};
-  APFloat Float(APFloat::x87DoubleExtended, APInt(80, RawBits));
-  SmallVector<char, 24> Chars;
-  Float.toString(Chars, Precision, 0);
-  return std::string(Chars.begin(), Chars.end());
-}
-
-static std::string stripTrailingZeros(const std::string &Float) {
-  size_t NonZero = Float.find_last_not_of('0');
-  assert(NonZero != std::string::npos && "no . in floating point string");
-
-  if (Float[NonZero] == '.')
-    ++NonZero;
-
-  return Float.substr(0, NonZero + 1);
-}
-
-std::string UnsignedFloatBase::toString(uint64_t D, int16_t E, int Width,
-                                        unsigned Precision) {
-  if (!D)
-    return "0.0";
-
-  // Canonicalize exponent and digits.
-  uint64_t Above0 = 0;
-  uint64_t Below0 = 0;
-  uint64_t Extra = 0;
-  int ExtraShift = 0;
-  if (E == 0) {
-    Above0 = D;
-  } else if (E > 0) {
-    if (int Shift = std::min(int16_t(countLeadingZeros64(D)), E)) {
-      D <<= Shift;
-      E -= Shift;
-
-      if (!E)
-        Above0 = D;
-    }
-  } else if (E > -64) {
-    Above0 = D >> -E;
-    Below0 = D << (64 + E);
-  } else if (E > -120) {
-    Below0 = D >> (-E - 64);
-    Extra = D << (128 + E);
-    ExtraShift = -64 - E;
-  }
-
-  // Fall back on APFloat for very small and very large numbers.
-  if (!Above0 && !Below0)
-    return toStringAPFloat(D, E, Precision);
-
-  // Append the digits before the decimal.
-  std::string Str;
-  size_t DigitsOut = 0;
-  if (Above0) {
-    appendNumber(Str, Above0);
-    DigitsOut = Str.size();
-  } else
-    appendDigit(Str, 0);
-  std::reverse(Str.begin(), Str.end());
-
-  // Return early if there's nothing after the decimal.
-  if (!Below0)
-    return Str + ".0";
-
-  // Append the decimal and beyond.
-  Str += '.';
-  uint64_t Error = UINT64_C(1) << (64 - Width);
-
-  // We need to shift Below0 to the right to make space for calculating
-  // digits.  Save the precision we're losing in Extra.
-  Extra = (Below0 & 0xf) << 56 | (Extra >> 8);
-  Below0 >>= 4;
-  size_t SinceDot = 0;
-  size_t AfterDot = Str.size();
-  do {
-    if (ExtraShift) {
-      --ExtraShift;
-      Error *= 5;
-    } else
-      Error *= 10;
-
-    Below0 *= 10;
-    Extra *= 10;
-    Below0 += (Extra >> 60);
-    Extra = Extra & (UINT64_MAX >> 4);
-    appendDigit(Str, Below0 >> 60);
-    Below0 = Below0 & (UINT64_MAX >> 4);
-    if (DigitsOut || Str.back() != '0')
-      ++DigitsOut;
-    ++SinceDot;
-  } while (Error && (Below0 << 4 | Extra >> 60) >= Error / 2 &&
-           (!Precision || DigitsOut <= Precision || SinceDot < 2));
-
-  // Return early for maximum precision.
-  if (!Precision || DigitsOut <= Precision)
-    return stripTrailingZeros(Str);
-
-  // Find where to truncate.
-  size_t Truncate =
-      std::max(Str.size() - (DigitsOut - Precision), AfterDot + 1);
-
-  // Check if there's anything to truncate.
-  if (Truncate >= Str.size())
-    return stripTrailingZeros(Str);
-
-  bool Carry = doesRoundUp(Str[Truncate]);
-  if (!Carry)
-    return stripTrailingZeros(Str.substr(0, Truncate));
-
-  // Round with the first truncated digit.
-  for (std::string::reverse_iterator I(Str.begin() + Truncate), E = Str.rend();
-       I != E; ++I) {
-    if (*I == '.')
-      continue;
-    if (*I == '9') {
-      *I = '0';
-      continue;
-    }
-
-    ++*I;
-    Carry = false;
-    break;
-  }
-
-  // Add "1" in front if we still need to carry.
-  return stripTrailingZeros(std::string(Carry, '1') + Str.substr(0, Truncate));
-}
-
-raw_ostream &UnsignedFloatBase::print(raw_ostream &OS, uint64_t D, int16_t E,
-                                      int Width, unsigned Precision) {
-  return OS << toString(D, E, Width, Precision);
-}
-
-void UnsignedFloatBase::dump(uint64_t D, int16_t E, int Width) {
-  print(dbgs(), D, E, Width, 0) << "[" << Width << ":" << D << "*2^" << E
-                                << "]";
-}
-
-static std::pair<uint64_t, int16_t>
-getRoundedFloat(uint64_t N, bool ShouldRound, int64_t Shift) {
-  if (ShouldRound)
-    if (!++N)
-      // Rounding caused an overflow.
-      return std::make_pair(UINT64_C(1), Shift + 64);
-  return std::make_pair(N, Shift);
-}
-
-std::pair<uint64_t, int16_t> UnsignedFloatBase::divide64(uint64_t Dividend,
-                                                         uint64_t Divisor) {
-  // Input should be sanitized.
-  assert(Divisor);
-  assert(Dividend);
-
-  // Minimize size of divisor.
-  int16_t Shift = 0;
-  if (int Zeros = countTrailingZeros(Divisor)) {
-    Shift -= Zeros;
-    Divisor >>= Zeros;
-  }
-
-  // Check for powers of two.
-  if (Divisor == 1)
-    return std::make_pair(Dividend, Shift);
-
-  // Maximize size of dividend.
-  if (int Zeros = countLeadingZeros64(Dividend)) {
-    Shift -= Zeros;
-    Dividend <<= Zeros;
-  }
-
-  // Start with the result of a divide.
-  uint64_t Quotient = Dividend / Divisor;
-  Dividend %= Divisor;
-
-  // Continue building the quotient with long division.
-  //
-  // TODO: continue with largers digits.
-  while (!(Quotient >> 63) && Dividend) {
-    // Shift Dividend, and check for overflow.
-    bool IsOverflow = Dividend >> 63;
-    Dividend <<= 1;
-    --Shift;
-
-    // Divide.
-    bool DoesDivide = IsOverflow || Divisor <= Dividend;
-    Quotient = (Quotient << 1) | uint64_t(DoesDivide);
-    Dividend -= DoesDivide ? Divisor : 0;
-  }
-
-  // Round.
-  if (Dividend >= getHalf(Divisor))
-    if (!++Quotient)
-      // Rounding caused an overflow in Quotient.
-      return std::make_pair(UINT64_C(1), Shift + 64);
-
-  return getRoundedFloat(Quotient, Dividend >= getHalf(Divisor), Shift);
-}
-
-std::pair<uint64_t, int16_t> UnsignedFloatBase::multiply64(uint64_t L,
-                                                           uint64_t R) {
-  // Separate into two 32-bit digits (U.L).
-  uint64_t UL = L >> 32, LL = L & UINT32_MAX, UR = R >> 32, LR = R & UINT32_MAX;
-
-  // Compute cross products.
-  uint64_t P1 = UL * UR, P2 = UL * LR, P3 = LL * UR, P4 = LL * LR;
-
-  // Sum into two 64-bit digits.
-  uint64_t Upper = P1, Lower = P4;
-  auto addWithCarry = [&](uint64_t N) {
-    uint64_t NewLower = Lower + (N << 32);
-    Upper += (N >> 32) + (NewLower < Lower);
-    Lower = NewLower;
-  };
-  addWithCarry(P2);
-  addWithCarry(P3);
-
-  // Check whether the upper digit is empty.
-  if (!Upper)
-    return std::make_pair(Lower, 0);
-
-  // Shift as little as possible to maximize precision.
-  unsigned LeadingZeros = countLeadingZeros64(Upper);
-  int16_t Shift = 64 - LeadingZeros;
-  if (LeadingZeros)
-    Upper = Upper << LeadingZeros | Lower >> Shift;
-  bool ShouldRound = Shift && (Lower & UINT64_C(1) << (Shift - 1));
-  return getRoundedFloat(Upper, ShouldRound, Shift);
-}
-
-//===----------------------------------------------------------------------===//
-//
 // BlockMass implementation.
 //
 //===----------------------------------------------------------------------===//
-UnsignedFloat BlockMass::toFloat() const {
+ScaledNumber<uint64_t> BlockMass::toScaled() const {
   if (isFull())
-    return UnsignedFloat(1, 0);
-  return UnsignedFloat(getMass() + 1, -64);
+    return ScaledNumber<uint64_t>(1, 0);
+  return ScaledNumber<uint64_t>(getMass() + 1, -64);
 }
 
 void BlockMass::dump() const { print(dbgs()); }
@@ -342,7 +56,7 @@ namespace {
 typedef BlockFrequencyInfoImplBase::BlockNode BlockNode;
 typedef BlockFrequencyInfoImplBase::Distribution Distribution;
 typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList;
-typedef BlockFrequencyInfoImplBase::Float Float;
+typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64;
 typedef BlockFrequencyInfoImplBase::LoopData LoopData;
 typedef BlockFrequencyInfoImplBase::Weight Weight;
 typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData;
@@ -622,7 +336,7 @@ bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
 ///
 /// Gives the maximum number of estimated iterations allowed for a loop.  Very
 /// large numbers cause problems downstream (even within 64-bits).
-static Float getMaxLoopScale() { return Float(1, 12); }
+static Scaled64 getMaxLoopScale() { return Scaled64(1, 12); }
 
 /// \brief Compute the loop scale for a loop.
 void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
@@ -634,7 +348,7 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
   BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;
 
   // Block scale stores the inverse of the scale.
-  Loop.Scale = ExitMass.toFloat().inverse();
+  Loop.Scale = ExitMass.toScaled().inverse();
 
   DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
                << " - " << Loop.BackedgeMass << ")\n"
@@ -708,15 +422,16 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
 }
 
 static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
-                                     const Float &Min, const Float &Max) {
+                                     const Scaled64 &Min, const Scaled64 &Max) {
   // Scale the Factor to a size that creates integers.  Ideally, integers would
   // be scaled so that Max == UINT64_MAX so that they can be best
   // differentiated.  However, the register allocator currently deals poorly
   // with large numbers.  Instead, push Min up a little from 1 to give some
   // room to differentiate small, unequal numbers.
   //
-  // TODO: fix issues downstream so that ScalingFactor can be Float(1,64)/Max.
-  Float ScalingFactor = Min.inverse();
+  // TODO: fix issues downstream so that ScalingFactor can be
+  // Scaled64(1,64)/Max.
+  Scaled64 ScalingFactor = Min.inverse();
   if ((Max / Min).lg() < 60)
     ScalingFactor <<= 3;
 
   DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
                << ", factor = " << ScalingFactor << "\n");
   for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
-    Float Scaled = BFI.Freqs[Index].Floating * ScalingFactor;
+    Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
     BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt());
     DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
-                 << BFI.Freqs[Index].Floating << ", scaled = " << Scaled
+                 << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
                  << ", int = " << BFI.Freqs[Index].Integer << "\n");
   }
 }
@@ -740,7 +455,7 @@ static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
   DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
                << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
                << "\n");
-  Loop.Scale *= Loop.Mass.toFloat();
+  Loop.Scale *= Loop.Mass.toScaled();
   Loop.IsPackaged = false;
   DEBUG(dbgs() << "  => combined-scale = " << Loop.Scale << "\n");
@@ -749,9 +464,9 @@ static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
   // final head scale will be used for updated the rest of the members.
   for (const BlockNode &N : Loop.Nodes) {
     const auto &Working = BFI.Working[N.Index];
-    Float &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
-                                    : BFI.Freqs[N.Index].Floating;
-    Float New = Loop.Scale * F;
+    Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
+                                       : BFI.Freqs[N.Index].Scaled;
+    Scaled64 New = Loop.Scale * F;
     DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
                  << "\n");
     F = New;
@@ -761,7 +476,7 @@ void BlockFrequencyInfoImplBase::unwrapLoops() {
   // Set initial frequencies from loop-local masses.
   for (size_t Index = 0; Index < Working.size(); ++Index)
-    Freqs[Index].Floating = Working[Index].Mass.toFloat();
+    Freqs[Index].Scaled = Working[Index].Mass.toScaled();
 
   for (LoopData &Loop : Loops)
     unwrapLoop(*this, Loop);
@@ -770,12 +485,12 @@ void BlockFrequencyInfoImplBase::finalizeMetrics() {
   // Unwrap loop packages in reverse post-order, tracking min and max
   // frequencies.
-  auto Min = Float::getLargest();
-  auto Max = Float::getZero();
+  auto Min = Scaled64::getLargest();
+  auto Max = Scaled64::getZero();
   for (size_t Index = 0; Index < Working.size(); ++Index) {
     // Update min/max scale.
-    Min = std::min(Min, Freqs[Index].Floating);
-    Max = std::max(Max, Freqs[Index].Floating);
+    Min = std::min(Min, Freqs[Index].Scaled);
+    Max = std::max(Max, Freqs[Index].Scaled);
   }
 
   // Convert to integers.
@@ -794,11 +509,11 @@ BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
     return 0;
   return Freqs[Node.Index].Integer;
 }
-Float
+Scaled64
 BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
   if (!Node.isValid())
-    return Float::getZero();
-  return Freqs[Node.Index].Floating;
+    return Scaled64::getZero();
+  return Freqs[Node.Index].Scaled;
 }
 
 std::string
@@ -819,8 +534,8 @@ BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
 raw_ostream &
 BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                            const BlockFrequency &Freq) const {
-  Float Block(Freq.getFrequency(), 0);
-  Float Entry(getEntryFreq(), 0);
+  Scaled64 Block(Freq.getFrequency(), 0);
+  Scaled64 Entry(getEntryFreq(), 0);
 
   return OS << Block / Entry;
 }
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index b546789..d1632fd 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -25,6 +25,7 @@ add_llvm_library(LLVMAnalysis
   InstructionSimplify.cpp
   Interval.cpp
   IntervalPartition.cpp
+  JumpInstrTableInfo.cpp
   LazyCallGraph.cpp
   LazyValueInfo.cpp
   LibCallAliasAnalysis.cpp
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 0ac1cb5..eb3e2c6 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -21,6 +21,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Config/config.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -31,11 +32,15 @@
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FEnv.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include <cerrno>
 #include <cmath>
+
+#ifdef HAVE_FENV_H
+#include <fenv.h>
+#endif
+
 using namespace llvm;
 
 //===----------------------------------------------------------------------===//
@@ -706,7 +711,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
 static Constant* StripPtrCastKeepAS(Constant* Ptr) {
   assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
   PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
-  Ptr = cast<Constant>(Ptr->stripPointerCasts());
+  Ptr = Ptr->stripPointerCasts();
   PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());
 
   // Preserve the address space number of the pointer.
@@ -1314,12 +1319,34 @@ static Constant *GetConstantFoldFPValue(double V, Type *Ty) {
 }
 
+namespace {
+/// llvm_fenv_clearexcept - Clear the floating-point exception state.
+static inline void llvm_fenv_clearexcept() {
+#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
+  feclearexcept(FE_ALL_EXCEPT);
+#endif
+  errno = 0;
+}
+
+/// llvm_fenv_testexcept - Test if a floating-point exception was raised.
+static inline bool llvm_fenv_testexcept() {
+  int errno_val = errno;
+  if (errno_val == ERANGE || errno_val == EDOM)
+    return true;
+#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
+  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
+    return true;
+#endif
+  return false;
+}
+} // End namespace
+
 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                 Type *Ty) {
-  sys::llvm_fenv_clearexcept();
+  llvm_fenv_clearexcept();
   V = NativeFP(V);
-  if (sys::llvm_fenv_testexcept()) {
-    sys::llvm_fenv_clearexcept();
+  if (llvm_fenv_testexcept()) {
+    llvm_fenv_clearexcept();
     return nullptr;
   }
 
@@ -1328,10 +1355,10 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
 
 static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                       double V, double W, Type *Ty) {
-  sys::llvm_fenv_clearexcept();
+  llvm_fenv_clearexcept();
   V = NativeFP(V, W);
-  if (sys::llvm_fenv_testexcept()) {
-    sys::llvm_fenv_clearexcept();
+  if (llvm_fenv_testexcept()) {
+    llvm_fenv_clearexcept();
     return nullptr;
   }
 
diff --git a/lib/Analysis/CostModel.cpp b/lib/Analysis/CostModel.cpp
index 780b1aa..1b74f8c 100644
--- a/lib/Analysis/CostModel.cpp
+++ b/lib/Analysis/CostModel.cpp
@@ -95,6 +95,31 @@ static bool isReverseVectorMask(SmallVectorImpl<int> &Mask) {
   return true;
 }
 
+static bool isAlternateVectorMask(SmallVectorImpl<int> &Mask) {
+  bool isAlternate = true;
+  unsigned MaskSize = Mask.size();
+
+  // Example: shufflevector A, B, <0,5,2,7>
+  for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
+    if (Mask[i] < 0)
+      continue;
+    isAlternate = Mask[i] == (int)((i & 1) ? MaskSize + i : i);
+  }
+
+  if (isAlternate)
+    return true;
+
+  isAlternate = true;
+  // Example: shufflevector A, B, <4,1,6,3>
+  for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
+    if (Mask[i] < 0)
+      continue;
+    isAlternate = Mask[i] == (int)((i & 1) ? i : MaskSize + i);
+  }
+
+  return isAlternate;
+}
+
 static TargetTransformInfo::OperandValueKind getOperandInfo(Value *V) {
   TargetTransformInfo::OperandValueKind OpInfo =
     TargetTransformInfo::OK_AnyValue;
@@ -466,9 +491,15 @@ unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
     unsigned NumVecElems = VecTypOp0->getVectorNumElements();
     SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
 
-    if (NumVecElems == Mask.size() && isReverseVectorMask(Mask))
-      return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0, 0,
-                                 nullptr);
+    if (NumVecElems == Mask.size()) {
+      if (isReverseVectorMask(Mask))
+        return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0,
+                                   0, nullptr);
+      if (isAlternateVectorMask(Mask))
+        return TTI->getShuffleCost(TargetTransformInfo::SK_Alternate,
+                                   VecTypOp0, 0, nullptr);
+    }
+
     return -1;
   }
   case Instruction::Call:
diff --git a/lib/Analysis/IPA/CallGraphSCCPass.cpp b/lib/Analysis/IPA/CallGraphSCCPass.cpp
index bfab744..c27edbf 100644
--- a/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -602,8 +602,12 @@ namespace {
     bool runOnSCC(CallGraphSCC &SCC) override {
       Out << Banner;
-      for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
-        (*I)->getFunction()->print(Out);
+      for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+        if ((*I)->getFunction())
+          (*I)->getFunction()->print(Out);
+        else
+          Out << "\nPrinting <null> Function\n";
+      }
       return false;
     }
   };
diff --git a/lib/Analysis/IPA/InlineCost.cpp b/lib/Analysis/IPA/InlineCost.cpp
index 66f3f8e..8807529 100644
--- a/lib/Analysis/IPA/InlineCost.cpp
+++ b/lib/Analysis/IPA/InlineCost.cpp
@@ -841,10 +841,7 @@ bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
   // original function which is extremely undefined behavior.
   // FIXME: This logic isn't really right; we can safely inline functions with
   // indirectbr's as long as no other function or global references the
-  // blockaddress of a block within the current function.  And as a QOI issue,
-  // if someone is using a blockaddress without an indirectbr, and that
-  // reference somehow ends up in another function or global, we probably don't
-  // want to inline this function.
+  // blockaddress of a block within the current function.
   HasIndirectBr = true;
   return false;
 }
@@ -1121,6 +1118,15 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
     if (BB->empty())
       continue;
 
+    // Disallow inlining a blockaddress. A blockaddress only has defined
+    // behavior for an indirect branch in the same function, and we do not
+    // currently support inlining indirect branches. But, the inliner may not
+    // see an indirect branch that ends up being dead code at a particular call
+    // site. If the blockaddress escapes the function, e.g., via a global
+    // variable, inlining may lead to an invalid cross-function reference.
+    if (BB->hasAddressTaken())
+      return false;
+
     // Analyze the cost of this block. If we blow through the threshold, this
     // returns false, and we can bail on out.
     if (!analyzeBlock(BB)) {
@@ -1303,8 +1309,9 @@ bool InlineCostAnalysis::isInlineViable(Function &F) {
     F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                    Attribute::ReturnsTwice);
   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
-    // Disallow inlining of functions which contain an indirect branch.
-    if (isa<IndirectBrInst>(BI->getTerminator()))
+    // Disallow inlining of functions which contain indirect branches or
+    // blockaddresses.
+    if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
       return false;
 
     for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index c819bd3..24655aa 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -287,7 +287,10 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
       OS << ")";
     }
     OS << " in ";
-    UI->getUser()->print(OS);
+    if (UI->getUser())
+      UI->getUser()->print(OS);
+    else
+      OS << "Printing <null> User";
     OS << '\n';
   }
 }
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 3684fda..bd42af1 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -39,7 +39,6 @@ using namespace llvm::PatternMatch;
 enum { RecursionLimit = 3 };
 
 STATISTIC(NumExpand,  "Number of expansions");
-STATISTIC(NumFactor , "Number of factorizations");
 STATISTIC(NumReassoc, "Number of reassociations");
 
 struct Query {
@@ -183,78 +182,6 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
   return nullptr;
 }
 
-/// FactorizeBinOp - Simplify "LHS Opcode RHS" by factorizing out a common term
-/// using the operation OpCodeToExtract.  For example, when Opcode is Add and
-/// OpCodeToExtract is Mul then this tries to turn "(A*B)+(A*C)" into "A*(B+C)".
-/// Returns the simplified value, or null if no simplification was performed.
-static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
-                             unsigned OpcToExtract, const Query &Q,
-                             unsigned MaxRecurse) {
-  Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
-  // Recursion is always used, so bail out at once if we already hit the limit.
-  if (!MaxRecurse--)
-    return nullptr;
-
-  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
-  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
-
-  if (!Op0 || Op0->getOpcode() != OpcodeToExtract ||
-      !Op1 || Op1->getOpcode() != OpcodeToExtract)
-    return nullptr;
-
-  // The expression has the form "(A op' B) op (C op' D)".
-  Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
-  Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
-
-  // Use left distributivity, i.e. "X op' (Y op Z) = (X op' Y) op (X op' Z)".
-  // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
-  // commutative case, "(A op' B) op (C op' A)"?
-  if (A == C || (Instruction::isCommutative(OpcodeToExtract) && A == D)) {
-    Value *DD = A == C ? D : C;
-    // Form "A op' (B op DD)" if it simplifies completely.
-    // Does "B op DD" simplify?
-    if (Value *V = SimplifyBinOp(Opcode, B, DD, Q, MaxRecurse)) {
-      // It does!  Return "A op' V" if it simplifies or is already available.
-      // If V equals B then "A op' V" is just the LHS.  If V equals DD then
-      // "A op' V" is just the RHS.
-      if (V == B || V == DD) {
-        ++NumFactor;
-        return V == B ? LHS : RHS;
-      }
-      // Otherwise return "A op' V" if it simplifies.
-      if (Value *W = SimplifyBinOp(OpcodeToExtract, A, V, Q, MaxRecurse)) {
-        ++NumFactor;
-        return W;
-      }
-    }
-  }
-
-  // Use right distributivity, i.e. "(X op Y) op' Z = (X op' Z) op (Y op' Z)".
-  // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
-  // commutative case, "(A op' B) op (B op' D)"?
-  if (B == D || (Instruction::isCommutative(OpcodeToExtract) && B == C)) {
-    Value *CC = B == D ? C : D;
-    // Form "(A op CC) op' B" if it simplifies completely..
-    // Does "A op CC" simplify?
-    if (Value *V = SimplifyBinOp(Opcode, A, CC, Q, MaxRecurse)) {
-      // It does!  Return "V op' B" if it simplifies or is already available.
-      // If V equals A then "V op' B" is just the LHS.  If V equals CC then
-      // "V op' B" is just the RHS.
-      if (V == A || V == CC) {
-        ++NumFactor;
-        return V == A ? LHS : RHS;
-      }
-      // Otherwise return "V op' B" if it simplifies.
-      if (Value *W = SimplifyBinOp(OpcodeToExtract, V, B, Q, MaxRecurse)) {
-        ++NumFactor;
-        return W;
-      }
-    }
-  }
-
-  return nullptr;
-}
-
 /// SimplifyAssociativeBinOp - Generic simplifications for associative binary
 /// operations.  Returns the simpler value, or null if none was found.
 static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
@@ -634,11 +561,6 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              MaxRecurse))
     return V;
 
-  // Mul distributes over Add.  Try some generic simplifications based on this.
-  if (Value *V = FactorizeBinOp(Instruction::Add, Op0, Op1, Instruction::Mul,
-                                Q, MaxRecurse))
-    return V;
-
   // Threading Add over selects and phi nodes is pointless, so don't bother.
   // Threading over the select in "A + select(cond, B, C)" means evaluating
   // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
@@ -754,16 +676,9 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
   if (Op0 == Op1)
     return Constant::getNullValue(Op0->getType());
 
-  // (X*2) - X -> X
-  // (X<<1) - X -> X
-  Value *X = nullptr;
-  if (match(Op0, m_Mul(m_Specific(Op1), m_ConstantInt<2>())) ||
-      match(Op0, m_Shl(m_Specific(Op1), m_One())))
-    return Op1;
-
   // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
   // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
-  Value *Y = nullptr, *Z = Op1;
+  Value *X = nullptr, *Y = nullptr, *Z = Op1;
   if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
     // See if "V === Y - Z" simplifies.
     if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
@@ -835,11 +750,6 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
     if (Constant *Result = computePointerDifference(Q.DL, X, Y))
       return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
 
-  // Mul distributes over Sub.  Try some generic simplifications based on this.
-  if (Value *V = FactorizeBinOp(Instruction::Sub, Op0, Op1, Instruction::Mul,
-                                Q, MaxRecurse))
-    return V;
-
   // i1 sub -> xor.
   if (MaxRecurse && Op0->getType()->isIntegerTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
@@ -1518,11 +1428,6 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
                              Q, MaxRecurse))
     return V;
 
-  // Or distributes over And.  Try some generic simplifications based on this.
-  if (Value *V = FactorizeBinOp(Instruction::And, Op0, Op1, Instruction::Or,
-                                Q, MaxRecurse))
-    return V;
-
   // If the operation is with the result of a select instruction, check whether
   // operating on either branch of the select always yields the same value.
   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
@@ -1613,11 +1518,6 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
                              MaxRecurse))
     return V;
 
-  // And distributes over Or.  Try some generic simplifications based on this.
-  if (Value *V = FactorizeBinOp(Instruction::Or, Op0, Op1, Instruction::And,
-                                Q, MaxRecurse))
-    return V;
-
   // If the operation is with the result of a select instruction, check whether
   // operating on either branch of the select always yields the same value.
   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
@@ -1625,6 +1525,38 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
                                          MaxRecurse))
       return V;
 
+  // (A & C)|(B & D)
+  Value *C = nullptr, *D = nullptr;
+  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
+      match(Op1, m_And(m_Value(B), m_Value(D)))) {
+    ConstantInt *C1 = dyn_cast<ConstantInt>(C);
+    ConstantInt *C2 = dyn_cast<ConstantInt>(D);
+    if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
+      // (A & C1)|(B & C2)
+      // If we have: ((V + N) & C1) | (V & C2)
+      // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+      // replace with V+N.
+      Value *V1, *V2;
+      if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+
+          match(A, m_Add(m_Value(V1), m_Value(V2)))) {
+        // Add commutes, try both ways.
+        if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
+          return A;
+        if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
+          return A;
+      }
+      // Or commutes, try both ways.
+      if ((C1->getValue() & (C1->getValue() + 1)) == 0 &&
+          match(B, m_Add(m_Value(V1), m_Value(V2)))) {
+        // Add commutes, try both ways.
+        if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
+          return B;
+        if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
+          return B;
+      }
+    }
+  }
+
   // If the operation is with the result of a phi instruction, check whether
   // operating on all incoming values of the phi always yields the same value.
   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
@@ -1677,11 +1609,6 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
                              MaxRecurse))
     return V;
 
-  // And distributes over Xor.  Try some generic simplifications based on this.
-  if (Value *V = FactorizeBinOp(Instruction::Xor, Op0, Op1, Instruction::And,
-                                Q, MaxRecurse))
-    return V;
-
   // Threading Xor over selects and phi nodes is pointless, so don't bother.
   // Threading over the select in "A ^ select(cond, B, C)" means evaluating
   // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
@@ -2021,9 +1948,15 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
       if (!CI2->isZero())
         Upper = NegOne.udiv(CI2->getValue()) + 1;
     } else if (match(LHS, m_SDiv(m_ConstantInt(CI2), m_Value()))) {
-      // 'sdiv CI2, x' produces [-|CI2|, |CI2|].
-      Upper = CI2->getValue().abs() + 1;
-      Lower = (-Upper) + 1;
+      if (CI2->isMinSignedValue()) {
+        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
+        Lower = CI2->getValue();
+        Upper = Lower.lshr(1) + 1;
+      } else {
+        // 'sdiv CI2, x' produces [-|CI2|, |CI2|].
+        Upper = CI2->getValue().abs() + 1;
+        Lower = (-Upper) + 1;
+      }
     } else if (match(LHS, m_SDiv(m_Value(), m_ConstantInt(CI2)))) {
       // 'sdiv x, CI2' produces [INT_MIN / CI2, INT_MAX / CI2].
       APInt IntMin = APInt::getSignedMinValue(Width);
@@ -2241,6 +2174,25 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
     }
   }
 
+  // If a bit is known to be zero for A and known to be one for B,
+  // then A and B cannot be equal.
+  if (ICmpInst::isEquality(Pred)) {
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+      uint32_t BitWidth = CI->getBitWidth();
+      APInt LHSKnownZero(BitWidth, 0);
+      APInt LHSKnownOne(BitWidth, 0);
+      computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
+      APInt RHSKnownZero(BitWidth, 0);
+      APInt RHSKnownOne(BitWidth, 0);
+      computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
+      if (((LHSKnownOne & RHSKnownZero) != 0) ||
+          ((LHSKnownZero & RHSKnownOne) != 0))
+        return (Pred == ICmpInst::ICMP_EQ)
+                   ? ConstantInt::getFalse(CI->getContext())
+                   : ConstantInt::getTrue(CI->getContext());
+    }
+  }
+
   // Special logic for binary operators.
   BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
   BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
diff --git a/lib/Analysis/JumpInstrTableInfo.cpp b/lib/Analysis/JumpInstrTableInfo.cpp
new file mode 100644
index 0000000..b5b4265
--- /dev/null
+++ b/lib/Analysis/JumpInstrTableInfo.cpp
@@ -0,0 +1,40 @@
+//===-- JumpInstrTableInfo.cpp: Info for Jump-Instruction Tables ----------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Information about jump-instruction tables that have been created by
+/// JumpInstrTables pass.
+///
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jiti"
+
+#include "llvm/Analysis/JumpInstrTableInfo.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+
+using namespace llvm;
+
+INITIALIZE_PASS(JumpInstrTableInfo, "jump-instr-table-info",
+                "Jump-Instruction Table Info", true, true)
+char JumpInstrTableInfo::ID = 0;
+
+ImmutablePass *llvm::createJumpInstrTableInfoPass() {
+  return new JumpInstrTableInfo();
+}
+
+JumpInstrTableInfo::JumpInstrTableInfo() : ImmutablePass(ID), Tables() {
+  initializeJumpInstrTableInfoPass(*PassRegistry::getPassRegistry());
+}
+
+JumpInstrTableInfo::~JumpInstrTableInfo() {}
+
+void JumpInstrTableInfo::insertEntry(FunctionType *TableFunTy, Function *Target,
+                                     Function *Jump) {
+  Tables[TableFunTy].push_back(JumpPair(Target, Jump));
+}
diff --git a/lib/Analysis/LoopPass.cpp b/lib/Analysis/LoopPass.cpp
index 8df18e7..7bd866e 100644
--- a/lib/Analysis/LoopPass.cpp
+++ b/lib/Analysis/LoopPass.cpp
@@ -45,7 +45,10 @@ public:
     for (Loop::block_iterator b = L->block_begin(), be = L->block_end();
          b != be;
          ++b) {
-      (*b)->print(Out);
+      if (*b)
+        (*b)->print(Out);
+      else
+        Out << "Printing <null> block";
     }
     return false;
   }
diff --git a/lib/Analysis/NoAliasAnalysis.cpp b/lib/Analysis/NoAliasAnalysis.cpp
index 4e11e50..139fa38 100644
--- a/lib/Analysis/NoAliasAnalysis.cpp
+++ b/lib/Analysis/NoAliasAnalysis.cpp
@@ -15,6 +15,7 @@
 #include "llvm/Analysis/Passes.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/LLVMContext.h"
 #include "llvm/Pass.h"
 using namespace llvm;
 
@@ -53,6 +54,13 @@ namespace {
     bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
       return false;
     }
+    Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+                            ModRefResult &Mask) override {
+      Mask = ModRef;
+      return Location(CS.getArgument(ArgIdx), UnknownSize,
+                      CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa));
+    }
+
     ModRefResult getModRefInfo(ImmutableCallSite CS,
                                const Location &Loc) override {
       return ModRef;
diff --git a/lib/Analysis/RegionPass.cpp b/lib/Analysis/RegionPass.cpp
index 3c7798f..71de144 100644
--- a/lib/Analysis/RegionPass.cpp
+++ b/lib/Analysis/RegionPass.cpp
@@ -195,8 +195,12 @@ public:
 
   bool runOnRegion(Region *R, RGPassManager &RGM) override {
     Out << Banner;
-    for (const auto &BB : R->blocks())
-      BB->print(Out);
+    for (const auto &BB : R->blocks()) {
+      if (BB)
+        BB->print(Out);
+      else
+        Out << "Printing <null> Block";
+    }
 
     return false;
   }
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 42a7aa2..06dbde5 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -7216,6 +7216,15 @@ public:
         cast<SCEVConstant>(Zero)->getValue();
     Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap,
                                                true);
+    if (Remainder->isZero()) {
+      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
+      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
+          cast<SCEVConstant>(One)->getValue();
+      Quotient =
+          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
+      return;
+    }
+
     // Quotient is (Numerator - Remainder) divided by Denominator.
     const SCEV *Q, *R;
     const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
@@ -7356,7 +7365,7 @@ const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
   if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
     Ty = Store->getValueOperand()->getType();
   else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
-    Ty = Load->getPointerOperand()->getType();
+    Ty = Load->getType();
   else
     return nullptr;
 
@@ -7370,7 +7379,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                           SmallVectorImpl<const SCEV *> &Sizes,
                                           const SCEV *ElementSize) const {
-  if (Terms.size() < 1)
+  if (Terms.size() < 1 || !ElementSize)
     return;
 
   // Early return when Terms do not contain parameters: we do not delinearize
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index b507043..8c75b0d 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -16,6 +16,7 @@
 #include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/IR/DataLayout.h"
@@ -1706,7 +1707,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
 
     // Fold constant phis. They may be congruent to other constant phis and
     // would confuse the logic below that expects proper IVs.
-    if (Value *V = Phi->hasConstantValue()) {
+    if (Value *V = SimplifyInstruction(Phi, SE.DL, SE.TLI, SE.DT)) {
      Phi->replaceAllUsesWith(V);
       DeadInsts.push_back(Phi);
       ++NumElim;
diff --git a/lib/Analysis/ScalarEvolutionNormalization.cpp b/lib/Analysis/ScalarEvolutionNormalization.cpp
index e9db295..3ccefb0 100644
--- a/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -241,7 +241,7 @@ TransformSubExpr(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
 }
 
 /// Top level driver for transforming an expression DAG into its requested
-/// post-inc form (either "Normalized" or "Denormalized".
+/// post-inc form (either "Normalized" or "Denormalized").
 const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
                                          const SCEV *S,
                                          Instruction *User,
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 4f48753..5264745 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -188,7 +188,8 @@ static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
     KnownOne.setBit(BitWidth - 1);
 }
 
-void llvm::computeKnownBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
+void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
+                                             APInt &KnownZero) {
   unsigned BitWidth = KnownZero.getBitWidth();
   unsigned NumRanges = Ranges.getNumOperands() / 2;
   assert(NumRanges >= 1);
@@ -338,7 +339,7 @@ void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
   default: break;
   case Instruction::Load:
     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
-      computeKnownBitsLoad(*MD, KnownZero);
+      computeKnownBitsFromRangeMetadata(*MD, KnownZero);
     break;
   case Instruction::And: {
     // If either the LHS or the RHS are Zero, the result is zero.
@@ -733,6 +734,12 @@ void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
     break;
   }
   case Instruction::Call:
+  case Instruction::Invoke:
+    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
+      computeKnownBitsFromRangeMetadata(*MD, KnownZero);
+    // If a range metadata is attached to this IntrinsicInst, intersect the
+    // explicit range specified by the metadata and the implicit range of
+    // the intrinsic.
     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
       switch (II->getIntrinsicID()) {
       default: break;
@@ -742,16 +749,16 @@ void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
         // If this call is undefined for 0, the result will be less than 2^n.
         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
           LowBits -= 1;
-        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+        KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
         break;
       }
       case Intrinsic::ctpop: {
         unsigned LowBits = Log2_32(BitWidth)+1;
-        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+        KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
         break;
       }
       case Intrinsic::x86_sse42_crc32_64_64:
-        KnownZero = APInt::getHighBitsSet(64, 32);
+        KnownZero |= APInt::getHighBitsSet(64, 32);
         break;
       }
     }
@@ -1977,7 +1984,7 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
     return true;
   case Instruction::UDiv:
   case Instruction::URem:
-    // x / y is undefined if y == 0, but calcuations like x / 3 are safe.
+    // x / y is undefined if y == 0, but calculations like x / 3 are safe.
    return isKnownNonZero(Inst->getOperand(1), TD);
   case Instruction::SDiv:
   case Instruction::SRem: {
@@ -2000,12 +2007,12 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
         // Speculative load may create a race that did not exist in the source.
         LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
       return false;
-    return LI->getPointerOperand()->isDereferenceablePointer();
+    return LI->getPointerOperand()->isDereferenceablePointer(TD);
   }
   case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
-      // These synthetic intrinsics have no side-effects, and just mark
+      // These synthetic intrinsics have no side-effects and just mark
       // information about their operands.
       // FIXME: There are other no-op synthetic instructions that potentially
       // should be considered at least *safe* to speculate...
--
cgit v1.1