Diffstat (limited to 'lib/CodeGen/BasicTargetTransformInfo.cpp')
 lib/CodeGen/BasicTargetTransformInfo.cpp | 90 +++++++++++++++++++++++++++-----
 1 file changed, 80 insertions(+), 10 deletions(-)
diff --git a/lib/CodeGen/BasicTargetTransformInfo.cpp b/lib/CodeGen/BasicTargetTransformInfo.cpp
index c6654ec2..7f31b1a 100644
--- a/lib/CodeGen/BasicTargetTransformInfo.cpp
+++ b/lib/CodeGen/BasicTargetTransformInfo.cpp
@@ -15,13 +15,21 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "basictti"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>
using namespace llvm;
+static cl::opt<unsigned>
+PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
+ cl::desc("Threshold for partial unrolling"), cl::Hidden);
+
+#define DEBUG_TYPE "basictti"
+
namespace {
class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
@@ -34,7 +42,7 @@ class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }
public:
- BasicTTI() : ImmutablePass(ID), TM(0) {
+ BasicTTI() : ImmutablePass(ID), TM(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}
@@ -186,7 +194,61 @@ bool BasicTTI::haveFastSqrt(Type *Ty) const {
return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}
-void BasicTTI::getUnrollingPreferences(Loop *, UnrollingPreferences &) const { }
+void BasicTTI::getUnrollingPreferences(Loop *L,
+ UnrollingPreferences &UP) const {
+ // This unrolling functionality is target independent, but to provide some
+ // motivation for its intended use, for x86:
+
+ // According to the Intel 64 and IA-32 Architectures Optimization Reference
+ // Manual, Intel Core models and later have a loop stream detector
+ // (and associated uop queue) that can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have no more than 4 (8 for Nehalem and later) branches
+ // taken, and none of them may be calls.
+ // - The loop can have no more than 18 (28 for Nehalem and later) uops.
+
+ // According to the Software Optimization Guide for AMD Family 15h Processors,
+ // models 30h-4fh (Steamroller and later) have a loop predictor and loop
+ // buffer which can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have fewer than 16 branches
+ // - The loop must have fewer than 40 uops in all executed loop branches
+
+ // The number of taken branches in a loop is hard to estimate here, and
+ // benchmarking has revealed that it is better not to be conservative when
+ // estimating the branch count. As a result, we'll ignore the branch limits
+ // until someone finds a case where it matters in practice.
+
+ unsigned MaxOps;
+ const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
+ if (PartialUnrollingThreshold.getNumOccurrences() > 0)
+ MaxOps = PartialUnrollingThreshold;
+ else if (ST->getSchedModel()->LoopMicroOpBufferSize > 0)
+ MaxOps = ST->getSchedModel()->LoopMicroOpBufferSize;
+ else
+ return;
+
+ // Scan the loop: don't unroll loops with calls.
+ for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
+ I != E; ++I) {
+ BasicBlock *BB = *I;
+
+ for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
+ if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
+ ImmutableCallSite CS(J);
+ if (const Function *F = CS.getCalledFunction()) {
+ if (!TopTTI->isLoweredToCall(F))
+ continue;
+ }
+
+ return;
+ }
+ }
+
+ // Enable runtime and partial unrolling up to the specified size.
+ UP.Partial = UP.Runtime = true;
+ UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
+}
//===----------------------------------------------------------------------===//
//
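For reference, the new getUnrollingPreferences boils down to: pick a micro-op budget (the -partial-unrolling-threshold override if it was given, otherwise the scheduling model's LoopMicroOpBufferSize), give up if the loop contains anything that lowers to a real call, and otherwise enable partial and runtime unrolling capped at that budget. Below is a minimal stand-alone sketch of that decision, using hypothetical Sketch* stand-in types rather than the real LLVM classes.

// A minimal, hypothetical sketch of the decision logic in
// BasicTTI::getUnrollingPreferences; the Sketch* types are stand-ins, not
// LLVM classes.
#include <vector>

struct SketchInst { bool IsCall = false; bool LowersToCall = false; };
struct SketchBlock { std::vector<SketchInst> Insts; };
struct SketchLoop { std::vector<SketchBlock> Blocks; };

struct SketchUnrollPrefs {
  bool Partial = false;
  bool Runtime = false;
  unsigned PartialThreshold = 0;
};

// CmdLineThreshold models -partial-unrolling-threshold (0 means "not set");
// LoopMicroOpBufferSize models the value from the target's scheduling model.
void sketchUnrollingPreferences(const SketchLoop &L, unsigned CmdLineThreshold,
                                unsigned LoopMicroOpBufferSize,
                                SketchUnrollPrefs &UP) {
  unsigned MaxOps;
  if (CmdLineThreshold > 0)
    MaxOps = CmdLineThreshold;        // explicit override wins
  else if (LoopMicroOpBufferSize > 0)
    MaxOps = LoopMicroOpBufferSize;   // target advertises a loop buffer
  else
    return;                           // no information: leave UP untouched

  // Don't unroll loops containing anything that lowers to a real call.
  for (const SketchBlock &BB : L.Blocks)
    for (const SketchInst &I : BB.Insts)
      if (I.IsCall && I.LowersToCall)
        return;

  // Enable runtime and partial unrolling up to the micro-op budget.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = MaxOps;
}

With a scheduling model that advertises, say, a 28-entry loop buffer (the Nehalem-class LSD limit quoted in the comment above), unrolled loop bodies of up to 28 micro-ops would be allowed.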
@@ -424,12 +486,14 @@ unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
// This is a vector load that legalizes to a larger type than the vector
// itself. Unless the corresponding extending load or truncating store is
// legal, then this will scalarize.
- TargetLowering::LegalizeAction LA;
- MVT MemVT = getTLI()->getSimpleValueType(Src, true);
- if (Opcode == Instruction::Store)
- LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
- else
- LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, MemVT);
+ TargetLowering::LegalizeAction LA = TargetLowering::Expand;
+ EVT MemVT = getTLI()->getValueType(Src, true);
+ if (MemVT.isSimple() && MemVT != MVT::Other) {
+ if (Opcode == Instruction::Store)
+ LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
+ else
+ LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, MemVT.getSimpleVT());
+ }
if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
// This is a vector load/store for some illegal type that is scalarized.
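The guard added above only consults getTruncStoreAction/getLoadExtAction when the memory type is a simple MVT; anything else keeps the Expand default and is costed as a scalarized load or store. The sketch below is a rough, hypothetical model of that costing decision; the names and per-element costs are illustrative, not BasicTTI's real accounting.

// A rough, hypothetical model of the cost decision above.
enum class SketchAction { Legal, Custom, Expand };

// NumParts plays the role of LT.first (how many legal registers the vector
// splits into); NumElts is the element count used when the op scalarizes.
unsigned sketchVectorMemOpCost(bool MemVTIsSimple, SketchAction LA,
                               unsigned NumParts, unsigned NumElts,
                               unsigned ScalarMemCost,
                               unsigned ExtractInsertCost) {
  // If the memory type is a simple MVT and the matching extending load /
  // truncating store is Legal or Custom, charge one operation per part.
  if (MemVTIsSimple &&
      (LA == SketchAction::Legal || LA == SketchAction::Custom))
    return NumParts;

  // Otherwise the load/store is scalarized: one scalar memory op plus one
  // extract/insert per element.
  return NumElts * (ScalarMemCost + ExtractInsertCost);
}

In other words, an extending vector load the target can handle is charged per legalized register, while one it cannot handle pays for a full per-element expansion.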
@@ -484,7 +548,7 @@ unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
case Intrinsic::round: ISD = ISD::FROUND; break;
case Intrinsic::pow: ISD = ISD::FPOW; break;
case Intrinsic::fma: ISD = ISD::FMA; break;
- case Intrinsic::fmuladd: ISD = ISD::FMA; break; // FIXME: mul + add?
+ case Intrinsic::fmuladd: ISD = ISD::FMA; break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
return 0;
@@ -509,6 +573,12 @@ unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
return LT.first * 2;
}
+ // If we can't lower fmuladd into an FMA, estimate the cost as a
+ // floating-point multiply followed by an add.
+ if (IID == Intrinsic::fmuladd)
+ return TopTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
+ TopTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
+
// Else, assume that we need to scalarize this intrinsic. For math builtins
// this will emit a costly libcall, adding call overhead and spills. Make it
// very expensive.
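The fmuladd fallback added in this hunk charges a separate multiply and add when the target has no FMA lowering for the type. A tiny hypothetical illustration of that accounting, with parameters standing in for the target-specific costs queried through TopTTI:

// Hypothetical illustration of the fmuladd cost fallback above.
unsigned sketchFMulAddCost(bool HasFMALowering, unsigned FMACost,
                           unsigned FMulCost, unsigned FAddCost) {
  if (HasFMALowering)
    return FMACost;               // lowered to a single fused multiply-add
  return FMulCost + FAddCost;     // otherwise pay for an fmul plus an fadd
}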