author     Arnold Schwaighofer <aschwaighofer@apple.com>  2013-09-30 15:28:56 +0000
committer  Arnold Schwaighofer <aschwaighofer@apple.com>  2013-09-30 15:28:56 +0000
commit     d42730dc712026cbfb1322a979e0ac72cd31a19e (patch)
tree       191050f59933595fe9aeb6b77193c74686c694e3
parent     a5b9cd1c68eda914ffcf133228824ca58e1ba518 (diff)
IfConverter: Use TargetSchedule for instruction latencies
For targets that have instruction itineraries this means no change. Targets that move over to the new schedule model will be able to use the new schedule model for instruction latencies in the if-converter (the logic is such that if there is no itinerary we will use the new sched model for the latencies).

Before, we queried TII->getInstrLatency() for the instruction latency and the extra predication cost. Now, we query the TargetSchedule abstraction for the instruction latency and TargetInstrInfo for the extra predication cost. The TargetSchedule abstraction will internally call TII->getInstrLatency if an itinerary exists; otherwise it will use the new schedule model.

ATTENTION: Out-of-tree targets! (I will also send out an email later to LLVMDev)

This means, if your target implements

  unsigned getInstrLatency(const InstrItineraryData *ItinData,
                           const MachineInstr *MI,
                           unsigned *PredCost);

and returns a value for PredCost, you now also need to implement

  unsigned getPredicationCost(const MachineInstr *MI);

(if your target uses the IfConversion.cpp pass).

radar://15077010

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191671 91177308-0d34-0410-b5e6-96231b3b80d8
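For out-of-tree maintainers, here is a minimal sketch of the migration described above. MyTargetInstrInfo and the cost heuristic are placeholders; only the getPredicationCost signature comes from this patch, and the sketch assumes your backend previously reported the extra cost through the PredCost out-parameter of getInstrLatency.

  // Sketch only: MyTargetInstrInfo and the heuristic below are hypothetical.
  #include "MyTargetInstrInfo.h"   // hypothetical backend header
  using namespace llvm;

  unsigned MyTargetInstrInfo::getPredicationCost(const MachineInstr *MI) const {
    // Return whatever value you previously stored into *PredCost for MI in
    // getInstrLatency; the if-converter now asks for it through this hook.
    if (MI->getDesc().isCall())
      return 1; // placeholder: one extra cycle when predicated
    return 0;
  }

The existing getInstrLatency override can stay as-is for itinerary-based scheduling; only the predication cost moves to the new hook as far as the if-converter is concerned.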
include/llvm/CodeGen/TargetSchedule.h |  8
include/llvm/Target/TargetInstrInfo.h |  2
lib/CodeGen/IfConversion.cpp          | 20
lib/CodeGen/TargetInstrInfo.cpp       |  4
lib/CodeGen/TargetSchedule.cpp        |  7
lib/Target/ARM/ARMBaseInstrInfo.cpp   | 18
lib/Target/ARM/ARMBaseInstrInfo.h     |  2
lib/Target/R600/R600InstrInfo.cpp     |  4
lib/Target/R600/R600InstrInfo.h       |  2
9 files changed, 57 insertions(+), 10 deletions(-)
diff --git a/include/llvm/CodeGen/TargetSchedule.h b/include/llvm/CodeGen/TargetSchedule.h
index f2adcf8..8ef26b7 100644
--- a/include/llvm/CodeGen/TargetSchedule.h
+++ b/include/llvm/CodeGen/TargetSchedule.h
@@ -152,7 +152,13 @@ public:
/// Compute and return the expected latency of this instruction independent of
/// a particular use. computeOperandLatency is the prefered API, but this is
/// occasionally useful to help estimate instruction cost.
- unsigned computeInstrLatency(const MachineInstr *MI) const;
+ ///
+ /// If UseDefaultDefLatency is false and no new machine sched model is
+ /// present this method falls back to TII->getInstrLatency with an empty
+ /// instruction itinerary (this is so we preserve the previous behavior of the
+ /// if converter after moving it to TargetSchedModel).
+ unsigned computeInstrLatency(const MachineInstr *MI,
+ bool UseDefaultDefLatency = true) const;
/// \brief Output dependency latency of a pair of defs of the same register.
///
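A usage note, not part of the patch: the sketch below shows the calling convention the new overload is meant for, mirroring the IfConversion change further down. The wrapper name and context are hypothetical; only computeInstrLatency's signature comes from this patch.

  // Sketch only: the wrapper function is hypothetical.
  #include "llvm/CodeGen/TargetSchedule.h"
  using namespace llvm;

  static unsigned instrCycles(const TargetSchedModel &SchedModel,
                              const MachineInstr *MI) {
    // Passing false preserves the old if-converter behaviour: with an
    // itinerary, or with no machine sched model at all, this falls back to
    // TII->getInstrLatency(); otherwise the new machine model is used.
    return SchedModel.computeInstrLatency(MI, /*UseDefaultDefLatency=*/false);
  }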
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index d92ad42..b8599da 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -823,6 +823,8 @@ public:
const MachineInstr *MI,
unsigned *PredCost = 0) const;
+ virtual unsigned getPredicationCost(const MachineInstr *MI) const;
+
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const;
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index 418994d..597a237 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -31,6 +32,8 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
using namespace llvm;
// Hidden options for help debugging.
@@ -150,11 +153,11 @@ namespace {
/// BBAnalysis - Results of if-conversion feasibility analysis indexed by
/// basic block number.
std::vector<BBInfo> BBAnalysis;
+ TargetSchedModel SchedModel;
const TargetLoweringBase *TLI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
- const InstrItineraryData *InstrItins;
const MachineBranchProbabilityInfo *MBPI;
MachineRegisterInfo *MRI;
@@ -267,7 +270,11 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getTarget().getRegisterInfo();
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
MRI = &MF.getRegInfo();
- InstrItins = MF.getTarget().getInstrItineraryData();
+
+ const TargetSubtargetInfo &ST =
+ MF.getTarget().getSubtarget<TargetSubtargetInfo>();
+ SchedModel.init(*ST.getSchedModel(), &ST, TII);
+
if (!TII) return false;
PreRegAlloc = MRI->isSSA();
@@ -672,9 +679,8 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
if (!isPredicated) {
BBI.NonPredSize++;
- unsigned ExtraPredCost = 0;
- unsigned NumCycles = TII->getInstrLatency(InstrItins, &*I,
- &ExtraPredCost);
+ unsigned ExtraPredCost = TII->getPredicationCost(&*I);
+ unsigned NumCycles = SchedModel.computeInstrLatency(&*I, false);
if (NumCycles > 1)
BBI.ExtraCost += NumCycles-1;
BBI.ExtraCost2 += ExtraPredCost;
@@ -1511,8 +1517,8 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
MachineInstr *MI = MF.CloneMachineInstr(I);
ToBBI.BB->insert(ToBBI.BB->end(), MI);
ToBBI.NonPredSize++;
- unsigned ExtraPredCost = 0;
- unsigned NumCycles = TII->getInstrLatency(InstrItins, &*I, &ExtraPredCost);
+ unsigned ExtraPredCost = TII->getPredicationCost(&*I);
+ unsigned NumCycles = SchedModel.computeInstrLatency(&*I, false);
if (NumCycles > 1)
ToBBI.ExtraCost += NumCycles-1;
ToBBI.ExtraCost2 += ExtraPredCost;
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index bb8bd42..5e6162f 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -630,6 +630,10 @@ unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
return 1;
}
+unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
+ return 0;
+}
+
unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
diff --git a/lib/CodeGen/TargetSchedule.cpp b/lib/CodeGen/TargetSchedule.cpp
index fd3f496..b0f2ca6 100644
--- a/lib/CodeGen/TargetSchedule.cpp
+++ b/lib/CodeGen/TargetSchedule.cpp
@@ -225,10 +225,13 @@ unsigned TargetSchedModel::computeOperandLatency(
return DefMI->isTransient() ? 0 : TII->defaultDefLatency(&SchedModel, DefMI);
}
-unsigned TargetSchedModel::computeInstrLatency(const MachineInstr *MI) const {
+unsigned
+TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
+ bool UseDefaultDefLatency) const {
// For the itinerary model, fall back to the old subtarget hook.
// Allow subtargets to compute Bundle latencies outside the machine model.
- if (hasInstrItineraries() || MI->isBundle())
+ if (hasInstrItineraries() || MI->isBundle() ||
+ (!hasInstrSchedModel() && !UseDefaultDefLatency))
return TII->getInstrLatency(&InstrItins, MI);
if (hasInstrSchedModel()) {
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 321c3f4..4076e3b 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -3669,6 +3669,24 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return Latency;
}
+unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr *MI) const {
+ if (MI->isCopyLike() || MI->isInsertSubreg() ||
+ MI->isRegSequence() || MI->isImplicitDef())
+ return 0;
+
+ if (MI->isBundle())
+ return 0;
+
+ const MCInstrDesc &MCID = MI->getDesc();
+
+ if (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)) {
+ // When predicated, CPSR is an additional source operand for CPSR updating
+ // instructions, this apparently increases their latencies.
+ return 1;
+ }
+ return 0;
+}
+
unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h
index 96f8637..8ab06fd 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -264,6 +264,8 @@ private:
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const;
+ unsigned getPredicationCost(const MachineInstr *MI) const;
+
unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost = 0) const;
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index 93931e4..92aae63 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -989,6 +989,10 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI,
return false;
}
+unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
+ return 2;
+}
+
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index 0d1ffc8..235a875 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -181,6 +181,8 @@ namespace llvm {
bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
+ unsigned int getPredicationCost(const MachineInstr *) const;
+
unsigned int getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost = 0) const;