author     Stephen Hines <srhines@google.com>  2014-10-17 20:14:58 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2014-10-17 20:14:58 +0000
commit     281cc67b6ac794b1eb8232e6efca366d870dad43 (patch)
tree       ac7eadad1814ff2f989e716c1ed0bd8fd1557e9a
parent     06d8c96f9e7be298f0e1372ccb8dea18fe149afa (diff)
parent     bfc2d688b591c574c0cc788348c74545ce894efa (diff)
Merge "Bring in fixes for Cortex-A53 errata + build updates."
-rw-r--r--  include/llvm/CodeGen/MachineInstr.h | 1
-rw-r--r--  include/llvm/MC/MCInstrDesc.h | 12
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h | 9
-rw-r--r--  lib/CodeGen/CriticalAntiDepBreaker.cpp | 19
-rw-r--r--  lib/CodeGen/LiveRangeEdit.cpp | 2
-rw-r--r--  lib/CodeGen/MachineCSE.cpp | 2
-rw-r--r--  lib/CodeGen/MachineLICM.cpp | 2
-rw-r--r--  lib/CodeGen/MachineSink.cpp | 2
-rw-r--r--  lib/CodeGen/RegisterCoalescer.cpp | 2
-rw-r--r--  lib/Target/AArch64/AArch64.h | 2
-rw-r--r--  lib/Target/AArch64/AArch64A53Fix835769.cpp | 240
-rw-r--r--  lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp | 694
-rw-r--r--  lib/Target/AArch64/AArch64InstrFormats.td | 8
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.cpp | 45
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.h | 2
-rw-r--r--  lib/Target/AArch64/AArch64Subtarget.h | 2
-rw-r--r--  lib/Target/AArch64/AArch64TargetMachine.cpp | 20
-rw-r--r--  lib/Target/AArch64/Android.mk | 2
-rw-r--r--  lib/Target/AArch64/CMakeLists.txt | 2
-rw-r--r--  test/CodeGen/AArch64/a57-csel.ll | 11
-rw-r--r--  test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll | 323
-rw-r--r--  test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll | 534
-rw-r--r--  test/CodeGen/AArch64/remat.ll | 16
-rw-r--r--  test/CodeGen/X86/critical-anti-dep-breaker.ll | 28
24 files changed, 1965 insertions, 15 deletions
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index 3c82811..1e2db7c 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -614,7 +614,6 @@ public:
/// are not marking copies from and to the same register class with this flag.
bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
// Only returns true for a bundle if all bundled instructions are cheap.
- // FIXME: This probably requires a target hook.
return hasProperty(MCID::CheapAsAMove, Type);
}
diff --git a/include/llvm/MC/MCInstrDesc.h b/include/llvm/MC/MCInstrDesc.h
index 5896de7..101778e 100644
--- a/include/llvm/MC/MCInstrDesc.h
+++ b/include/llvm/MC/MCInstrDesc.h
@@ -451,9 +451,12 @@ public:
}
/// isRematerializable - Returns true if this instruction is a candidate for
- /// remat. This flag is deprecated, please don't use it anymore. If this
- /// flag is set, the isReallyTriviallyReMaterializable() method is called to
- /// verify the instruction is really rematable.
+ /// remat. This flag is only used by the TargetInstrInfo method
+ /// isTriviallyReMaterializable().
+ ///
+ /// If this flag is set, the isReallyTriviallyReMaterializable()
+ /// or isReallyTriviallyReMaterializableGeneric() methods are called to
+ /// verify that the instruction is really rematerializable.
bool isRematerializable() const {
return Flags & (1 << MCID::Rematerializable);
}
@@ -464,6 +467,9 @@ public:
/// where we would like to remat or hoist the instruction, but not if it costs
/// more than moving the instruction into the appropriate register. Note, we
/// are not marking copies from and to the same register class with this flag.
+ ///
+ /// This method is queried through the TargetInstrInfo::isAsCheapAsAMove
+ /// hook, which individual subtargets may override.
bool isAsCheapAsAMove() const {
return Flags & (1 << MCID::CheapAsAMove);
}
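
The two accessors above are plain bit tests against the MCID flag word. As a quick standalone illustration of that pattern, here is a minimal sketch; the enum and struct are simplified stand-ins, not LLVM's real MCInstrDesc.

// Simplified stand-in for the MCID flag tests in the hunk above; not LLVM types.
#include <cassert>
#include <cstdint>

enum SketchFlag { SketchRematerializable = 0, SketchCheapAsAMove = 1 };

struct SketchInstrDesc {
  uint64_t Flags;
  bool isRematerializable() const { return Flags & (1ull << SketchRematerializable); }
  bool isAsCheapAsAMove() const { return Flags & (1ull << SketchCheapAsAMove); }
};

int main() {
  SketchInstrDesc D{(1ull << SketchRematerializable) | (1ull << SketchCheapAsAMove)};
  assert(D.isRematerializable() && D.isAsCheapAsAMove());
  return 0;
}
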
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 87e7c14..a589d0e 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -200,6 +200,15 @@ public:
unsigned &Size, unsigned &Offset,
const TargetMachine *TM) const;
+ /// isAsCheapAsAMove - Return true if the instruction is as cheap as a move
+ /// instruction.
+ ///
+ /// Targets should override this to refine the default (MCID-flag-based)
+ /// answer and may tune it further per micro-architecture.
+ virtual bool isAsCheapAsAMove(const MachineInstr *MI) const {
+ return MI->isAsCheapAsAMove();
+ }
+
/// reMaterialize - Re-issue the specified 'original' instruction at the
/// specific location targeting a new destination register.
/// The register in Orig->getOperand(0).getReg() will be substituted by
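
To see how a backend is meant to plug into the new hook, here is a hedged sketch of a hypothetical override; "MyTarget" and MYTGT::ADDri are invented names used only to show the expected shape, and the real override added by this patch is in AArch64InstrInfo.cpp below.

// Hypothetical target override of the hook declared above.
// MYTGT::ADDri is an invented opcode; operand 3 is assumed to be the shift
// amount, mirroring the AArch64 implementation later in this patch.
class MyTargetInstrInfo : public TargetInstrInfo {
public:
  bool isAsCheapAsAMove(const MachineInstr *MI) const override {
    switch (MI->getOpcode()) {
    case MYTGT::ADDri:
      // Cheap on this (hypothetical) micro-architecture only when unshifted.
      return MI->getOperand(3).getImm() == 0;
    default:
      // Fall back to the MCID::CheapAsAMove flag.
      return MI->isAsCheapAsAMove();
    }
  }
};
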
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp
index d3ffcc7..d2231ec 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -94,7 +94,14 @@ void CriticalAntiDepBreaker::FinishBlock() {
void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
unsigned InsertPosIndex) {
- if (MI->isDebugValue())
+ // Kill instructions can define registers but are really nops, and there might
+ // be a real definition earlier that needs to be paired with uses dominated by
+ // this kill.
+
+ // FIXME: It may be possible to remove the isKill() restriction once PR18663
+ // has been properly fixed. There can be value in processing kills as seen in
+ // the AggressiveAntiDepBreaker class.
+ if (MI->isDebugValue() || MI->isKill())
return;
assert(Count < InsertPosIndex && "Instruction index out of expected range!");
@@ -237,6 +244,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// Update liveness.
// Proceeding upwards, registers that are defed but not used in this
// instruction are now dead.
+ assert(!MI->isKill() && "Attempting to scan a kill instruction");
if (!TII->isPredicated(MI)) {
// Predicated defs are modeled as read + write, i.e. similar to two
@@ -527,7 +535,14 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
unsigned Count = InsertPosIndex - 1;
for (MachineBasicBlock::iterator I = End, E = Begin; I != E; --Count) {
MachineInstr *MI = --I;
- if (MI->isDebugValue())
+ // Kill instructions can define registers but are really nops, and there
+ // might be a real definition earlier that needs to be paired with uses
+ // dominated by this kill.
+
+ // FIXME: It may be possible to remove the isKill() restriction once PR18663
+ // has been properly fixed. There can be value in processing kills as seen
+ // in the AggressiveAntiDepBreaker class.
+ if (MI->isDebugValue() || MI->isKill())
continue;
// Check if this instruction has a dependence on the critical path that
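
Both hunks add the same guard: while walking the block, debug values and KILL pseudos are treated as no-ops, since a KILL names registers without performing a real definition and would otherwise shadow the earlier def the breaker needs to pair with its uses. A minimal standalone sketch of that filter, with a stand-in instruction type rather than MachineInstr:

// Stand-in type; only the skip-guard pattern from the hunks above matters.
#include <vector>

struct ScanInstr {
  bool IsDebugValue;
  bool IsKill; // KILL pseudo: names registers but has no real def/use effect
};

template <typename Fn>
void scanRealInstructions(const std::vector<ScanInstr> &Block, Fn Visit) {
  for (const ScanInstr &I : Block) {
    if (I.IsDebugValue || I.IsKill) // same bail-out the patch adds
      continue;
    Visit(I);
  }
}
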
diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp
index 431241f..c27d630 100644
--- a/lib/CodeGen/LiveRangeEdit.cpp
+++ b/lib/CodeGen/LiveRangeEdit.cpp
@@ -135,7 +135,7 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM,
}
// If only cheap remats were requested, bail out early.
- if (cheapAsAMove && !RM.OrigMI->isAsCheapAsAMove())
+ if (cheapAsAMove && !TII.isAsCheapAsAMove(RM.OrigMI))
return false;
// Verify that all used registers are available with the same values.
diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index 7da439c..c2ab76e 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -380,7 +380,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
// Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
// an immediate predecessor. We don't want to increase register pressure and
// end up causing other computation to be spilled.
- if (MI->isAsCheapAsAMove()) {
+ if (TII->isAsCheapAsAMove(MI)) {
MachineBasicBlock *CSBB = CSMI->getParent();
MachineBasicBlock *BB = MI->getParent();
if (CSBB != BB && !CSBB->isSuccessor(BB))
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index 68d2efd..94cdab5 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -1039,7 +1039,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
/// the operand latency between its def and a use is one or less.
bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
- if (MI.isAsCheapAsAMove() || MI.isCopyLike())
+ if (TII->isAsCheapAsAMove(&MI) || MI.isCopyLike())
return true;
if (!InstrItins || InstrItins->isEmpty())
return false;
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index f44e4d1..0ae495c 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -292,7 +292,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
if (!CEBCandidates.insert(std::make_pair(From, To)))
return true;
- if (!MI->isCopy() && !MI->isAsCheapAsAMove())
+ if (!MI->isCopy() && !TII->isAsCheapAsAMove(MI))
return true;
// MI is cheap, we probably don't want to break the critical edge for it.
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index 5aaeb87..65b0528 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -751,7 +751,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP,
IsDefCopy = true;
return false;
}
- if (!DefMI->isAsCheapAsAMove())
+ if (!TII->isAsCheapAsAMove(DefMI))
return false;
if (!TII->isTriviallyReMaterializable(DefMI, AA))
return false;
diff --git a/lib/Target/AArch64/AArch64.h b/lib/Target/AArch64/AArch64.h
index 1c022aa..7b52e55 100644
--- a/lib/Target/AArch64/AArch64.h
+++ b/lib/Target/AArch64/AArch64.h
@@ -37,6 +37,8 @@ FunctionPass *createAArch64ExpandPseudoPass();
FunctionPass *createAArch64LoadStoreOptimizationPass();
ModulePass *createAArch64PromoteConstantPass();
FunctionPass *createAArch64AddressTypePromotionPass();
+FunctionPass *createAArch64A57FPLoadBalancing();
+FunctionPass *createAArch64A53Fix835769();
/// \brief Creates an ARM-specific Target Transformation Info pass.
ImmutablePass *
createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM);
diff --git a/lib/Target/AArch64/AArch64A53Fix835769.cpp b/lib/Target/AArch64/AArch64A53Fix835769.cpp
new file mode 100644
index 0000000..852a635
--- /dev/null
+++ b/lib/Target/AArch64/AArch64A53Fix835769.cpp
@@ -0,0 +1,240 @@
+//===-- AArch64A53Fix835769.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This pass changes code to work around Cortex-A53 erratum 835769.
+// It works around it by inserting a nop instruction in code sequences that
+// in some circumstances may trigger the erratum.
+// It inserts a nop instruction between a sequence of the following 2 classes
+// of instructions:
+// instr 1: mem-instr (including loads, stores and prefetches).
+// instr 2: non-SIMD integer multiply-accumulate writing 64-bit X registers.
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-fix-cortex-a53-835769"
+
+STATISTIC(NumNopsAdded, "Number of Nops added to work around erratum 835769");
+
+//===----------------------------------------------------------------------===//
+// Helper functions
+
+// Is the instruction a match for the instruction that comes first in the
+// sequence of instructions that can trigger the erratum?
+static bool isFirstInstructionInSequence(MachineInstr *MI) {
+ // Must return true if this instruction is a load, a store or a prefetch.
+ switch (MI->getOpcode()) {
+ case AArch64::PRFMl:
+ case AArch64::PRFMroW:
+ case AArch64::PRFMroX:
+ case AArch64::PRFMui:
+ case AArch64::PRFUMi:
+ return true;
+ default:
+ return (MI->mayLoad() || MI->mayStore());
+ }
+}
+
+// Is the instruction a match for the instruction that comes second in the
+// sequence that can trigger the erratum?
+static bool isSecondInstructionInSequence(MachineInstr *MI) {
+ // Must return true for non-SIMD integer multiply-accumulates, writing
+ // to a 64-bit register.
+ switch (MI->getOpcode()) {
+ // Erratum cannot be triggered when the destination register is 32 bits,
+ // therefore only include the following.
+ case AArch64::MSUBXrrr:
+ case AArch64::MADDXrrr:
+ case AArch64::SMADDLrrr:
+ case AArch64::SMSUBLrrr:
+ case AArch64::UMADDLrrr:
+ case AArch64::UMSUBLrrr:
+ // Erratum can only be triggered by multiply-adds, not by regular
+ // non-accumulating multiplies, i.e. when Ra=XZR='11111'
+ return MI->getOperand(3).getReg() != AArch64::XZR;
+ default:
+ return false;
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AArch64A53Fix835769 : public MachineFunctionPass {
+ const AArch64InstrInfo *TII;
+
+public:
+ static char ID;
+ explicit AArch64A53Fix835769() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ const char *getPassName() const override {
+ return "Workaround A53 erratum 835769 pass";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+private:
+ bool runOnBasicBlock(MachineBasicBlock &MBB);
+};
+char AArch64A53Fix835769::ID = 0;
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+
+bool
+AArch64A53Fix835769::runOnMachineFunction(MachineFunction &F) {
+ const TargetMachine &TM = F.getTarget();
+
+ bool Changed = false;
+ DEBUG(dbgs() << "***** AArch64A53Fix835769 *****\n");
+
+ TII = TM.getSubtarget<AArch64Subtarget>().getInstrInfo();
+
+ for (auto &MBB : F) {
+ Changed |= runOnBasicBlock(MBB);
+ }
+
+ return Changed;
+}
+
+// Return the block that was fallen through to get to MBB, if any,
+// otherwise nullptr.
+static MachineBasicBlock *getBBFallenThrough(MachineBasicBlock *MBB,
+ const TargetInstrInfo *TII) {
+ // Get the previous machine basic block in the function.
+ MachineFunction::iterator MBBI = *MBB;
+
+ // Can't go off top of function.
+ if (MBBI == MBB->getParent()->begin())
+ return nullptr;
+
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+ SmallVector<MachineOperand, 2> Cond;
+
+ MachineBasicBlock *PrevBB = std::prev(MBBI);
+ for (MachineBasicBlock *S : MBB->predecessors())
+ if (S == PrevBB && !TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond) &&
+ !TBB && !FBB)
+ return S;
+
+ return nullptr;
+}
+
+// Iterate through fallen through blocks trying to find a previous non-pseudo if
+// there is one, otherwise return nullptr. Only look for instructions in
+// previous blocks, not the current block, since we only use this to look at
+// previous blocks.
+static MachineInstr *getLastNonPseudo(MachineBasicBlock &MBB,
+ const TargetInstrInfo *TII) {
+ MachineBasicBlock *FMBB = &MBB;
+
+ // If there is no non-pseudo in the current block, loop back around and try
+ // the previous block (if there is one).
+ while ((FMBB = getBBFallenThrough(FMBB, TII))) {
+ for (auto I = FMBB->rbegin(), E = FMBB->rend(); I != E; ++I) {
+ if (!I->isPseudo())
+ return &*I;
+ }
+ }
+
+ // There was no previous non-pseudo in the fallen through blocks
+ return nullptr;
+}
+
+static void insertNopBeforeInstruction(MachineBasicBlock &MBB, MachineInstr* MI,
+ const TargetInstrInfo *TII) {
+ // If we are the first instruction of the block, put the NOP at the end of
+ // the previous fallthrough block
+ if (MI == &MBB.front()) {
+ MachineInstr *I = getLastNonPseudo(MBB, TII);
+ assert(I && "Expected instruction");
+ DebugLoc DL = I->getDebugLoc();
+ BuildMI(I->getParent(), DL, TII->get(AArch64::HINT)).addImm(0);
+ }
+ else {
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI(MBB, MI, DL, TII->get(AArch64::HINT)).addImm(0);
+ }
+
+ ++NumNopsAdded;
+}
+
+bool
+AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+ DEBUG(dbgs() << "Running on MBB: " << MBB << " - scanning instructions...\n");
+
+ // First, scan the basic block, looking for a sequence of 2 instructions
+ // that match the conditions under which the erratum may trigger.
+
+ // List of terminating instructions in matching sequences
+ std::vector<MachineInstr*> Sequences;
+ unsigned Idx = 0;
+ MachineInstr *PrevInstr = nullptr;
+
+ // Try and find the last non-pseudo instruction in any fallen through blocks,
+ // if there isn't one, then we use nullptr to represent that.
+ PrevInstr = getLastNonPseudo(MBB, TII);
+
+ for (auto &MI : MBB) {
+ MachineInstr *CurrInstr = &MI;
+ DEBUG(dbgs() << " Examining: " << MI);
+ if (PrevInstr) {
+ DEBUG(dbgs() << " PrevInstr: " << *PrevInstr
+ << " CurrInstr: " << *CurrInstr
+ << " isFirstInstructionInSequence(PrevInstr): "
+ << isFirstInstructionInSequence(PrevInstr) << "\n"
+ << " isSecondInstructionInSequence(CurrInstr): "
+ << isSecondInstructionInSequence(CurrInstr) << "\n");
+ if (isFirstInstructionInSequence(PrevInstr) &&
+ isSecondInstructionInSequence(CurrInstr)) {
+ DEBUG(dbgs() << " ** pattern found at Idx " << Idx << "!\n");
+ Sequences.push_back(CurrInstr);
+ }
+ }
+ if (!CurrInstr->isPseudo())
+ PrevInstr = CurrInstr;
+ ++Idx;
+ }
+
+ DEBUG(dbgs() << "Scan complete, "<< Sequences.size()
+ << " occurences of pattern found.\n");
+
+ // Then update the basic block, inserting nops between the detected sequences.
+ for (auto &MI : Sequences) {
+ Changed = true;
+ insertNopBeforeInstruction(MBB, MI, TII);
+ }
+
+ return Changed;
+}
+
+// Factory function used by AArch64TargetMachine to add the pass to
+// the passmanager.
+FunctionPass *llvm::createAArch64A53Fix835769() {
+ return new AArch64A53Fix835769();
+}
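
Boiled down, the pass is a linear scan per basic block: remember the last non-pseudo instruction (possibly carried over from a fall-through predecessor) and record every point where a load/store/prefetch is immediately followed by a 64-bit multiply-accumulate whose accumulator is not XZR. A standalone sketch of that core loop, using simplified stand-in flags instead of real AArch64 opcodes:

// Standalone sketch of the erratum-835769 scan above; SeqInstr is a
// stand-in type, whereas the pass inspects real AArch64 opcodes.
#include <cstddef>
#include <vector>

struct SeqInstr {
  bool MemOp;    // load, store or prefetch
  bool MulAcc64; // non-SIMD multiply-accumulate writing a 64-bit X register
  bool AccIsXZR; // accumulator is XZR, i.e. a plain multiply
  bool Pseudo;
};

static bool isFirstInSequence(const SeqInstr &I) { return I.MemOp; }
static bool isSecondInSequence(const SeqInstr &I) {
  return I.MulAcc64 && !I.AccIsXZR;
}

// Returns the indices of instructions in front of which a NOP would go.
std::vector<size_t> findNopPoints(const std::vector<SeqInstr> &Block) {
  std::vector<size_t> Points;
  const SeqInstr *Prev = nullptr; // last non-pseudo instruction seen
  for (size_t Idx = 0; Idx < Block.size(); ++Idx) {
    const SeqInstr &Cur = Block[Idx];
    if (Prev && isFirstInSequence(*Prev) && isSecondInSequence(Cur))
      Points.push_back(Idx);
    if (!Cur.Pseudo)
      Prev = &Cur;
  }
  return Points;
}
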
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
new file mode 100644
index 0000000..195a48e
--- /dev/null
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -0,0 +1,694 @@
+//===-- AArch64A57FPLoadBalancing.cpp - Balance FP ops statically on A57---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// For best-case performance on Cortex-A57, we should try to use a balanced
+// mix of odd and even D-registers when performing a critical sequence of
+// independent, non-quadword FP/ASIMD floating-point multiply or
+// multiply-accumulate operations.
+//
+// This pass attempts to detect situations where the register allocation may
+// adversely affect this load balancing and to change the registers used so as
+// to better utilize the CPU.
+//
+// Ideally we'd just take each multiply or multiply-accumulate in turn and
+// allocate it alternating even or odd registers. However, multiply-accumulates
+// are most efficiently performed in the same functional unit as their
+// accumulation operand. Therefore this pass tries to find maximal sequences
+// ("Chains") of multiply-accumulates linked via their accumulation operand,
+// and assign them all the same "color" (oddness/evenness).
+//
+// This optimization affects S-register and D-register floating point
+// multiplies and FMADD/FMAs, as well as vector (floating point only) muls and
+// FMADD/FMA. Q register instructions (and 128-bit vector instructions) are
+// not affected.
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/EquivalenceClasses.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <list>
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-a57-fp-load-balancing"
+
+// Enforce the algorithm to use the scavenged register even when the original
+// destination register is the correct color. Used for testing.
+static cl::opt<bool>
+TransformAll("aarch64-a57-fp-load-balancing-force-all",
+ cl::desc("Always modify dest registers regardless of color"),
+ cl::init(false), cl::Hidden);
+
+// Never use the balance information obtained from chains - return a specific
+// color always. Used for testing.
+static cl::opt<unsigned>
+OverrideBalance("aarch64-a57-fp-load-balancing-override",
+ cl::desc("Ignore balance information, always return "
+ "(1: Even, 2: Odd)."),
+ cl::init(0), cl::Hidden);
+
+//===----------------------------------------------------------------------===//
+// Helper functions
+
+// Is the instruction a type of multiply on 64-bit (or 32-bit) FPRs?
+static bool isMul(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AArch64::FMULSrr:
+ case AArch64::FNMULSrr:
+ case AArch64::FMULDrr:
+ case AArch64::FNMULDrr:
+
+ case AArch64::FMULv2f32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Is the instruction a type of FP multiply-accumulate on 64-bit (or 32-bit) FPRs?
+static bool isMla(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AArch64::FMSUBSrrr:
+ case AArch64::FMADDSrrr:
+ case AArch64::FNMSUBSrrr:
+ case AArch64::FNMADDSrrr:
+ case AArch64::FMSUBDrrr:
+ case AArch64::FMADDDrrr:
+ case AArch64::FNMSUBDrrr:
+ case AArch64::FNMADDDrrr:
+
+ case AArch64::FMLAv2f32:
+ case AArch64::FMLSv2f32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// A "color", which is either even or odd. Yes, these aren't really colors
+/// but the algorithm is conceptually doing two-color graph coloring.
+enum class Color { Even, Odd };
+static const char *ColorNames[2] = { "Even", "Odd" };
+
+class Chain;
+
+class AArch64A57FPLoadBalancing : public MachineFunctionPass {
+ const AArch64InstrInfo *TII;
+ MachineRegisterInfo *MRI;
+ const TargetRegisterInfo *TRI;
+ RegisterClassInfo RCI;
+
+public:
+ static char ID;
+ explicit AArch64A57FPLoadBalancing() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ const char *getPassName() const override {
+ return "A57 FP Anti-dependency breaker";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+private:
+ bool runOnBasicBlock(MachineBasicBlock &MBB);
+ bool colorChainSet(std::vector<Chain*> GV, MachineBasicBlock &MBB,
+ int &Balance);
+ bool colorChain(Chain *G, Color C, MachineBasicBlock &MBB);
+ int scavengeRegister(Chain *G, Color C, MachineBasicBlock &MBB);
+ void scanInstruction(MachineInstr *MI, unsigned Idx,
+ std::map<unsigned, Chain*> &Chains,
+ std::set<Chain*> &ChainSet);
+ void maybeKillChain(MachineOperand &MO, unsigned Idx,
+ std::map<unsigned, Chain*> &RegChains);
+ Color getColor(unsigned Register);
+ Chain *getAndEraseNext(Color PreferredColor, std::vector<Chain*> &L);
+};
+char AArch64A57FPLoadBalancing::ID = 0;
+
+/// A Chain is a sequence of instructions that are linked together by
+/// an accumulation operand. For example:
+///
+/// fmul d0<def>, ?
+/// fmla d1<def>, ?, ?, d0<kill>
+/// fmla d2<def>, ?, ?, d1<kill>
+///
+/// There may be other instructions interleaved in the sequence that
+/// do not belong to the chain. These other instructions must not use
+/// the "chain" register at any point.
+///
+/// We currently only support chains where the "chain" operand is killed
+/// at each link in the chain for simplicity.
+/// A chain has three important instructions - Start, Last and Kill.
+/// * The start instruction is the first instruction in the chain.
+/// * Last is the final instruction in the chain.
+/// * Kill may or may not be defined. If defined, Kill is the instruction
+/// where the outgoing value of the Last instruction is killed.
+/// This information is important as if we know the outgoing value is
+/// killed with no intervening uses, we can safely change its register.
+///
+/// Without a kill instruction, we must assume the outgoing value escapes
+/// beyond our model and either must not change its register or must
+/// create a fixup FMOV to keep the old register value consistent.
+///
+class Chain {
+public:
+ /// The important (marker) instructions.
+ MachineInstr *StartInst, *LastInst, *KillInst;
+ /// The index, from the start of the basic block, that each marker
+ /// appears. These are stored so we can do quick interval tests.
+ unsigned StartInstIdx, LastInstIdx, KillInstIdx;
+ /// All instructions in the chain.
+ std::set<MachineInstr*> Insts;
+ /// True if KillInst cannot be modified. If this is true,
+ /// we cannot change LastInst's outgoing register.
+ /// This will be true for tied values and regmasks.
+ bool KillIsImmutable;
+ /// The "color" of LastInst. This will be the preferred chain color,
+ /// as changing intermediate nodes is easy but changing the last
+ /// instruction can be more tricky.
+ Color LastColor;
+
+ Chain(MachineInstr *MI, unsigned Idx, Color C) :
+ StartInst(MI), LastInst(MI), KillInst(NULL),
+ StartInstIdx(Idx), LastInstIdx(Idx), KillInstIdx(0),
+ LastColor(C) {
+ Insts.insert(MI);
+ }
+
+ /// Add a new instruction into the chain. The instruction's dest operand
+ /// has the given color.
+ void add(MachineInstr *MI, unsigned Idx, Color C) {
+ LastInst = MI;
+ LastInstIdx = Idx;
+ LastColor = C;
+
+ Insts.insert(MI);
+ }
+
+ /// Return true if MI is a member of the chain.
+ bool contains(MachineInstr *MI) { return Insts.count(MI) > 0; }
+
+ /// Return the number of instructions in the chain.
+ unsigned size() const {
+ return Insts.size();
+ }
+
+ /// Inform the chain that its last active register (the dest register of
+ /// LastInst) is killed by MI with no intervening uses or defs.
+ void setKill(MachineInstr *MI, unsigned Idx, bool Immutable) {
+ KillInst = MI;
+ KillInstIdx = Idx;
+ KillIsImmutable = Immutable;
+ }
+
+ /// Return the first instruction in the chain.
+ MachineInstr *getStart() const { return StartInst; }
+ /// Return the last instruction in the chain.
+ MachineInstr *getLast() const { return LastInst; }
+ /// Return the "kill" instruction (as set with setKill()) or NULL.
+ MachineInstr *getKill() const { return KillInst; }
+ /// Return an instruction that can be used as an iterator for the end
+ /// of the chain. This is the maximum of KillInst (if set) and LastInst.
+ MachineInstr *getEnd() const {
+ return ++MachineBasicBlock::iterator(KillInst ? KillInst : LastInst);
+ }
+
+ /// Can the Kill instruction (assuming one exists) be modified?
+ bool isKillImmutable() const { return KillIsImmutable; }
+
+ /// Return the preferred color of this chain.
+ Color getPreferredColor() {
+ if (OverrideBalance != 0)
+ return OverrideBalance == 1 ? Color::Even : Color::Odd;
+ return LastColor;
+ }
+
+ /// Return true if this chain (StartInst..KillInst) overlaps with Other.
+ bool rangeOverlapsWith(Chain *Other) {
+ unsigned End = KillInst ? KillInstIdx : LastInstIdx;
+ unsigned OtherEnd = Other->KillInst ?
+ Other->KillInstIdx : Other->LastInstIdx;
+
+ return StartInstIdx <= OtherEnd && Other->StartInstIdx <= End;
+ }
+
+ /// Return true if this chain starts before Other.
+ bool startsBefore(Chain *Other) {
+ return StartInstIdx < Other->StartInstIdx;
+ }
+
+ /// Return true if the group will require a fixup MOV at the end.
+ bool requiresFixup() const {
+ return (getKill() && isKillImmutable()) || !getKill();
+ }
+
+ /// Return a simple string representation of the chain.
+ std::string str() const {
+ std::string S;
+ raw_string_ostream OS(S);
+
+ OS << "{";
+ StartInst->print(OS, NULL, true);
+ OS << " -> ";
+ LastInst->print(OS, NULL, true);
+ if (KillInst) {
+ OS << " (kill @ ";
+ KillInst->print(OS, NULL, true);
+ OS << ")";
+ }
+ OS << "}";
+
+ return OS.str();
+ }
+
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+
+bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
+ bool Changed = false;
+ DEBUG(dbgs() << "***** AArch64A57FPLoadBalancing *****\n");
+
+ const TargetMachine &TM = F.getTarget();
+ MRI = &F.getRegInfo();
+ TRI = F.getRegInfo().getTargetRegisterInfo();
+ TII = TM.getSubtarget<AArch64Subtarget>().getInstrInfo();
+ RCI.runOnMachineFunction(F);
+
+ for (auto &MBB : F) {
+ Changed |= runOnBasicBlock(MBB);
+ }
+
+ return Changed;
+}
+
+bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+ DEBUG(dbgs() << "Running on MBB: " << MBB << " - scanning instructions...\n");
+
+ // First, scan the basic block producing a set of chains.
+
+ // The currently "active" chains - chains that can be added to and haven't
+ // been killed yet. This is keyed by register - all chains can only have one
+ // "link" register between each inst in the chain.
+ std::map<unsigned, Chain*> ActiveChains;
+ std::set<Chain*> AllChains;
+ unsigned Idx = 0;
+ for (auto &MI : MBB)
+ scanInstruction(&MI, Idx++, ActiveChains, AllChains);
+
+ DEBUG(dbgs() << "Scan complete, "<< AllChains.size() << " chains created.\n");
+
+ // Group the chains into disjoint sets based on their liveness range. This is
+ // a poor-man's version of graph coloring. Ideally we'd create an interference
+ // graph and perform full-on graph coloring on that, but;
+ // (a) That's rather heavyweight for only two colors.
+ // (b) We expect multiple disjoint interference regions - in practice the live
+ // range of chains is quite small and they are clustered between loads
+ // and stores.
+ EquivalenceClasses<Chain*> EC;
+ for (auto *I : AllChains)
+ EC.insert(I);
+
+ for (auto *I : AllChains) {
+ for (auto *J : AllChains) {
+ if (I != J && I->rangeOverlapsWith(J))
+ EC.unionSets(I, J);
+ }
+ }
+ DEBUG(dbgs() << "Created " << EC.getNumClasses() << " disjoint sets.\n");
+
+ // Now we assume that every member of an equivalence class interferes
+ // with every other member of that class, and with no members of other classes.
+
+ // Convert the EquivalenceClasses to a simpler set of sets.
+ std::vector<std::vector<Chain*> > V;
+ for (auto I = EC.begin(), E = EC.end(); I != E; ++I) {
+ std::vector<Chain*> Cs(EC.member_begin(I), EC.member_end());
+ if (Cs.empty()) continue;
+ V.push_back(Cs);
+ }
+
+ // Now we have a set of sets, order them by start address so
+ // we can iterate over them sequentially.
+ std::sort(V.begin(), V.end(),
+ [](const std::vector<Chain*> &A,
+ const std::vector<Chain*> &B) {
+ return A.front()->startsBefore(B.front());
+ });
+
+ // As we only have two colors, we can track the global (BB-level) balance of
+ // odds versus evens. We aim to keep this near zero to keep both execution
+ // units fed.
+ // Positive means we're even-heavy, negative we're odd-heavy.
+ //
+ // FIXME: If chains have interdependencies, for example:
+ // mul r0, r1, r2
+ // mul r3, r0, r1
+ // We do not model this and may color each one differently, assuming we'll
+ // get ILP when we obviously can't. This hasn't been seen to be a problem
+ // in practice so far, so we simplify the algorithm by ignoring it.
+ int Parity = 0;
+
+ for (auto &I : V)
+ Changed |= colorChainSet(I, MBB, Parity);
+
+ for (auto *C : AllChains)
+ delete C;
+
+ return Changed;
+}
+
+Chain *AArch64A57FPLoadBalancing::getAndEraseNext(Color PreferredColor,
+ std::vector<Chain*> &L) {
+ if (L.empty())
+ return nullptr;
+
+ // We try and get the best candidate from L to color next, given that our
+ // preferred color is "PreferredColor". L is ordered from larger to smaller
+ // chains. It is beneficial to color the large chains before the small chains,
+ // but if we can't find a chain of the maximum length with the preferred color,
+ // we fuzz the size and look for slightly smaller chains before giving up and
+ // returning a chain that must be recolored.
+
+ // FIXME: Does this need to be configurable?
+ const unsigned SizeFuzz = 1;
+ unsigned MinSize = L.front()->size() - SizeFuzz;
+ for (auto I = L.begin(), E = L.end(); I != E; ++I) {
+ if ((*I)->size() <= MinSize) {
+ // We've gone past the size limit. Return the previous item.
+ Chain *Ch = *--I;
+ L.erase(I);
+ return Ch;
+ }
+
+ if ((*I)->getPreferredColor() == PreferredColor) {
+ Chain *Ch = *I;
+ L.erase(I);
+ return Ch;
+ }
+ }
+
+ // Bailout case - just return the first item.
+ Chain *Ch = L.front();
+ L.erase(L.begin());
+ return Ch;
+}
+
+bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV,
+ MachineBasicBlock &MBB,
+ int &Parity) {
+ bool Changed = false;
+ DEBUG(dbgs() << "colorChainSet(): #sets=" << GV.size() << "\n");
+
+ // Sort by descending size order so that we allocate the most important
+ // sets first.
+ // Tie-break equivalent sizes by sorting chains requiring fixups before
+ // those without fixups. The logic here is that we should look at the
+ // chains that we cannot change before we look at those we can,
+ // so the parity counter is updated and we know what color we should
+ // change them to!
+ std::sort(GV.begin(), GV.end(), [](const Chain *G1, const Chain *G2) {
+ if (G1->size() != G2->size())
+ return G1->size() > G2->size();
+ return G1->requiresFixup() > G2->requiresFixup();
+ });
+
+ Color PreferredColor = Parity < 0 ? Color::Even : Color::Odd;
+ while (Chain *G = getAndEraseNext(PreferredColor, GV)) {
+ // Start off by assuming we'll color to our own preferred color.
+ Color C = PreferredColor;
+ if (Parity == 0)
+ // But if we really don't care, use the chain's preferred color.
+ C = G->getPreferredColor();
+
+ DEBUG(dbgs() << " - Parity=" << Parity << ", Color="
+ << ColorNames[(int)C] << "\n");
+
+ // If we'll need a fixup FMOV, don't bother. Testing has shown that this
+ // happens infrequently and when it does it has at least a 50% chance of
+ // slowing code down instead of speeding it up.
+ if (G->requiresFixup() && C != G->getPreferredColor()) {
+ C = G->getPreferredColor();
+ DEBUG(dbgs() << " - " << G->str() << " - not worthwhile changing; "
+ "color remains " << ColorNames[(int)C] << "\n");
+ }
+
+ Changed |= colorChain(G, C, MBB);
+
+ Parity += (C == Color::Even) ? G->size() : -G->size();
+ PreferredColor = Parity < 0 ? Color::Even : Color::Odd;
+ }
+
+ return Changed;
+}
+
+int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
+ MachineBasicBlock &MBB) {
+ RegScavenger RS;
+ RS.enterBasicBlock(&MBB);
+ RS.forward(MachineBasicBlock::iterator(G->getStart()));
+
+ // Can we find an appropriate register that is available throughout the life
+ // of the chain?
+ unsigned RegClassID = G->getStart()->getDesc().OpInfo[0].RegClass;
+ BitVector AvailableRegs = RS.getRegsAvailable(TRI->getRegClass(RegClassID));
+ for (MachineBasicBlock::iterator I = G->getStart(), E = G->getEnd();
+ I != E; ++I) {
+ RS.forward(I);
+ AvailableRegs &= RS.getRegsAvailable(TRI->getRegClass(RegClassID));
+
+ // Remove any registers clobbered by a regmask.
+ for (auto J : I->operands()) {
+ if (J.isRegMask())
+ AvailableRegs.clearBitsNotInMask(J.getRegMask());
+ }
+ }
+
+ // Make sure we allocate in-order, to get the cheapest registers first.
+ auto Ord = RCI.getOrder(TRI->getRegClass(RegClassID));
+ for (auto Reg : Ord) {
+ if (!AvailableRegs[Reg])
+ continue;
+ if ((C == Color::Even && (Reg % 2) == 0) ||
+ (C == Color::Odd && (Reg % 2) == 1))
+ return Reg;
+ }
+
+ return -1;
+}
+
+bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C,
+ MachineBasicBlock &MBB) {
+ bool Changed = false;
+ DEBUG(dbgs() << " - colorChain(" << G->str() << ", "
+ << ColorNames[(int)C] << ")\n");
+
+ // Try and obtain a free register of the right class. Without a register
+ // to play with we cannot continue.
+ int Reg = scavengeRegister(G, C, MBB);
+ if (Reg == -1) {
+ DEBUG(dbgs() << "Scavenging (thus coloring) failed!\n");
+ return false;
+ }
+ DEBUG(dbgs() << " - Scavenged register: " << TRI->getName(Reg) << "\n");
+
+ std::map<unsigned, unsigned> Substs;
+ for (MachineBasicBlock::iterator I = G->getStart(), E = G->getEnd();
+ I != E; ++I) {
+ if (!G->contains(I) &&
+ (&*I != G->getKill() || G->isKillImmutable()))
+ continue;
+
+ // I is a member of G, or I is a mutable instruction that kills G.
+
+ std::vector<unsigned> ToErase;
+ for (auto &U : I->operands()) {
+ if (U.isReg() && U.isUse() && Substs.find(U.getReg()) != Substs.end()) {
+ unsigned OrigReg = U.getReg();
+ U.setReg(Substs[OrigReg]);
+ if (U.isKill())
+ // Don't erase straight away, because there may be other operands
+ // that also reference this substitution!
+ ToErase.push_back(OrigReg);
+ } else if (U.isRegMask()) {
+ for (auto J : Substs) {
+ if (U.clobbersPhysReg(J.first))
+ ToErase.push_back(J.first);
+ }
+ }
+ }
+ // Now it's safe to remove the substs identified earlier.
+ for (auto J : ToErase)
+ Substs.erase(J);
+
+ // Only change the def if this isn't the last instruction.
+ if (&*I != G->getKill()) {
+ MachineOperand &MO = I->getOperand(0);
+
+ bool Change = TransformAll || getColor(MO.getReg()) != C;
+ if (G->requiresFixup() && &*I == G->getLast())
+ Change = false;
+
+ if (Change) {
+ Substs[MO.getReg()] = Reg;
+ MO.setReg(Reg);
+ MRI->setPhysRegUsed(Reg);
+
+ Changed = true;
+ }
+ }
+ }
+ assert(Substs.size() == 0 && "No substitutions should be left active!");
+
+ if (G->getKill()) {
+ DEBUG(dbgs() << " - Kill instruction seen.\n");
+ } else {
+ // We didn't have a kill instruction, but we didn't seem to need to change
+ // the destination register anyway.
+ DEBUG(dbgs() << " - Destination register not changed.\n");
+ }
+ return Changed;
+}
+
+void AArch64A57FPLoadBalancing::
+scanInstruction(MachineInstr *MI, unsigned Idx,
+ std::map<unsigned, Chain*> &ActiveChains,
+ std::set<Chain*> &AllChains) {
+ // Inspect "MI", updating ActiveChains and AllChains.
+
+ if (isMul(MI)) {
+
+ for (auto &I : MI->operands())
+ maybeKillChain(I, Idx, ActiveChains);
+
+ // Create a new chain. Multiplies don't require forwarding so can go on any
+ // unit.
+ unsigned DestReg = MI->getOperand(0).getReg();
+
+ DEBUG(dbgs() << "New chain started for register "
+ << TRI->getName(DestReg) << " at " << *MI);
+
+ Chain *G = new Chain(MI, Idx, getColor(DestReg));
+ ActiveChains[DestReg] = G;
+ AllChains.insert(G);
+
+ } else if (isMla(MI)) {
+
+ // It is beneficial to keep MLAs on the same functional unit as their
+ // accumulator operand.
+ unsigned DestReg = MI->getOperand(0).getReg();
+ unsigned AccumReg = MI->getOperand(3).getReg();
+
+ maybeKillChain(MI->getOperand(1), Idx, ActiveChains);
+ maybeKillChain(MI->getOperand(2), Idx, ActiveChains);
+ if (DestReg != AccumReg)
+ maybeKillChain(MI->getOperand(0), Idx, ActiveChains);
+
+ if (ActiveChains.find(AccumReg) != ActiveChains.end()) {
+ DEBUG(dbgs() << "Chain found for accumulator register "
+ << TRI->getName(AccumReg) << " in MI " << *MI);
+
+ // For simplicity we only chain together sequences of MULs/MLAs where the
+ // accumulator register is killed on each instruction. This means we don't
+ // need to track other uses of the registers we want to rewrite.
+ //
+ // FIXME: We could extend to handle the non-kill cases for more coverage.
+ if (MI->getOperand(3).isKill()) {
+ // Add to chain.
+ DEBUG(dbgs() << "Instruction was successfully added to chain.\n");
+ ActiveChains[AccumReg]->add(MI, Idx, getColor(DestReg));
+ // Handle cases where the destination is not the same as the accumulator.
+ ActiveChains[DestReg] = ActiveChains[AccumReg];
+ return;
+ }
+
+ DEBUG(dbgs() << "Cannot add to chain because accumulator operand wasn't "
+ << "marked <kill>!\n");
+ maybeKillChain(MI->getOperand(3), Idx, ActiveChains);
+ }
+
+ DEBUG(dbgs() << "Creating new chain for dest register "
+ << TRI->getName(DestReg) << "\n");
+ Chain *G = new Chain(MI, Idx, getColor(DestReg));
+ ActiveChains[DestReg] = G;
+ AllChains.insert(G);
+
+ } else {
+
+ // Non-MUL or MLA instruction. Invalidate any chain in the uses or defs
+ // lists.
+ for (auto &I : MI->operands())
+ maybeKillChain(I, Idx, ActiveChains);
+
+ }
+}
+
+void AArch64A57FPLoadBalancing::
+maybeKillChain(MachineOperand &MO, unsigned Idx,
+ std::map<unsigned, Chain*> &ActiveChains) {
+ // Given an operand and the set of active chains (keyed by register),
+ // determine if a chain should be ended and remove from ActiveChains.
+ MachineInstr *MI = MO.getParent();
+
+ if (MO.isReg()) {
+
+ // If this is a KILL of a current chain, record it.
+ if (MO.isKill() && ActiveChains.find(MO.getReg()) != ActiveChains.end()) {
+ DEBUG(dbgs() << "Kill seen for chain " << TRI->getName(MO.getReg())
+ << "\n");
+ ActiveChains[MO.getReg()]->setKill(MI, Idx, /*Immutable=*/MO.isTied());
+ }
+ ActiveChains.erase(MO.getReg());
+
+ } else if (MO.isRegMask()) {
+
+ for (auto I = ActiveChains.begin(), E = ActiveChains.end();
+ I != E; ++I) {
+ if (MO.clobbersPhysReg(I->first)) {
+ DEBUG(dbgs() << "Kill (regmask) seen for chain "
+ << TRI->getName(I->first) << "\n");
+ I->second->setKill(MI, Idx, /*Immutable=*/true);
+ ActiveChains.erase(I);
+ }
+ }
+
+ }
+}
+
+Color AArch64A57FPLoadBalancing::getColor(unsigned Reg) {
+ if ((TRI->getEncodingValue(Reg) % 2) == 0)
+ return Color::Even;
+ else
+ return Color::Odd;
+}
+
+// Factory function used by AArch64TargetMachine to add the pass to the passmanager.
+FunctionPass *llvm::createAArch64A57FPLoadBalancing() {
+ return new AArch64A57FPLoadBalancing();
+}
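
Stripped of the LLVM plumbing, colorChainSet() above is a greedy balance: sort the chains of one interference set by descending size, then give each chain the color that pulls the running even/odd parity back toward zero, weighting by chain length (the size-fuzz lookahead and the fixup-FMOV exception are omitted here). A standalone sketch of that heuristic:

// Standalone sketch of the even/odd balancing heuristic used above.
// Chain sizes stand in for the pass's real instruction chains.
#include <algorithm>
#include <cstdio>
#include <vector>

enum class SketchColor { Even, Odd };

struct SketchChain {
  unsigned Size;         // number of instructions in the chain
  SketchColor Preferred; // color of the chain's last def, as in the pass
  SketchColor Assigned;
};

void balanceChains(std::vector<SketchChain> &Chains) {
  // Color the biggest chains first, as colorChainSet() does.
  std::sort(Chains.begin(), Chains.end(),
            [](const SketchChain &A, const SketchChain &B) { return A.Size > B.Size; });

  int Parity = 0; // >0: even-heavy, <0: odd-heavy
  for (SketchChain &C : Chains) {
    SketchColor Want = Parity < 0 ? SketchColor::Even : SketchColor::Odd;
    if (Parity == 0)
      Want = C.Preferred; // no pressure either way: keep regalloc's choice
    C.Assigned = Want;
    Parity += (Want == SketchColor::Even) ? (int)C.Size : -(int)C.Size;
  }
}

int main() {
  std::vector<SketchChain> Chains = {{4, SketchColor::Even, SketchColor::Even},
                                     {3, SketchColor::Even, SketchColor::Even},
                                     {2, SketchColor::Odd, SketchColor::Odd}};
  balanceChains(Chains);
  for (const SketchChain &C : Chains)
    std::printf("size %u -> %s\n", C.Size,
                C.Assigned == SketchColor::Even ? "Even" : "Odd");
  return 0;
}
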
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 5007172..4876c7d 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -1624,7 +1624,7 @@ class AddSubRegAlias<string asm, Instruction inst, RegisterClass dstRegtype,
multiclass AddSub<bit isSub, string mnemonic,
SDPatternOperator OpNode = null_frag> {
- let hasSideEffects = 0 in {
+ let hasSideEffects = 0, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// Add/Subtract immediate
def Wri : BaseAddSubImm<isSub, 0, GPR32sp, GPR32sp, addsub_shifted_imm32,
mnemonic, OpNode> {
@@ -1949,14 +1949,14 @@ class LogicalRegAlias<string asm, Instruction inst, RegisterClass regtype>
multiclass LogicalImm<bits<2> opc, string mnemonic, SDNode OpNode,
string Alias> {
- let AddedComplexity = 6 in
+ let AddedComplexity = 6, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def Wri : BaseLogicalImm<opc, GPR32sp, GPR32, logical_imm32, mnemonic,
[(set GPR32sp:$Rd, (OpNode GPR32:$Rn,
logical_imm32:$imm))]> {
let Inst{31} = 0;
let Inst{22} = 0; // 64-bit version has an additional bit of immediate.
}
- let AddedComplexity = 6 in
+ let AddedComplexity = 6, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def Xri : BaseLogicalImm<opc, GPR64sp, GPR64, logical_imm64, mnemonic,
[(set GPR64sp:$Rd, (OpNode GPR64:$Rn,
logical_imm64:$imm))]> {
@@ -2001,8 +2001,10 @@ class BaseLogicalRegPseudo<RegisterClass regtype, SDPatternOperator OpNode>
// Split from LogicalImm as not all instructions have both.
multiclass LogicalReg<bits<2> opc, bit N, string mnemonic,
SDPatternOperator OpNode> {
+ let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def Wrr : BaseLogicalRegPseudo<GPR32, OpNode>;
def Xrr : BaseLogicalRegPseudo<GPR64, OpNode>;
+ }
def Wrs : BaseLogicalSReg<opc, N, GPR32, logical_shifted_reg32, mnemonic,
[(set GPR32:$Rd, (OpNode GPR32:$Rn,
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index ce85b2c..b702275 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -541,6 +541,51 @@ void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
CC);
}
+// FIXME: this implementation should be micro-architecture dependent, so a
+// micro-architecture target hook should be introduced here in future.
+bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
+ if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
+ return MI->isAsCheapAsAMove();
+
+ switch (MI->getOpcode()) {
+ default:
+ return false;
+
+ // add/sub on register without shift
+ case AArch64::ADDWri:
+ case AArch64::ADDXri:
+ case AArch64::SUBWri:
+ case AArch64::SUBXri:
+ return (MI->getOperand(3).getImm() == 0);
+
+ // logical ops on immediate
+ case AArch64::ANDWri:
+ case AArch64::ANDXri:
+ case AArch64::EORWri:
+ case AArch64::EORXri:
+ case AArch64::ORRWri:
+ case AArch64::ORRXri:
+ return true;
+
+ // logical ops on register without shift
+ case AArch64::ANDWrr:
+ case AArch64::ANDXrr:
+ case AArch64::BICWrr:
+ case AArch64::BICXrr:
+ case AArch64::EONWrr:
+ case AArch64::EONXrr:
+ case AArch64::EORWrr:
+ case AArch64::EORXrr:
+ case AArch64::ORNWrr:
+ case AArch64::ORNXrr:
+ case AArch64::ORRWrr:
+ case AArch64::ORRXrr:
+ return true;
+ }
+
+ llvm_unreachable("Unknown opcode to check as cheap as a move!");
+}
+
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const {
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index f70b82b..b27565e 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -46,6 +46,8 @@ public:
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+ bool isAsCheapAsAMove(const MachineInstr *MI) const override;
+
bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
unsigned &DstReg, unsigned &SubIdx) const override;
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 52124f6..10c646d 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -100,6 +100,8 @@ public:
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
bool isCyclone() const { return CPUString == "cyclone"; }
+ bool isCortexA57() const { return CPUString == "cortex-a57"; }
+ bool isCortexA53() const { return CPUString == "cortex-a53"; }
/// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
/// that still makes it profitable to inline the call.
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index f99b90b..722e5e7 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -59,6 +59,17 @@ EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
" to make use of cmpxchg flow-based information"),
cl::init(true));
+static cl::opt<bool>
+EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
+ cl::desc("Run early if-conversion"),
+ cl::init(true));
+
+
+static cl::opt<bool>
+EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
+ cl::desc("Work around Cortex-A53 erratum 835769"),
+ cl::init(false));
+
extern "C" void LLVMInitializeAArch64Target() {
// Register the target.
RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
@@ -176,7 +187,8 @@ bool AArch64PassConfig::addInstSelector() {
bool AArch64PassConfig::addILPOpts() {
if (EnableCCMP)
addPass(createAArch64ConditionalCompares());
- addPass(&EarlyIfConverterID);
+ if (EnableEarlyIfConversion)
+ addPass(&EarlyIfConverterID);
if (EnableStPairSuppress)
addPass(createAArch64StorePairSuppressPass());
return true;
@@ -193,6 +205,10 @@ bool AArch64PassConfig::addPostRegAlloc() {
// Change dead register definitions to refer to the zero register.
if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
addPass(createAArch64DeadRegisterDefinitions());
+ if (TM->getOptLevel() != CodeGenOpt::None &&
+ TM->getSubtarget<AArch64Subtarget>().isCortexA57())
+ // Improve performance for some FP/SIMD code for A57.
+ addPass(createAArch64A57FPLoadBalancing());
return true;
}
@@ -206,6 +222,8 @@ bool AArch64PassConfig::addPreSched2() {
}
bool AArch64PassConfig::addPreEmitPass() {
+ if (EnableA53Fix835769)
+ addPass(createAArch64A53Fix835769());
// Relax conditional branch instructions if they're otherwise out of
// range of their destination.
addPass(createAArch64BranchRelaxation());
diff --git a/lib/Target/AArch64/Android.mk b/lib/Target/AArch64/Android.mk
index 6b29c77..d7b3317 100644
--- a/lib/Target/AArch64/Android.mk
+++ b/lib/Target/AArch64/Android.mk
@@ -15,6 +15,8 @@ aarch64_codegen_TBLGEN_TABLES := \
AArch64GenMCPseudoLowering.inc \
aarch64_codegen_SRC_FILES := \
+ AArch64A53Fix835769.cpp \
+ AArch64A57FPLoadBalancing.cpp \
AArch64AddressTypePromotion.cpp \
AArch64AdvSIMDScalarPass.cpp \
AArch64AsmPrinter.cpp \
diff --git a/lib/Target/AArch64/CMakeLists.txt b/lib/Target/AArch64/CMakeLists.txt
index 789d549..c2f0488 100644
--- a/lib/Target/AArch64/CMakeLists.txt
+++ b/lib/Target/AArch64/CMakeLists.txt
@@ -15,6 +15,7 @@ tablegen(LLVM AArch64GenDisassemblerTables.inc -gen-disassembler)
add_public_tablegen_target(AArch64CommonTableGen)
add_llvm_target(AArch64CodeGen
+ AArch64A57FPLoadBalancing.cpp
AArch64AddressTypePromotion.cpp
AArch64AdvSIMDScalarPass.cpp
AArch64AsmPrinter.cpp
@@ -25,6 +26,7 @@ add_llvm_target(AArch64CodeGen
AArch64DeadRegisterDefinitionsPass.cpp
AArch64ExpandPseudoInsts.cpp
AArch64FastISel.cpp
+ AArch64A53Fix835769.cpp
AArch64FrameLowering.cpp
AArch64ISelDAGToDAG.cpp
AArch64ISelLowering.cpp
diff --git a/test/CodeGen/AArch64/a57-csel.ll b/test/CodeGen/AArch64/a57-csel.ll
new file mode 100644
index 0000000..9d16d1a
--- /dev/null
+++ b/test/CodeGen/AArch64/a57-csel.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -mcpu=cortex-a57 -aarch64-enable-early-ifcvt=false | FileCheck %s
+
+; Check that the select is expanded into a branch sequence.
+define i64 @f(i64 %a, i64 %b, i64* %c, i64 %d, i64 %e) {
+ ; CHECK: cbz
+ %x0 = load i64* %c
+ %x1 = icmp eq i64 %x0, 0
+ %x2 = select i1 %x1, i64 %a, i64 %b
+ %x3 = add i64 %x2, %d
+ ret i64 %x3
+}
diff --git a/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
new file mode 100644
index 0000000..fb229fc
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
@@ -0,0 +1,323 @@
+; RUN: llc < %s -mcpu=cortex-a57 -aarch64-a57-fp-load-balancing-override=1 -aarch64-a57-fp-load-balancing-force-all | FileCheck %s --check-prefix CHECK --check-prefix CHECK-EVEN
+; RUN: llc < %s -mcpu=cortex-a57 -aarch64-a57-fp-load-balancing-override=2 -aarch64-a57-fp-load-balancing-force-all | FileCheck %s --check-prefix CHECK --check-prefix CHECK-ODD
+
+; Test the AArch64A57FPLoadBalancing pass. This pass relies heavily on register allocation, so
+; our test strategy is to:
+; * Force the pass to always perform register swapping even if the dest register is of the
+; correct color already (-force-all)
+; * Force the pass to ignore all hints it obtained from regalloc (-deterministic-balance),
+; and run it twice, once where it always hints odd, and once where it always hints even.
+;
+; We then use regex magic to check that in the two cases the register allocation is
+; different; this is what gives us the testing coverage and distinguishes cases where
+; the pass has done some work versus accidental regalloc.
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64"
+
+; Non-overlapping groups - shouldn't need any changing at all.
+
+; CHECK-LABEL: f1:
+; CHECK-EVEN: fmadd [[x:d[0-9]*[02468]]]
+; CHECK-ODD: fmadd [[x:d[0-9]*[13579]]]
+; CHECK: fmadd [[x]]
+; CHECK: fmsub [[x]]
+; CHECK: fmadd [[x]]
+; CHECK: str [[x]]
+
+define void @f1(double* nocapture readonly %p, double* nocapture %q) #0 {
+entry:
+ %0 = load double* %p, align 8
+ %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %1 = load double* %arrayidx1, align 8
+ %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %2 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %4 = load double* %arrayidx4, align 8
+ %mul = fmul fast double %0, %1
+ %add = fadd fast double %mul, %4
+ %mul5 = fmul fast double %1, %2
+ %add6 = fadd fast double %mul5, %add
+ %mul7 = fmul fast double %1, %3
+ %sub = fsub fast double %add6, %mul7
+ %mul8 = fmul fast double %2, %3
+ %add9 = fadd fast double %mul8, %sub
+ store double %add9, double* %q, align 8
+ %arrayidx11 = getelementptr inbounds double* %p, i64 5
+ %5 = load double* %arrayidx11, align 8
+ %arrayidx12 = getelementptr inbounds double* %p, i64 6
+ %6 = load double* %arrayidx12, align 8
+ %arrayidx13 = getelementptr inbounds double* %p, i64 7
+ %7 = load double* %arrayidx13, align 8
+ %mul15 = fmul fast double %6, %7
+ %mul16 = fmul fast double %0, %5
+ %add17 = fadd fast double %mul16, %mul15
+ %mul18 = fmul fast double %5, %6
+ %add19 = fadd fast double %mul18, %add17
+ %arrayidx20 = getelementptr inbounds double* %q, i64 1
+ store double %add19, double* %arrayidx20, align 8
+ ret void
+}
+
+; Overlapping groups - coloring needed.
+
+; CHECK-LABEL: f2:
+; CHECK-EVEN: fmadd [[x:d[0-9]*[02468]]]
+; CHECK-EVEN: fmul [[y:d[0-9]*[13579]]]
+; CHECK-ODD: fmadd [[x:d[0-9]*[13579]]]
+; CHECK-ODD: fmul [[y:d[0-9]*[02468]]]
+; CHECK: fmadd [[x]]
+; CHECK: fmadd [[y]]
+; CHECK: fmsub [[x]]
+; CHECK: fmadd [[y]]
+; CHECK: fmadd [[x]]
+; CHECK: stp [[x]], [[y]]
+
+define void @f2(double* nocapture readonly %p, double* nocapture %q) #0 {
+entry:
+ %0 = load double* %p, align 8
+ %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %1 = load double* %arrayidx1, align 8
+ %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %2 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %4 = load double* %arrayidx4, align 8
+ %arrayidx5 = getelementptr inbounds double* %p, i64 5
+ %5 = load double* %arrayidx5, align 8
+ %arrayidx6 = getelementptr inbounds double* %p, i64 6
+ %6 = load double* %arrayidx6, align 8
+ %arrayidx7 = getelementptr inbounds double* %p, i64 7
+ %7 = load double* %arrayidx7, align 8
+ %mul = fmul fast double %0, %1
+ %add = fadd fast double %mul, %7
+ %mul8 = fmul fast double %5, %6
+ %mul9 = fmul fast double %1, %2
+ %add10 = fadd fast double %mul9, %add
+ %mul11 = fmul fast double %3, %4
+ %add12 = fadd fast double %mul11, %mul8
+ %mul13 = fmul fast double %1, %3
+ %sub = fsub fast double %add10, %mul13
+ %mul14 = fmul fast double %4, %5
+ %add15 = fadd fast double %mul14, %add12
+ %mul16 = fmul fast double %2, %3
+ %add17 = fadd fast double %mul16, %sub
+ store double %add17, double* %q, align 8
+ %arrayidx19 = getelementptr inbounds double* %q, i64 1
+ store double %add15, double* %arrayidx19, align 8
+ ret void
+}
+
+; Dest register is live on block exit - fixup needed.
+
+; CHECK-LABEL: f3:
+; CHECK-EVEN: fmadd [[x:d[0-9]*[02468]]]
+; CHECK-ODD: fmadd [[x:d[0-9]*[13579]]]
+; CHECK: fmadd [[x]]
+; CHECK: fmsub [[x]]
+; CHECK: fmadd [[y:d[0-9]+]], {{.*}}, [[x]]
+; CHECK: str [[y]]
+
+define void @f3(double* nocapture readonly %p, double* nocapture %q) #0 {
+entry:
+ %0 = load double* %p, align 8
+ %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %1 = load double* %arrayidx1, align 8
+ %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %2 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %4 = load double* %arrayidx4, align 8
+ %mul = fmul fast double %0, %1
+ %add = fadd fast double %mul, %4
+ %mul5 = fmul fast double %1, %2
+ %add6 = fadd fast double %mul5, %add
+ %mul7 = fmul fast double %1, %3
+ %sub = fsub fast double %add6, %mul7
+ %mul8 = fmul fast double %2, %3
+ %add9 = fadd fast double %mul8, %sub
+ %cmp = fcmp oeq double %3, 0.000000e+00
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void bitcast (void (...)* @g to void ()*)() #2
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ store double %add9, double* %q, align 8
+ ret void
+}
+
+declare void @g(...) #1
+
+; Single precision version of f2.
+
+; CHECK-LABEL: f4:
+; CHECK-EVEN: fmadd [[x:s[0-9]*[02468]]]
+; CHECK-EVEN: fmul [[y:s[0-9]*[13579]]]
+; CHECK-ODD: fmadd [[x:s[0-9]*[13579]]]
+; CHECK-ODD: fmul [[y:s[0-9]*[02468]]]
+; CHECK: fmadd [[x]]
+; CHECK: fmadd [[y]]
+; CHECK: fmsub [[x]]
+; CHECK: fmadd [[y]]
+; CHECK: fmadd [[x]]
+; CHECK: stp [[x]], [[y]]
+
+define void @f4(float* nocapture readonly %p, float* nocapture %q) #0 {
+entry:
+ %0 = load float* %p, align 4
+ %arrayidx1 = getelementptr inbounds float* %p, i64 1
+ %1 = load float* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds float* %p, i64 2
+ %2 = load float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float* %p, i64 3
+ %3 = load float* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds float* %p, i64 4
+ %4 = load float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float* %p, i64 5
+ %5 = load float* %arrayidx5, align 4
+ %arrayidx6 = getelementptr inbounds float* %p, i64 6
+ %6 = load float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float* %p, i64 7
+ %7 = load float* %arrayidx7, align 4
+ %mul = fmul fast float %0, %1
+ %add = fadd fast float %mul, %7
+ %mul8 = fmul fast float %5, %6
+ %mul9 = fmul fast float %1, %2
+ %add10 = fadd fast float %mul9, %add
+ %mul11 = fmul fast float %3, %4
+ %add12 = fadd fast float %mul11, %mul8
+ %mul13 = fmul fast float %1, %3
+ %sub = fsub fast float %add10, %mul13
+ %mul14 = fmul fast float %4, %5
+ %add15 = fadd fast float %mul14, %add12
+ %mul16 = fmul fast float %2, %3
+ %add17 = fadd fast float %mul16, %sub
+ store float %add17, float* %q, align 4
+ %arrayidx19 = getelementptr inbounds float* %q, i64 1
+ store float %add15, float* %arrayidx19, align 4
+ ret void
+}
+
+; Single precision version of f3
+
+; CHECK-LABEL: f5:
+; CHECK-EVEN: fmadd [[x:s[0-9]*[02468]]]
+; CHECK-ODD: fmadd [[x:s[0-9]*[13579]]]
+; CHECK: fmadd [[x]]
+; CHECK: fmsub [[x]]
+; CHECK: fmadd [[y:s[0-9]+]], {{.*}}, [[x]]
+; CHECK: str [[y]]
+
+define void @f5(float* nocapture readonly %p, float* nocapture %q) #0 {
+entry:
+ %0 = load float* %p, align 4
+ %arrayidx1 = getelementptr inbounds float* %p, i64 1
+ %1 = load float* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds float* %p, i64 2
+ %2 = load float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float* %p, i64 3
+ %3 = load float* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds float* %p, i64 4
+ %4 = load float* %arrayidx4, align 4
+ %mul = fmul fast float %0, %1
+ %add = fadd fast float %mul, %4
+ %mul5 = fmul fast float %1, %2
+ %add6 = fadd fast float %mul5, %add
+ %mul7 = fmul fast float %1, %3
+ %sub = fsub fast float %add6, %mul7
+ %mul8 = fmul fast float %2, %3
+ %add9 = fadd fast float %mul8, %sub
+ %cmp = fcmp oeq float %3, 0.000000e+00
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void bitcast (void (...)* @g to void ()*)() #2
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ store float %add9, float* %q, align 4
+ ret void
+}
+
+; Test that regmask clobbering stops a chain sequence.
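+; The call to @hh carries a regmask operand clobbering the caller-saved FP
+; registers, so the balanced chain cannot be extended across it; the expected
+; code below ends the chain by producing the call argument directly in d0.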
+
+; CHECK-LABEL: f6:
+; CHECK-EVEN: fmadd [[x:d[0-9]*[02468]]]
+; CHECK-ODD: fmadd [[x:d[0-9]*[13579]]]
+; CHECK: fmadd [[x]]
+; CHECK: fmsub [[x]]
+; CHECK: fmadd d0, {{.*}}, [[x]]
+; CHECK: bl hh
+; CHECK: str d0
+
+define void @f6(double* nocapture readonly %p, double* nocapture %q) #0 {
+entry:
+ %0 = load double* %p, align 8
+ %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %1 = load double* %arrayidx1, align 8
+ %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %2 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %4 = load double* %arrayidx4, align 8
+ %mul = fmul fast double %0, %1
+ %add = fadd fast double %mul, %4
+ %mul5 = fmul fast double %1, %2
+ %add6 = fadd fast double %mul5, %add
+ %mul7 = fmul fast double %1, %3
+ %sub = fsub fast double %add6, %mul7
+ %mul8 = fmul fast double %2, %3
+ %add9 = fadd fast double %mul8, %sub
+ %call = tail call double @hh(double %add9) #2
+ store double %call, double* %q, align 8
+ ret void
+}
+
+declare double @hh(double) #1
+
+; Check that we correctly deal with repeated operands.
+; The following testcase creates:
+; %D1<def> = FADDDrr %D0<kill>, %D0
+; We'll get a crash if we naively look at the first operand, remove it
+; from the substitution list, and then look at the second operand.
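+; Schematically, the naive per-operand rewrite would be (pseudocode sketch,
+; not the pass's actual code):
+;
+;   for each operand MO of the FADDDrr:
+;     remove MO's register from the substitution list
+;     rewrite MO
+;
+; which falls over when both operands name the same register, so the pass has
+; to tolerate repeated uses of one register within a single instruction.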
+
+; CHECK: fmadd [[x:d[0-9]+]]
+; CHECK: fadd d1, [[x]], [[x]]
+
+define void @f7(double* nocapture readonly %p, double* nocapture %q) #0 {
+entry:
+ %0 = load double* %p, align 8
+ %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %1 = load double* %arrayidx1, align 8
+ %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %2 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %4 = load double* %arrayidx4, align 8
+ %mul = fmul fast double %0, %1
+ %add = fadd fast double %mul, %4
+ %mul5 = fmul fast double %1, %2
+ %add6 = fadd fast double %mul5, %add
+ %mul7 = fmul fast double %1, %3
+ %sub = fsub fast double %add6, %mul7
+ %mul8 = fmul fast double %2, %3
+ %add9 = fadd fast double %mul8, %sub
+ %add10 = fadd fast double %add9, %add9
+ call void @hhh(double 0.0, double %add10)
+ ret void
+}
+
+declare void @hhh(double, double)
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
diff --git a/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
new file mode 100644
index 0000000..64d91ee
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
@@ -0,0 +1,534 @@
+; REQUIRES: asserts
+; The regression tests need to check the order of emitted instructions, and
+; are therefore somewhat fragile/reliant on instruction scheduling. The test
+; cases have been minimized as much as possible, but most of them could still
+; break if the instruction scheduling heuristics for cortex-a53 change.
+; RUN: llc < %s -mcpu=cortex-a53 -aarch64-fix-cortex-a53-835769=1 -stats 2>&1 \
+; RUN: | FileCheck %s --check-prefix CHECK
+; RUN: llc < %s -mcpu=cortex-a53 -aarch64-fix-cortex-a53-835769=0 -stats 2>&1 \
+; RUN: | FileCheck %s --check-prefix CHECK-NOWORKAROUND
+; The following run lines are just to verify whether or not this pass runs by
+; default for the given CPUs. Given the fragility of the tests, this is only
+; run on a test case where the scheduler has no freedom at all to reschedule
+; the instructions, so the potentially very different scheduling heuristics
+; will not break the test case.
+; RUN: llc < %s -mcpu=generic | FileCheck %s --check-prefix CHECK-BASIC-PASS-DISABLED
+; RUN: llc < %s -mcpu=cortex-a53 | FileCheck %s --check-prefix CHECK-BASIC-PASS-DISABLED
+; RUN: llc < %s -mcpu=cortex-a57 | FileCheck %s --check-prefix CHECK-BASIC-PASS-DISABLED
+; RUN: llc < %s -mcpu=cyclone | FileCheck %s --check-prefix CHECK-BASIC-PASS-DISABLED
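+;
+; For reference: erratum 835769 concerns 64-bit multiply-accumulate
+; instructions (madd, msub, smaddl, smsubl, umaddl, umsubl) that directly
+; follow a memory operation, and the workaround pass simply breaks such
+; sequences with a nop. Illustrative sketch (register choices are arbitrary,
+; not taken from a specific test below):
+;
+;     ldr   x8, [x2]           <- memory operation
+;     nop                      <- inserted by -aarch64-fix-cortex-a53-835769=1
+;     madd  x0, x8, x1, x0     <- 64-bit multiply-accumulate
+;
+; The 32-bit and plain-multiply forms are not affected, which is why the *_32
+; and *_mul_* tests below expect no nop even with the workaround enabled.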
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+define i64 @f_load_madd_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+entry:
+ %0 = load i64* %c, align 8
+ %mul = mul nsw i64 %0, %b
+ %add = add nsw i64 %mul, %a
+ ret i64 %add
+}
+; CHECK-LABEL: f_load_madd_64:
+; CHECK: ldr
+; CHECK-NEXT: nop
+; CHECK-NEXT: madd
+; CHECK-NOWORKAROUND-LABEL: f_load_madd_64:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: madd
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_madd_64:
+; CHECK-BASIC-PASS-DISABLED: ldr
+; CHECK-BASIC-PASS-DISABLED-NEXT: madd
+
+
+define i32 @f_load_madd_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+entry:
+ %0 = load i32* %c, align 4
+ %mul = mul nsw i32 %0, %b
+ %add = add nsw i32 %mul, %a
+ ret i32 %add
+}
+; CHECK-LABEL: f_load_madd_32:
+; CHECK: ldr
+; CHECK-NEXT: madd
+; CHECK-NOWORKAROUND-LABEL: f_load_madd_32:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: madd
+
+
+define i64 @f_load_msub_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+entry:
+ %0 = load i64* %c, align 8
+ %mul = mul nsw i64 %0, %b
+ %sub = sub nsw i64 %a, %mul
+ ret i64 %sub
+}
+; CHECK-LABEL: f_load_msub_64:
+; CHECK: ldr
+; CHECK-NEXT: nop
+; CHECK-NEXT: msub
+; CHECK-NOWORKAROUND-LABEL: f_load_msub_64:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: msub
+
+
+define i32 @f_load_msub_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+entry:
+ %0 = load i32* %c, align 4
+ %mul = mul nsw i32 %0, %b
+ %sub = sub nsw i32 %a, %mul
+ ret i32 %sub
+}
+; CHECK-LABEL: f_load_msub_32:
+; CHECK: ldr
+; CHECK-NEXT: msub
+; CHECK-NOWORKAROUND-LABEL: f_load_msub_32:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: msub
+
+
+define i64 @f_load_mul_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+entry:
+ %0 = load i64* %c, align 8
+ %mul = mul nsw i64 %0, %b
+ ret i64 %mul
+}
+; CHECK-LABEL: f_load_mul_64:
+; CHECK: ldr
+; CHECK-NEXT: mul
+; CHECK-NOWORKAROUND-LABEL: f_load_mul_64:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: mul
+
+
+define i32 @f_load_mul_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+entry:
+ %0 = load i32* %c, align 4
+ %mul = mul nsw i32 %0, %b
+ ret i32 %mul
+}
+; CHECK-LABEL: f_load_mul_32:
+; CHECK: ldr
+; CHECK-NEXT: mul
+; CHECK-NOWORKAROUND-LABEL: f_load_mul_32:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: mul
+
+
+define i64 @f_load_mneg_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+entry:
+ %0 = load i64* %c, align 8
+ %mul = sub i64 0, %b
+ %sub = mul i64 %0, %mul
+ ret i64 %sub
+}
+; CHECK-LABEL: f_load_mneg_64:
+; CHECK-NOWORKAROUND-LABEL: f_load_mneg_64:
+; FIXME: only add further checks here once LLVM actually produces
+; mneg instructions
+; FIXME-CHECK: ldr
+; FIXME-CHECK-NEXT: nop
+; FIXME-CHECK-NEXT: mneg
+; FIXME-CHECK-NOWORKAROUND: ldr
+; FIXME-CHECK-NOWORKAROUND-NEXT: mneg
+
+
+define i32 @f_load_mneg_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+entry:
+ %0 = load i32* %c, align 4
+ %mul = sub i32 0, %b
+ %sub = mul i32 %0, %mul
+ ret i32 %sub
+}
+; CHECK-LABEL: f_load_mneg_32:
+; CHECK-NOWORKAROUND-LABEL: f_load_mneg_32:
+; FIXME: only add further checks here once LLVM actually produces
+; mneg instructions
+; FIXME-CHECK: ldr
+; FIXME-CHECK-NEXT: mneg
+; FIXME-CHECK-NOWORKAROUND: ldr
+; FIXME-CHECK-NOWORKAROUND-NEXT: mneg
+
+
+define i64 @f_load_smaddl(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = sext i32 %b to i64
+ %conv1 = sext i32 %c to i64
+ %mul = mul nsw i64 %conv1, %conv
+ %add = add nsw i64 %mul, %a
+ %0 = load i32* %d, align 4
+ %conv2 = sext i32 %0 to i64
+ %add3 = add nsw i64 %add, %conv2
+ ret i64 %add3
+}
+; CHECK-LABEL: f_load_smaddl:
+; CHECK: ldrsw
+; CHECK-NEXT: nop
+; CHECK-NEXT: smaddl
+; CHECK-NOWORKAROUND-LABEL: f_load_smaddl:
+; CHECK-NOWORKAROUND: ldrsw
+; CHECK-NOWORKAROUND-NEXT: smaddl
+
+
+define i64 @f_load_smsubl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = sext i32 %b to i64
+ %conv1 = sext i32 %c to i64
+ %mul = mul nsw i64 %conv1, %conv
+ %sub = sub i64 %a, %mul
+ %0 = load i32* %d, align 4
+ %conv2 = sext i32 %0 to i64
+ %add = add nsw i64 %sub, %conv2
+ ret i64 %add
+}
+; CHECK-LABEL: f_load_smsubl_64:
+; CHECK: ldrsw
+; CHECK-NEXT: nop
+; CHECK-NEXT: smsubl
+; CHECK-NOWORKAROUND-LABEL: f_load_smsubl_64:
+; CHECK-NOWORKAROUND: ldrsw
+; CHECK-NOWORKAROUND-NEXT: smsubl
+
+
+define i64 @f_load_smull(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = sext i32 %b to i64
+ %conv1 = sext i32 %c to i64
+ %mul = mul nsw i64 %conv1, %conv
+ %0 = load i32* %d, align 4
+ %conv2 = sext i32 %0 to i64
+ %div = sdiv i64 %mul, %conv2
+ ret i64 %div
+}
+; CHECK-LABEL: f_load_smull:
+; CHECK: ldrsw
+; CHECK-NEXT: smull
+; CHECK-NOWORKAROUND-LABEL: f_load_smull:
+; CHECK-NOWORKAROUND: ldrsw
+; CHECK-NOWORKAROUND-NEXT: smull
+
+
+define i64 @f_load_smnegl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = sext i32 %b to i64
+ %conv1 = sext i32 %c to i64
+ %mul = sub nsw i64 0, %conv
+ %sub = mul i64 %conv1, %mul
+ %0 = load i32* %d, align 4
+ %conv2 = sext i32 %0 to i64
+ %div = sdiv i64 %sub, %conv2
+ ret i64 %div
+}
+; CHECK-LABEL: f_load_smnegl_64:
+; CHECK-NOWORKAROUND-LABEL: f_load_smnegl_64:
+; FIXME: only add further checks here once LLVM actually produces
+; smnegl instructions
+
+
+define i64 @f_load_umaddl(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = zext i32 %b to i64
+ %conv1 = zext i32 %c to i64
+ %mul = mul i64 %conv1, %conv
+ %add = add i64 %mul, %a
+ %0 = load i32* %d, align 4
+ %conv2 = zext i32 %0 to i64
+ %add3 = add i64 %add, %conv2
+ ret i64 %add3
+}
+; CHECK-LABEL: f_load_umaddl:
+; CHECK: ldr
+; CHECK-NEXT: nop
+; CHECK-NEXT: umaddl
+; CHECK-NOWORKAROUND-LABEL: f_load_umaddl:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: umaddl
+
+
+define i64 @f_load_umsubl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = zext i32 %b to i64
+ %conv1 = zext i32 %c to i64
+ %mul = mul i64 %conv1, %conv
+ %sub = sub i64 %a, %mul
+ %0 = load i32* %d, align 4
+ %conv2 = zext i32 %0 to i64
+ %add = add i64 %sub, %conv2
+ ret i64 %add
+}
+; CHECK-LABEL: f_load_umsubl_64:
+; CHECK: ldr
+; CHECK-NEXT: nop
+; CHECK-NEXT: umsubl
+; CHECK-NOWORKAROUND-LABEL: f_load_umsubl_64:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: umsubl
+
+
+define i64 @f_load_umull(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = zext i32 %b to i64
+ %conv1 = zext i32 %c to i64
+ %mul = mul i64 %conv1, %conv
+ %0 = load i32* %d, align 4
+ %conv2 = zext i32 %0 to i64
+ %div = udiv i64 %mul, %conv2
+ ret i64 %div
+}
+; CHECK-LABEL: f_load_umull:
+; CHECK: ldr
+; CHECK-NEXT: umull
+; CHECK-NOWORKAROUND-LABEL: f_load_umull:
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: umull
+
+
+define i64 @f_load_umnegl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+entry:
+ %conv = zext i32 %b to i64
+ %conv1 = zext i32 %c to i64
+ %mul = sub nsw i64 0, %conv
+ %sub = mul i64 %conv1, %mul
+ %0 = load i32* %d, align 4
+ %conv2 = zext i32 %0 to i64
+ %div = udiv i64 %sub, %conv2
+ ret i64 %div
+}
+; CHECK-LABEL: f_load_umnegl_64:
+; CHECK-NOWORKAROUND-LABEL: f_load_umnegl_64:
+; FIXME: only add further checks here once LLVM actually produces
+; umnegl instructions
+
+
+define i64 @f_store_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+entry:
+ %0 = load i64* %cp, align 8
+ store i64 %a, i64* %e, align 8
+ %mul = mul nsw i64 %0, %b
+ %add = add nsw i64 %mul, %a
+ ret i64 %add
+}
+; CHECK-LABEL: f_store_madd_64:
+; CHECK: str
+; CHECK-NEXT: nop
+; CHECK-NEXT: madd
+; CHECK-NOWORKAROUND-LABEL: f_store_madd_64:
+; CHECK-NOWORKAROUND: str
+; CHECK-NOWORKAROUND-NEXT: madd
+
+
+define i32 @f_store_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+entry:
+ %0 = load i32* %cp, align 4
+ store i32 %a, i32* %e, align 4
+ %mul = mul nsw i32 %0, %b
+ %add = add nsw i32 %mul, %a
+ ret i32 %add
+}
+; CHECK-LABEL: f_store_madd_32:
+; CHECK: str
+; CHECK-NEXT: madd
+; CHECK-NOWORKAROUND-LABEL: f_store_madd_32:
+; CHECK-NOWORKAROUND: str
+; CHECK-NOWORKAROUND-NEXT: madd
+
+
+define i64 @f_store_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+entry:
+ %0 = load i64* %cp, align 8
+ store i64 %a, i64* %e, align 8
+ %mul = mul nsw i64 %0, %b
+ %sub = sub nsw i64 %a, %mul
+ ret i64 %sub
+}
+; CHECK-LABEL: f_store_msub_64:
+; CHECK: str
+; CHECK-NEXT: nop
+; CHECK-NEXT: msub
+; CHECK-NOWORKAROUND-LABEL: f_store_msub_64:
+; CHECK-NOWORKAROUND: str
+; CHECK-NOWORKAROUND-NEXT: msub
+
+
+define i32 @f_store_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+entry:
+ %0 = load i32* %cp, align 4
+ store i32 %a, i32* %e, align 4
+ %mul = mul nsw i32 %0, %b
+ %sub = sub nsw i32 %a, %mul
+ ret i32 %sub
+}
+; CHECK-LABEL: f_store_msub_32:
+; CHECK: str
+; CHECK-NEXT: msub
+; CHECK-NOWORKAROUND-LABEL: f_store_msub_32:
+; CHECK-NOWORKAROUND: str
+; CHECK-NOWORKAROUND-NEXT: msub
+
+
+define i64 @f_store_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+entry:
+ %0 = load i64* %cp, align 8
+ store i64 %a, i64* %e, align 8
+ %mul = mul nsw i64 %0, %b
+ ret i64 %mul
+}
+; CHECK-LABEL: f_store_mul_64:
+; CHECK: str
+; CHECK-NEXT: mul
+; CHECK-NOWORKAROUND-LABEL: f_store_mul_64:
+; CHECK-NOWORKAROUND: str
+; CHECK-NOWORKAROUND-NEXT: mul
+
+
+define i32 @f_store_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+entry:
+ %0 = load i32* %cp, align 4
+ store i32 %a, i32* %e, align 4
+ %mul = mul nsw i32 %0, %b
+ ret i32 %mul
+}
+; CHECK-LABEL: f_store_mul_32:
+; CHECK: str
+; CHECK-NEXT: mul
+; CHECK-NOWORKAROUND-LABEL: f_store_mul_32:
+; CHECK-NOWORKAROUND: str
+; CHECK-NOWORKAROUND-NEXT: mul
+
+
+define i64 @f_prefetch_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+entry:
+ %0 = load i64* %cp, align 8
+ %1 = bitcast i64* %e to i8*
+ tail call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
+ %mul = mul nsw i64 %0, %b
+ %add = add nsw i64 %mul, %a
+ ret i64 %add
+}
+; CHECK-LABEL: f_prefetch_madd_64:
+; CHECK: prfm
+; CHECK-NEXT: nop
+; CHECK-NEXT: madd
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_madd_64:
+; CHECK-NOWORKAROUND: prfm
+; CHECK-NOWORKAROUND-NEXT: madd
+
+declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) #2
+
+define i32 @f_prefetch_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+entry:
+ %0 = load i32* %cp, align 4
+ %1 = bitcast i32* %e to i8*
+ tail call void @llvm.prefetch(i8* %1, i32 1, i32 0, i32 1)
+ %mul = mul nsw i32 %0, %b
+ %add = add nsw i32 %mul, %a
+ ret i32 %add
+}
+; CHECK-LABEL: f_prefetch_madd_32:
+; CHECK: prfm
+; CHECK-NEXT: madd
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_madd_32:
+; CHECK-NOWORKAROUND: prfm
+; CHECK-NOWORKAROUND-NEXT: madd
+
+define i64 @f_prefetch_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+entry:
+ %0 = load i64* %cp, align 8
+ %1 = bitcast i64* %e to i8*
+ tail call void @llvm.prefetch(i8* %1, i32 0, i32 1, i32 1)
+ %mul = mul nsw i64 %0, %b
+ %sub = sub nsw i64 %a, %mul
+ ret i64 %sub
+}
+; CHECK-LABEL: f_prefetch_msub_64:
+; CHECK: prfm
+; CHECK-NEXT: nop
+; CHECK-NEXT: msub
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_msub_64:
+; CHECK-NOWORKAROUND: prfm
+; CHECK-NOWORKAROUND-NEXT: msub
+
+define i32 @f_prefetch_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+entry:
+ %0 = load i32* %cp, align 4
+ %1 = bitcast i32* %e to i8*
+ tail call void @llvm.prefetch(i8* %1, i32 1, i32 1, i32 1)
+ %mul = mul nsw i32 %0, %b
+ %sub = sub nsw i32 %a, %mul
+ ret i32 %sub
+}
+; CHECK-LABEL: f_prefetch_msub_32:
+; CHECK: prfm
+; CHECK-NEXT: msub
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_msub_32:
+; CHECK-NOWORKAROUND: prfm
+; CHECK-NOWORKAROUND-NEXT: msub
+
+define i64 @f_prefetch_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+entry:
+ %0 = load i64* %cp, align 8
+ %1 = bitcast i64* %e to i8*
+ tail call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
+ %mul = mul nsw i64 %0, %b
+ ret i64 %mul
+}
+; CHECK-LABEL: f_prefetch_mul_64:
+; CHECK: prfm
+; CHECK-NEXT: mul
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_mul_64:
+; CHECK-NOWORKAROUND: prfm
+; CHECK-NOWORKAROUND-NEXT: mul
+
+define i32 @f_prefetch_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+entry:
+ %0 = load i32* %cp, align 4
+ %1 = bitcast i32* %e to i8*
+ tail call void @llvm.prefetch(i8* %1, i32 1, i32 3, i32 1)
+ %mul = mul nsw i32 %0, %b
+ ret i32 %mul
+}
+; CHECK-LABEL: f_prefetch_mul_32:
+; CHECK: prfm
+; CHECK-NEXT: mul
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_mul_32:
+; CHECK-NOWORKAROUND: prfm
+; CHECK-NOWORKAROUND-NEXT: mul
+
+define i64 @fall_through(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+entry:
+ %0 = load i64* %c, align 8
+ br label %block1
+
+block1:
+ %mul = mul nsw i64 %0, %b
+ %add = add nsw i64 %mul, %a
+ %tmp = ptrtoint i8* blockaddress(@fall_through, %block1) to i64
+ %ret = add nsw i64 %tmp, %add
+ ret i64 %ret
+}
+; CHECK-LABEL: fall_through
+; CHECK: ldr
+; CHECK-NEXT: nop
+; CHECK-NEXT: .Ltmp
+; CHECK-NEXT: BB
+; CHECK-NEXT: madd
+; CHECK-NOWORKAROUND-LABEL: fall_through
+; CHECK-NOWORKAROUND: ldr
+; CHECK-NOWORKAROUND-NEXT: .Ltmp
+; CHECK-NOWORKAROUND-NEXT: BB
+; CHECK-NOWORKAROUND-NEXT: madd
+
+; No checks for this, just check it doesn't crash
+define i32 @crash_check(i8** nocapture readnone %data) #0 {
+entry:
+ br label %while.cond
+
+while.cond:
+ br label %while.cond
+}
+
+attributes #0 = { nounwind readonly "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
+
+; CHECK-LABEL: ... Statistics Collected ...
+; CHECK: 11 aarch64-fix-cortex-a53-835769 - Number of Nops added to work around erratum 835769
diff --git a/test/CodeGen/AArch64/remat.ll b/test/CodeGen/AArch64/remat.ll
new file mode 100644
index 0000000..32b3ed2
--- /dev/null
+++ b/test/CodeGen/AArch64/remat.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=cortex-a57 -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=cortex-a53 -o - %s | FileCheck %s
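+; The address of the stack object should be rematerialized (a fresh
+; "add x0, sp, #8") before each call, rather than being kept live in another
+; register and copied back with a mov; both CPUs above are expected to take
+; the rematerialization path.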
+
+%X = type { i64, i64, i64 }
+declare void @f(%X*)
+define void @t() {
+entry:
+ %tmp = alloca %X
+ call void @f(%X* %tmp)
+; CHECK: add x0, sp, #8
+; CHECK-NOT: mov
+ call void @f(%X* %tmp)
+; CHECK: add x0, sp, #8
+; CHECK-NOT: mov
+ ret void
+}
diff --git a/test/CodeGen/X86/critical-anti-dep-breaker.ll b/test/CodeGen/X86/critical-anti-dep-breaker.ll
new file mode 100644
index 0000000..32d3f49
--- /dev/null
+++ b/test/CodeGen/X86/critical-anti-dep-breaker.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic -post-RA-scheduler=1 -break-anti-dependencies=critical | FileCheck %s
+
+; PR20308 ( http://llvm.org/bugs/show_bug.cgi?id=20308 )
+; The critical anti-dependency breaker must not use register def information from a kill inst.
+; This test case expects such an instruction to appear as a comment with def info for RDI.
+; There is an anti-dependency (WAR) hazard on RAX with the default reg allocation and scheduling.
+; The post-RA scheduler and critical anti-dependency breaker can eliminate that hazard using R10,
+; which is the first free register that isn't used as a param in the call to "@Image".
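+;
+; Schematically, the anti-dependence being broken looks like (illustrative
+; only, not the exact emitted code):
+;
+;     ... = use of %rax                       <- earlier read of RAX
+;     movq PartClass@GOTPCREL(%rip), %rax     <- later write: WAR hazard
+;
+; Renaming the later def to %r10 removes the dependence and lets the post-RA
+; scheduler reorder freely, which is what the second CHECK-DAG line verifies.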
+
+@PartClass = external global i32
+@NullToken = external global i64
+
+; CHECK-LABEL: Part_Create:
+; CHECK-DAG: # kill: RDI<def>
+; CHECK-DAG: movq PartClass@GOTPCREL(%rip), %r10
+define i32 @Part_Create(i64* %Anchor, i32 %TypeNum, i32 %F, i32 %Z, i32* %Status, i64* %PartTkn) {
+ %PartObj = alloca i64*, align 8
+ %Vchunk = alloca i64, align 8
+ %1 = load i64* @NullToken, align 4
+ store i64 %1, i64* %Vchunk, align 8
+ %2 = load i32* @PartClass, align 4
+ call i32 @Image(i64* %Anchor, i32 %2, i32 0, i32 0, i32* %Status, i64* %PartTkn, i64** %PartObj)
+ call i32 @Create(i64* %Anchor)
+ ret i32 %2
+}
+
+declare i32 @Image(i64*, i32, i32, i32, i32*, i64*, i64**)
+declare i32 @Create(i64*)