author     Quentin Colombet <qcolombet@apple.com>  2012-12-18 22:47:16 +0000
committer  Quentin Colombet <qcolombet@apple.com>  2012-12-18 22:47:16 +0000
commit     b519351b87204966d6548b198b88f2ab0f4d0b4b (patch)
tree       7827db8d28c1736c13627222a6c624f7dc1e96d1
parent     bd7b36e780f99b808f8e334e26f3dae1bc7e8175 (diff)
Disable ARM partial flag dependency optimization at -Oz
To avoid over-constraining the scheduler for ARM in Thumb mode, some Thumb-specific code-size optimizations are blocked when they would add a dependency (such as a write-after-read dependency). Disable this check when code size is the priority, i.e., when the code is compiled with -Oz.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@170462 91177308-0d34-0410-b5e6-96231b3b80d8
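For context, -Oz reaches this pass through the per-function MinSize attribute rather than a global flag: the front end tags each function with minsize, and Thumb2SizeReduce reads that attribute in runOnMachineFunction (see the hunk below). A minimal sketch of what such IR looks like — the function name and body are made up for illustration; the attribute keyword matches the test added in this patch:

; Illustrative only: a function tagged with 'minsize', as produced at -Oz.
; For such functions the pass skips canAddPseudoFlagDep's partial-CPSR-update
; check, so narrow flag-setting encodings are preferred over wide ones.
define i32 @square(i32 %x) minsize nounwind {
entry:
  %mul = mul i32 %x, %x
  ret i32 %mul
}

Keying off the function attribute rather than a TargetMachine-level option means the behavior follows -Oz on a per-function basis.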
-rw-r--r--  lib/Target/ARM/Thumb2SizeReduction.cpp | 12
-rw-r--r--  test/CodeGen/ARM/avoid-cpsr-rmw.ll     | 34
2 files changed, 44 insertions, 2 deletions
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index efc9bc3..04d5c37 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -22,6 +22,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Function.h" // To access Function attributes
using namespace llvm;
STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
@@ -182,11 +183,14 @@ namespace {
/// ReduceMBB - Reduce width of instructions in the specified basic block.
bool ReduceMBB(MachineBasicBlock &MBB);
+
+ bool MinimizeSize;
};
char Thumb2SizeReduce::ID = 0;
}
Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
+ MinimizeSize = false;
for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
unsigned FromOpc = ReduceTable[i].WideOpc;
if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
@@ -221,8 +225,8 @@ static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use,
bool FirstInSelfLoop) {
- // FIXME: Disable check for -Oz (aka OptimizeForSizeHarder).
- if (!STI->avoidCPSRPartialUpdate())
+ // Disable the check for -Oz (aka OptimizeForSizeHarder).
+ if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
return false;
if (!Def)
@@ -942,6 +946,10 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
STI = &TM.getSubtarget<ARMSubtarget>();
+ // When -Oz is set, the function carries MinSize attribute.
+ MinimizeSize =
+ MF.getFunction()->getFnAttributes().hasAttribute(Attributes::MinSize);
+
bool Modified = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
Modified |= ReduceMBB(*I);
diff --git a/test/CodeGen/ARM/avoid-cpsr-rmw.ll b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
index 96e83dd..d98925e 100644
--- a/test/CodeGen/ARM/avoid-cpsr-rmw.ll
+++ b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
@@ -49,3 +49,37 @@ while.body:
while.end:
ret void
}
+
+; Allow partial CPSR dependency when code size is the priority.
+; rdar://12878928
+define void @t3(i32* nocapture %ptr1, i32* %ptr2, i32 %c) nounwind minsize {
+entry:
+; CHECK: t3:
+ %tobool7 = icmp eq i32* %ptr2, null
+ br i1 %tobool7, label %while.end, label %while.body
+
+while.body:
+; CHECK: while.body
+; CHECK: mul r{{[0-9]+}}
+; CHECK: muls
+ %ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ]
+ %ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ]
+ %0 = load i32* %ptr1.addr.09, align 4
+ %arrayidx1 = getelementptr inbounds i32* %ptr1.addr.09, i32 1
+ %1 = load i32* %arrayidx1, align 4
+ %arrayidx3 = getelementptr inbounds i32* %ptr1.addr.09, i32 2
+ %2 = load i32* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds i32* %ptr1.addr.09, i32 3
+ %3 = load i32* %arrayidx4, align 4
+ %add.ptr = getelementptr inbounds i32* %ptr1.addr.09, i32 4
+ %mul = mul i32 %1, %0
+ %mul5 = mul i32 %mul, %2
+ %mul6 = mul i32 %mul5, %3
+ store i32 %mul6, i32* %ptr2.addr.08, align 4
+ %incdec.ptr = getelementptr inbounds i32* %ptr2.addr.08, i32 -1
+ %tobool = icmp eq i32* %incdec.ptr, null
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
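The test's RUN line sits above the hunk shown here and is not part of this diff. A hedged way to exercise the new @t3 case by hand — the triple and CPU below are assumptions (the subtarget must be one for which avoidCPSRPartialUpdate() returns true, e.g. Cortex-A9, for the check to matter at all):

llc < test/CodeGen/ARM/avoid-cpsr-rmw.ll -mtriple=thumbv7-apple-ios -mcpu=cortex-a9 | FileCheck test/CodeGen/ARM/avoid-cpsr-rmw.ll

With minsize on @t3, the loop body is expected to use the narrow flag-setting muls alongside a wide mul, which is exactly what the CHECK lines assert.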