author    Lang Hames <lhames@gmail.com>  2011-11-08 18:56:23 +0000
committer Lang Hames <lhames@gmail.com>  2011-11-08 18:56:23 +0000
commit    5207bf2177e9ef1e68d9408ea4b44f1c8a5ef9c0 (patch)
tree      c94405cc8244718db4821d7662e9479180b3d0ed
parent    d752e0f7e64585839cb3a458ef52456eaebbea3c (diff)
Lower mem-ops to unaligned i32/i16 load/stores on ARM where supported.
Add support for trimming constants to GetDemandedBits. This fixes some funky constant generation that occurs when stores are expanded for targets that don't support unaligned stores natively.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144102 91177308-0d34-0410-b5e6-96231b3b80d8
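To make the trimming idea concrete, here is a minimal standalone C++ sketch of the new ISD::Constant case (plain uint64_t stands in for APInt/SDValue, and getDemandedBits below is a simplified hypothetical stand-in for the DAGCombiner method, not the LLVM API itself):

    #include <cstdint>
    #include <cstdio>

    // If only the bits in Mask are demanded from the constant CVal, the
    // constant can be replaced by its masked value. Callers compare the
    // result against CVal to see whether anything was actually trimmed.
    uint64_t getDemandedBits(uint64_t CVal, uint64_t Mask) {
      return CVal & Mask;
    }

    int main() {
      // A 32-bit all-ones constant whose store is split so that only the
      // low 16 bits are demanded trims to 0xFFFF, avoiding materializing
      // the full 0xFFFFFFFF the commit message calls "funky".
      printf("0x%llX\n",
             (unsigned long long)getDemandedBits(0xFFFFFFFFu, 0xFFFFu));
      return 0;
    }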
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp      | 10
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp            |  7
-rw-r--r--  test/CodeGen/ARM/2011-10-26-memset-inline.ll  | 15
3 files changed, 26 insertions(+), 6 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index e67016c..8b28ea9 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4564,6 +4564,16 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
 SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
   switch (V.getOpcode()) {
   default: break;
+  case ISD::Constant: {
+    const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
+    assert(CV != 0 && "Const value should be ConstSDNode.");
+    const APInt &CVal = CV->getAPIntValue();
+    APInt NewVal = CVal & Mask;
+    if (NewVal != CVal) {
+      return DAG.getConstant(NewVal, V.getValueType());
+    }
+    break;
+  }
   case ISD::OR:
   case ISD::XOR:
     // If the LHS or RHS don't contribute bits to the or, drop them.
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 84a34d5..c51e7ae 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -8171,6 +8171,13 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
     }
   }

+  // Lower to i32/i16 if the size permits.
+  if (Size >= 4) {
+    return MVT::i32;
+  } else if (Size >= 2) {
+    return MVT::i16;
+  }
+
   // Let the target-independent logic figure it out.
   return MVT::Other;
 }
diff --git a/test/CodeGen/ARM/2011-10-26-memset-inline.ll b/test/CodeGen/ARM/2011-10-26-memset-inline.ll
index a236ffe..ff049c8 100644
--- a/test/CodeGen/ARM/2011-10-26-memset-inline.ll
+++ b/test/CodeGen/ARM/2011-10-26-memset-inline.ll
@@ -1,14 +1,17 @@
 ; Make sure short memsets on ARM lower to stores, even when optimizing for size.
-; RUN: llc -march=arm < %s | FileCheck %s
+; RUN: llc -march=arm < %s | FileCheck %s -check-prefix=CHECK-GENERIC
+; RUN: llc -march=arm -mcpu=cortex-a8 < %s | FileCheck %s -check-prefix=CHECK-UNALIGNED

 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
 target triple = "thumbv7-apple-ios5.0.0"

-; CHECK: strb
-; CHECK-NEXT: strb
-; CHECK-NEXT: strb
-; CHECK-NEXT: strb
-; CHECK-NEXT: strb
+; CHECK-GENERIC: strb
+; CHECK-GENERIC-NEXT: strb
+; CHECK-GENERIC-NEXT: strb
+; CHECK-GENERIC-NEXT: strb
+; CHECK-GENERIC-NEXT: strb
+; CHECK-UNALIGNED: strb
+; CHECK-UNALIGNED-NEXT: str
 define void @foo(i8* nocapture %c) nounwind optsize {
 entry:
   call void @llvm.memset.p0i8.i64(i8* %c, i8 -1, i64 5, i32 1, i1 false)
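Taken together, the two RUN lines exercise both lowerings: plain -march=arm, which cannot assume unaligned access, still expands the 5-byte memset into five strb byte stores, while -mcpu=cortex-a8, which supports unaligned accesses, now emits one strb plus a single unaligned 4-byte str, exactly the i32 path added by the new getOptimalMemOpType fallback above.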