about summary refs log tree commit diff stats
path: root/test/CodeGen/PowerPC
diff options
context:
space:
mode:
authorBob Wilson <bob.wilson@apple.com>2013-11-15 19:09:27 +0000
committerBob Wilson <bob.wilson@apple.com>2013-11-15 19:09:27 +0000
commitcc7052343e5e955d4e2f48885c06360f9003390a (patch)
tree8832fd14ebae789ee88bab5009cc33fbc89e3ec7 /test/CodeGen/PowerPC
parent5cddda6d13ab66c462ccbd61255ad6e6f95e9f6f (diff)
downloadexternal_llvm-cc7052343e5e955d4e2f48885c06360f9003390a.zip
external_llvm-cc7052343e5e955d4e2f48885c06360f9003390a.tar.gz
external_llvm-cc7052343e5e955d4e2f48885c06360f9003390a.tar.bz2
Avoid illegal integer promotion in fastisel
Stop folding constant adds into GEP when the type size doesn't match. Otherwise, the adds' operands are effectively being promoted, changing the conditions of an overflow. Results are different when: sext(a) + sext(b) != sext(a + b) Problem originally found on x86-64, but also fixed issues with ARM and PPC, which used similar code. <rdar://problem/15292280> Patch by Duncan Exon Smith! git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194840 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r-- test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll | 17
1 files changed, 17 insertions, 0 deletions
diff --git a/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll b/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll
new file mode 100644
index 0000000..4bcacf0
--- /dev/null
+++ b/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll
@@ -0,0 +1,17 @@
+; fastisel should not fold add with non-pointer bitwidth
+; sext(a) + sext(b) != sext(a + b)
+; RUN: llc -mtriple=powerpc64-unknown-freebsd10.0 %s -O0 -o - | FileCheck %s
+
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+entry:
+ %ptr.addr = alloca i8*, align 8
+ %add = add i8 64, 64 ; 0x40 + 0x40
+ %0 = load i8** %ptr.addr, align 8
+
+ ; CHECK-LABEL: gep_promotion:
+ ; CHECK: lbz {{[0-9]+}}, 0({{.*}})
+ %arrayidx = getelementptr inbounds i8* %0, i8 %add
+
+ %1 = load i8* %arrayidx, align 1
+ ret i8 %1
+}