path: root/test/CodeGen/Thumb2
Diffstat (limited to 'test/CodeGen/Thumb2')
-rw-r--r--  test/CodeGen/Thumb2/crash.ll            31
-rw-r--r--  test/CodeGen/Thumb2/lit.local.cfg        9
-rw-r--r--  test/CodeGen/Thumb2/lsr-deficiency.ll   11
-rw-r--r--  test/CodeGen/Thumb2/thumb2-branch.ll     4
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ifcvt2.ll    10
-rw-r--r--  test/CodeGen/Thumb2/thumb2-jtb.ll       10
6 files changed, 50 insertions, 25 deletions
diff --git a/test/CodeGen/Thumb2/crash.ll b/test/CodeGen/Thumb2/crash.ll
index d8b51ec..cb4d080 100644
--- a/test/CodeGen/Thumb2/crash.ll
+++ b/test/CodeGen/Thumb2/crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -verify-machineinstrs
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"
@@ -47,3 +47,32 @@ bb2: ; preds = %bb
tail call void @llvm.arm.neon.vst4.v4i32(i8* bitcast ([16 x i32]* @dbuf to i8*), <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, <4 x i32> %5, i32 1) nounwind
ret i32 0
}
+
+; PR12389
+; Make sure the DPair register class can spill.
+define void @pr12389(i8* %p) nounwind ssp {
+entry:
+ %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %p, i32 1)
+ tail call void asm sideeffect "", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15}"() nounwind
+ tail call void @llvm.arm.neon.vst1.v4f32(i8* %p, <4 x float> %vld1, i32 1)
+ ret void
+}
+
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
+
+declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
+
+; <rdar://problem/11101911>
+; When an strd is expanded into two str instructions, make sure the first str
+; doesn't kill the base register. This can happen if the base register is the
+; same as the data register.
+%class = type { i8*, %class*, i32 }
+define void @f11101911(%class* %this, i32 %num) ssp align 2 {
+entry:
+ %p1 = getelementptr inbounds %class* %this, i32 0, i32 1
+ %p2 = getelementptr inbounds %class* %this, i32 0, i32 2
+ tail call void asm sideeffect "", "~{r1},~{r3},~{r5},~{r11},~{r13}"() nounwind
+ store %class* %this, %class** %p1, align 4
+ store i32 %num, i32* %p2, align 4
+ ret void
+}
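
A note on the f11101911 test above: when an strd's data register is also its base register, the two expanded str instructions must keep the base alive across the first store. A minimal sketch of the hazard, with registers chosen purely for illustration (not taken from actual codegen output):

    @ strd r0, r1, [r0, #4] is expanded into two plain stores:
    str r0, [r0, #4]    @ must not mark r0 killed: it is data AND base here
    str r1, [r0, #8]    @ the second store still reads the base register r0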
diff --git a/test/CodeGen/Thumb2/lit.local.cfg b/test/CodeGen/Thumb2/lit.local.cfg
index dd6c50d..cb77b09 100644
--- a/test/CodeGen/Thumb2/lit.local.cfg
+++ b/test/CodeGen/Thumb2/lit.local.cfg
@@ -1,13 +1,6 @@
config.suffixes = ['.ll', '.c', '.cpp']
-def getRoot(config):
- if not config.parent:
- return config
- return getRoot(config.parent)
-
-root = getRoot(config)
-
-targets = set(root.targets_to_build.split())
+targets = set(config.root.targets_to_build.split())
if not 'ARM' in targets:
config.unsupported = True
diff --git a/test/CodeGen/Thumb2/lsr-deficiency.ll b/test/CodeGen/Thumb2/lsr-deficiency.ll
index 9ff114e..9aaa821 100644
--- a/test/CodeGen/Thumb2/lsr-deficiency.ll
+++ b/test/CodeGen/Thumb2/lsr-deficiency.ll
@@ -3,11 +3,6 @@
; This now reduces to a single induction variable.
-; TODO: It still gets a GPR shuffle at the end of the loop
-; This is because something in instruction selection has decided
-; that comparing the pre-incremented value with zero is better
-; than comparing the post-incremented value with -4.
-
@G = external global i32 ; <i32*> [#uses=2]
@array = external global i32* ; <i32**> [#uses=1]
@@ -20,9 +15,9 @@ entry:
bb: ; preds = %bb, %entry
; CHECK: LBB0_1:
-; CHECK: cmp [[R2:r[0-9]+]], #0
-; CHECK: sub{{(.w)?}} [[REGISTER:(r[0-9]+)|(lr)]], [[R2]], #1
-; CHECK: mov [[R2]], [[REGISTER]]
+; CHECK: subs [[R2:r[0-9]+]], #1
+; CHECK: cmp.w [[R2]], #-1
+; CHECK: bne LBB0_1
%0 = phi i32 [ %.pre, %entry ], [ %3, %bb ] ; <i32> [#uses=1]
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
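
The updated CHECK lines above capture the improvement the removed TODO asked for: the old loop latch compared the pre-decremented counter against zero and then needed a cross-register copy, while the new one decrements in place and compares the result. Side by side, with an illustrative register assignment:

    @ before: separate compare, subtract, and copy
    cmp   r2, #0
    sub.w lr, r2, #1
    mov   r2, lr
    @ after: decrement in place, compare the post-decremented value
    subs  r2, #1
    cmp.w r2, #-1
    bne   LBB0_1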
diff --git a/test/CodeGen/Thumb2/thumb2-branch.ll b/test/CodeGen/Thumb2/thumb2-branch.ll
index 27d8e8f..f1c097c 100644
--- a/test/CodeGen/Thumb2/thumb2-branch.ll
+++ b/test/CodeGen/Thumb2/thumb2-branch.ll
@@ -58,8 +58,8 @@ define i32 @f4(i32 %a, i32 %b, i32* %v) {
entry:
; CHECK: f4:
; CHECK: blo LBB
- %tmp = icmp ult i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp, label %return, label %cond_true
+ %tmp = icmp uge i32 %a, %b ; <i1> [#uses=1]
+ br i1 %tmp, label %cond_true, label %return
cond_true: ; preds = %entry
fence seq_cst
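
The f4 rewrite above inverts the IR condition (ult becomes uge) and swaps the branch successors; the expected machine code, per the unchanged CHECK line, is still an unsigned-lower branch, roughly (label name hypothetical):

    cmp r0, r1      @ %a against %b
    blo LBB0_2      @ taken when %a < %b (unsigned): the return path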
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
index f577f79..5aa9a73 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
@@ -29,13 +29,13 @@ declare i32 @bar(...)
define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
entry:
; CHECK: CountTree:
-; CHECK: it eq
-; CHECK: cmpeq
-; CHECK: bne
-; CHECK: cmp
; CHECK: itt eq
; CHECK: moveq
; CHECK: popeq
+; CHECK: bne
+; CHECK: cmp
+; CHECK: it eq
+; CHECK: cmpeq
br label %tailrecurse
tailrecurse: ; preds = %bb, %entry
@@ -83,7 +83,7 @@ define fastcc void @t2() nounwind {
entry:
; CHECK: t2:
; CHECK: cmp r0, #0
-; CHECK: beq
+; CHECK: %growMapping.exit
br i1 undef, label %bb.i.i3, label %growMapping.exit
bb.i.i3: ; preds = %entry
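
In the reordered CountTree checks above, the predicated epilogue is now matched before the bne/cmpeq sequence. For reference, the shape those CHECK lines describe, with illustrative operands:

    itt   eq             @ predicate the next two instructions on EQ
    moveq r0, #0         @ only executes when the flags say equal
    popeq {r4, r7, pc}   @ conditional epilogue and return
    ...
    bne   LBB1_3         @ later: branch away when not equal
    cmp   r0, #0
    it    eq
    cmpeq r1, #0         @ a second compare folded under the IT block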
diff --git a/test/CodeGen/Thumb2/thumb2-jtb.ll b/test/CodeGen/Thumb2/thumb2-jtb.ll
index f5a56e5..7e1655f 100644
--- a/test/CodeGen/Thumb2/thumb2-jtb.ll
+++ b/test/CodeGen/Thumb2/thumb2-jtb.ll
@@ -3,11 +3,19 @@
; Do not use tbb / tbh if any destination is before the jumptable.
; rdar://7102917
-define i16 @main__getopt_internal_2E_exit_2E_ce(i32) nounwind {
+define i16 @main__getopt_internal_2E_exit_2E_ce(i32, i1 %b) nounwind {
+entry:
+ br i1 %b, label %codeRepl127.exitStub, label %newFuncRoot
+
newFuncRoot:
br label %_getopt_internal.exit.ce
codeRepl127.exitStub: ; preds = %_getopt_internal.exit.ce
+ ; Add an explicit edge back to before the jump table to ensure this block
+ ; is placed first.
+ br i1 %b, label %newFuncRoot, label %codeRepl127.exitStub.exit
+
+codeRepl127.exitStub.exit:
ret i16 0
parse_options.exit.loopexit.exitStub: ; preds = %_getopt_internal.exit.ce
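
On the tbb/tbh restriction this test exercises: both instructions fetch an unsigned offset from an inline table and add twice that offset to the PC, so they can only branch forward, past the table. Schematically:

    tbb [pc, r0]    @ pc := pc + 2 * zero_extend(byte_table[r0])
                    @ offsets are unsigned, so every destination must lie
                    @ after the table; a destination placed before it is
                    @ unreachable, and tbb/tbh cannot be used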