Diffstat (limited to 'test/CodeGen/ARM')
-rw-r--r--  test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll | 31
-rw-r--r--  test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll | 129
-rw-r--r--  test/CodeGen/ARM/2012-08-30-select.ll | 18
-rw-r--r--  test/CodeGen/ARM/atomic-op.ll | 10
-rw-r--r--  test/CodeGen/ARM/crash-shufflevector.ll | 4
-rw-r--r--  test/CodeGen/ARM/domain-conv-vmovs.ll | 84
-rw-r--r--  test/CodeGen/ARM/fast-isel-pic.ll | 43
-rw-r--r--  test/CodeGen/ARM/fp-fast.ll | 60
-rw-r--r--  test/CodeGen/ARM/integer_insertelement.ll | 35
-rw-r--r--  test/CodeGen/ARM/longMAC.ll | 44
-rw-r--r--  test/CodeGen/ARM/select.ll | 2
-rw-r--r--  test/CodeGen/ARM/select_xform.ll | 14
-rw-r--r--  test/CodeGen/ARM/sub-cmp-peephole.ll | 21
-rw-r--r--  test/CodeGen/ARM/vdup.ll | 34
-rw-r--r--  test/CodeGen/ARM/vector-extend-narrow.ll | 11
-rw-r--r--  test/CodeGen/ARM/vget_lane.ll | 2
16 files changed, 531 insertions, 11 deletions
diff --git a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
index f80b44f..1769ee5 100644
--- a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
+++ b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
@@ -300,3 +300,34 @@ L.entry:
declare <4 x float> @llvm.sin.v4f32(<4 x float>) nounwind readonly
+define void @test_floor(<4 x float>* %X) nounwind {
+
+; CHECK: test_floor:
+
+; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
+; CHECK: movt [[reg0]], :upper16:{{.*}}
+; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}}
+
+; CHECK: {{v?mov(.32)?}} r0,
+; CHECK: bl {{.*}}floorf
+
+; CHECK: {{v?mov(.32)?}} r0,
+; CHECK: bl {{.*}}floorf
+
+; CHECK: {{v?mov(.32)?}} r0,
+; CHECK: bl {{.*}}floorf
+
+; CHECK: {{v?mov(.32)?}} r0,
+; CHECK: bl {{.*}}floorf
+
+; CHECK: vstmia {{.*}}
+
+L.entry:
+ %0 = load <4 x float>* @A, align 16
+ %1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* %X, align 16
+ ret void
+}
+
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readonly
+
diff --git a/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll b/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll
new file mode 100644
index 0000000..ec7f72d
--- /dev/null
+++ b/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll
@@ -0,0 +1,129 @@
+; RUN: llc < %s -mcpu=cortex-a8 -march=thumb
+; Test that this doesn't crash.
+; <rdar://problem/12183003>
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios5.1.0"
+
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32) nounwind readonly
+
+declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) nounwind
+
+define void @findEdges(i8*) nounwind ssp {
+ %2 = icmp sgt i32 undef, 0
+ br i1 %2, label %5, label %3
+
+; <label>:3 ; preds = %5, %1
+ %4 = phi i8* [ %0, %1 ], [ %19, %5 ]
+ ret void
+
+; <label>:5 ; preds = %5, %1
+ %6 = phi i8* [ %19, %5 ], [ %0, %1 ]
+ %7 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* null, i32 1)
+ %8 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %7, 0
+ %9 = getelementptr inbounds i8* null, i32 3
+ %10 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %9, i32 1)
+ %11 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %10, 2
+ %12 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %6, i32 1)
+ %13 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %12, 0
+ %14 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %12, 1
+ %15 = getelementptr inbounds i8* %6, i32 3
+ %16 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %15, i32 1)
+ %17 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %16, 1
+ %18 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %16, 2
+ %19 = getelementptr inbounds i8* %6, i32 48
+ %20 = bitcast <16 x i8> %13 to <2 x i64>
+ %21 = bitcast <16 x i8> %8 to <2 x i64>
+ %22 = bitcast <16 x i8> %14 to <2 x i64>
+ %23 = shufflevector <2 x i64> %22, <2 x i64> undef, <1 x i32> zeroinitializer
+ %24 = bitcast <1 x i64> %23 to <8 x i8>
+ %25 = zext <8 x i8> %24 to <8 x i16>
+ %26 = sub <8 x i16> zeroinitializer, %25
+ %27 = bitcast <16 x i8> %17 to <2 x i64>
+ %28 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %26) nounwind
+ %29 = mul <8 x i16> %28, %28
+ %30 = add <8 x i16> zeroinitializer, %29
+ %31 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> undef, <8 x i16> %30) nounwind
+ %32 = bitcast <16 x i8> %11 to <2 x i64>
+ %33 = shufflevector <2 x i64> %32, <2 x i64> undef, <1 x i32> zeroinitializer
+ %34 = bitcast <1 x i64> %33 to <8 x i8>
+ %35 = zext <8 x i8> %34 to <8 x i16>
+ %36 = sub <8 x i16> %35, zeroinitializer
+ %37 = bitcast <16 x i8> %18 to <2 x i64>
+ %38 = shufflevector <2 x i64> %37, <2 x i64> undef, <1 x i32> zeroinitializer
+ %39 = bitcast <1 x i64> %38 to <8 x i8>
+ %40 = zext <8 x i8> %39 to <8 x i16>
+ %41 = sub <8 x i16> zeroinitializer, %40
+ %42 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %36) nounwind
+ %43 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %41) nounwind
+ %44 = mul <8 x i16> %42, %42
+ %45 = mul <8 x i16> %43, %43
+ %46 = add <8 x i16> %45, %44
+ %47 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %31, <8 x i16> %46) nounwind
+ %48 = bitcast <8 x i16> %47 to <2 x i64>
+ %49 = shufflevector <2 x i64> %48, <2 x i64> undef, <1 x i32> zeroinitializer
+ %50 = bitcast <1 x i64> %49 to <4 x i16>
+ %51 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %50, <4 x i16> undef) nounwind
+ %52 = tail call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %51, <4 x i32> <i32 -6, i32 -6, i32 -6, i32 -6>)
+ %53 = bitcast <4 x i16> %52 to <1 x i64>
+ %54 = shufflevector <1 x i64> %53, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %55 = bitcast <2 x i64> %54 to <8 x i16>
+ %56 = tail call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %55, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
+ %57 = shufflevector <2 x i64> %20, <2 x i64> undef, <1 x i32> <i32 1>
+ %58 = bitcast <1 x i64> %57 to <8 x i8>
+ %59 = zext <8 x i8> %58 to <8 x i16>
+ %60 = sub <8 x i16> zeroinitializer, %59
+ %61 = shufflevector <2 x i64> %21, <2 x i64> undef, <1 x i32> <i32 1>
+ %62 = bitcast <1 x i64> %61 to <8 x i8>
+ %63 = zext <8 x i8> %62 to <8 x i16>
+ %64 = sub <8 x i16> %63, zeroinitializer
+ %65 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %60) nounwind
+ %66 = mul <8 x i16> %65, %65
+ %67 = add <8 x i16> zeroinitializer, %66
+ %68 = shufflevector <2 x i64> %27, <2 x i64> undef, <1 x i32> <i32 1>
+ %69 = bitcast <1 x i64> %68 to <8 x i8>
+ %70 = zext <8 x i8> %69 to <8 x i16>
+ %71 = sub <8 x i16> zeroinitializer, %70
+ %72 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> undef) nounwind
+ %73 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %71) nounwind
+ %74 = mul <8 x i16> %72, %72
+ %75 = mul <8 x i16> %73, %73
+ %76 = add <8 x i16> %75, %74
+ %77 = shufflevector <2 x i64> %32, <2 x i64> undef, <1 x i32> <i32 1>
+ %78 = bitcast <1 x i64> %77 to <8 x i8>
+ %79 = zext <8 x i8> %78 to <8 x i16>
+ %80 = sub <8 x i16> %79, zeroinitializer
+ %81 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %80) nounwind
+ %82 = mul <8 x i16> %81, %81
+ %83 = add <8 x i16> zeroinitializer, %82
+ %84 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %76, <8 x i16> %83) nounwind
+ %85 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %67, <8 x i16> %84) nounwind
+ %86 = bitcast <8 x i16> %85 to <2 x i64>
+ %87 = shufflevector <2 x i64> %86, <2 x i64> undef, <1 x i32> <i32 1>
+ %88 = bitcast <1 x i64> %87 to <4 x i16>
+ %89 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %88, <4 x i16> undef) nounwind
+ %90 = tail call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %89, <4 x i32> <i32 -6, i32 -6, i32 -6, i32 -6>)
+ %91 = bitcast <4 x i16> %90 to <1 x i64>
+ %92 = shufflevector <1 x i64> undef, <1 x i64> %91, <2 x i32> <i32 0, i32 1>
+ %93 = bitcast <2 x i64> %92 to <8 x i16>
+ %94 = tail call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %93, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
+ %95 = bitcast <8 x i8> %56 to <1 x i64>
+ %96 = bitcast <8 x i8> %94 to <1 x i64>
+ %97 = shufflevector <1 x i64> %95, <1 x i64> %96, <2 x i32> <i32 0, i32 1>
+ %98 = bitcast <2 x i64> %97 to <16 x i8>
+ tail call void @llvm.arm.neon.vst1.v16i8(i8* null, <16 x i8> %98, i32 1)
+ %99 = icmp slt i32 undef, undef
+ br i1 %99, label %5, label %3
+}
+
+declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+
+declare <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+
+declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+
+declare <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16>) nounwind readnone
diff --git a/test/CodeGen/ARM/2012-08-30-select.ll b/test/CodeGen/ARM/2012-08-30-select.ll
new file mode 100644
index 0000000..8471be5
--- /dev/null
+++ b/test/CodeGen/ARM/2012-08-30-select.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+; rdar://12201387
+
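+; Check that selecting between a loaded vector and zero uses a predicated
+; move (it ne / vmovne.i32) instead of a branch.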
+;CHECK: select_s_v_v
+;CHECK: it ne
+;CHECK-NEXT: vmovne.i32
+;CHECK: bx
+define <16 x i8> @select_s_v_v(i32 %avail, i8* %bar) {
+entry:
+ %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1)
+ %and = and i32 %avail, 1
+ %tobool = icmp eq i32 %and, 0
+ %vld1. = select i1 %tobool, <16 x i8> %vld1, <16 x i8> zeroinitializer
+ ret <16 x i8> %vld1.
+}
+
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32)
+
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index 8967730..6e6b363 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -159,3 +159,13 @@ entry:
store i8 %3, i8* %old
ret void
}
+
+; CHECK: func4
+; This function should not need to use callee-saved registers.
+; rdar://problem/12203728
+; CHECK-NOT: r4
+define i32 @func4(i32* %p) nounwind optsize ssp {
+entry:
+ %0 = atomicrmw add i32* %p, i32 1 monotonic
+ ret i32 %0
+}
diff --git a/test/CodeGen/ARM/crash-shufflevector.ll b/test/CodeGen/ARM/crash-shufflevector.ll
index ece4234..bdc0e0e 100644
--- a/test/CodeGen/ARM/crash-shufflevector.ll
+++ b/test/CodeGen/ARM/crash-shufflevector.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7--
+; RUN: llc < %s -mtriple=armv7
declare void @g(<16 x i8>)
define void @f(<4 x i8> %param1, <4 x i8> %param2) {
@@ -7,4 +7,4 @@ define void @f(<4 x i8> %param1, <4 x i8> %param2) {
%z = shufflevector <16 x i8> %y1, <16 x i8> %y2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
call void @g(<16 x i8> %z)
ret void
-}
+}
\ No newline at end of file
diff --git a/test/CodeGen/ARM/domain-conv-vmovs.ll b/test/CodeGen/ARM/domain-conv-vmovs.ll
new file mode 100644
index 0000000..e19185b
--- /dev/null
+++ b/test/CodeGen/ARM/domain-conv-vmovs.ll
@@ -0,0 +1,84 @@
+; RUN: llc -verify-machineinstrs -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a9 -mattr=+neon,+neonfp -float-abi=hard < %s | FileCheck %s
+
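+; The tests below check that the scalar vmovs needed to insert a float into a
+; NEON vector are executed in the NEON domain, as vext.32/vdup.32 instructions.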
+define <2 x float> @test_vmovs_via_vext_lane0to0(float %arg, <2 x float> %in) {
+; CHECK: test_vmovs_via_vext_lane0to0:
+ %vec = insertelement <2 x float> %in, float %arg, i32 0
+ %res = fadd <2 x float> %vec, %vec
+
+; CHECK: vext.32 d1, d1, d0, #1
+; CHECK: vext.32 d1, d1, d1, #1
+; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1
+
+ ret <2 x float> %res
+}
+
+define <2 x float> @test_vmovs_via_vext_lane0to1(float %arg, <2 x float> %in) {
+; CHECK: test_vmovs_via_vext_lane0to1:
+ %vec = insertelement <2 x float> %in, float %arg, i32 1
+ %res = fadd <2 x float> %vec, %vec
+
+; CHECK: vext.32 d1, d1, d1, #1
+; CHECK: vext.32 d1, d1, d0, #1
+; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1
+
+ ret <2 x float> %res
+}
+
+define <2 x float> @test_vmovs_via_vext_lane1to0(float, float %arg, <2 x float> %in) {
+; CHECK: test_vmovs_via_vext_lane1to0:
+ %vec = insertelement <2 x float> %in, float %arg, i32 0
+ %res = fadd <2 x float> %vec, %vec
+
+; CHECK: vext.32 d1, d1, d1, #1
+; CHECK: vext.32 d1, d0, d1, #1
+; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1
+
+ ret <2 x float> %res
+}
+
+define <2 x float> @test_vmovs_via_vext_lane1to1(float, float %arg, <2 x float> %in) {
+; CHECK: test_vmovs_via_vext_lane1to1:
+ %vec = insertelement <2 x float> %in, float %arg, i32 1
+ %res = fadd <2 x float> %vec, %vec
+
+; CHECK: vext.32 d1, d0, d1, #1
+; CHECK: vext.32 d1, d1, d1, #1
+; CHECK: vadd.f32 {{d[0-9]+}}, d1, d1
+
+ ret <2 x float> %res
+}
+
+
+define float @test_vmovs_via_vdup(float, float %ret, float %lhs, float %rhs) {
+; CHECK: test_vmovs_via_vdup:
+
+ ; Do an operation (which will end up NEON because of +neonfp) to convince the
+ ; execution-domain pass that NEON is a good thing to use.
+ %res = fadd float %ret, %ret
+ ; It makes sense for LLVM to do the addition in d0 here, because it's going
+ ; to be returned. This means it will want a "vmov s0, s1":
+; CHECK: vdup.32 d0, d0[1]
+
+ ret float %res
+}
+
+declare float @llvm.sqrt.f32(float)
+
+declare void @bar()
+
+; This is a compile test.
+define float @test_ineligible(float, float %in) {
+; CHECK: test_ineligible:
+
+ %sqrt = call float @llvm.sqrt.f32(float %in)
+ %val = fadd float %sqrt, %sqrt
+
+ ; This call forces a move from a callee-saved register to the return-reg. That
+ ; move is not eligible for conversion to a d-register instruction because the
+ ; use-def chains would be messed up. Primarily a compile test (this used to
+ ; cause an internal fault).
+ call void @bar()
+; CHECK: bl bar
+; CHECK: vmov.f32 {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %val
+}
\ No newline at end of file
diff --git a/test/CodeGen/ARM/fast-isel-pic.ll b/test/CodeGen/ARM/fast-isel-pic.ll
new file mode 100644
index 0000000..392a845
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-pic.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=arm-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARMv7
+
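+; Check the PIC addressing sequences fast-isel emits for global loads: movw/movt
+; plus a pc-relative add for Thumb2 and ARMv7, and a constant-pool load plus add for ARM.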
+@g = global i32 0, align 4
+
+define i32 @LoadGV() {
+entry:
+; THUMB: LoadGV
+; THUMB: movw [[reg0:r[0-9]+]],
+; THUMB: movt [[reg0]],
+; THUMB: add [[reg0]], pc
+; ARM: LoadGV
+; ARM: ldr [[reg1:r[0-9]+]],
+; ARM: add [[reg1]], pc, [[reg1]]
+; ARMv7: LoadGV
+; ARMv7: movw [[reg2:r[0-9]+]],
+; ARMv7: movt [[reg2]],
+; ARMv7: add [[reg2]], pc, [[reg2]]
+ %tmp = load i32* @g
+ ret i32 %tmp
+}
+
+@i = external global i32
+
+define i32 @LoadIndirectSymbol() {
+entry:
+; THUMB: LoadIndirectSymbol
+; THUMB: movw r[[reg3:[0-9]+]],
+; THUMB: movt r[[reg3]],
+; THUMB: add r[[reg3]], pc
+; THUMB: ldr r[[reg3]], [r[[reg3]]]
+; ARM: LoadIndirectSymbol
+; ARM: ldr [[reg4:r[0-9]+]],
+; ARM: ldr [[reg4]], [pc, [[reg4]]]
+; ARMv7: LoadIndirectSymbol
+; ARMv7: movw r[[reg5:[0-9]+]],
+; ARMv7: movt r[[reg5]],
+; ARMv7: add r[[reg5]], pc, r[[reg5]]
+; ARMv7: ldr r[[reg5]], [r[[reg5]]]
+ %tmp = load i32* @i
+ ret i32 %tmp
+}
diff --git a/test/CodeGen/ARM/fp-fast.ll b/test/CodeGen/ARM/fp-fast.ll
new file mode 100644
index 0000000..ec57187
--- /dev/null
+++ b/test/CodeGen/ARM/fp-fast.ll
@@ -0,0 +1,60 @@
+; RUN: llc -march=arm -mcpu=cortex-a9 -mattr=+vfp4 -enable-unsafe-fp-math < %s | FileCheck %s
+
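+; With -enable-unsafe-fp-math, calls to llvm.fma.f32 whose operands allow it
+; should be folded into plain vmul/vadd/vsub instructions rather than vfma.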
+; CHECK: test1
+define float @test1(float %x) {
+; CHECK-NOT: vfma
+; CHECK: vmul.f32
+; CHECK-NOT: vfma
+ %t1 = fmul float %x, 3.0
+ %t2 = call float @llvm.fma.f32(float %x, float 2.0, float %t1)
+ ret float %t2
+}
+
+; CHECK: test2
+define float @test2(float %x, float %y) {
+; CHECK-NOT: vmul
+; CHECK: vfma.f32
+; CHECK-NOT: vmul
+ %t1 = fmul float %x, 3.0
+ %t2 = call float @llvm.fma.f32(float %t1, float 2.0, float %y)
+ ret float %t2
+}
+
+; CHECK: test3
+define float @test3(float %x, float %y) {
+; CHECK-NOT: vfma
+; CHECK: vadd.f32
+; CHECK-NOT: vfma
+ %t2 = call float @llvm.fma.f32(float %x, float 1.0, float %y)
+ ret float %t2
+}
+
+; CHECK: test4
+define float @test4(float %x, float %y) {
+; CHECK-NOT: vfma
+; CHECK: vsub.f32
+; CHECK-NOT: vfma
+ %t2 = call float @llvm.fma.f32(float %x, float -1.0, float %y)
+ ret float %t2
+}
+
+; CHECK: test5
+define float @test5(float %x) {
+; CHECK-NOT: vfma
+; CHECK: vmul.f32
+; CHECK-NOT: vfma
+ %t2 = call float @llvm.fma.f32(float %x, float 2.0, float %x)
+ ret float %t2
+}
+
+; CHECK: test6
+define float @test6(float %x) {
+; CHECK-NOT: vfma
+; CHECK: vmul.f32
+; CHECK-NOT: vfma
+ %t1 = fsub float -0.0, %x
+ %t2 = call float @llvm.fma.f32(float %x, float 5.0, float %t1)
+ ret float %t2
+}
+
+declare float @llvm.fma.f32(float, float, float)
diff --git a/test/CodeGen/ARM/integer_insertelement.ll b/test/CodeGen/ARM/integer_insertelement.ll
new file mode 100644
index 0000000..4f2d7e3
--- /dev/null
+++ b/test/CodeGen/ARM/integer_insertelement.ll
@@ -0,0 +1,35 @@
+; RUN: llc %s -o - -march=arm -mattr=+neon | FileCheck %s
+
+; This test checks that when inserting one (integer) element into a vector,
+; the vector is not spuriously copied. "vorr dX, dY, dY" is the way of moving
+; one DPR to another that we check for.
+
+; CHECK: @f
+; CHECK-NOT: vorr d
+; CHECK: vmov s
+; CHECK-NOT: vorr d
+; CHECK: mov pc, lr
+define <4 x i32> @f(<4 x i32> %in) {
+ %1 = insertelement <4 x i32> %in, i32 255, i32 3
+ ret <4 x i32> %1
+}
+
+; CHECK: @g
+; CHECK-NOT: vorr d
+; CHECK: vmov.16 d
+; CHECK-NOT: vorr d
+; CHECK: mov pc, lr
+define <8 x i16> @g(<8 x i16> %in) {
+ %1 = insertelement <8 x i16> %in, i16 255, i32 7
+ ret <8 x i16> %1
+}
+
+; CHECK: @h
+; CHECK-NOT: vorr d
+; CHECK: vmov.8 d
+; CHECK-NOT: vorr d
+; CHECK: mov pc, lr
+define <16 x i8> @h(<16 x i8> %in) {
+ %1 = insertelement <16 x i8> %in, i8 255, i32 15
+ ret <16 x i8> %1
+}
diff --git a/test/CodeGen/ARM/longMAC.ll b/test/CodeGen/ARM/longMAC.ll
new file mode 100644
index 0000000..e4a00e9
--- /dev/null
+++ b/test/CodeGen/ARM/longMAC.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -march=arm | FileCheck %s
+; Check that signed and unsigned multiply-accumulate-long (smlal/umlal) are generated.
+
+define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
+;CHECK: MACLongTest1:
+;CHECK: umlal
+ %conv = zext i32 %a to i64
+ %conv1 = zext i32 %b to i64
+ %mul = mul i64 %conv1, %conv
+ %add = add i64 %mul, %c
+ ret i64 %add
+}
+
+define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) {
+;CHECK: MACLongTest2:
+;CHECK: smlal
+ %conv = sext i32 %a to i64
+ %conv1 = sext i32 %b to i64
+ %mul = mul nsw i64 %conv1, %conv
+ %add = add nsw i64 %mul, %c
+ ret i64 %add
+}
+
+define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) {
+;CHECK: MACLongTest3:
+;CHECK: umlal
+ %conv = zext i32 %b to i64
+ %conv1 = zext i32 %a to i64
+ %mul = mul i64 %conv, %conv1
+ %conv2 = zext i32 %c to i64
+ %add = add i64 %mul, %conv2
+ ret i64 %add
+}
+
+define i64 @MACLongTest4(i32 %a, i32 %b, i32 %c) {
+;CHECK: MACLongTest4:
+;CHECK: smlal
+ %conv = sext i32 %b to i64
+ %conv1 = sext i32 %a to i64
+ %mul = mul nsw i64 %conv, %conv1
+ %conv2 = sext i32 %c to i64
+ %add = add nsw i64 %mul, %conv2
+ ret i64 %add
+}
diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll
index 5575566..62708ed 100644
--- a/test/CodeGen/ARM/select.ll
+++ b/test/CodeGen/ARM/select.ll
@@ -80,7 +80,7 @@ define double @f7(double %a, double %b) {
; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
; CHECK-NEON-NEXT: cmp r0, [[R3]]
; CHECK-NEON-NEXT: it eq
-; CHECK-NEON-NEXT: addeq.w {{r.*}}, [[R2]]
+; CHECK-NEON-NEXT: addeq{{.*}} [[R2]], #4
; CHECK-NEON-NEXT: ldr
; CHECK-NEON: bx
diff --git a/test/CodeGen/ARM/select_xform.ll b/test/CodeGen/ARM/select_xform.ll
index cfc0e70..7507808 100644
--- a/test/CodeGen/ARM/select_xform.ll
+++ b/test/CodeGen/ARM/select_xform.ll
@@ -9,7 +9,7 @@ define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind {
; T2: t1:
; T2: mvn r0, #-2147483648
-; T2: addle.w r1, r1
+; T2: addle r1, r0
; T2: mov r0, r1
%tmp1 = icmp sgt i32 %c, 10
%tmp2 = select i1 %tmp1, i32 0, i32 2147483647
@@ -23,7 +23,7 @@ define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; ARM: mov r0, r1
; T2: t2:
-; T2: suble.w r1, r1, #10
+; T2: suble r1, #10
; T2: mov r0, r1
%tmp1 = icmp sgt i32 %c, 10
%tmp2 = select i1 %tmp1, i32 0, i32 10
@@ -37,7 +37,7 @@ define i32 @t3(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
; ARM: mov r0, r3
; T2: t3:
-; T2: andge.w r3, r3, r2
+; T2: andge r3, r2
; T2: mov r0, r3
%cond = icmp slt i32 %a, %b
%z = select i1 %cond, i32 -1, i32 %x
@@ -51,7 +51,7 @@ define i32 @t4(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
; ARM: mov r0, r3
; T2: t4:
-; T2: orrge.w r3, r3, r2
+; T2: orrge r3, r2
; T2: mov r0, r3
%cond = icmp slt i32 %a, %b
%z = select i1 %cond, i32 0, i32 %x
@@ -81,7 +81,7 @@ define i32 @t6(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; T2: t6:
; T2-NOT: movge
-; T2: eorlt.w r3, r3, r2
+; T2: eorlt r3, r2
%cond = icmp slt i32 %a, %b
%tmp1 = select i1 %cond, i32 %c, i32 0
%tmp2 = xor i32 %tmp1, %d
@@ -200,7 +200,7 @@ entry:
; T2: t13
; T2: cmp r1, #10
-; T2: addgt.w r0, r0, #1
+; T2: addgt r0, #1
%cmp = icmp sgt i32 %a, 10
%conv = zext i1 %cmp to i32
%add = add i32 %conv, %c
@@ -216,7 +216,7 @@ entry:
; T2: t14
; T2: cmp r1, #10
-; T2: subgt.w r0, r0, #1
+; T2: subgt r0, #1
%cmp = icmp sgt i32 %a, 10
%conv = sext i1 %cmp to i32
%add = add i32 %conv, %c
diff --git a/test/CodeGen/ARM/sub-cmp-peephole.ll b/test/CodeGen/ARM/sub-cmp-peephole.ll
index 6fcbdee..2961b94 100644
--- a/test/CodeGen/ARM/sub-cmp-peephole.ll
+++ b/test/CodeGen/ARM/sub-cmp-peephole.ll
@@ -63,3 +63,24 @@ if.then:
if.else:
ret i32 %sub
}
+
+; If the sub/rsb instruction is predicated, we can't use the flags.
+; <rdar://problem/12263428>
+; Test case from MultiSource/Benchmarks/Ptrdist/bc/number.s
+; CHECK: bc_raise
+; CHECK: rsbeq
+; CHECK: cmp
+define i32 @bc_raise() nounwind ssp {
+entry:
+ %val.2.i = select i1 undef, i32 0, i32 undef
+ %sub.i = sub nsw i32 0, %val.2.i
+ %retval.0.i = select i1 undef, i32 %val.2.i, i32 %sub.i
+ %cmp1 = icmp eq i32 %retval.0.i, 0
+ br i1 %cmp1, label %land.lhs.true, label %if.end11
+
+land.lhs.true: ; preds = %num2long.exit
+ ret i32 17
+
+if.end11: ; preds = %num2long.exit
+ ret i32 23
+}
diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll
index 05332e4..a8c224b 100644
--- a/test/CodeGen/ARM/vdup.ll
+++ b/test/CodeGen/ARM/vdup.ll
@@ -261,3 +261,37 @@ define void @redundantVdup(<8 x i8>* %ptr) nounwind {
store <8 x i8> %2, <8 x i8>* %ptr, align 8
ret void
}
+
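+; An insertelement chain that splats one value into several lanes should be
+; matched to vdup.32.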
+define <4 x i32> @tdupi(i32 %x, i32 %y) {
+;CHECK: tdupi
+;CHECK: vdup.32
+ %1 = insertelement <4 x i32> undef, i32 %x, i32 0
+ %2 = insertelement <4 x i32> %1, i32 %x, i32 1
+ %3 = insertelement <4 x i32> %2, i32 %x, i32 2
+ %4 = insertelement <4 x i32> %3, i32 %y, i32 3
+ ret <4 x i32> %4
+}
+
+define <4 x float> @tdupf(float %x, float %y) {
+;CHECK: tdupf
+;CHECK: vdup.32
+ %1 = insertelement <4 x float> undef, float %x, i32 0
+ %2 = insertelement <4 x float> %1, float %x, i32 1
+ %3 = insertelement <4 x float> %2, float %x, i32 2
+ %4 = insertelement <4 x float> %3, float %y, i32 3
+ ret <4 x float> %4
+}
+
+; This test checks that when splatting an element from a vector into another,
+; the value isn't moved out to GPRs first.
+define <4 x i32> @tduplane(<4 x i32> %invec) {
+;CHECK: tduplane
+;CHECK-NOT: vmov {{.*}}, d16[1]
+;CHECK: vdup.32 {{.*}}, d16[1]
+ %in = extractelement <4 x i32> %invec, i32 1
+ %1 = insertelement <4 x i32> undef, i32 %in, i32 0
+ %2 = insertelement <4 x i32> %1, i32 %in, i32 1
+ %3 = insertelement <4 x i32> %2, i32 %in, i32 2
+ %4 = insertelement <4 x i32> %3, i32 255, i32 3
+ ret <4 x i32> %4
+}
diff --git a/test/CodeGen/ARM/vector-extend-narrow.ll b/test/CodeGen/ARM/vector-extend-narrow.ll
index 8fd3db2..22af797 100644
--- a/test/CodeGen/ARM/vector-extend-narrow.ll
+++ b/test/CodeGen/ARM/vector-extend-narrow.ll
@@ -62,3 +62,14 @@ define <4 x i8> @i(<4 x i8>* %x) {
%2 = sdiv <4 x i8> zeroinitializer, %1
ret <4 x i8> %2
}
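+; A zero-extending <4 x i8> load should use vmovl.u8/vmovl.u16 without emitting
+; a redundant vand to mask the already-zeroed high bits.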
+; CHECK: j:
+define <4 x i32> @j(<4 x i8>* %in) nounwind {
+ ; CHECK: vld1
+ ; CHECK: vmovl.u8
+ ; CHECK: vmovl.u16
+ ; CHECK-NOT: vand
+ %1 = load <4 x i8>* %in, align 4
+ %2 = zext <4 x i8> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
diff --git a/test/CodeGen/ARM/vget_lane.ll b/test/CodeGen/ARM/vget_lane.ll
index 1fc885d..2ed65c9 100644
--- a/test/CodeGen/ARM/vget_lane.ll
+++ b/test/CodeGen/ARM/vget_lane.ll
@@ -200,7 +200,7 @@ define <8 x i16> @vsetQ_lane16(<8 x i16>* %A, i16 %B) nounwind {
define <4 x i32> @vsetQ_lane32(<4 x i32>* %A, i32 %B) nounwind {
;CHECK: vsetQ_lane32:
-;CHECK: vmov.32
+;CHECK: vmov s
%tmp1 = load <4 x i32>* %A
%tmp2 = insertelement <4 x i32> %tmp1, i32 %B, i32 1
ret <4 x i32> %tmp2