author    Stephen Hines <srhines@google.com>  2014-12-01 14:51:49 -0800
committer Stephen Hines <srhines@google.com>  2014-12-02 16:08:10 -0800
commit    37ed9c199ca639565f6ce88105f9e39e898d82d0 (patch)
tree      8fb36d3910e3ee4c4e1b7422f4f017108efc52f5 /test/CodeGen/Thumb2
parent    d2327b22152ced7bc46dc629fc908959e8a52d03 (diff)
Update aosp/master LLVM for rebase to r222494.
Change-Id: Ic787f5e0124df789bd26f3f24680f45e678eef2d
Diffstat (limited to 'test/CodeGen/Thumb2')
-rw-r--r--  test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll         |   3
-rw-r--r--  test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll      |   2
-rw-r--r--  test/CodeGen/Thumb2/aapcs.ll                       |  50
-rw-r--r--  test/CodeGen/Thumb2/constant-islands-new-island.ll |  31
-rw-r--r--  test/CodeGen/Thumb2/cortex-fp.ll                   |   9
-rw-r--r--  test/CodeGen/Thumb2/float-cmp.ll                   | 301
-rw-r--r--  test/CodeGen/Thumb2/float-intrinsics-double.ll     | 228
-rw-r--r--  test/CodeGen/Thumb2/float-intrinsics-float.ll      | 221
-rw-r--r--  test/CodeGen/Thumb2/float-ops.ll                   | 293
-rw-r--r--  test/CodeGen/Thumb2/stack_guard_remat.ll           |  43
-rw-r--r--  test/CodeGen/Thumb2/thumb2-sxt_rot.ll              |  22
-rw-r--r--  test/CodeGen/Thumb2/thumb2-uxt_rot.ll              |  26
12 files changed, 1217 insertions, 12 deletions
diff --git a/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll b/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
index c8eac8d..59c2367 100644
--- a/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
+++ b/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
@@ -13,6 +13,7 @@ entry:
; CHECK-NOT: mov sp, r7
; CHECK: add sp, #8
call void @__gcov_flush() nounwind
+ call void @llvm.va_start(i8* null)
br i1 undef, label %bb5, label %bb
bb: ; preds = %bb, %entry
@@ -27,3 +28,5 @@ bb5: ; preds = %bb, %entry
declare hidden void @__gcov_flush()
declare i32 @execvp(i8*, i8**) nounwind
+
+declare void @llvm.va_start(i8*) nounwind
diff --git a/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll b/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
index 524e5a6..89b7148 100644
--- a/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
+++ b/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -std-compile-opts | \
+; RUN: opt < %s -O3 | \
; RUN: llc -mtriple=thumbv7-apple-darwin10 -mattr=+neon | FileCheck %s
define void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y_size, i8* %bp) nounwind {
diff --git a/test/CodeGen/Thumb2/aapcs.ll b/test/CodeGen/Thumb2/aapcs.ll
new file mode 100644
index 0000000..21af8c1
--- /dev/null
+++ b/test/CodeGen/Thumb2/aapcs.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m4 -mattr=-vfp2 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 -mattr=+vfp4,+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 -mattr=+vfp3 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP
+
+define float @float_in_reg(float %a, float %b) {
+entry:
+; CHECK-LABEL: float_in_reg:
+; SOFT: mov r0, r1
+; HARD: vmov.f32 s0, s1
+; CHECK-NEXT: bx lr
+ ret float %b
+}
+
+define double @double_in_reg(double %a, double %b) {
+entry:
+; CHECK-LABEL: double_in_reg:
+; SOFT: mov r0, r2
+; SOFT: mov r1, r3
+; SP: vmov.f32 s0, s2
+; SP: vmov.f32 s1, s3
+; DP: vmov.f64 d0, d1
+; CHECK-NEXT: bx lr
+ ret double %b
+}
+
+define float @float_on_stack(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, float %i) {
+; CHECK-LABEL: float_on_stack:
+; SOFT: ldr r0, [sp, #48]
+; HARD: vldr s0, [sp]
+; CHECK-NEXT: bx lr
+ ret float %i
+}
+
+define double @double_on_stack(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i) {
+; CHECK-LABEL: double_on_stack:
+; SOFT: ldr r0, [sp, #48]
+; SOFT: ldr r1, [sp, #52]
+; HARD: vldr d0, [sp]
+; CHECK-NEXT: bx lr
+ ret double %i
+}
+
+define double @double_not_split(double %a, double %b, double %c, double %d, double %e, double %f, double %g, float %h, double %i) {
+; CHECK-LABEL: double_not_split:
+; SOFT: ldr r0, [sp, #48]
+; SOFT: ldr r1, [sp, #52]
+; HARD: vldr d0, [sp]
+; CHECK-NEXT: bx lr
+ ret double %i
+}
diff --git a/test/CodeGen/Thumb2/constant-islands-new-island.ll b/test/CodeGen/Thumb2/constant-islands-new-island.ll
new file mode 100644
index 0000000..8ed657e
--- /dev/null
+++ b/test/CodeGen/Thumb2/constant-islands-new-island.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabihf %s -o - | FileCheck %s
+
+; Check that new water is created by splitting the basic block right after the
+; load instruction. Previously, new water was created before the load
+; instruction, which caused the pass to fail to converge.
+
+define void @test(i1 %tst) {
+; CHECK-LABEL: test:
+; CHECK: vldr {{s[0-9]+}}, [[CONST:\.LCPI[0-9]+_[0-9]+]]
+; CHECK-NEXT: b.w [[CONTINUE:\.LBB[0-9]+_[0-9]+]]
+
+; CHECK: [[CONST]]:
+; CHECK-NEXT: .long
+
+; CHECK: [[CONTINUE]]:
+
+entry:
+ call i32 @llvm.arm.space(i32 2000, i32 undef)
+ br i1 %tst, label %true, label %false
+
+true:
+ %val = phi float [12345.0, %entry], [undef, %false]
+ call void @bar(float %val)
+ ret void
+
+false:
+ br label %true
+}
+
+declare void @bar(float)
+declare i32 @llvm.arm.space(i32, i32)
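The @llvm.arm.space intrinsic above reserves the requested number of code bytes, pushing the vldr literal load past its roughly +/-1KB addressing range and forcing the constant-islands pass to split the block and emit a fresh island; the b.w around that island is what the checks verify. Without the padding no island or branch would be needed, as in this hypothetical contrast case:

; Hypothetical contrast (not in the test file): with no artificial
; padding the constant pool entry fits after the function body and no
; mid-block island is created.
define float @no_island() {
entry:
  ret float 12345.0
}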
diff --git a/test/CodeGen/Thumb2/cortex-fp.ll b/test/CodeGen/Thumb2/cortex-fp.ll
index e63970a..5548492e 100644
--- a/test/CodeGen/Thumb2/cortex-fp.ll
+++ b/test/CodeGen/Thumb2/cortex-fp.ll
@@ -1,13 +1,15 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=CORTEXM3
; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=CORTEXM4
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=CORTEXM7
; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=CORTEXA8
define float @foo(float %a, float %b) {
entry:
; CHECK-LABEL: foo:
-; CORTEXM3: blx ___mulsf3
+; CORTEXM3: bl ___mulsf3
; CORTEXM4: vmul.f32 s
+; CORTEXM7: vmul.f32 s
; CORTEXA8: vmul.f32 d
%0 = fmul float %a, %b
ret float %0
@@ -17,8 +19,9 @@ define double @bar(double %a, double %b) {
entry:
; CHECK-LABEL: bar:
%0 = fmul double %a, %b
-; CORTEXM3: blx ___muldf3
-; CORTEXM4: blx ___muldf3
+; CORTEXM3: bl ___muldf3
+; CORTEXM4: {{bl|b.w}} ___muldf3
+; CORTEXM7: vmul.f64 d
; CORTEXA8: vmul.f64 d
ret double %0
}
diff --git a/test/CodeGen/Thumb2/float-cmp.ll b/test/CodeGen/Thumb2/float-cmp.ll
new file mode 100644
index 0000000..88d6c3b
--- /dev/null
+++ b/test/CodeGen/Thumb2/float-cmp.ll
@@ -0,0 +1,301 @@
+; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=NONE
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP
+
+
+
+define i1 @cmp_f_false(float %a, float %b) {
+; CHECK-LABEL: cmp_f_false:
+; NONE: movs r0, #0
+; HARD: movs r0, #0
+ %1 = fcmp false float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_oeq(float %a, float %b) {
+; CHECK-LABEL: cmp_f_oeq:
+; NONE: bl __aeabi_fcmpeq
+; HARD: vcmpe.f32
+; HARD: moveq r0, #1
+ %1 = fcmp oeq float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_ogt(float %a, float %b) {
+; CHECK-LABEL: cmp_f_ogt:
+; NONE: bl __aeabi_fcmpgt
+; HARD: vcmpe.f32
+; HARD: movgt r0, #1
+ %1 = fcmp ogt float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_oge(float %a, float %b) {
+; CHECK-LABEL: cmp_f_oge:
+; NONE: bl __aeabi_fcmpge
+; HARD: vcmpe.f32
+; HARD: movge r0, #1
+ %1 = fcmp oge float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_olt(float %a, float %b) {
+; CHECK-LABEL: cmp_f_olt:
+; NONE: bl __aeabi_fcmplt
+; HARD: vcmpe.f32
+; HARD: movmi r0, #1
+ %1 = fcmp olt float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_ole(float %a, float %b) {
+; CHECK-LABEL: cmp_f_ole:
+; NONE: bl __aeabi_fcmple
+; HARD: vcmpe.f32
+; HARD: movls r0, #1
+ %1 = fcmp ole float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_one(float %a, float %b) {
+; CHECK-LABEL: cmp_f_one:
+; NONE: bl __aeabi_fcmpgt
+; NONE: bl __aeabi_fcmplt
+; HARD: vcmpe.f32
+; HARD: movmi r0, #1
+; HARD: movgt r0, #1
+ %1 = fcmp one float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_ord(float %a, float %b) {
+; CHECK-LABEL: cmp_f_ord:
+; NONE: bl __aeabi_fcmpun
+; HARD: vcmpe.f32
+; HARD: movvc r0, #1
+ %1 = fcmp ord float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_ueq(float %a, float %b) {
+; CHECK-LABEL: cmp_f_ueq:
+; NONE: bl __aeabi_fcmpeq
+; NONE: bl __aeabi_fcmpun
+; HARD: vcmpe.f32
+; HARD: moveq r0, #1
+; HARD: movvs r0, #1
+ %1 = fcmp ueq float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_ugt(float %a, float %b) {
+; CHECK-LABEL: cmp_f_ugt:
+; NONE: bl __aeabi_fcmpgt
+; NONE: bl __aeabi_fcmpun
+; HARD: vcmpe.f32
+; HARD: movhi r0, #1
+ %1 = fcmp ugt float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_uge(float %a, float %b) {
+; CHECK-LABEL: cmp_f_uge:
+; NONE: bl __aeabi_fcmpge
+; NONE: bl __aeabi_fcmpun
+; HARD: vcmpe.f32
+; HARD: movpl r0, #1
+ %1 = fcmp uge float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_ult(float %a, float %b) {
+; CHECK-LABEL: cmp_f_ult:
+; NONE: bl __aeabi_fcmplt
+; NONE: bl __aeabi_fcmpun
+; HARD: vcmpe.f32
+; HARD: movlt r0, #1
+ %1 = fcmp ult float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_ule(float %a, float %b) {
+; CHECK-LABEL: cmp_f_ule:
+; NONE: bl __aeabi_fcmple
+; NONE: bl __aeabi_fcmpun
+; HARD: vcmpe.f32
+; HARD: movle r0, #1
+ %1 = fcmp ule float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_une(float %a, float %b) {
+; CHECK-LABEL: cmp_f_une:
+; NONE: bl __aeabi_fcmpeq
+; HARD: vcmpe.f32
+; HARD: movne r0, #1
+ %1 = fcmp une float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_uno(float %a, float %b) {
+; CHECK-LABEL: cmp_f_uno:
+; NONE: bl __aeabi_fcmpun
+; HARD: vcmpe.f32
+; HARD: movvs r0, #1
+ %1 = fcmp uno float %a, %b
+ ret i1 %1
+}
+define i1 @cmp_f_true(float %a, float %b) {
+; CHECK-LABEL: cmp_f_true:
+; NONE: movs r0, #1
+; HARD: movs r0, #1
+ %1 = fcmp true float %a, %b
+ ret i1 %1
+}
+
+define i1 @cmp_d_false(double %a, double %b) {
+; CHECK-LABEL: cmp_d_false:
+; NONE: movs r0, #0
+; HARD: movs r0, #0
+ %1 = fcmp false double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_oeq(double %a, double %b) {
+; CHECK-LABEL: cmp_d_oeq:
+; NONE: bl __aeabi_dcmpeq
+; SP: bl __aeabi_dcmpeq
+; DP: vcmpe.f64
+; DP: moveq r0, #1
+ %1 = fcmp oeq double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_ogt(double %a, double %b) {
+; CHECK-LABEL: cmp_d_ogt:
+; NONE: bl __aeabi_dcmpgt
+; SP: bl __aeabi_dcmpgt
+; DP: vcmpe.f64
+; DP: movgt r0, #1
+ %1 = fcmp ogt double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_oge(double %a, double %b) {
+; CHECK-LABEL: cmp_d_oge:
+; NONE: bl __aeabi_dcmpge
+; SP: bl __aeabi_dcmpge
+; DP: vcmpe.f64
+; DP: movge r0, #1
+ %1 = fcmp oge double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_olt(double %a, double %b) {
+; CHECK-LABEL: cmp_d_olt:
+; NONE: bl __aeabi_dcmplt
+; SP: bl __aeabi_dcmplt
+; DP: vcmpe.f64
+; DP: movmi r0, #1
+ %1 = fcmp olt double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_ole(double %a, double %b) {
+; CHECK-LABEL: cmp_d_ole:
+; NONE: bl __aeabi_dcmple
+; SP: bl __aeabi_dcmple
+; DP: vcmpe.f64
+; DP: movls r0, #1
+ %1 = fcmp ole double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_one(double %a, double %b) {
+; CHECK-LABEL: cmp_d_one:
+; NONE: bl __aeabi_dcmpgt
+; NONE: bl __aeabi_dcmplt
+; SP: bl __aeabi_dcmpgt
+; SP: bl __aeabi_dcmplt
+; DP: vcmpe.f64
+; DP: movmi r0, #1
+; DP: movgt r0, #1
+ %1 = fcmp one double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_ord(double %a, double %b) {
+; CHECK-LABEL: cmp_d_ord:
+; NONE: bl __aeabi_dcmpun
+; SP: bl __aeabi_dcmpun
+; DP: vcmpe.f64
+; DP: movvc r0, #1
+ %1 = fcmp ord double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_ugt(double %a, double %b) {
+; CHECK-LABEL: cmp_d_ugt:
+; NONE: bl __aeabi_dcmpgt
+; NONE: bl __aeabi_dcmpun
+; SP: bl __aeabi_dcmpgt
+; SP: bl __aeabi_dcmpun
+; DP: vcmpe.f64
+; DP: movhi r0, #1
+ %1 = fcmp ugt double %a, %b
+ ret i1 %1
+}
+
+define i1 @cmp_d_ult(double %a, double %b) {
+; CHECK-LABEL: cmp_d_ult:
+; NONE: bl __aeabi_dcmplt
+; NONE: bl __aeabi_dcmpun
+; SP: bl __aeabi_dcmplt
+; SP: bl __aeabi_dcmpun
+; DP: vcmpe.f64
+; DP: movlt r0, #1
+ %1 = fcmp ult double %a, %b
+ ret i1 %1
+}
+
+
+define i1 @cmp_d_uno(double %a, double %b) {
+; CHECK-LABEL: cmp_d_uno:
+; NONE: bl __aeabi_dcmpun
+; SP: bl __aeabi_dcmpun
+; DP: vcmpe.f64
+; DP: movvs r0, #1
+ %1 = fcmp uno double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_true(double %a, double %b) {
+; CHECK-LABEL: cmp_d_true:
+; NONE: movs r0, #1
+; HARD: movs r0, #1
+ %1 = fcmp true double %a, %b
+ ret i1 %1
+}
+define i1 @cmp_d_ueq(double %a, double %b) {
+; CHECK-LABEL: cmp_d_ueq:
+; NONE: bl __aeabi_dcmpeq
+; NONE: bl __aeabi_dcmpun
+; SP: bl __aeabi_dcmpeq
+; SP: bl __aeabi_dcmpun
+; DP: vcmpe.f64
+; DP: moveq r0, #1
+; DP: movvs r0, #1
+ %1 = fcmp ueq double %a, %b
+ ret i1 %1
+}
+
+define i1 @cmp_d_uge(double %a, double %b) {
+; CHECK-LABEL: cmp_d_uge:
+; NONE: bl __aeabi_dcmpge
+; NONE: bl __aeabi_dcmpun
+; SP: bl __aeabi_dcmpge
+; SP: bl __aeabi_dcmpun
+; DP: vcmpe.f64
+; DP: movpl r0, #1
+ %1 = fcmp uge double %a, %b
+ ret i1 %1
+}
+
+define i1 @cmp_d_ule(double %a, double %b) {
+; CHECK-LABEL: cmp_d_ule:
+; NONE: bl __aeabi_dcmple
+; NONE: bl __aeabi_dcmpun
+; SP: bl __aeabi_dcmple
+; SP: bl __aeabi_dcmpun
+; DP: vcmpe.f64
+; DP: movle r0, #1
+ %1 = fcmp ule double %a, %b
+ ret i1 %1
+}
+
+define i1 @cmp_d_une(double %a, double %b) {
+; CHECK-LABEL: cmp_d_une:
+; NONE: bl __aeabi_dcmpeq
+; SP: bl __aeabi_dcmpeq
+; DP: vcmpe.f64
+; DP: movne r0, #1
+ %1 = fcmp une double %a, %b
+ ret i1 %1
+}
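The two-libcall patterns above (cmp_f_one, cmp_f_ueq, and their double counterparts) follow from predicate algebra: "one" is ordered-and-not-equal, so without VFP condition flags it has to be assembled from separate greater-than and less-than libcalls. A hypothetical hand-expansion of the same predicate:

; Hypothetical expansion (not in the test file): fcmp one is
; equivalent to (a > b) | (a < b), matching the two __aeabi_fcmp*
; calls checked above.
define i1 @cmp_f_one_by_hand(float %a, float %b) {
  %gt = fcmp ogt float %a, %b
  %lt = fcmp olt float %a, %b
  %r = or i1 %gt, %lt
  ret i1 %r
}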
diff --git a/test/CodeGen/Thumb2/float-intrinsics-double.ll b/test/CodeGen/Thumb2/float-intrinsics-double.ll
new file mode 100644
index 0000000..01a23bd
--- /dev/null
+++ b/test/CodeGen/Thumb2/float-intrinsics-double.ll
@@ -0,0 +1,228 @@
+; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=NONE
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP -check-prefix=FP-ARMv8
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8
+
+declare double @llvm.sqrt.f64(double %Val)
+define double @sqrt_d(double %a) {
+; CHECK-LABEL: sqrt_d:
+; SOFT: {{(bl|b)}} sqrt
+; HARD: vsqrt.f64 d0, d0
+ %1 = call double @llvm.sqrt.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.powi.f64(double %Val, i32 %power)
+define double @powi_d(double %a, i32 %b) {
+; CHECK-LABEL: powi_d:
+; SOFT: {{(bl|b)}} __powidf2
+; HARD: b __powidf2
+ %1 = call double @llvm.powi.f64(double %a, i32 %b)
+ ret double %1
+}
+
+declare double @llvm.sin.f64(double %Val)
+define double @sin_d(double %a) {
+; CHECK-LABEL: sin_d:
+; SOFT: {{(bl|b)}} sin
+; HARD: b sin
+ %1 = call double @llvm.sin.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.cos.f64(double %Val)
+define double @cos_d(double %a) {
+; CHECK-LABEL: cos_d:
+; SOFT: {{(bl|b)}} cos
+; HARD: b cos
+ %1 = call double @llvm.cos.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.pow.f64(double %Val, double %power)
+define double @pow_d(double %a, double %b) {
+; CHECK-LABEL: pow_d:
+; SOFT: {{(bl|b)}} pow
+; HARD: b pow
+ %1 = call double @llvm.pow.f64(double %a, double %b)
+ ret double %1
+}
+
+declare double @llvm.exp.f64(double %Val)
+define double @exp_d(double %a) {
+; CHECK-LABEL: exp_d:
+; SOFT: {{(bl|b)}} exp
+; HARD: b exp
+ %1 = call double @llvm.exp.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.exp2.f64(double %Val)
+define double @exp2_d(double %a) {
+; CHECK-LABEL: exp2_d:
+; SOFT: {{(bl|b)}} exp2
+; HARD: b exp2
+ %1 = call double @llvm.exp2.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.log.f64(double %Val)
+define double @log_d(double %a) {
+; CHECK-LABEL: log_d:
+; SOFT: {{(bl|b)}} log
+; HARD: b log
+ %1 = call double @llvm.log.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.log10.f64(double %Val)
+define double @log10_d(double %a) {
+; CHECK-LABEL: log10_d:
+; SOFT: {{(bl|b)}} log10
+; HARD: b log10
+ %1 = call double @llvm.log10.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.log2.f64(double %Val)
+define double @log2_d(double %a) {
+; CHECK-LABEL: log2_d:
+; SOFT: {{(bl|b)}} log2
+; HARD: b log2
+ %1 = call double @llvm.log2.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.fma.f64(double %a, double %b, double %c)
+define double @fma_d(double %a, double %b, double %c) {
+; CHECK-LABEL: fma_d:
+; SOFT: {{(bl|b)}} fma
+; HARD: vfma.f64
+ %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
+ ret double %1
+}
+
+; FIXME: the FPv4-SP version is less efficient than the no-FPU version
+declare double @llvm.fabs.f64(double %Val)
+define double @abs_d(double %a) {
+; CHECK-LABEL: abs_d:
+; NONE: bic r1, r1, #-2147483648
+; SP: bl __aeabi_dcmpgt
+; SP: bl __aeabi_dcmpun
+; SP: bl __aeabi_dsub
+; DP: vabs.f64 d0, d0
+ %1 = call double @llvm.fabs.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.copysign.f64(double %Mag, double %Sgn)
+define double @copysign_d(double %a, double %b) {
+; CHECK-LABEL: copysign_d:
+; SOFT: lsrs [[REG:r[0-9]+]], r3, #31
+; SOFT: bfi r1, [[REG]], #31, #1
+; VFP: lsrs [[REG:r[0-9]+]], r3, #31
+; VFP: bfi r1, [[REG]], #31, #1
+; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
+; NEON: vshl.i64 [[REG]], [[REG]], #32
+; NEON: vbsl [[REG]], d
+ %1 = call double @llvm.copysign.f64(double %a, double %b)
+ ret double %1
+}
+
+declare double @llvm.floor.f64(double %Val)
+define double @floor_d(double %a) {
+; CHECK-LABEL: floor_d:
+; SOFT: {{(bl|b)}} floor
+; VFP4: b floor
+; FP-ARMv8: vrintm.f64
+ %1 = call double @llvm.floor.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.ceil.f64(double %Val)
+define double @ceil_d(double %a) {
+; CHECK-LABEL: ceil_d:
+; SOFT: {{(bl|b)}} ceil
+; VFP4: b ceil
+; FP-ARMv8: vrintp.f64
+ %1 = call double @llvm.ceil.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.trunc.f64(double %Val)
+define double @trunc_d(double %a) {
+; CHECK-LABEL: trunc_d:
+; SOFT: {{(bl|b)}} trunc
+; VFP4: b trunc
+; FP-ARMv8: vrintz.f64
+ %1 = call double @llvm.trunc.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.rint.f64(double %Val)
+define double @rint_d(double %a) {
+; CHECK-LABEL: rint_d:
+; SOFT: {{(bl|b)}} rint
+; VFP4: b rint
+; FP-ARMv8: vrintx.f64
+ %1 = call double @llvm.rint.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.nearbyint.f64(double %Val)
+define double @nearbyint_d(double %a) {
+; CHECK-LABEL: nearbyint_d:
+; SOFT: {{(bl|b)}} nearbyint
+; VFP4: b nearbyint
+; FP-ARMv8: vrintr.f64
+ %1 = call double @llvm.nearbyint.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.round.f64(double %Val)
+define double @round_d(double %a) {
+; CHECK-LABEL: round_d:
+; SOFT: {{(bl|b)}} round
+; VFP4: b round
+; FP-ARMv8: vrinta.f64
+ %1 = call double @llvm.round.f64(double %a)
+ ret double %1
+}
+
+declare double @llvm.fmuladd.f64(double %a, double %b, double %c)
+define double @fmuladd_d(double %a, double %b, double %c) {
+; CHECK-LABEL: fmuladd_d:
+; SOFT: bl __aeabi_dmul
+; SOFT: bl __aeabi_dadd
+; VFP4: vmul.f64
+; VFP4: vadd.f64
+; FP-ARMv8: vmla.f64
+ %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
+ ret double %1
+}
+
+declare i16 @llvm.convert.to.fp16.f64(double %a)
+define i16 @d_to_h(double %a) {
+; CHECK-LABEL: d_to_h:
+; SOFT: bl __aeabi_d2h
+; VFP4: bl __aeabi_d2h
+; FP-ARMv8: vcvt{{[bt]}}.f16.f64
+ %1 = call i16 @llvm.convert.to.fp16.f64(double %a)
+ ret i16 %1
+}
+
+declare double @llvm.convert.from.fp16.f64(i16 %a)
+define double @h_to_d(i16 %a) {
+; CHECK-LABEL: h_to_d:
+; NONE: bl __gnu_h2f_ieee
+; NONE: bl __aeabi_f2d
+; SP: vcvt{{[bt]}}.f32.f16
+; SP: bl __aeabi_f2d
+; VFP4: vcvt{{[bt]}}.f32.f16
+; VFP4: vcvt.f64.f32
+; FP-ARMv8: vcvt{{[bt]}}.f64.f16
+ %1 = call double @llvm.convert.from.fp16.f64(i16 %a)
+ ret double %1
+}
diff --git a/test/CodeGen/Thumb2/float-intrinsics-float.ll b/test/CodeGen/Thumb2/float-intrinsics-float.ll
new file mode 100644
index 0000000..ec1bcd3
--- /dev/null
+++ b/test/CodeGen/Thumb2/float-intrinsics-float.ll
@@ -0,0 +1,221 @@
+; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=NONE
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=VMLA
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP -check-prefix=FP-ARMv8 -check-prefix=VMLA
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=FP-ARMv8 -check-prefix=VMLA
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4 -check-prefix=NO-VMLA
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8 -check-prefix=VMLA
+
+declare float @llvm.sqrt.f32(float %Val)
+define float @sqrt_f(float %a) {
+; CHECK-LABEL: sqrt_f:
+; SOFT: bl sqrtf
+; HARD: vsqrt.f32 s0, s0
+ %1 = call float @llvm.sqrt.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.powi.f32(float %Val, i32 %power)
+define float @powi_f(float %a, i32 %b) {
+; CHECK-LABEL: powi_f:
+; SOFT: bl __powisf2
+; HARD: b __powisf2
+ %1 = call float @llvm.powi.f32(float %a, i32 %b)
+ ret float %1
+}
+
+declare float @llvm.sin.f32(float %Val)
+define float @sin_f(float %a) {
+; CHECK-LABEL: sin_f:
+; SOFT: bl sinf
+; HARD: b sinf
+ %1 = call float @llvm.sin.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.cos.f32(float %Val)
+define float @cos_f(float %a) {
+; CHECK-LABEL: cos_f:
+; SOFT: bl cosf
+; HARD: b cosf
+ %1 = call float @llvm.cos.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.pow.f32(float %Val, float %power)
+define float @pow_f(float %a, float %b) {
+; CHECK-LABEL: pow_f:
+; SOFT: bl powf
+; HARD: b powf
+ %1 = call float @llvm.pow.f32(float %a, float %b)
+ ret float %1
+}
+
+declare float @llvm.exp.f32(float %Val)
+define float @exp_f(float %a) {
+; CHECK-LABEL: exp_f:
+; SOFT: bl expf
+; HARD: b expf
+ %1 = call float @llvm.exp.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.exp2.f32(float %Val)
+define float @exp2_f(float %a) {
+; CHECK-LABEL: exp2_f:
+; SOFT: bl exp2f
+; HARD: b exp2f
+ %1 = call float @llvm.exp2.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.log.f32(float %Val)
+define float @log_f(float %a) {
+; CHECK-LABEL: log_f:
+; SOFT: bl logf
+; HARD: b logf
+ %1 = call float @llvm.log.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.log10.f32(float %Val)
+define float @log10_f(float %a) {
+; CHECK-LABEL: log10_f:
+; SOFT: bl log10f
+; HARD: b log10f
+ %1 = call float @llvm.log10.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.log2.f32(float %Val)
+define float @log2_f(float %a) {
+; CHECK-LABEL: log2_f:
+; SOFT: bl log2f
+; HARD: b log2f
+ %1 = call float @llvm.log2.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.fma.f32(float %a, float %b, float %c)
+define float @fma_f(float %a, float %b, float %c) {
+; CHECK-LABEL: fma_f:
+; SOFT: bl fmaf
+; HARD: vfma.f32
+ %1 = call float @llvm.fma.f32(float %a, float %b, float %c)
+ ret float %1
+}
+
+declare float @llvm.fabs.f32(float %Val)
+define float @abs_f(float %a) {
+; CHECK-LABEL: abs_f:
+; SOFT: bic r0, r0, #-2147483648
+; HARD: vabs.f32
+ %1 = call float @llvm.fabs.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.copysign.f32(float %Mag, float %Sgn)
+define float @copysign_f(float %a, float %b) {
+; CHECK-LABEL: copysign_f:
+; NONE: lsrs [[REG:r[0-9]+]], r{{[0-9]+}}, #31
+; NONE: bfi r{{[0-9]+}}, [[REG]], #31, #1
+; SP: lsrs [[REG:r[0-9]+]], r{{[0-9]+}}, #31
+; SP: bfi r{{[0-9]+}}, [[REG]], #31, #1
+; VFP: lsrs [[REG:r[0-9]+]], r{{[0-9]+}}, #31
+; VFP: bfi r{{[0-9]+}}, [[REG]], #31, #1
+; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
+; NEON: vbsl [[REG]], d
+ %1 = call float @llvm.copysign.f32(float %a, float %b)
+ ret float %1
+}
+
+declare float @llvm.floor.f32(float %Val)
+define float @floor_f(float %a) {
+; CHECK-LABEL: floor_f:
+; SOFT: bl floorf
+; VFP4: b floorf
+; FP-ARMv8: vrintm.f32
+ %1 = call float @llvm.floor.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.ceil.f32(float %Val)
+define float @ceil_f(float %a) {
+; CHECK-LABEL: ceil_f:
+; SOFT: bl ceilf
+; VFP4: b ceilf
+; FP-ARMv8: vrintp.f32
+ %1 = call float @llvm.ceil.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.trunc.f32(float %Val)
+define float @trunc_f(float %a) {
+; CHECK-LABEL: trunc_f:
+; SOFT: bl truncf
+; VFP4: b truncf
+; FP-ARMv8: vrintz.f32
+ %1 = call float @llvm.trunc.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.rint.f32(float %Val)
+define float @rint_f(float %a) {
+; CHECK-LABEL: rint_f:
+; SOFT: bl rintf
+; VFP4: b rintf
+; FP-ARMv8: vrintx.f32
+ %1 = call float @llvm.rint.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.nearbyint.f32(float %Val)
+define float @nearbyint_f(float %a) {
+; CHECK-LABEL: nearbyint_f:
+; SOFT: bl nearbyintf
+; VFP4: b nearbyintf
+; FP-ARMv8: vrintr.f32
+ %1 = call float @llvm.nearbyint.f32(float %a)
+ ret float %1
+}
+
+declare float @llvm.round.f32(float %Val)
+define float @round_f(float %a) {
+; CHECK-LABEL: round_f:
+; SOFT: bl roundf
+; VFP4: b roundf
+; FP-ARMv8: vrinta.f32
+ %1 = call float @llvm.round.f32(float %a)
+ ret float %1
+}
+
+; FIXME: why does cortex-m4 use vmla, while cortex-a7 uses vmul+vadd?
+; (these should be equivalent, even the rounding is the same)
+declare float @llvm.fmuladd.f32(float %a, float %b, float %c)
+define float @fmuladd_f(float %a, float %b, float %c) {
+; CHECK-LABEL: fmuladd_f:
+; SOFT: bl __aeabi_fmul
+; SOFT: bl __aeabi_fadd
+; VMLA: vmla.f32
+; NO-VMLA: vmul.f32
+; NO-VMLA: vadd.f32
+ %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ ret float %1
+}
+
+declare i16 @llvm.convert.to.fp16.f32(float %a)
+define i16 @f_to_h(float %a) {
+; CHECK-LABEL: f_to_h:
+; SOFT: bl __gnu_f2h_ieee
+; HARD: vcvt{{[bt]}}.f16.f32
+ %1 = call i16 @llvm.convert.to.fp16.f32(float %a)
+ ret i16 %1
+}
+
+declare float @llvm.convert.from.fp16.f32(i16 %a)
+define float @h_to_f(i16 %a) {
+; CHECK-LABEL: h_to_f:
+; SOFT: bl __gnu_h2f_ieee
+; HARD: vcvt{{[bt]}}.f32.f16
+ %1 = call float @llvm.convert.from.fp16.f32(i16 %a)
+ ret float %1
+}
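The fp16 intrinsics at the end treat half precision as a storage-only format: values are widened to float before any arithmetic and narrowed again on the way back. A hypothetical round-trip built on the same intrinsics:

; Hypothetical usage sketch (not in the test file): load an i16,
; widen to float, scale, and narrow back to the storage format.
declare float @llvm.convert.from.fp16.f32(i16)
declare i16 @llvm.convert.to.fp16.f32(float)

define void @scale_h(i16* %p) {
entry:
  %bits = load i16* %p, align 2
  %wide = call float @llvm.convert.from.fp16.f32(i16 %bits)
  %scaled = fmul float %wide, 2.0
  %narrow = call i16 @llvm.convert.to.fp16.f32(float %scaled)
  store i16 %narrow, i16* %p, align 2
  ret void
}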
diff --git a/test/CodeGen/Thumb2/float-ops.ll b/test/CodeGen/Thumb2/float-ops.ll
new file mode 100644
index 0000000..d383065
--- /dev/null
+++ b/test/CodeGen/Thumb2/float-ops.ll
@@ -0,0 +1,293 @@
+; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=NONE
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=VFP4-ALL
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=FP-ARMv8
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP4-ALL -check-prefix=VFP4-DP
+
+define float @add_f(float %a, float %b) {
+entry:
+; CHECK-LABEL: add_f:
+; NONE: bl __aeabi_fadd
+; HARD: vadd.f32 s0, s0, s1
+ %0 = fadd float %a, %b
+ ret float %0
+}
+
+define double @add_d(double %a, double %b) {
+entry:
+; CHECK-LABEL: add_d:
+; NONE: bl __aeabi_dadd
+; SP: bl __aeabi_dadd
+; DP: vadd.f64 d0, d0, d1
+ %0 = fadd double %a, %b
+ ret double %0
+}
+
+define float @sub_f(float %a, float %b) {
+entry:
+; CHECK-LABEL: sub_f:
+; NONE: bl __aeabi_fsub
+; HARD: vsub.f32 s
+ %0 = fsub float %a, %b
+ ret float %0
+}
+
+define double @sub_d(double %a, double %b) {
+entry:
+; CHECK-LABEL: sub_d:
+; NONE: bl __aeabi_dsub
+; SP: bl __aeabi_dsub
+; DP: vsub.f64 d0, d0, d1
+ %0 = fsub double %a, %b
+ ret double %0
+}
+
+define float @mul_f(float %a, float %b) {
+entry:
+; CHECK-LABEL: mul_f:
+; NONE: bl __aeabi_fmul
+; HARD: vmul.f32 s
+ %0 = fmul float %a, %b
+ ret float %0
+}
+
+define double @mul_d(double %a, double %b) {
+entry:
+; CHECK-LABEL: mul_d:
+; NONE: bl __aeabi_dmul
+; SP: bl __aeabi_dmul
+; DP: vmul.f64 d0, d0, d1
+ %0 = fmul double %a, %b
+ ret double %0
+}
+
+define float @div_f(float %a, float %b) {
+entry:
+; CHECK-LABEL: div_f:
+; NONE: bl __aeabi_fdiv
+; HARD: vdiv.f32 s
+ %0 = fdiv float %a, %b
+ ret float %0
+}
+
+define double @div_d(double %a, double %b) {
+entry:
+; CHECK-LABEL: div_d:
+; NONE: bl __aeabi_ddiv
+; SP: bl __aeabi_ddiv
+; DP: vdiv.f64 d0, d0, d1
+ %0 = fdiv double %a, %b
+ ret double %0
+}
+
+define float @rem_f(float %a, float %b) {
+entry:
+; CHECK-LABEL: rem_f:
+; NONE: bl fmodf
+; HARD: b fmodf
+ %0 = frem float %a, %b
+ ret float %0
+}
+
+define double @rem_d(double %a, double %b) {
+entry:
+; CHECK-LABEL: rem_d:
+; NONE: bl fmod
+; HARD: b fmod
+ %0 = frem double %a, %b
+ ret double %0
+}
+
+define float @load_f(float* %a) {
+entry:
+; CHECK-LABEL: load_f:
+; NONE: ldr r0, [r0]
+; HARD: vldr s0, [r0]
+ %0 = load float* %a, align 4
+ ret float %0
+}
+
+define double @load_d(double* %a) {
+entry:
+; CHECK-LABEL: load_d:
+; NONE: ldm.w r0, {r0, r1}
+; HARD: vldr d0, [r0]
+ %0 = load double* %a, align 8
+ ret double %0
+}
+
+define void @store_f(float* %a, float %b) {
+entry:
+; CHECK-LABEL: store_f:
+; NONE: str r1, [r0]
+; HARD: vstr s0, [r0]
+ store float %b, float* %a, align 4
+ ret void
+}
+
+define void @store_d(double* %a, double %b) {
+entry:
+; CHECK-LABEL: store_d:
+; NONE: mov r1, r3
+; NONE: str r2, [r0]
+; NONE: str r1, [r0, #4]
+; HARD: vstr d0, [r0]
+ store double %b, double* %a, align 8
+ ret void
+}
+
+define double @f_to_d(float %a) {
+; CHECK-LABEL: f_to_d:
+; NONE: bl __aeabi_f2d
+; SP: bl __aeabi_f2d
+; DP: vcvt.f64.f32 d0, s0
+ %1 = fpext float %a to double
+ ret double %1
+}
+
+define float @d_to_f(double %a) {
+; CHECK-LABEL: d_to_f:
+; NONE: bl __aeabi_d2f
+; SP: bl __aeabi_d2f
+; DP: vcvt.f32.f64 s0, d0
+ %1 = fptrunc double %a to float
+ ret float %1
+}
+
+define i32 @f_to_si(float %a) {
+; CHECK-LABEL: f_to_si:
+; NONE: bl __aeabi_f2iz
+; HARD: vcvt.s32.f32 s0, s0
+; HARD: vmov r0, s0
+ %1 = fptosi float %a to i32
+ ret i32 %1
+}
+
+define i32 @d_to_si(double %a) {
+; CHECK-LABEL: d_to_si:
+; NONE: bl __aeabi_d2iz
+; SP: vmov r0, r1, d0
+; SP: bl __aeabi_d2iz
+; DP: vcvt.s32.f64 s0, d0
+; DP: vmov r0, s0
+ %1 = fptosi double %a to i32
+ ret i32 %1
+}
+
+define i32 @f_to_ui(float %a) {
+; CHECK-LABEL: f_to_ui:
+; NONE: bl __aeabi_f2uiz
+; HARD: vcvt.u32.f32 s0, s0
+; HARD: vmov r0, s0
+ %1 = fptoui float %a to i32
+ ret i32 %1
+}
+
+define i32 @d_to_ui(double %a) {
+; CHECK-LABEL: d_to_ui:
+; NONE: bl __aeabi_d2uiz
+; SP: vmov r0, r1, d0
+; SP: bl __aeabi_d2uiz
+; DP: vcvt.u32.f64 s0, d0
+; DP: vmov r0, s0
+ %1 = fptoui double %a to i32
+ ret i32 %1
+}
+
+define float @si_to_f(i32 %a) {
+; CHECK-LABEL: si_to_f:
+; NONE: bl __aeabi_i2f
+; HARD: vcvt.f32.s32 s0, s0
+ %1 = sitofp i32 %a to float
+ ret float %1
+}
+
+define double @si_to_d(i32 %a) {
+; CHECK-LABEL: si_to_d:
+; NONE: bl __aeabi_i2d
+; SP: bl __aeabi_i2d
+; DP: vcvt.f64.s32 d0, s0
+ %1 = sitofp i32 %a to double
+ ret double %1
+}
+
+define float @ui_to_f(i32 %a) {
+; CHECK-LABEL: ui_to_f:
+; NONE: bl __aeabi_ui2f
+; HARD: vcvt.f32.u32 s0, s0
+ %1 = uitofp i32 %a to float
+ ret float %1
+}
+
+define double @ui_to_d(i32 %a) {
+; CHECK-LABEL: ui_to_d:
+; NONE: bl __aeabi_ui2d
+; SP: bl __aeabi_ui2d
+; DP: vcvt.f64.u32 d0, s0
+ %1 = uitofp i32 %a to double
+ ret double %1
+}
+
+define float @bitcast_i_to_f(i32 %a) {
+; CHECK-LABEL: bitcast_i_to_f:
+; NONE-NOT: mov
+; HARD: vmov s0, r0
+ %1 = bitcast i32 %a to float
+ ret float %1
+}
+
+define double @bitcast_i_to_d(i64 %a) {
+; CHECK-LABEL: bitcast_i_to_d:
+; NONE-NOT: mov
+; HARD: vmov d0, r0, r1
+ %1 = bitcast i64 %a to double
+ ret double %1
+}
+
+define i32 @bitcast_f_to_i(float %a) {
+; CHECK-LABEL: bitcast_f_to_i:
+; NONE-NOT: mov
+; HARD: vmov r0, s0
+ %1 = bitcast float %a to i32
+ ret i32 %1
+}
+
+define i64 @bitcast_d_to_i(double %a) {
+; CHECK-LABEL: bitcast_d_to_i:
+; NONE-NOT: mov
+; HARD: vmov r0, r1, d0
+ %1 = bitcast double %a to i64
+ ret i64 %1
+}
+
+define float @select_f(float %a, float %b, i1 %c) {
+; CHECK-LABEL: select_f:
+; NONE: tst.w r2, #1
+; NONE: moveq r0, r1
+; HARD: tst.w r0, #1
+; VFP4-ALL: vmovne.f32 s1, s0
+; VFP4-ALL: vmov.f32 s0, s1
+; FP-ARMv8: vseleq.f32 s0, s1, s0
+ %1 = select i1 %c, float %a, float %b
+ ret float %1
+}
+
+define double @select_d(double %a, double %b, i1 %c) {
+; CHECK-LABEL: select_d:
+; NONE: ldr.w [[REG:r[0-9]+]], [sp]
+; NONE: ands [[REG]], [[REG]], #1
+; NONE: moveq r0, r2
+; NONE: moveq r1, r3
+; SP: ands r0, r0, #1
+; SP-DAG: vmov [[ALO:r[0-9]+]], [[AHI:r[0-9]+]], d0
+; SP-DAG: vmov [[BLO:r[0-9]+]], [[BHI:r[0-9]+]], d1
+; SP: itt ne
+; SP-DAG: movne [[BLO]], [[ALO]]
+; SP-DAG: movne [[BHI]], [[AHI]]
+; SP: vmov d0, [[BLO]], [[BHI]]
+; DP: tst.w r0, #1
+; VFP4-DP: vmovne.f64 d1, d0
+; VFP4-DP: vmov.f64 d0, d1
+; FP-ARMv8: vseleq.f64 d0, d1, d0
+ %1 = select i1 %c, double %a, double %b
+ ret double %1
+}
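The select tests capture a real ISA difference: FP-ARMv8 adds vsel instructions that consume the flags directly, while older VFP has to fall back on IT-predicated vmov moves. Integer selects, by contrast, lower to a predicated mov on all of these cores, as in this hypothetical companion:

; Hypothetical integer companion (not in the test file): lowers to
; it/mov regardless of the FP configuration.
define i32 @select_i(i32 %a, i32 %b, i1 %c) {
entry:
  %1 = select i1 %c, i32 %a, i32 %b
  ret i32 %1
}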
diff --git a/test/CodeGen/Thumb2/stack_guard_remat.ll b/test/CodeGen/Thumb2/stack_guard_remat.ll
new file mode 100644
index 0000000..c8ea871
--- /dev/null
+++ b/test/CodeGen/Thumb2/stack_guard_remat.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -no-integrated-as | FileCheck %s -check-prefix=PIC
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=static -no-integrated-as | FileCheck %s -check-prefix=STATIC
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=dynamic-no-pic -no-integrated-as | FileCheck %s -check-prefix=DYNAMIC-NO-PIC
+
+;PIC: foo2
+;PIC: movw [[R0:r[0-9]+]], :lower16:(L___stack_chk_guard$non_lazy_ptr-([[LABEL0:LPC[0-9_]+]]+4))
+;PIC: movt [[R0]], :upper16:(L___stack_chk_guard$non_lazy_ptr-([[LABEL0]]+4))
+;PIC: [[LABEL0]]:
+;PIC: add [[R0]], pc
+;PIC: ldr [[R1:r[0-9]+]], {{\[}}[[R0]]{{\]}}
+;PIC: ldr {{r[0-9]+}}, {{\[}}[[R1]]{{\]}}
+
+;STATIC: foo2
+;STATIC: movw [[R0:r[0-9]+]], :lower16:___stack_chk_guard
+;STATIC: movt [[R0]], :upper16:___stack_chk_guard
+;STATIC: ldr {{r[0-9]+}}, {{\[}}[[R0]]{{\]}}
+
+;DYNAMIC-NO-PIC: foo2
+;DYNAMIC-NO-PIC: movw [[R0:r[0-9]+]], :lower16:L___stack_chk_guard$non_lazy_ptr
+;DYNAMIC-NO-PIC: movt [[R0]], :upper16:L___stack_chk_guard$non_lazy_ptr
+;DYNAMIC-NO-PIC: ldr {{r[0-9]+}}, {{\[}}[[R0]]{{\]}}
+
+; Function Attrs: nounwind ssp
+define i32 @test_stack_guard_remat() #0 {
+ %a1 = alloca [256 x i32], align 4
+ %1 = bitcast [256 x i32]* %a1 to i8*
+ call void @llvm.lifetime.start(i64 1024, i8* %1)
+ %2 = getelementptr inbounds [256 x i32]* %a1, i32 0, i32 0
+ call void @foo3(i32* %2) #3
+ call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
+ call void @llvm.lifetime.end(i64 1024, i8* %1)
+ ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare void @foo3(i32*)
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture)
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Thumb2/thumb2-sxt_rot.ll b/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
index cef3490..02a8c47 100644
--- a/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
+++ b/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
@@ -2,15 +2,15 @@
; RUN: | FileCheck %s
define i32 @test0(i8 %A) {
-; CHECK: test0
+; CHECK-LABEL: test0:
; CHECK: sxtb r0, r0
%B = sext i8 %A to i32
ret i32 %B
}
define signext i8 @test1(i32 %A) {
-; CHECK: test1
-; CHECK: sxtb.w r0, r0, ror #8
+; CHECK-LABEL: test1:
+; CHECK: sbfx r0, r0, #8, #8
%B = lshr i32 %A, 8
%C = shl i32 %A, 24
%D = or i32 %B, %C
@@ -19,9 +19,8 @@ define signext i8 @test1(i32 %A) {
}
define signext i32 @test2(i32 %A, i32 %X) {
-; CHECK: test2
-; CHECK: lsrs r0, r0, #8
-; CHECK: sxtab r0, r1, r0
+; CHECK-LABEL: test2:
+; CHECK: sxtab r0, r1, r0, ror #8
%B = lshr i32 %A, 8
%C = shl i32 %A, 24
%D = or i32 %B, %C
@@ -30,3 +29,14 @@ define signext i32 @test2(i32 %A, i32 %X) {
%G = add i32 %F, %X
ret i32 %G
}
+
+define i32 @test3(i32 %A, i32 %X) {
+; CHECK-LABEL: test3:
+; CHECK: sxtah r0, r0, r1, ror #8
+ %X.hi = lshr i32 %X, 8
+ %X.trunc = trunc i32 %X.hi to i16
+ %addend = sext i16 %X.trunc to i32
+
+ %sum = add i32 %A, %addend
+ ret i32 %sum
+}
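test3 works because sxtah accepts a ror-rotated operand, so the lshr+trunc+sext chain folds into a single instruction. The same folding should apply at byte width with sxtab, as in this hypothetical variant:

; Hypothetical byte-width variant (not in the test file): expected to
; fold to sxtab r0, r0, r1, ror #8 in the same way.
define i32 @test3_byte(i32 %A, i32 %X) {
  %X.hi = lshr i32 %X, 8
  %X.trunc = trunc i32 %X.hi to i8
  %addend = sext i8 %X.trunc to i32
  %sum = add i32 %A, %addend
  ret i32 %sum
}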
diff --git a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
index bcd4a0f..4afea89 100644
--- a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
+++ b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
@@ -24,8 +24,8 @@ define zeroext i32 @test2(i32 %A.u, i32 %B.u) {
}
define zeroext i32 @test3(i32 %A.u) {
-; A8: test3
-; A8: uxth.w r0, r0, ror #8
+; A8-LABEL: test3:
+; A8: ubfx r0, r0, #8, #16
%B.u = lshr i32 %A.u, 8
%C.u = shl i32 %A.u, 24
%D.u = or i32 %B.u, %C.u
@@ -33,3 +33,25 @@ define zeroext i32 @test3(i32 %A.u) {
%F.u = zext i16 %E.u to i32
ret i32 %F.u
}
+
+define i32 @test4(i32 %A, i32 %X) {
+; A8-LABEL: test4:
+; A8: uxtab r0, r0, r1, ror #16
+ %X.hi = lshr i32 %X, 16
+ %X.trunc = trunc i32 %X.hi to i8
+ %addend = zext i8 %X.trunc to i32
+
+ %sum = add i32 %A, %addend
+ ret i32 %sum
+}
+
+define i32 @test5(i32 %A, i32 %X) {
+; A8-LABEL: test5:
+; A8: uxtah r0, r0, r1, ror #8
+ %X.hi = lshr i32 %X, 8
+ %X.trunc = trunc i32 %X.hi to i16
+ %addend = zext i16 %X.trunc to i32
+
+ %sum = add i32 %A, %addend
+ ret i32 %sum
+}