author:    Ana Pazos <apazos@codeaurora.org>  2013-11-15 23:32:10 +0000
committer: Ana Pazos <apazos@codeaurora.org>  2013-11-15 23:32:10 +0000
commit:    a53bf06f7a998f9ea9e13ba844efc2460a2185dd (patch)
tree:      be6f3669038eaf8a146f4a68ed8af835fbe1ea94 /test/CodeGen/AArch64
parent:    6bc810a49983e12006ba7a0dba61f7b2534b8f26 (diff)
Implemented aarch64 Neon scalar vmulx_lane intrinsics
Implemented aarch64 Neon scalar vfma_lane intrinsics.
Implemented aarch64 Neon scalar vfms_lane intrinsics.
Implemented legacy vmul_n_f64, vmul_lane_f64, vmul_laneq_f64 intrinsics
(v1f64 parameter type) using Neon scalar instructions.
Implemented legacy vfma_lane_f64, vfms_lane_f64, vfma_laneq_f64,
vfms_laneq_f64 intrinsics (v1f64 parameter type) using Neon scalar
instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194888 91177308-0d34-0410-b5e6-96231b3b80d8
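The new tests all exercise the same selection pattern: extract one lane of a
vector, feed it to a scalar fmul or a fused multiply-add, and check that the
backend folds this into a single by-element instruction. As a minimal,
self-contained sketch of that pattern (illustrative function names, not code
from the commit):

; Illustrative sketch only; the commit's actual tests are in the diff below.
declare float @llvm.fma.f32(float, float, float)

define float @example_fmul_lane(float %a, <4 x float> %v) {
  %lane = extractelement <4 x float> %v, i32 3   ; read lane 3 of %v
  %prod = fmul float %a, %lane                   ; scalar multiply by that lane
  ret float %prod                                ; expect: fmul s_, s_, v_.s[3]
}

define float @example_fmls_lane(float %acc, <2 x float> %v) {
  %lane = extractelement <2 x float> %v, i32 1   ; read lane 1 of %v
  %neg = fsub float -0.0, %lane                  ; negate one multiplicand
  %res = call float @llvm.fma.f32(float %neg, float %lane, float %acc)
  ret float %res                                 ; expect: fmls s_, s_, v_.s[1]
}

Running such a function through llc with -mattr=+neon, the FileCheck lines in
the tests below assert that the by-element form of the instruction is selected.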
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll  108
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll  124
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-mul.ll           18
3 files changed, 238 insertions, 12 deletions
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
new file mode 100644
index 0000000..8ce42de
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
@@ -0,0 +1,108 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare float @llvm.fma.f32(float, float, float)
+declare double @llvm.fma.f64(double, double, double)
+
+define float @test_fmla_ss4S(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmla_ss4S
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
+ ret float %tmp2
+}
+
+define float @test_fmla_ss4S_swap(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmla_ss4S_swap
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
+ ret float %tmp2
+}
+
+define float @test_fmla_ss2S(float %a, float %b, <2 x float> %v) {
+ ; CHECK: test_fmla_ss2S
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
+ ret float %tmp2
+}
+
+define double @test_fmla_ddD(double %a, double %b, <1 x double> %v) {
+ ; CHECK: test_fmla_ddD
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
+ ret double %tmp2
+}
+
+define double @test_fmla_dd2D(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmla_dd2D
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
+ ret double %tmp2
+}
+
+define double @test_fmla_dd2D_swap(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmla_dd2D_swap
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
+ ret double %tmp2
+}
+
+define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmls_ss4S
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fsub float -0.0, %tmp1
+ %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
+ ret float %tmp3
+}
+
+define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmls_ss4S_swap
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fsub float -0.0, %tmp1
+ %tmp3 = call float @llvm.fma.f32(float %tmp1, float %tmp2, float %a)
+ ret float %tmp3
+}
+
+
+define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
+ ; CHECK: test_fmls_ss2S
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fsub float -0.0, %tmp1
+ %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
+ ret float %tmp3
+}
+
+define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
+ ; CHECK: test_fmls_ddD
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = fsub double -0.0, %tmp1
+ %tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
+ ret double %tmp3
+}
+
+define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmls_dd2D
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fsub double -0.0, %tmp1
+ %tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
+ ret double %tmp3
+}
+
+define double @test_fmls_dd2D_swap(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmls_dd2D_swap
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fsub double -0.0, %tmp1
+ %tmp3 = call double @llvm.fma.f64(double %tmp1, double %tmp2, double %a)
+ ret double %tmp3
+}
+
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
new file mode 100644
index 0000000..968ad3e
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
@@ -0,0 +1,124 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+define float @test_fmul_lane_ss2S(float %a, <2 x float> %v) {
+ ; CHECK: test_fmul_lane_ss2S
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fmul float %a, %tmp1
+ ret float %tmp2
+}
+
+define float @test_fmul_lane_ss2S_swap(float %a, <2 x float> %v) {
+ ; CHECK: test_fmul_lane_ss2S_swap
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fmul float %tmp1, %a
+ ret float %tmp2
+}
+
+
+define float @test_fmul_lane_ss4S(float %a, <4 x float> %v) {
+ ; CHECK: test_fmul_lane_ss4S
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fmul float %a, %tmp1
+ ret float %tmp2
+}
+
+define float @test_fmul_lane_ss4S_swap(float %a, <4 x float> %v) {
+ ; CHECK: test_fmul_lane_ss4S_swap
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fmul float %tmp1, %a
+ ret float %tmp2
+}
+
+
+define double @test_fmul_lane_ddD(double %a, <1 x double> %v) {
+ ; CHECK: test_fmul_lane_ddD
+ ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = fmul double %a, %tmp1
+ ret double %tmp2
+}
+
+
+
+define double @test_fmul_lane_dd2D(double %a, <2 x double> %v) {
+ ; CHECK: test_fmul_lane_dd2D
+ ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fmul double %a, %tmp1
+ ret double %tmp2
+}
+
+
+define double @test_fmul_lane_dd2D_swap(double %a, <2 x double> %v) {
+ ; CHECK: test_fmul_lane_dd2D_swap
+ ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fmul double %tmp1, %a
+ ret double %tmp2
+}
+
+declare float @llvm.aarch64.neon.vmulx.f32(float, float)
+
+define float @test_fmulx_lane_f32(float %a, <2 x float> %v) {
+ ; CHECK: test_fmulx_lane_f32
+ ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
+ ret float %tmp2
+}
+
+define float @test_fmulx_laneq_f32(float %a, <4 x float> %v) {
+ ; CHECK: test_fmulx_laneq_f32
+ ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
+ ret float %tmp2
+}
+
+define float @test_fmulx_laneq_f32_swap(float %a, <4 x float> %v) {
+ ; CHECK: test_fmulx_laneq_f32_swap
+ ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %tmp1, float %a)
+ ret float %tmp2
+}
+
+declare double @llvm.aarch64.neon.vmulx.f64(double, double)
+
+define double @test_fmulx_lane_f64(double %a, <1 x double> %v) {
+ ; CHECK: test_fmulx_lane_f64
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+ ret double %tmp2
+}
+
+define double @test_fmulx_laneq_f64_0(double %a, <2 x double> %v) {
+ ; CHECK: test_fmulx_laneq_f64_0
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+ %tmp1 = extractelement <2 x double> %v, i32 0
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+ ret double %tmp2
+}
+
+
+define double @test_fmulx_laneq_f64_1(double %a, <2 x double> %v) {
+ ; CHECK: test_fmulx_laneq_f64_1
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+ ret double %tmp2
+}
+
+define double @test_fmulx_laneq_f64_1_swap(double %a, <2 x double> %v) {
+ ; CHECK: test_fmulx_laneq_f64_1_swap
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %tmp1, double %a)
+ ret double %tmp2
+}
+
diff --git a/test/CodeGen/AArch64/neon-scalar-mul.ll b/test/CodeGen/AArch64/neon-scalar-mul.ll
index a58294b..4992a51 100644
--- a/test/CodeGen/AArch64/neon-scalar-mul.ll
+++ b/test/CodeGen/AArch64/neon-scalar-mul.ll
@@ -49,25 +49,19 @@ declare <1 x i32> @llvm.arm.neon.vqrdmulh.v1i32(<1 x i32>, <1 x i32>)
define float @test_vmulxs_f32(float %a, float %b) {
; CHECK: test_vmulxs_f32
; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
- %1 = insertelement <1 x float> undef, float %a, i32 0
- %2 = insertelement <1 x float> undef, float %b, i32 0
- %3 = call <1 x float> @llvm.aarch64.neon.vmulx.v1f32(<1 x float> %1, <1 x float> %2)
- %4 = extractelement <1 x float> %3, i32 0
- ret float %4
+ %1 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %b)
+ ret float %1
}
define double @test_vmulxd_f64(double %a, double %b) {
; CHECK: test_vmulxd_f64
; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
- %1 = insertelement <1 x double> undef, double %a, i32 0
- %2 = insertelement <1 x double> undef, double %b, i32 0
- %3 = call <1 x double> @llvm.aarch64.neon.vmulx.v1f64(<1 x double> %1, <1 x double> %2)
- %4 = extractelement <1 x double> %3, i32 0
- ret double %4
+ %1 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %b)
+ ret double %1
}
-declare <1 x float> @llvm.aarch64.neon.vmulx.v1f32(<1 x float>, <1 x float>)
-declare <1 x double> @llvm.aarch64.neon.vmulx.v1f64(<1 x double>, <1 x double>)
+declare float @llvm.aarch64.neon.vmulx.f32(float, float)
+declare double @llvm.aarch64.neon.vmulx.f64(double, double)
define i32 @test_vqdmlalh_s16(i32 %a, i16 %b, i16 %c) {
; CHECK: test_vqdmlalh_s16