From 5758c3c832daf4c0b37042684f822fa1896966ac Mon Sep 17 00:00:00 2001
From: Chad Rosier
Date: Fri, 15 Nov 2013 21:28:10 +0000
Subject: [AArch64] Fix the scalar NEON ACLE functions so that they return
 float/double rather than the vector equivalent.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194853 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/AArch64/neon-scalar-cvt.ll | 40 +++++++++++++--------------------
 1 file changed, 16 insertions(+), 24 deletions(-)

diff --git a/test/CodeGen/AArch64/neon-scalar-cvt.ll b/test/CodeGen/AArch64/neon-scalar-cvt.ll
index a7f0ac0..2fe25b8 100644
--- a/test/CodeGen/AArch64/neon-scalar-cvt.ll
+++ b/test/CodeGen/AArch64/neon-scalar-cvt.ll
@@ -5,96 +5,88 @@ define float @test_vcvts_f32_s32(i32 %a) {
 ; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}
 entry:
   %vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
-  %0 = extractelement <1 x float> %vcvtf1.i, i32 0
+  %0 = call float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
   ret float %0
 }
 
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
+declare float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
 
 define double @test_vcvtd_f64_s64(i64 %a) {
 ; CHECK: test_vcvtd_f64_s64
 ; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}
 entry:
   %vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
-  %0 = extractelement <1 x double> %vcvtf1.i, i32 0
+  %0 = call double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
   ret double %0
 }
 
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
+declare double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
 
 define float @test_vcvts_f32_u32(i32 %a) {
 ; CHECK: test_vcvts_f32_u32
 ; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}
 entry:
   %vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
-  %0 = extractelement <1 x float> %vcvtf1.i, i32 0
+  %0 = call float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
   ret float %0
 }
 
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
+declare float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
 
 define double @test_vcvtd_f64_u64(i64 %a) {
 ; CHECK: test_vcvtd_f64_u64
 ; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}
 entry:
   %vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
-  %0 = extractelement <1 x double> %vcvtf1.i, i32 0
+  %0 = call double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
   ret double %0
 }
 
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
+declare double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
 
 define float @test_vcvts_n_f32_s32(i32 %a) {
 ; CHECK: test_vcvts_n_f32_s32
 ; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
-  %0 = extractelement <1 x float> %vcvtf1, i32 0
+  %0 = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
   ret float %0
 }
 
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
 
 define double @test_vcvtd_n_f64_s64(i64 %a) {
 ; CHECK: test_vcvtd_n_f64_s64
 ; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
-  %0 = extractelement <1 x double> %vcvtf1, i32 0
+  %0 = call double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
   ret double %0
 }
 
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
 
 define float @test_vcvts_n_f32_u32(i32 %a) {
 ; CHECK: test_vcvts_n_f32_u32
 ; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
-  %0 = extractelement <1 x float> %vcvtf1, i32 0
+  %0 = call float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
   ret float %0
 }
 
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
 
 define double @test_vcvtd_n_f64_u64(i64 %a) {
 ; CHECK: test_vcvtd_n_f64_u64
 ; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
-  %0 = extractelement <1 x double> %vcvtf1, i32 0
+  %0 = call double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
   ret double %0
 }
 
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
 
 define i32 @test_vcvts_n_s32_f32(float %a) {
 ; CHECK: test_vcvts_n_s32_f32
-- 
cgit v1.1