From 243896adcf7c22bb54ce136b0e89fa1fa8c4925f Mon Sep 17 00:00:00 2001
From: Bill Wendling
Date: Sun, 1 Dec 2013 04:38:36 +0000
Subject: Merging r195941:

------------------------------------------------------------------------
r195941 | haoliu | 2013-11-28 18:11:22 -0800 (Thu, 28 Nov 2013) | 4 lines

AArch64: The pattern match should check the range of the immediate value.
Or we can generate some illegal instructions. E.g. shrn2 v0.4s, v1.2d, #35.
The legal range should be in [1, 16].
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_34@196033 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/AArch64/neon-diagnostics.ll      | 11 ++++
 test/CodeGen/AArch64/neon-scalar-cvt.ll       | 16 +++---
 test/CodeGen/AArch64/neon-scalar-shift-imm.ll | 72 +++++++++++++--------------
 3 files changed, 55 insertions(+), 44 deletions(-)

(limited to 'test')

diff --git a/test/CodeGen/AArch64/neon-diagnostics.ll b/test/CodeGen/AArch64/neon-diagnostics.ll
index 1de1cfa..f546aa7 100644
--- a/test/CodeGen/AArch64/neon-diagnostics.ll
+++ b/test/CodeGen/AArch64/neon-diagnostics.ll
@@ -11,3 +11,14 @@ entry:
   ret <2 x float> %add
 }
 
+define <4 x i32> @test_vshrn_not_match(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vshrn_not_match
+; CHECK-NOT: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #35
+  %1 = bitcast <2 x i32> %a to <1 x i64>
+  %2 = ashr <2 x i64> %b, <i64 35, i64 35>
+  %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
+  %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
+  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+  %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+  ret <4 x i32> %4
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-scalar-cvt.ll b/test/CodeGen/AArch64/neon-scalar-cvt.ll
index 2fe25b8..a06d5d6 100644
--- a/test/CodeGen/AArch64/neon-scalar-cvt.ll
+++ b/test/CodeGen/AArch64/neon-scalar-cvt.ll
@@ -90,10 +90,10 @@ declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
 
 define i32 @test_vcvts_n_s32_f32(float %a) {
 ; CHECK: test_vcvts_n_s32_f32
-; CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #0
+; CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %fcvtzs = insertelement <1 x float> undef, float %a, i32 0
-  %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float> %fcvtzs, i32 0)
+  %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float> %fcvtzs, i32 1)
   %0 = extractelement <1 x i32> %fcvtzs1, i32 0
   ret i32 %0
 }
@@ -102,10 +102,10 @@ declare <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float>, i32)
 
 define i64 @test_vcvtd_n_s64_f64(double %a) {
 ; CHECK: test_vcvtd_n_s64_f64
-; CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #0
+; CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %fcvtzs = insertelement <1 x double> undef, double %a, i32 0
-  %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double> %fcvtzs, i32 0)
+  %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double> %fcvtzs, i32 1)
   %0 = extractelement <1 x i64> %fcvtzs1, i32 0
   ret i64 %0
 }
@@ -114,10 +114,10 @@ declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double>, i32)
 
 define i32 @test_vcvts_n_u32_f32(float %a) {
 ; CHECK: test_vcvts_n_u32_f32
-; CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #0
+; CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #32
 entry:
   %fcvtzu = insertelement <1 x float> undef, float %a, i32 0
-  %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float> %fcvtzu, i32 0)
+  %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float> %fcvtzu, i32 32)
   %0 = extractelement <1 x i32> %fcvtzu1, i32 0
   ret i32 %0
 }
@@ -126,10 +126,10 @@ declare <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float>, i32)
 
 define i64 @test_vcvtd_n_u64_f64(double %a) {
 ; CHECK: test_vcvtd_n_u64_f64
-; CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #0
+; CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #64
 entry:
   %fcvtzu = insertelement <1 x double> undef, double %a, i32 0
-  %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double> %fcvtzu, i32 0)
+  %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double> %fcvtzu, i32 64)
   %0 = extractelement <1 x i64> %fcvtzu1, i32 0
   ret i64 %0
 }
diff --git a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
index 693db13..6224361 100644
--- a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
+++ b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
@@ -316,10 +316,10 @@ entry:
 
 define i8 @test_vqshrnh_n_s16(i16 %a) {
 ; CHECK: test_vqshrnh_n_s16
-; CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+; CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
 entry:
   %vsqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
-  %vsqshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16> %vsqshrn, i32 15)
+  %vsqshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16> %vsqshrn, i32 8)
   %0 = extractelement <1 x i8> %vsqshrn1, i32 0
   ret i8 %0
 }
@@ -328,10 +328,10 @@ declare <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16>, i32)
 
 define i16 @test_vqshrns_n_s32(i32 %a) {
 ; CHECK: test_vqshrns_n_s32
-; CHECK: sqshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+; CHECK: sqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
 entry:
   %vsqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vsqshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32> %vsqshrn, i32 31)
+  %vsqshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32> %vsqshrn, i32 16)
   %0 = extractelement <1 x i16> %vsqshrn1, i32 0
   ret i16 %0
 }
@@ -340,10 +340,10 @@ declare <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32>, i32)
 
 define i32 @test_vqshrnd_n_s64(i64 %a) {
 ; CHECK: test_vqshrnd_n_s64
-; CHECK: sqshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+; CHECK: sqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
 entry:
   %vsqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vsqshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64> %vsqshrn, i32 63)
+  %vsqshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64> %vsqshrn, i32 32)
   %0 = extractelement <1 x i32> %vsqshrn1, i32 0
   ret i32 %0
 }
@@ -352,10 +352,10 @@ declare <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64>, i32)
 
 define i8 @test_vqshrnh_n_u16(i16 %a) {
 ; CHECK: test_vqshrnh_n_u16
-; CHECK: uqshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+; CHECK: uqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
 entry:
   %vuqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
-  %vuqshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16> %vuqshrn, i32 15)
+  %vuqshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16> %vuqshrn, i32 8)
   %0 = extractelement <1 x i8> %vuqshrn1, i32 0
   ret i8 %0
 }
@@ -364,10 +364,10 @@ declare <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16>, i32)
 
 define i16 @test_vqshrns_n_u32(i32 %a) {
 ; CHECK: test_vqshrns_n_u32
-; CHECK: uqshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+; CHECK: uqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
 entry:
   %vuqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vuqshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32> %vuqshrn, i32 31)
+  %vuqshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32> %vuqshrn, i32 16)
   %0 = extractelement <1 x i16> %vuqshrn1, i32 0
   ret i16 %0
 }
@@ -376,10 +376,10 @@ declare <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32>, i32)
 
 define i32 @test_vqshrnd_n_u64(i64 %a) {
 ; CHECK: test_vqshrnd_n_u64
-; CHECK: uqshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+; CHECK: uqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
 entry:
   %vuqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vuqshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64> %vuqshrn, i32 63)
+  %vuqshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64> %vuqshrn, i32 32)
   %0 = extractelement <1 x i32> %vuqshrn1, i32 0
   ret i32 %0
 }
@@ -388,10 +388,10 @@ declare <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64>, i32)
 
 define i8 @test_vqrshrnh_n_s16(i16 %a) {
 ; CHECK: test_vqrshrnh_n_s16
-; CHECK: sqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+; CHECK: sqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
 entry:
   %vsqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
-  %vsqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16> %vsqrshrn, i32 15)
+  %vsqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16> %vsqrshrn, i32 8)
   %0 = extractelement <1 x i8> %vsqrshrn1, i32 0
   ret i8 %0
 }
@@ -400,10 +400,10 @@ declare <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16>, i32)
 
 define i16 @test_vqrshrns_n_s32(i32 %a) {
 ; CHECK: test_vqrshrns_n_s32
-; CHECK: sqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+; CHECK: sqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
 entry:
   %vsqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vsqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32> %vsqrshrn, i32 31)
+  %vsqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32> %vsqrshrn, i32 16)
   %0 = extractelement <1 x i16> %vsqrshrn1, i32 0
   ret i16 %0
 }
@@ -412,10 +412,10 @@ declare <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32>, i32)
 
 define i32 @test_vqrshrnd_n_s64(i64 %a) {
 ; CHECK: test_vqrshrnd_n_s64
-; CHECK: sqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+; CHECK: sqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
 entry:
   %vsqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vsqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64> %vsqrshrn, i32 63)
+  %vsqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64> %vsqrshrn, i32 32)
   %0 = extractelement <1 x i32> %vsqrshrn1, i32 0
   ret i32 %0
 }
@@ -424,10 +424,10 @@ declare <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64>, i32)
 
 define i8 @test_vqrshrnh_n_u16(i16 %a) {
 ; CHECK: test_vqrshrnh_n_u16
-; CHECK: uqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+; CHECK: uqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
 entry:
   %vuqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
-  %vuqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16> %vuqrshrn, i32 15)
+  %vuqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16> %vuqrshrn, i32 8)
   %0 = extractelement <1 x i8> %vuqrshrn1, i32 0
   ret i8 %0
 }
@@ -436,10 +436,10 @@ declare <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16>, i32)
 
 define i16 @test_vqrshrns_n_u32(i32 %a) {
 ; CHECK: test_vqrshrns_n_u32
-; CHECK: uqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+; CHECK: uqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
 entry:
   %vuqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vuqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %vuqrshrn, i32 31)
+  %vuqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %vuqrshrn, i32 16)
   %0 = extractelement <1 x i16> %vuqrshrn1, i32 0
   ret i16 %0
 }
@@ -448,10 +448,10 @@ declare <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32>, i32)
 
 define i32 @test_vqrshrnd_n_u64(i64 %a) {
 ; CHECK: test_vqrshrnd_n_u64
-; CHECK: uqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+; CHECK: uqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
 entry:
   %vuqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vuqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64> %vuqrshrn, i32 63)
+  %vuqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64> %vuqrshrn, i32 32)
   %0 = extractelement <1 x i32> %vuqrshrn1, i32 0
   ret i32 %0
 }
@@ -460,10 +460,10 @@ declare <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64>, i32)
 
 define i8 @test_vqshrunh_n_s16(i16 %a) {
 ; CHECK: test_vqshrunh_n_s16
-; CHECK: sqshrun {{b[0-9]+}}, {{h[0-9]+}}, #15
+; CHECK: sqshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
 entry:
   %vsqshrun = insertelement <1 x i16> undef, i16 %a, i32 0
-  %vsqshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16> %vsqshrun, i32 15)
+  %vsqshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16> %vsqshrun, i32 8)
   %0 = extractelement <1 x i8> %vsqshrun1, i32 0
   ret i8 %0
 }
@@ -472,10 +472,10 @@ declare <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16>, i32)
 
 define i16 @test_vqshruns_n_s32(i32 %a) {
 ; CHECK: test_vqshruns_n_s32
-; CHECK: sqshrun {{h[0-9]+}}, {{s[0-9]+}}, #31
+; CHECK: sqshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
 entry:
   %vsqshrun = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vsqshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32> %vsqshrun, i32 31)
+  %vsqshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32> %vsqshrun, i32 16)
   %0 = extractelement <1 x i16> %vsqshrun1, i32 0
   ret i16 %0
 }
@@ -484,10 +484,10 @@ declare <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32>, i32)
 
 define i32 @test_vqshrund_n_s64(i64 %a) {
 ; CHECK: test_vqshrund_n_s64
-; CHECK: sqshrun {{s[0-9]+}}, {{d[0-9]+}}, #63
+; CHECK: sqshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
 entry:
   %vsqshrun = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vsqshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64> %vsqshrun, i32 63)
+  %vsqshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64> %vsqshrun, i32 32)
   %0 = extractelement <1 x i32> %vsqshrun1, i32 0
   ret i32 %0
 }
@@ -496,10 +496,10 @@ declare <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64>, i32)
 
 define i8 @test_vqrshrunh_n_s16(i16 %a) {
 ; CHECK: test_vqrshrunh_n_s16
-; CHECK: sqrshrun {{b[0-9]+}}, {{h[0-9]+}}, #15
+; CHECK: sqrshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
 entry:
   %vsqrshrun = insertelement <1 x i16> undef, i16 %a, i32 0
-  %vsqrshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16> %vsqrshrun, i32 15)
+  %vsqrshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16> %vsqrshrun, i32 8)
   %0 = extractelement <1 x i8> %vsqrshrun1, i32 0
   ret i8 %0
 }
@@ -508,10 +508,10 @@ declare <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16>, i32)
 
 define i16 @test_vqrshruns_n_s32(i32 %a) {
 ; CHECK: test_vqrshruns_n_s32
-; CHECK: sqrshrun {{h[0-9]+}}, {{s[0-9]+}}, #31
+; CHECK: sqrshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
 entry:
   %vsqrshrun = insertelement <1 x i32> undef, i32 %a, i32 0
-  %vsqrshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32> %vsqrshrun, i32 31)
+  %vsqrshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32> %vsqrshrun, i32 16)
   %0 = extractelement <1 x i16> %vsqrshrun1, i32 0
   ret i16 %0
 }
@@ -520,10 +520,10 @@ declare <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32>, i32)
 
 define i32 @test_vqrshrund_n_s64(i64 %a) {
 ; CHECK: test_vqrshrund_n_s64
-; CHECK: sqrshrun {{s[0-9]+}}, {{d[0-9]+}}, #63
+; CHECK: sqrshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
 entry:
   %vsqrshrun = insertelement <1 x i64> undef, i64 %a, i32 0
-  %vsqrshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64> %vsqrshrun, i32 63)
+  %vsqrshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64> %vsqrshrun, i32 32)
   %0 = extractelement <1 x i32> %vsqrshrun1, i32 0
   ret i32 %0
 }
--
cgit v1.1