about · summary · refs · log · tree · commit · diff · stats
path: root/test/CodeGen/X86/avx2-vector-shifts.ll
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/X86/avx2-vector-shifts.ll')
-rw-r--r--  test/CodeGen/X86/avx2-vector-shifts.ll  55
1 files changed, 33 insertions, 22 deletions
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index 5592e6c..4ae2905 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -9,7 +9,7 @@ entry:
}
; CHECK-LABEL: test_sllw_1:
-; CHECK: vpsllw $0, %ymm0, %ymm0
+; CHECK-NOT: vpsllw $0, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @test_sllw_2(<16 x i16> %InVec) {
@@ -24,12 +24,12 @@ entry:
define <16 x i16> @test_sllw_3(<16 x i16> %InVec) {
entry:
- %shl = shl <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = shl <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <16 x i16> %shl
}
; CHECK-LABEL: test_sllw_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsllw $15, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_slld_1(<8 x i32> %InVec) {
@@ -39,7 +39,7 @@ entry:
}
; CHECK-LABEL: test_slld_1:
-; CHECK: vpslld $0, %ymm0, %ymm0
+; CHECK-NOT: vpslld $0, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_slld_2(<8 x i32> %InVec) {
@@ -54,12 +54,12 @@ entry:
define <8 x i32> @test_slld_3(<8 x i32> %InVec) {
entry:
- %shl = shl <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+ %shl = shl <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
ret <8 x i32> %shl
}
; CHECK-LABEL: test_slld_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpslld $31, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_sllq_1(<4 x i64> %InVec) {
@@ -69,7 +69,7 @@ entry:
}
; CHECK-LABEL: test_sllq_1:
-; CHECK: vpsllq $0, %ymm0, %ymm0
+; CHECK-NOT: vpsllq $0, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_sllq_2(<4 x i64> %InVec) {
@@ -84,12 +84,12 @@ entry:
define <4 x i64> @test_sllq_3(<4 x i64> %InVec) {
entry:
- %shl = shl <4 x i64> %InVec, <i64 64, i64 64, i64 64, i64 64>
+ %shl = shl <4 x i64> %InVec, <i64 63, i64 63, i64 63, i64 63>
ret <4 x i64> %shl
}
; CHECK-LABEL: test_sllq_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsllq $63, %ymm0, %ymm0
; CHECK: ret
; AVX2 Arithmetic Shift
@@ -101,7 +101,7 @@ entry:
}
; CHECK-LABEL: test_sraw_1:
-; CHECK: vpsraw $0, %ymm0, %ymm0
+; CHECK-NOT: vpsraw $0, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @test_sraw_2(<16 x i16> %InVec) {
@@ -116,7 +116,7 @@ entry:
define <16 x i16> @test_sraw_3(<16 x i16> %InVec) {
entry:
- %shl = ashr <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = ashr <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <16 x i16> %shl
}
@@ -131,7 +131,7 @@ entry:
}
; CHECK-LABEL: test_srad_1:
-; CHECK: vpsrad $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrad $0, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_srad_2(<8 x i32> %InVec) {
@@ -146,7 +146,7 @@ entry:
define <8 x i32> @test_srad_3(<8 x i32> %InVec) {
entry:
- %shl = ashr <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+ %shl = ashr <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
ret <8 x i32> %shl
}
@@ -163,7 +163,7 @@ entry:
}
; CHECK-LABEL: test_srlw_1:
-; CHECK: vpsrlw $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrlw $0, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @test_srlw_2(<16 x i16> %InVec) {
@@ -178,12 +178,12 @@ entry:
define <16 x i16> @test_srlw_3(<16 x i16> %InVec) {
entry:
- %shl = lshr <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = lshr <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <16 x i16> %shl
}
; CHECK-LABEL: test_srlw_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrlw $15, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_srld_1(<8 x i32> %InVec) {
@@ -193,7 +193,7 @@ entry:
}
; CHECK-LABEL: test_srld_1:
-; CHECK: vpsrld $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrld $0, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_srld_2(<8 x i32> %InVec) {
@@ -208,12 +208,12 @@ entry:
define <8 x i32> @test_srld_3(<8 x i32> %InVec) {
entry:
- %shl = lshr <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+ %shl = lshr <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
ret <8 x i32> %shl
}
; CHECK-LABEL: test_srld_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrld $31, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_srlq_1(<4 x i64> %InVec) {
@@ -223,7 +223,7 @@ entry:
}
; CHECK-LABEL: test_srlq_1:
-; CHECK: vpsrlq $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrlq $0, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_srlq_2(<4 x i64> %InVec) {
@@ -238,10 +238,21 @@ entry:
define <4 x i64> @test_srlq_3(<4 x i64> %InVec) {
entry:
- %shl = lshr <4 x i64> %InVec, <i64 64, i64 64, i64 64, i64 64>
+ %shl = lshr <4 x i64> %InVec, <i64 63, i64 63, i64 63, i64 63>
ret <4 x i64> %shl
}
; CHECK-LABEL: test_srlq_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrlq $63, %ymm0, %ymm0
; CHECK: ret
+
+; CHECK-LABEL: @srl_trunc_and_v4i64
+; CHECK: vpand
+; CHECK-NEXT: vpsrlvd
+; CHECK: ret
+define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
+ %and = and <4 x i64> %y, <i64 8, i64 8, i64 8, i64 8>
+ %trunc = trunc <4 x i64> %and to <4 x i32>
+ %sra = lshr <4 x i32> %x, %trunc
+ ret <4 x i32> %sra
+}