author    | Pirama Arumuga Nainar <pirama@google.com>                    | 2015-04-10 22:08:18 +0000
committer | Android Git Automerger <android-git-automerger@android.com> | 2015-04-10 22:08:18 +0000
commit    | 13a7db5b9c4f5e543d037be68ec3428216bfd550 (patch)
tree      | 1b2c9792582e12f5af0b1512e3094425f0dc0df9 /test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
parent    | 0eb46f5d1e06a4284663d636a74b06adc3a161d7 (diff)
parent    | 31195f0bdca6ee2a5e72d07edf13e1d81206d949 (diff)
am 31195f0b: Merge "Update aosp/master llvm for rebase to r233350"
* commit '31195f0bdca6ee2a5e72d07edf13e1d81206d949':
Update aosp/master llvm for rebase to r233350
Diffstat (limited to 'test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll')
-rw-r--r-- | test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll | 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll b/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
index 7301b7c..6bd6a50 100644
--- a/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
+++ b/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
@@ -3,7 +3,7 @@
 define <16 x i16> @test_lvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
 ; CHECK-LABEL: test_lvm_x86_avx2_pmovsxbw
 ; CHECK: vpmovsxbw (%rdi), %ymm0
-  %1 = load <16 x i8>* %a, align 1
+  %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %1)
   ret <16 x i16> %2
 }
@@ -11,7 +11,7 @@ define <16 x i16> @test_lvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
 define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
 ; CHECK: vpmovsxbd (%rdi), %ymm0
-  %1 = load <16 x i8>* %a, align 1
+  %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %1)
   ret <8 x i32> %2
 }
@@ -19,7 +19,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
 define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
 ; CHECK: vpmovsxbq (%rdi), %ymm0
-  %1 = load <16 x i8>* %a, align 1
+  %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %1)
   ret <4 x i64> %2
 }
@@ -27,7 +27,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
 define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
 ; CHECK: vpmovsxwd (%rdi), %ymm0
-  %1 = load <8 x i16>* %a, align 1
+  %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %1)
   ret <8 x i32> %2
 }
@@ -35,7 +35,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
 define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
 ; CHECK: vpmovsxwq (%rdi), %ymm0
-  %1 = load <8 x i16>* %a, align 1
+  %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %1)
   ret <4 x i64> %2
 }
@@ -43,7 +43,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
 define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
 ; CHECK: vpmovsxdq (%rdi), %ymm0
-  %1 = load <4 x i32>* %a, align 1
+  %1 = load <4 x i32>, <4 x i32>* %a, align 1
   %2 = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %1)
   ret <4 x i64> %2
 }
@@ -51,7 +51,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
 define <16 x i16> @test_lvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
 ; CHECK-LABEL: test_lvm_x86_avx2_pmovzxbw
 ; CHECK: vpmovzxbw (%rdi), %ymm0
-  %1 = load <16 x i8>* %a, align 1
+  %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %1)
   ret <16 x i16> %2
 }
@@ -59,7 +59,7 @@ define <16 x i16> @test_lvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
 define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
 ; CHECK: vpmovzxbd (%rdi), %ymm0
-  %1 = load <16 x i8>* %a, align 1
+  %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %1)
   ret <8 x i32> %2
 }
@@ -67,7 +67,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
 define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
 ; CHECK: vpmovzxbq (%rdi), %ymm0
-  %1 = load <16 x i8>* %a, align 1
+  %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %1)
   ret <4 x i64> %2
 }
@@ -75,7 +75,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
 define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
 ; CHECK: vpmovzxwd (%rdi), %ymm0
-  %1 = load <8 x i16>* %a, align 1
+  %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %1)
   ret <8 x i32> %2
 }
@@ -83,7 +83,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
 define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
 ; CHECK: vpmovzxwq (%rdi), %ymm0
-  %1 = load <8 x i16>* %a, align 1
+  %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %1)
   ret <4 x i64> %2
 }
@@ -91,7 +91,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
 define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
 ; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
 ; CHECK: vpmovzxdq (%rdi), %ymm0
-  %1 = load <4 x i32>* %a, align 1
+  %1 = load <4 x i32>, <4 x i32>* %a, align 1
   %2 = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %1)
   ret <4 x i64> %2
 }
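Every hunk applies the same mechanical rewrite: after the rebase to r233350, LLVM IR's load instruction spells out the loaded type as an explicit first operand instead of inferring it from the pointer operand's pointee type. A minimal sketch of the old and new forms (the function @example_load and variable names here are illustrative, not taken from this patch):

; Old form (pre-rebase): result type inferred from the pointer type.
;   %v = load <16 x i8>* %p, align 1
; New form: the loaded type is written explicitly, then the pointer operand.
; Illustrative example only; not part of the patch above.
define <16 x i8> @example_load(<16 x i8>* %p) {
  %v = load <16 x i8>, <16 x i8>* %p, align 1
  ret <16 x i8> %v
}

Making the result type explicit means it no longer has to be recovered from the pointer operand, which is groundwork for LLVM's move toward opaque pointer types; hence every load in this test file changes while the checked vpmovsx/vpmovzx codegen stays identical.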