path: root/test/CodeGen/ARM/vicmp.ll
author: Pirama Arumuga Nainar <pirama@google.com> 2015-04-10 22:08:18 +0000
committer: Android Git Automerger <android-git-automerger@android.com> 2015-04-10 22:08:18 +0000
commit: 13a7db5b9c4f5e543d037be68ec3428216bfd550 (patch)
tree: 1b2c9792582e12f5af0b1512e3094425f0dc0df9 /test/CodeGen/ARM/vicmp.ll
parent: 0eb46f5d1e06a4284663d636a74b06adc3a161d7 (diff)
parent: 31195f0bdca6ee2a5e72d07edf13e1d81206d949 (diff)
am 31195f0b: Merge "Update aosp/master llvm for rebase to r233350"
* commit '31195f0bdca6ee2a5e72d07edf13e1d81206d949': Update aosp/master llvm for rebase to r233350
Diffstat (limited to 'test/CodeGen/ARM/vicmp.ll')
-rw-r--r--  test/CodeGen/ARM/vicmp.ll  40
1 file changed, 20 insertions(+), 20 deletions(-)
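
Every hunk in this diff makes the same mechanical update: the result type of each load is now written explicitly before the pointer operand, so `load <ty>* %p` becomes `load <ty>, <ty>* %p`, matching LLVM's move to an explicit result type on the load instruction. As a minimal sketch (a hypothetical stand-alone function, not part of vicmp.ll) of the old and new spellings:

; Hypothetical example illustrating the syntax change applied throughout this diff.
define <8 x i8> @load_example(<8 x i8>* %A) nounwind {
  ; old form: %tmp = load <8 x i8>* %A
  ; new form: the result type is stated explicitly, then the pointer operand
  %tmp = load <8 x i8>, <8 x i8>* %A
  ret <8 x i8> %tmp
}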
diff --git a/test/CodeGen/ARM/vicmp.ll b/test/CodeGen/ARM/vicmp.ll
index bebb320..21b104a 100644
--- a/test/CodeGen/ARM/vicmp.ll
+++ b/test/CodeGen/ARM/vicmp.ll
@@ -10,8 +10,8 @@ define <8 x i8> @vcnei8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vcnei8:
;CHECK: vceq.i8
;CHECK-NEXT: vmvn
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = icmp ne <8 x i8> %tmp1, %tmp2
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
@@ -21,8 +21,8 @@ define <4 x i16> @vcnei16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vcnei16:
;CHECK: vceq.i16
;CHECK-NEXT: vmvn
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = icmp ne <4 x i16> %tmp1, %tmp2
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@@ -32,8 +32,8 @@ define <2 x i32> @vcnei32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vcnei32:
;CHECK: vceq.i32
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = icmp ne <2 x i32> %tmp1, %tmp2
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@@ -43,8 +43,8 @@ define <16 x i8> @vcneQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vcneQi8:
;CHECK: vceq.i8
;CHECK-NEXT: vmvn
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = icmp ne <16 x i8> %tmp1, %tmp2
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@@ -54,8 +54,8 @@ define <8 x i16> @vcneQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vcneQi16:
;CHECK: vceq.i16
;CHECK-NEXT: vmvn
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = icmp ne <8 x i16> %tmp1, %tmp2
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@@ -65,8 +65,8 @@ define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vcneQi32:
;CHECK: vceq.i32
;CHECK-NEXT: vmvn
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = icmp ne <4 x i32> %tmp1, %tmp2
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@@ -75,8 +75,8 @@ define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vcltQs8:
;CHECK: vcgt.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = icmp slt <16 x i8> %tmp1, %tmp2
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@@ -85,8 +85,8 @@ define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vcles16:
;CHECK: vcge.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = icmp sle <4 x i16> %tmp1, %tmp2
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@@ -95,8 +95,8 @@ define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vcltu16:
;CHECK: vcgt.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = icmp ult <4 x i16> %tmp1, %tmp2
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@@ -105,8 +105,8 @@ define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <4 x i32> @vcleQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vcleQu32:
;CHECK: vcge.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = icmp ule <4 x i32> %tmp1, %tmp2
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4