diff options
Diffstat (limited to 'test/CodeGen/R600/xor.ll')
-rw-r--r-- | test/CodeGen/R600/xor.ll | 38 |
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll index 1526e28..089db59 100644 --- a/test/CodeGen/R600/xor.ll +++ b/test/CodeGen/R600/xor.ll @@ -11,8 +11,8 @@ ; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}} define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) { - %a = load <2 x i32> addrspace(1) * %in0 - %b = load <2 x i32> addrspace(1) * %in1 + %a = load <2 x i32>, <2 x i32> addrspace(1) * %in0 + %b = load <2 x i32>, <2 x i32> addrspace(1) * %in1 %result = xor <2 x i32> %a, %b store <2 x i32> %result, <2 x i32> addrspace(1)* %out ret void @@ -30,8 +30,8 @@ define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in ; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}} define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) { - %a = load <4 x i32> addrspace(1) * %in0 - %b = load <4 x i32> addrspace(1) * %in1 + %a = load <4 x i32>, <4 x i32> addrspace(1) * %in0 + %b = load <4 x i32>, <4 x i32> addrspace(1) * %in1 %result = xor <4 x i32> %a, %b store <4 x i32> %result, <4 x i32> addrspace(1)* %out ret void @@ -40,15 +40,15 @@ define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in ; FUNC-LABEL: {{^}}xor_i1: ; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}} -; SI-DAG: v_cmp_ge_f32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 0 -; SI-DAG: v_cmp_ge_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 1.0 +; SI-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 0, {{v[0-9]+}} +; SI-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1.0, {{v[0-9]+}} ; SI: s_xor_b64 [[XOR:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]] ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, [[XOR]] ; SI: buffer_store_dword [[RESULT]] ; SI: s_endpgm define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) { - %a = load float addrspace(1) * %in0 - %b = load float addrspace(1) * 
%in1 + %a = load float, float addrspace(1) * %in0 + %b = load float, float addrspace(1) * %in1 %acmp = fcmp oge float %a, 0.000000e+00 %bcmp = fcmp oge float %b, 1.000000e+00 %xor = xor i1 %acmp, %bcmp @@ -64,8 +64,8 @@ define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float ad ; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]] ; SI: buffer_store_byte [[RESULT]] define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) { - %a = load i1 addrspace(1)* %in0 - %b = load i1 addrspace(1)* %in1 + %a = load i1, i1 addrspace(1)* %in0 + %b = load i1, i1 addrspace(1)* %in1 %xor = xor i1 %a, %b store i1 %xor, i1 addrspace(1)* %out ret void @@ -74,8 +74,8 @@ define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace ; FUNC-LABEL: {{^}}vector_xor_i32: ; SI: v_xor_b32_e32 define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) { - %a = load i32 addrspace(1)* %in0 - %b = load i32 addrspace(1)* %in1 + %a = load i32, i32 addrspace(1)* %in0 + %b = load i32, i32 addrspace(1)* %in1 %result = xor i32 %a, %b store i32 %result, i32 addrspace(1)* %out ret void @@ -100,8 +100,8 @@ define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) { ; FUNC-LABEL: {{^}}vector_not_i32: ; SI: v_not_b32 define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) { - %a = load i32 addrspace(1)* %in0 - %b = load i32 addrspace(1)* %in1 + %a = load i32, i32 addrspace(1)* %in0 + %b = load i32, i32 addrspace(1)* %in1 %result = xor i32 %a, -1 store i32 %result, i32 addrspace(1)* %out ret void @@ -112,8 +112,8 @@ define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 ; SI: v_xor_b32_e32 ; SI: s_endpgm define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) { - %a = load i64 addrspace(1)* %in0 - %b = load i64 addrspace(1)* %in1 + %a = load i64, i64 addrspace(1)* %in0 + %b = load i64, 
i64 addrspace(1)* %in1 %result = xor i64 %a, %b store i64 %result, i64 addrspace(1)* %out ret void @@ -140,8 +140,8 @@ define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) { ; SI: v_not_b32 ; SI: v_not_b32 define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) { - %a = load i64 addrspace(1)* %in0 - %b = load i64 addrspace(1)* %in1 + %a = load i64, i64 addrspace(1)* %in0 + %b = load i64, i64 addrspace(1)* %in1 %result = xor i64 %a, -1 store i64 %result, i64 addrspace(1)* %out ret void @@ -163,7 +163,7 @@ if: br label %endif else: - %2 = load i64 addrspace(1)* %in + %2 = load i64, i64 addrspace(1)* %in br label %endif endif: |